From 4223430d9168223029c7639149025c79e69b4f37 Mon Sep 17 00:00:00 2001 From: ahmad Date: Tue, 4 Dec 2018 16:16:31 -0800 Subject: [PATCH] fixed adding file problem --- .../demo/dependencies/libcds-2.3.2/.gitignore | 20 + .../dependencies/libcds-2.3.2/.travis.yml | 265 + .../dependencies/libcds-2.3.2/CMakeLists.txt | 255 + .../demo/dependencies/libcds-2.3.2/LICENSE | 24 + .../libcds-2.3.2/build-release/CMakeCache.txt | 501 ++ .../CMakeFiles/3.5.1/CMakeCCompiler.cmake | 67 + .../CMakeFiles/3.5.1/CMakeCXXCompiler.cmake | 68 + .../3.5.1/CMakeDetermineCompilerABI_C.bin | Bin 0 -> 8640 bytes .../3.5.1/CMakeDetermineCompilerABI_CXX.bin | Bin 0 -> 8656 bytes .../CMakeFiles/3.5.1/CMakeSystem.cmake | 15 + .../3.5.1/CompilerIdC/CMakeCCompilerId.c | 544 ++ .../CMakeFiles/3.5.1/CompilerIdC/a.out | Bin 0 -> 8800 bytes .../CompilerIdCXX/CMakeCXXCompilerId.cpp | 533 ++ .../CMakeFiles/3.5.1/CompilerIdCXX/a.out | Bin 0 -> 8808 bytes .../CMakeDirectoryInformation.cmake | 16 + .../build-release/CMakeFiles/CMakeError.log | 59 + .../build-release/CMakeFiles/CMakeOutput.log | 608 ++ .../cmake/LibCDS/LibCDSConfig-release.cmake | 29 + .../lib/cmake/LibCDS/LibCDSConfig.cmake | 102 + .../build-release/CMakeFiles/Makefile.cmake | 134 + .../build-release/CMakeFiles/Makefile2 | 145 + .../CMakeFiles/TargetDirectories.txt | 10 + .../CMakeFiles/cds-s.dir/CXX.includecache | 888 +++ .../CMakeFiles/cds-s.dir/DependInfo.cmake | 31 + .../CMakeFiles/cds-s.dir/build.make | 357 + .../CMakeFiles/cds-s.dir/cmake_clean.cmake | 19 + .../cds-s.dir/cmake_clean_target.cmake | 3 + .../CMakeFiles/cds-s.dir/depend.internal | 1122 +++ .../CMakeFiles/cds-s.dir/depend.make | 1122 +++ .../CMakeFiles/cds-s.dir/flags.make | 10 + .../CMakeFiles/cds-s.dir/link.txt | 2 + .../CMakeFiles/cds-s.dir/progress.make | 12 + .../CMakeFiles/cds-s.dir/src/dhp.cpp.o | Bin 0 -> 26688 bytes .../CMakeFiles/cds-s.dir/src/dllmain.cpp.o | Bin 0 -> 952 bytes .../CMakeFiles/cds-s.dir/src/hp.cpp.o | Bin 0 -> 21432 bytes 
.../CMakeFiles/cds-s.dir/src/init.cpp.o | Bin 0 -> 2384 bytes .../cds-s.dir/src/thread_data.cpp.o | Bin 0 -> 4160 bytes .../cds-s.dir/src/topology_hpux.cpp.o | Bin 0 -> 952 bytes .../cds-s.dir/src/topology_linux.cpp.o | Bin 0 -> 1752 bytes .../cds-s.dir/src/topology_osx.cpp.o | Bin 0 -> 952 bytes .../CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o | Bin 0 -> 1264 bytes .../CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o | Bin 0 -> 1784 bytes .../CMakeFiles/cds.dir/CXX.includecache | 888 +++ .../CMakeFiles/cds.dir/DependInfo.cmake | 37 + .../CMakeFiles/cds.dir/build.make | 360 + .../CMakeFiles/cds.dir/cmake_clean.cmake | 20 + .../CMakeFiles/cds.dir/depend.internal | 1122 +++ .../CMakeFiles/cds.dir/depend.make | 1122 +++ .../CMakeFiles/cds.dir/flags.make | 10 + .../build-release/CMakeFiles/cds.dir/link.txt | 1 + .../CMakeFiles/cds.dir/progress.make | 12 + .../CMakeFiles/cds.dir/src/dhp.cpp.o | Bin 0 -> 25424 bytes .../CMakeFiles/cds.dir/src/dllmain.cpp.o | Bin 0 -> 952 bytes .../CMakeFiles/cds.dir/src/hp.cpp.o | Bin 0 -> 21536 bytes .../CMakeFiles/cds.dir/src/init.cpp.o | Bin 0 -> 2384 bytes .../CMakeFiles/cds.dir/src/thread_data.cpp.o | Bin 0 -> 4240 bytes .../cds.dir/src/topology_hpux.cpp.o | Bin 0 -> 952 bytes .../cds.dir/src/topology_linux.cpp.o | Bin 0 -> 1792 bytes .../CMakeFiles/cds.dir/src/topology_osx.cpp.o | Bin 0 -> 952 bytes .../CMakeFiles/cds.dir/src/urcu_gp.cpp.o | Bin 0 -> 1264 bytes .../CMakeFiles/cds.dir/src/urcu_sh.cpp.o | Bin 0 -> 1840 bytes .../CMakeFiles/cmake.check_cache | 1 + .../CMakeFiles/feature_tests.bin | Bin 0 -> 12696 bytes .../build-release/CMakeFiles/feature_tests.c | 34 + .../CMakeFiles/feature_tests.cxx | 405 + .../build-release/CMakeFiles/progress.marks | 1 + .../build-release/CPackConfig.cmake | 108 + .../build-release/CPackSourceConfig.cmake | 115 + .../libcds-2.3.2/build-release/Makefile | 564 ++ .../libcds-2.3.2/build-release/arch.c | 46 + .../libcds-2.3.2/build-release/bin/libcds-s.a | Bin 0 -> 67134 bytes 
.../libcds-2.3.2/build-release/bin/libcds.so | 1 + .../build-release/bin/libcds.so.2.3.2 | Bin 0 -> 51280 bytes .../build-release/cmake_install.cmake | 103 + .../libcds-2.3.2/build/CI/VASEx-CI-2/cds-libs | 180 + .../libcds-2.3.2/build/CI/VASEx-CI-2/ci-build | 73 + .../libcds-2.3.2/build/CI/VASEx-CI-2/ci-env | 31 + .../libcds-2.3.2/build/CI/VASEx-CI/cds-libs | 162 + .../libcds-2.3.2/build/CI/VASEx-CI/ci-build | 71 + .../libcds-2.3.2/build/CI/VASEx-CI/ci-env | 32 + .../libcds-2.3.2/build/CI/cmake-gen | 111 + .../libcds-2.3.2/build/CI/gen-all | 36 + .../build/CI/travis-ci/install.sh | 31 + .../libcds-2.3.2/build/CI/travis-ci/run.sh | 23 + .../libcds-2.3.2/build/cmake/TargetArch.cmake | 141 + .../libcds-2.3.2/build/cmake/description.txt | 1 + .../build/cmake/post_install_script.sh | 1 + .../build/cmake/post_uninstall_script.sh | 1 + .../libcds-2.3.2/build/cmake/readme.md | 104 + .../libcds-2.3.2/cds/algo/atomic.h | 521 ++ .../libcds-2.3.2/cds/algo/backoff_strategy.h | 464 ++ .../dependencies/libcds-2.3.2/cds/algo/base.h | 43 + .../libcds-2.3.2/cds/algo/bit_reversal.h | 184 + .../libcds-2.3.2/cds/algo/bitop.h | 168 + .../libcds-2.3.2/cds/algo/elimination.h | 86 + .../libcds-2.3.2/cds/algo/elimination_opt.h | 65 + .../libcds-2.3.2/cds/algo/elimination_tls.h | 62 + .../libcds-2.3.2/cds/algo/flat_combining.h | 36 + .../cds/algo/flat_combining/defs.h | 92 + .../cds/algo/flat_combining/kernel.h | 900 +++ .../cds/algo/flat_combining/wait_strategy.h | 442 ++ .../libcds-2.3.2/cds/algo/int_algo.h | 172 + .../libcds-2.3.2/cds/algo/split_bitstring.h | 470 ++ .../libcds-2.3.2/cds/compiler/backoff.h | 64 + .../libcds-2.3.2/cds/compiler/bitop.h | 68 + .../libcds-2.3.2/cds/compiler/clang/defs.h | 152 + .../libcds-2.3.2/cds/compiler/cxx11_atomic.h | 2232 ++++++ .../libcds-2.3.2/cds/compiler/defs.h | 76 + .../libcds-2.3.2/cds/compiler/feature_tsan.h | 116 + .../cds/compiler/gcc/amd64/backoff.h | 60 + .../cds/compiler/gcc/amd64/bitop.h | 184 + .../cds/compiler/gcc/amd64/cxx11_atomic.h | 
228 + .../cds/compiler/gcc/arm7/backoff.h | 52 + .../cds/compiler/gcc/arm8/backoff.h | 52 + .../cds/compiler/gcc/compiler_barriers.h | 38 + .../cds/compiler/gcc/compiler_macro.h | 186 + .../libcds-2.3.2/cds/compiler/gcc/defs.h | 132 + .../cds/compiler/gcc/ia64/backoff.h | 59 + .../cds/compiler/gcc/ia64/bitop.h | 90 + .../cds/compiler/gcc/ia64/cxx11_atomic.h | 678 ++ .../cds/compiler/gcc/ppc64/backoff.h | 54 + .../cds/compiler/gcc/ppc64/bitop.h | 45 + .../cds/compiler/gcc/sparc/backoff.h | 54 + .../cds/compiler/gcc/sparc/bitop.h | 70 + .../cds/compiler/gcc/sparc/cxx11_atomic.h | 635 ++ .../cds/compiler/gcc/x86/backoff.h | 60 + .../libcds-2.3.2/cds/compiler/gcc/x86/bitop.h | 114 + .../cds/compiler/gcc/x86/cxx11_atomic.h | 210 + .../cds/compiler/gcc/x86/cxx11_atomic32.h | 502 ++ .../cds/compiler/icl/compiler_barriers.h | 55 + .../libcds-2.3.2/cds/compiler/icl/defs.h | 158 + .../cds/compiler/vc/amd64/backoff.h | 60 + .../cds/compiler/vc/amd64/bitop.h | 154 + .../cds/compiler/vc/amd64/cxx11_atomic.h | 609 ++ .../cds/compiler/vc/compiler_barriers.h | 57 + .../libcds-2.3.2/cds/compiler/vc/defs.h | 164 + .../cds/compiler/vc/x86/backoff.h | 60 + .../libcds-2.3.2/cds/compiler/vc/x86/bitop.h | 111 + .../cds/compiler/vc/x86/cxx11_atomic.h | 581 ++ .../libcds-2.3.2/cds/container/basket_queue.h | 481 ++ .../cds/container/bronson_avltree_map_rcu.h | 712 ++ .../libcds-2.3.2/cds/container/cuckoo_map.h | 772 ++ .../libcds-2.3.2/cds/container/cuckoo_set.h | 850 +++ .../libcds-2.3.2/cds/container/details/base.h | 103 + .../container/details/bronson_avltree_base.h | 528 ++ .../cds/container/details/cuckoo_base.h | 269 + .../container/details/ellen_bintree_base.h | 460 ++ .../container/details/feldman_hashmap_base.h | 391 + .../container/details/feldman_hashset_base.h | 230 + .../cds/container/details/guarded_ptr_cast.h | 58 + .../container/details/iterable_list_base.h | 152 + .../cds/container/details/lazy_list_base.h | 189 + .../container/details/make_iterable_kvlist.h | 103 + 
.../container/details/make_iterable_list.h | 82 + .../cds/container/details/make_lazy_kvlist.h | 160 + .../cds/container/details/make_lazy_list.h | 129 + .../container/details/make_michael_kvlist.h | 134 + .../cds/container/details/make_michael_list.h | 108 + .../container/details/make_skip_list_map.h | 149 + .../container/details/make_skip_list_set.h | 122 + .../container/details/make_split_list_set.h | 61 + .../make_split_list_set_iterable_list.h | 138 + .../details/make_split_list_set_lazy_list.h | 147 + .../make_split_list_set_michael_list.h | 141 + .../cds/container/details/michael_list_base.h | 159 + .../cds/container/details/michael_map_base.h | 66 + .../cds/container/details/michael_set_base.h | 66 + .../cds/container/details/skip_list_base.h | 356 + .../cds/container/details/split_list_base.h | 217 + .../cds/container/ellen_bintree_map_dhp.h | 37 + .../cds/container/ellen_bintree_map_hp.h | 37 + .../cds/container/ellen_bintree_map_rcu.h | 603 ++ .../cds/container/ellen_bintree_set_dhp.h | 37 + .../cds/container/ellen_bintree_set_hp.h | 37 + .../cds/container/ellen_bintree_set_rcu.h | 653 ++ .../libcds-2.3.2/cds/container/fcdeque.h | 607 ++ .../cds/container/fcpriority_queue.h | 350 + .../libcds-2.3.2/cds/container/fcqueue.h | 442 ++ .../libcds-2.3.2/cds/container/fcstack.h | 427 ++ .../cds/container/feldman_hashmap_dhp.h | 37 + .../cds/container/feldman_hashmap_hp.h | 37 + .../cds/container/feldman_hashmap_rcu.h | 825 ++ .../cds/container/feldman_hashset_dhp.h | 37 + .../cds/container/feldman_hashset_hp.h | 37 + .../cds/container/feldman_hashset_rcu.h | 598 ++ .../container/impl/bronson_avltree_map_rcu.h | 2248 ++++++ .../cds/container/impl/ellen_bintree_map.h | 581 ++ .../cds/container/impl/ellen_bintree_set.h | 629 ++ .../cds/container/impl/feldman_hashmap.h | 847 +++ .../cds/container/impl/feldman_hashset.h | 610 ++ .../cds/container/impl/iterable_kvlist.h | 751 ++ .../cds/container/impl/iterable_list.h | 881 +++ .../cds/container/impl/lazy_kvlist.h | 
885 +++ .../cds/container/impl/lazy_list.h | 868 +++ .../cds/container/impl/michael_kvlist.h | 886 +++ .../cds/container/impl/michael_list.h | 847 +++ .../cds/container/impl/skip_list_map.h | 703 ++ .../cds/container/impl/skip_list_set.h | 760 ++ .../cds/container/iterable_kvlist_dhp.h | 39 + .../cds/container/iterable_kvlist_hp.h | 39 + .../cds/container/iterable_list_dhp.h | 39 + .../cds/container/iterable_list_hp.h | 39 + .../cds/container/lazy_kvlist_dhp.h | 39 + .../cds/container/lazy_kvlist_hp.h | 39 + .../cds/container/lazy_kvlist_nogc.h | 642 ++ .../cds/container/lazy_kvlist_rcu.h | 905 +++ .../cds/container/lazy_list_dhp.h | 39 + .../libcds-2.3.2/cds/container/lazy_list_hp.h | 39 + .../cds/container/lazy_list_nogc.h | 526 ++ .../cds/container/lazy_list_rcu.h | 891 +++ .../cds/container/michael_kvlist_dhp.h | 39 + .../cds/container/michael_kvlist_hp.h | 39 + .../cds/container/michael_kvlist_nogc.h | 619 ++ .../cds/container/michael_kvlist_rcu.h | 954 +++ .../cds/container/michael_list_dhp.h | 39 + .../cds/container/michael_list_hp.h | 39 + .../cds/container/michael_list_nogc.h | 510 ++ .../cds/container/michael_list_rcu.h | 904 +++ .../libcds-2.3.2/cds/container/michael_map.h | 1007 +++ .../cds/container/michael_map_nogc.h | 607 ++ .../cds/container/michael_map_rcu.h | 872 +++ .../libcds-2.3.2/cds/container/michael_set.h | 994 +++ .../cds/container/michael_set_nogc.h | 457 ++ .../cds/container/michael_set_rcu.h | 824 ++ .../libcds-2.3.2/cds/container/moir_queue.h | 323 + .../cds/container/mspriority_queue.h | 344 + .../libcds-2.3.2/cds/container/msqueue.h | 435 ++ .../cds/container/optimistic_queue.h | 432 ++ .../libcds-2.3.2/cds/container/rwqueue.h | 415 ++ .../cds/container/segmented_queue.h | 442 ++ .../cds/container/skip_list_map_dhp.h | 39 + .../cds/container/skip_list_map_hp.h | 39 + .../cds/container/skip_list_map_nogc.h | 394 + .../cds/container/skip_list_map_rcu.h | 706 ++ .../cds/container/skip_list_set_dhp.h | 39 + 
.../cds/container/skip_list_set_hp.h | 39 + .../cds/container/skip_list_set_nogc.h | 444 ++ .../cds/container/skip_list_set_rcu.h | 777 ++ .../cds/container/split_list_map.h | 807 ++ .../cds/container/split_list_map_nogc.h | 389 + .../cds/container/split_list_map_rcu.h | 720 ++ .../cds/container/split_list_set.h | 1004 +++ .../cds/container/split_list_set_nogc.h | 458 ++ .../cds/container/split_list_set_rcu.h | 1005 +++ .../libcds-2.3.2/cds/container/striped_map.h | 927 +++ .../container/striped_map/boost_flat_map.h | 80 + .../cds/container/striped_map/boost_list.h | 298 + .../cds/container/striped_map/boost_map.h | 80 + .../cds/container/striped_map/boost_slist.h | 308 + .../striped_map/boost_unordered_map.h | 76 + .../cds/container/striped_map/std_hash_map.h | 217 + .../cds/container/striped_map/std_list.h | 330 + .../cds/container/striped_map/std_map.h | 216 + .../libcds-2.3.2/cds/container/striped_set.h | 975 +++ .../cds/container/striped_set/adapter.h | 535 ++ .../container/striped_set/boost_flat_set.h | 83 + .../cds/container/striped_set/boost_list.h | 291 + .../cds/container/striped_set/boost_set.h | 82 + .../cds/container/striped_set/boost_slist.h | 302 + .../striped_set/boost_stable_vector.h | 284 + .../striped_set/boost_unordered_set.h | 73 + .../cds/container/striped_set/boost_vector.h | 286 + .../cds/container/striped_set/std_hash_set.h | 201 + .../cds/container/striped_set/std_list.h | 327 + .../cds/container/striped_set/std_set.h | 201 + .../cds/container/striped_set/std_vector.h | 282 + .../cds/container/treiber_stack.h | 409 + .../cds/container/vyukov_mpmc_cycle_queue.h | 523 ++ .../cds/container/weak_ringbuffer.h | 1010 +++ .../cds/details/aligned_allocator.h | 126 + .../libcds-2.3.2/cds/details/aligned_type.h | 108 + .../libcds-2.3.2/cds/details/allocator.h | 207 + .../cds/details/binary_functor_wrapper.h | 95 + .../cds/details/bit_reverse_counter.h | 107 + .../libcds-2.3.2/cds/details/bitop_generic.h | 300 + 
.../libcds-2.3.2/cds/details/bounded_array.h | 142 + .../cds/details/bounded_container.h | 43 + .../libcds-2.3.2/cds/details/defs.h | 388 + .../libcds-2.3.2/cds/details/is_aligned.h | 64 + .../libcds-2.3.2/cds/details/lib.h | 54 + .../cds/details/make_const_type.h | 58 + .../libcds-2.3.2/cds/details/marked_ptr.h | 396 + .../libcds-2.3.2/cds/details/size_t_cast.h | 63 + .../libcds-2.3.2/cds/details/static_functor.h | 49 + .../cds/details/throw_exception.h | 88 + .../libcds-2.3.2/cds/details/trivial_assign.h | 50 + .../libcds-2.3.2/cds/details/type_padding.h | 87 + .../libcds-2.3.2/cds/gc/default_gc.h | 44 + .../libcds-2.3.2/cds/gc/details/hp_common.h | 184 + .../libcds-2.3.2/cds/gc/details/retired_ptr.h | 148 + .../dependencies/libcds-2.3.2/cds/gc/dhp.h | 1539 ++++ .../dependencies/libcds-2.3.2/cds/gc/hp.h | 1535 ++++ .../dependencies/libcds-2.3.2/cds/gc/nogc.h | 57 + .../demo/dependencies/libcds-2.3.2/cds/init.h | 97 + .../libcds-2.3.2/cds/intrusive/basket_queue.h | 812 ++ .../libcds-2.3.2/cds/intrusive/cuckoo_set.h | 2829 +++++++ .../libcds-2.3.2/cds/intrusive/details/base.h | 338 + .../intrusive/details/ellen_bintree_base.h | 762 ++ .../intrusive/details/feldman_hashset_base.h | 699 ++ .../intrusive/details/iterable_list_base.h | 292 + .../cds/intrusive/details/lazy_list_base.h | 474 ++ .../cds/intrusive/details/michael_list_base.h | 439 ++ .../cds/intrusive/details/michael_set_base.h | 236 + .../cds/intrusive/details/node_traits.h | 195 + .../cds/intrusive/details/raw_ptr_disposer.h | 103 + .../intrusive/details/single_link_struct.h | 196 + .../cds/intrusive/details/skip_list_base.h | 784 ++ .../cds/intrusive/details/split_list_base.h | 1326 ++++ .../cds/intrusive/ellen_bintree_dhp.h | 37 + .../cds/intrusive/ellen_bintree_hp.h | 37 + .../cds/intrusive/ellen_bintree_rcu.h | 2015 +++++ .../libcds-2.3.2/cds/intrusive/fcqueue.h | 411 + .../libcds-2.3.2/cds/intrusive/fcstack.h | 384 + .../cds/intrusive/feldman_hashset_dhp.h | 37 + 
.../cds/intrusive/feldman_hashset_hp.h | 37 + .../cds/intrusive/feldman_hashset_rcu.h | 1243 ++++ .../libcds-2.3.2/cds/intrusive/free_list.h | 246 + .../cds/intrusive/free_list_cached.h | 192 + .../cds/intrusive/free_list_selector.h | 54 + .../cds/intrusive/free_list_tagged.h | 205 + .../cds/intrusive/impl/ellen_bintree.h | 1597 ++++ .../cds/intrusive/impl/feldman_hashset.h | 1263 ++++ .../cds/intrusive/impl/iterable_list.h | 1467 ++++ .../cds/intrusive/impl/lazy_list.h | 1273 ++++ .../cds/intrusive/impl/michael_list.h | 1256 ++++ .../cds/intrusive/impl/skip_list.h | 1791 +++++ .../cds/intrusive/iterable_list_dhp.h | 37 + .../cds/intrusive/iterable_list_hp.h | 37 + .../cds/intrusive/lazy_list_dhp.h | 37 + .../libcds-2.3.2/cds/intrusive/lazy_list_hp.h | 37 + .../cds/intrusive/lazy_list_nogc.h | 853 +++ .../cds/intrusive/lazy_list_rcu.h | 1303 ++++ .../cds/intrusive/michael_list_dhp.h | 37 + .../cds/intrusive/michael_list_hp.h | 37 + .../cds/intrusive/michael_list_nogc.h | 737 ++ .../cds/intrusive/michael_list_rcu.h | 1292 ++++ .../libcds-2.3.2/cds/intrusive/michael_set.h | 1014 +++ .../cds/intrusive/michael_set_nogc.h | 474 ++ .../cds/intrusive/michael_set_rcu.h | 772 ++ .../libcds-2.3.2/cds/intrusive/moir_queue.h | 196 + .../cds/intrusive/mspriority_queue.h | 543 ++ .../libcds-2.3.2/cds/intrusive/msqueue.h | 624 ++ .../cds/intrusive/optimistic_queue.h | 717 ++ .../libcds-2.3.2/cds/intrusive/options.h | 189 + .../cds/intrusive/segmented_queue.h | 725 ++ .../cds/intrusive/skip_list_dhp.h | 37 + .../libcds-2.3.2/cds/intrusive/skip_list_hp.h | 37 + .../cds/intrusive/skip_list_nogc.h | 994 +++ .../cds/intrusive/skip_list_rcu.h | 2076 ++++++ .../libcds-2.3.2/cds/intrusive/split_list.h | 1464 ++++ .../cds/intrusive/split_list_nogc.h | 743 ++ .../cds/intrusive/split_list_rcu.h | 1129 +++ .../libcds-2.3.2/cds/intrusive/striped_set.h | 909 +++ .../cds/intrusive/striped_set/adapter.h | 349 + .../cds/intrusive/striped_set/boost_avl_set.h | 65 + 
.../cds/intrusive/striped_set/boost_list.h | 244 + .../cds/intrusive/striped_set/boost_set.h | 65 + .../cds/intrusive/striped_set/boost_sg_set.h | 64 + .../cds/intrusive/striped_set/boost_slist.h | 260 + .../intrusive/striped_set/boost_splay_set.h | 66 + .../intrusive/striped_set/boost_treap_set.h | 64 + .../striped_set/boost_unordered_set.h | 236 + .../intrusive/striped_set/resizing_policy.h | 293 + .../intrusive/striped_set/striping_policy.h | 389 + .../cds/intrusive/treiber_stack.h | 862 +++ .../cds/intrusive/vyukov_mpmc_cycle_queue.h | 258 + .../libcds-2.3.2/cds/lock/array.h | 58 + .../libcds-2.3.2/cds/lock/spinlock.h | 84 + .../libcds-2.3.2/cds/memory/pool_allocator.h | 150 + .../cds/memory/vyukov_queue_pool.h | 549 ++ .../libcds-2.3.2/cds/opt/buffer.h | 576 ++ .../libcds-2.3.2/cds/opt/compare.h | 336 + .../dependencies/libcds-2.3.2/cds/opt/hash.h | 195 + .../libcds-2.3.2/cds/opt/options.h | 1219 +++ .../libcds-2.3.2/cds/opt/permutation.h | 329 + .../libcds-2.3.2/cds/opt/value_cleaner.h | 130 + .../libcds-2.3.2/cds/os/aix/alloc_aligned.h | 37 + .../libcds-2.3.2/cds/os/aix/timer.h | 116 + .../libcds-2.3.2/cds/os/aix/topology.h | 104 + .../libcds-2.3.2/cds/os/alloc_aligned.h | 222 + .../cds/os/details/fake_topology.h | 67 + .../cds/os/free_bsd/alloc_aligned.h | 37 + .../libcds-2.3.2/cds/os/free_bsd/timer.h | 120 + .../libcds-2.3.2/cds/os/free_bsd/topology.h | 103 + .../libcds-2.3.2/cds/os/hpux/alloc_aligned.h | 37 + .../libcds-2.3.2/cds/os/hpux/timer.h | 36 + .../libcds-2.3.2/cds/os/hpux/topology.h | 105 + .../libcds-2.3.2/cds/os/libc/alloc_aligned.h | 61 + .../libcds-2.3.2/cds/os/linux/alloc_aligned.h | 43 + .../libcds-2.3.2/cds/os/linux/timer.h | 114 + .../libcds-2.3.2/cds/os/linux/topology.h | 117 + .../libcds-2.3.2/cds/os/osx/timer.h | 127 + .../libcds-2.3.2/cds/os/osx/topology.h | 85 + .../libcds-2.3.2/cds/os/posix/alloc_aligned.h | 70 + .../libcds-2.3.2/cds/os/posix/fake_topology.h | 77 + .../libcds-2.3.2/cds/os/posix/thread.h | 58 + 
.../libcds-2.3.2/cds/os/posix/timer.h | 113 + .../libcds-2.3.2/cds/os/sunos/alloc_aligned.h | 37 + .../libcds-2.3.2/cds/os/sunos/timer.h | 95 + .../libcds-2.3.2/cds/os/sunos/topology.h | 85 + .../dependencies/libcds-2.3.2/cds/os/thread.h | 52 + .../dependencies/libcds-2.3.2/cds/os/timer.h | 59 + .../libcds-2.3.2/cds/os/topology.h | 56 + .../libcds-2.3.2/cds/os/win/alloc_aligned.h | 61 + .../libcds-2.3.2/cds/os/win/thread.h | 59 + .../libcds-2.3.2/cds/os/win/timer.h | 130 + .../libcds-2.3.2/cds/os/win/topology.h | 97 + .../libcds-2.3.2/cds/sync/injecting_monitor.h | 98 + .../libcds-2.3.2/cds/sync/lock_array.h | 353 + .../libcds-2.3.2/cds/sync/monitor.h | 146 + .../libcds-2.3.2/cds/sync/pool_monitor.h | 290 + .../libcds-2.3.2/cds/sync/spinlock.h | 407 + .../cds/threading/details/_common.h | 193 + .../cds/threading/details/auto_detect.h | 73 + .../cds/threading/details/cxx11.h | 36 + .../cds/threading/details/cxx11_manager.h | 139 + .../libcds-2.3.2/cds/threading/details/gcc.h | 36 + .../cds/threading/details/gcc_manager.h | 139 + .../libcds-2.3.2/cds/threading/details/msvc.h | 36 + .../cds/threading/details/msvc_manager.h | 138 + .../cds/threading/details/pthread.h | 36 + .../cds/threading/details/pthread_manager.h | 234 + .../cds/threading/details/wintls.h | 37 + .../cds/threading/details/wintls_manager.h | 231 + .../libcds-2.3.2/cds/threading/model.h | 82 + .../libcds-2.3.2/cds/urcu/details/base.h | 472 ++ .../cds/urcu/details/check_deadlock.h | 70 + .../libcds-2.3.2/cds/urcu/details/gp.h | 137 + .../libcds-2.3.2/cds/urcu/details/gp_decl.h | 209 + .../libcds-2.3.2/cds/urcu/details/gpb.h | 270 + .../libcds-2.3.2/cds/urcu/details/gpi.h | 210 + .../libcds-2.3.2/cds/urcu/details/gpt.h | 284 + .../libcds-2.3.2/cds/urcu/details/sh.h | 190 + .../libcds-2.3.2/cds/urcu/details/sh_decl.h | 231 + .../cds/urcu/details/sig_buffered.h | 285 + .../libcds-2.3.2/cds/urcu/dispose_thread.h | 226 + .../libcds-2.3.2/cds/urcu/exempt_ptr.h | 195 + 
.../libcds-2.3.2/cds/urcu/general_buffered.h | 191 + .../libcds-2.3.2/cds/urcu/general_instant.h | 179 + .../libcds-2.3.2/cds/urcu/general_threaded.h | 198 + .../libcds-2.3.2/cds/urcu/options.h | 93 + .../libcds-2.3.2/cds/urcu/raw_ptr.h | 297 + .../libcds-2.3.2/cds/urcu/signal_buffered.h | 205 + .../libcds-2.3.2/cds/user_setup/allocator.h | 70 + .../libcds-2.3.2/cds/user_setup/cache_line.h | 58 + .../libcds-2.3.2/cds/user_setup/threading.h | 52 + .../dependencies/libcds-2.3.2/cds/version.h | 46 + .../demo/dependencies/libcds-2.3.2/change.log | 378 + .../dependencies/libcds-2.3.2/conanfile.txt | 40 + .../libcds-2.3.2/doxygen/cds.doxy | 1660 +++++ .../libcds-2.3.2/doxygen/footer.html | 9 + .../doxygen/image/feldman_hashset.png | Bin 0 -> 64016 bytes .../libcds-2.3.2/doxygen/images.odp | Bin 0 -> 18624 bytes .../libcds-2.3.2/doxygen/index.html | 10 + .../projects/Win/build-msbuild.cmd | 14 + .../libcds-2.3.2/projects/Win/build-vc14.cmd | 2 + .../libcds-2.3.2/projects/Win/vc141/cds.sln | 1970 +++++ .../projects/Win/vc141/cds.vcxproj | 993 +++ .../projects/Win/vc141/cds.vcxproj.filters | 1265 ++++ .../projects/Win/vc141/gtest-deque.vcxproj | 386 + .../Win/vc141/gtest-deque.vcxproj.filters | 21 + .../Win/vc141/gtest-ilist-iterable.vcxproj | 409 + .../Win/vc141/gtest-ilist-lazy.vcxproj | 414 ++ .../Win/vc141/gtest-ilist-michael.vcxproj | 414 ++ .../Win/vc141/gtest-iset-feldman.vcxproj | 418 ++ .../vc141/gtest-iset-michael-iterable.vcxproj | 408 + .../Win/vc141/gtest-iset-michael-lazy.vcxproj | 436 ++ .../Win/vc141/gtest-iset-michael.vcxproj | 436 ++ .../Win/vc141/gtest-iset-skip.vcxproj | 425 ++ .../vc141/gtest-iset-split-iterable.vcxproj | 408 + .../Win/vc141/gtest-iset-split-lazy.vcxproj | 480 ++ .../vc141/gtest-iset-split-michael.vcxproj | 480 ++ .../Win/vc141/gtest-list-iterable.vcxproj | 404 + .../Win/vc141/gtest-list-lazy.vcxproj | 438 ++ .../Win/vc141/gtest-list-michael.vcxproj | 460 ++ .../Win/vc141/gtest-map-feldman.vcxproj | 415 ++ 
.../vc141/gtest-map-michael-iterable.vcxproj | 411 + .../Win/vc141/gtest-map-michael-lazy.vcxproj | 426 ++ .../Win/vc141/gtest-map-michael.vcxproj | 437 ++ .../projects/Win/vc141/gtest-map-skip.vcxproj | 440 ++ .../vc141/gtest-map-split-iterable.vcxproj | 407 + .../Win/vc141/gtest-map-split-lazy.vcxproj | 481 ++ .../Win/vc141/gtest-map-split-michael.vcxproj | 481 ++ .../projects/Win/vc141/gtest-misc.vcxproj | 413 + .../Win/vc141/gtest-misc.vcxproj.filters | 50 + .../projects/Win/vc141/gtest-pqueue.vcxproj | 399 + .../Win/vc141/gtest-pqueue.vcxproj.filters | 41 + .../projects/Win/vc141/gtest-queue.vcxproj | 427 ++ .../Win/vc141/gtest-queue.vcxproj.filters | 116 + .../Win/vc141/gtest-set-feldman.vcxproj | 417 ++ .../vc141/gtest-set-michael-iterable.vcxproj | 412 + .../Win/vc141/gtest-set-michael-lazy.vcxproj | 438 ++ .../Win/vc141/gtest-set-michael.vcxproj | 438 ++ .../projects/Win/vc141/gtest-set-skip.vcxproj | 439 ++ .../vc141/gtest-set-split-iterable.vcxproj | 407 + .../Win/vc141/gtest-set-split-lazy.vcxproj | 482 ++ .../Win/vc141/gtest-set-split-michael.vcxproj | 482 ++ .../projects/Win/vc141/gtest-stack.vcxproj | 400 + .../Win/vc141/gtest-stack.vcxproj.filters | 44 + .../Win/vc141/gtest-striped-map-boost.vcxproj | 400 + .../vc141/gtest-striped-map-cuckoo.vcxproj | 417 ++ .../Win/vc141/gtest-striped-map-std.vcxproj | 398 + .../Win/vc141/gtest-striped-set-boost.vcxproj | 411 + .../vc141/gtest-striped-set-cuckoo.vcxproj | 430 ++ .../Win/vc141/gtest-striped-set-std.vcxproj | 398 + .../Win/vc141/gtest-tree-bronson.vcxproj | 408 + .../Win/vc141/gtest-tree-ellen.vcxproj | 527 ++ .../vc141/gtest-tree-ellen.vcxproj.filters | 128 + .../Win/vc141/stress-framework.vcxproj | 465 ++ .../vc141/stress-framework.vcxproj.filters | 41 + .../Win/vc141/stress-freelist.vcxproj | 393 + .../Win/vc141/stress-map-del3.vcxproj | 427 ++ .../Win/vc141/stress-map-delodd.vcxproj | 427 ++ .../Win/vc141/stress-map-find_string.vcxproj | 451 ++ .../Win/vc141/stress-map-insdel-func.vcxproj | 428 
++ .../vc141/stress-map-insdel-item-int.vcxproj | 428 ++ .../vc141/stress-map-insdel-string.vcxproj | 440 ++ .../Win/vc141/stress-map-insdelfind.vcxproj | 434 ++ .../Win/vc141/stress-map-iter-erase.vcxproj | 401 + .../Win/vc141/stress-map-minmax.vcxproj | 401 + .../projects/Win/vc141/stress-pqueue.vcxproj | 409 + .../Win/vc141/stress-pqueue.vcxproj.filters | 35 + .../Win/vc141/stress-queue-bounded.vcxproj | 397 + .../Win/vc141/stress-queue-pop.vcxproj | 409 + .../Win/vc141/stress-queue-push.vcxproj | 409 + .../Win/vc141/stress-queue-pushpop.vcxproj | 410 + .../Win/vc141/stress-queue-random.vcxproj | 409 + .../Win/vc141/stress-set-del3.vcxproj | 426 ++ .../Win/vc141/stress-set-delodd.vcxproj | 431 ++ .../Win/vc141/stress-set-insdel_func.vcxproj | 427 ++ .../vc141/stress-set-insdel_string.vcxproj | 439 ++ .../Win/vc141/stress-set-insdelfind.vcxproj | 438 ++ .../Win/vc141/stress-set-iter-erase.vcxproj | 401 + .../Win/vc141/stress-set-iteration.vcxproj | 401 + .../Win/vc141/stress-spsc-queue.vcxproj | 398 + .../projects/Win/vc141/stress-stack.vcxproj | 400 + .../Win/vc141/stress-stack.vcxproj.filters | 41 + .../demo/dependencies/libcds-2.3.2/readme.md | 142 + .../dependencies/libcds-2.3.2/src/dhp.cpp | 547 ++ .../dependencies/libcds-2.3.2/src/dllmain.cpp | 255 + .../demo/dependencies/libcds-2.3.2/src/hp.cpp | 517 ++ .../dependencies/libcds-2.3.2/src/init.cpp | 98 + .../libcds-2.3.2/src/thread_data.cpp | 93 + .../libcds-2.3.2/src/topology_hpux.cpp | 107 + .../libcds-2.3.2/src/topology_linux.cpp | 79 + .../libcds-2.3.2/src/topology_osx.cpp | 53 + .../dependencies/libcds-2.3.2/src/urcu_gp.cpp | 39 + .../dependencies/libcds-2.3.2/src/urcu_sh.cpp | 51 + .../libcds-2.3.2/test/CMakeLists.txt | 92 + .../test/include/cds_test/check_size.h | 71 + .../libcds-2.3.2/test/include/cds_test/city.h | 116 + .../test/include/cds_test/citycrc.h | 43 + .../test/include/cds_test/ext_byteswap.h | 45 + .../test/include/cds_test/ext_gtest.h | 47 + .../test/include/cds_test/fc_hevy_value.h | 79 
+ .../test/include/cds_test/fixture.h | 67 + .../test/include/cds_test/hash_func.h | 139 + .../cds_test/stat_bronson_avltree_out.h | 90 + .../test/include/cds_test/stat_cuckoo_out.h | 116 + .../test/include/cds_test/stat_dhp_out.h | 87 + .../include/cds_test/stat_ellenbintree_out.h | 79 + .../cds_test/stat_feldman_hashset_out.h | 87 + .../cds_test/stat_flat_combining_out.h | 64 + .../test/include/cds_test/stat_hp_out.h | 78 + .../include/cds_test/stat_iterable_list_out.h | 75 + .../include/cds_test/stat_lazy_list_out.h | 71 + .../include/cds_test/stat_michael_list_out.h | 71 + .../test/include/cds_test/stat_skiplist_out.h | 91 + .../include/cds_test/stat_splitlist_out.h | 66 + .../include/cds_test/stat_sync_monitor_out.h | 73 + .../test/include/cds_test/stress_test.h | 215 + .../test/include/cds_test/thread.h | 323 + .../libcds-2.3.2/test/stress/CMakeLists.txt | 42 + .../libcds-2.3.2/test/stress/data/split.pl | 42 + .../test/stress/data/test-debug-gccfarm.conf | 257 + .../test/stress/data/test-debug.conf | 257 + .../stress/data/test-express-gccfarm.conf | 255 + .../test/stress/data/test-express-x86.conf | 256 + .../test/stress/data/test-express.conf | 255 + .../test/stress/data/test-gccfarm.conf | 256 + .../libcds-2.3.2/test/stress/data/test.conf | 256 + .../libcds-2.3.2/test/stress/data/text.txt | 6611 +++++++++++++++++ .../test/stress/framework/city.cpp | 631 ++ .../libcds-2.3.2/test/stress/framework/city.h | 1 + .../test/stress/framework/citycrc.h | 43 + .../test/stress/framework/config.cpp | 158 + .../ellen_bintree_update_desc_pool.cpp | 40 + .../ellen_bintree_update_desc_pool.h | 131 + .../test/stress/framework/stress_test.cpp | 107 + .../test/stress/freelist/CMakeLists.txt | 16 + .../test/stress/freelist/put_get.cpp | 189 + .../test/stress/freelist/put_get_single.cpp | 167 + .../test/stress/lock/win32_lock.h | 71 + .../libcds-2.3.2/test/stress/main.cpp | 125 + .../test/stress/map/CMakeLists.txt | 25 + .../test/stress/map/del3/CMakeLists.txt | 23 + 
.../test/stress/map/del3/map_del3.cpp | 130 + .../test/stress/map/del3/map_del3.h | 881 +++ .../map/del3/map_del3_bronsonavltree.cpp | 38 + .../test/stress/map/del3/map_del3_cuckoo.cpp | 38 + .../stress/map/del3/map_del3_ellentree.cpp | 38 + .../map/del3/map_del3_feldman_hashmap.cpp | 50 + .../test/stress/map/del3/map_del3_michael.cpp | 38 + .../test/stress/map/del3/map_del3_skip.cpp | 38 + .../test/stress/map/del3/map_del3_split.cpp | 39 + .../test/stress/map/delodd/CMakeLists.txt | 23 + .../test/stress/map/delodd/map_delodd.cpp | 131 + .../test/stress/map/delodd/map_delodd.h | 881 +++ .../map/delodd/map_delodd_bronsonavltree.cpp | 38 + .../stress/map/delodd/map_delodd_cuckoo.cpp | 38 + .../map/delodd/map_delodd_ellentree.cpp | 38 + .../map/delodd/map_delodd_feldman_hashmap.cpp | 50 + .../stress/map/delodd/map_delodd_michael.cpp | 38 + .../stress/map/delodd/map_delodd_skip.cpp | 38 + .../stress/map/delodd/map_delodd_split.cpp | 39 + .../stress/map/find_string/CMakeLists.txt | 25 + .../map/find_string/map_find_string.cpp | 209 + .../stress/map/find_string/map_find_string.h | 297 + .../map_find_string_bronsonavltree.cpp | 38 + .../find_string/map_find_string_cuckoo.cpp | 41 + .../find_string/map_find_string_ellentree.cpp | 38 + .../map_find_string_feldman_hashset.cpp | 42 + .../find_string/map_find_string_michael.cpp | 39 + .../map/find_string/map_find_string_skip.cpp | 39 + .../map/find_string/map_find_string_split.cpp | 40 + .../map/find_string/map_find_string_std.cpp | 39 + .../find_string/map_find_string_striped.cpp | 38 + .../stress/map/insdel_func/CMakeLists.txt | 24 + .../map/insdel_func/map_insdel_func.cpp | 135 + .../stress/map/insdel_func/map_insdel_func.h | 572 ++ .../map_insdel_func_bronsonavltree.cpp | 38 + .../insdel_func/map_insdel_func_cuckoo.cpp | 38 + .../insdel_func/map_insdel_func_ellentree.cpp | 38 + .../map_insdel_func_feldman_hashset.cpp | 38 + .../insdel_func/map_insdel_func_michael.cpp | 38 + .../map/insdel_func/map_insdel_func_skip.cpp | 38 
+ .../map/insdel_func/map_insdel_func_split.cpp | 39 + .../insdel_func/map_insdel_func_striped.cpp | 38 + .../stress/map/insdel_item_int/CMakeLists.txt | 24 + .../insdel_item_int/map_insdel_item_int.cpp | 120 + .../map/insdel_item_int/map_insdel_item_int.h | 303 + .../map_insdel_item_int_bronsonavltree.cpp | 38 + .../map_insdel_item_int_cuckoo.cpp | 38 + .../map_insdel_item_int_ellentree.cpp | 38 + .../map_insdel_item_int_feldman_hashset.cpp | 38 + .../map_insdel_item_int_michael.cpp | 38 + .../map_insdel_item_int_skip.cpp | 38 + .../map_insdel_item_int_split.cpp | 39 + .../map_insdel_item_int_striped.cpp | 38 + .../stress/map/insdel_string/CMakeLists.txt | 25 + .../map/insdel_string/map_insdel_string.cpp | 191 + .../map/insdel_string/map_insdel_string.h | 320 + .../map_insdel_string_bronsonavltree.cpp | 38 + .../map_insdel_string_cuckoo.cpp | 38 + .../map_insdel_string_ellentree.cpp | 38 + .../map_insdel_string_feldman_hashset.cpp | 42 + .../map_insdel_string_michael.cpp | 38 + .../insdel_string/map_insdel_string_skip.cpp | 38 + .../insdel_string/map_insdel_string_split.cpp | 39 + .../insdel_string/map_insdel_string_std.cpp | 38 + .../map_insdel_string_striped.cpp | 38 + .../test/stress/map/insdelfind/CMakeLists.txt | 45 + .../stress/map/insdelfind/map_insdelfind.cpp | 143 + .../stress/map/insdelfind/map_insdelfind.h | 262 + .../map_insdelfind_bronsonavltree.cpp | 38 + .../map/insdelfind/map_insdelfind_cuckoo.cpp | 38 + .../map_insdelfind_ellentree_hp.cpp | 38 + .../map_insdelfind_ellentree_rcu.cpp | 38 + .../map_insdelfind_feldman_hashset_hp.cpp | 38 + .../map_insdelfind_feldman_hashset_rcu.cpp | 38 + .../insdelfind/map_insdelfind_michael_hp.cpp | 38 + .../insdelfind/map_insdelfind_michael_rcu.cpp | 38 + .../map/insdelfind/map_insdelfind_skip_hp.cpp | 38 + .../insdelfind/map_insdelfind_skip_rcu.cpp | 38 + .../insdelfind/map_insdelfind_split_hp.cpp | 39 + .../insdelfind/map_insdelfind_split_rcu.cpp | 38 + .../map/insdelfind/map_insdelfind_std.cpp | 38 + 
.../map/insdelfind/map_insdelfind_striped.cpp | 38 + .../test/stress/map/iter_erase/CMakeLists.txt | 19 + .../stress/map/iter_erase/map_iter_erase.cpp | 117 + .../stress/map/iter_erase/map_iter_erase.h | 871 +++ .../map_iter_erase_feldman_hashmap.cpp | 63 + .../map/iter_erase/map_iter_erase_michael.cpp | 39 + .../map/iter_erase/map_iter_erase_split.cpp | 38 + .../libcds-2.3.2/test/stress/map/map_type.h | 235 + .../stress/map/map_type_bronson_avltree.h | 258 + .../test/stress/map/map_type_cuckoo.h | 364 + .../test/stress/map/map_type_ellen_bintree.h | 324 + .../stress/map/map_type_feldman_hashmap.h | 385 + .../test/stress/map/map_type_iterable_list.h | 102 + .../test/stress/map/map_type_lazy_list.h | 154 + .../test/stress/map/map_type_michael.h | 421 ++ .../test/stress/map/map_type_michael_list.h | 145 + .../test/stress/map/map_type_skip_list.h | 538 ++ .../test/stress/map/map_type_split_list.h | 789 ++ .../test/stress/map/map_type_std.h | 273 + .../test/stress/map/map_type_striped.h | 311 + .../test/stress/map/minmax/CMakeLists.txt | 19 + .../test/stress/map/minmax/map_minmax.cpp | 72 + .../test/stress/map/minmax/map_minmax.h | 402 + .../map/minmax/map_minmax_bronsonavltree.cpp | 38 + .../map/minmax/map_minmax_ellentree.cpp | 38 + .../stress/map/minmax/map_minmax_skip.cpp | 38 + .../test/stress/pqueue/CMakeLists.txt | 19 + .../libcds-2.3.2/test/stress/pqueue/item.h | 84 + .../libcds-2.3.2/test/stress/pqueue/pop.cpp | 296 + .../test/stress/pqueue/pqueue_type.h | 745 ++ .../libcds-2.3.2/test/stress/pqueue/push.cpp | 258 + .../test/stress/pqueue/push_pop.cpp | 331 + .../test/stress/queue/CMakeLists.txt | 77 + .../stress/queue/bounded_queue_fulness.cpp | 161 + .../test/stress/queue/intrusive_push_pop.cpp | 529 ++ .../test/stress/queue/intrusive_queue_type.h | 499 ++ .../libcds-2.3.2/test/stress/queue/pop.cpp | 259 + .../test/stress/queue/print_stat.h | 125 + .../libcds-2.3.2/test/stress/queue/push.cpp | 262 + .../test/stress/queue/push_pop.cpp | 440 ++ 
.../test/stress/queue/queue_type.h | 895 +++ .../libcds-2.3.2/test/stress/queue/random.cpp | 340 + .../test/stress/queue/spsc_buffer.cpp | 302 + .../test/stress/queue/spsc_queue.cpp | 227 + .../test/stress/queue/std_queue.h | 102 + .../test/stress/set/CMakeLists.txt | 21 + .../test/stress/set/del3/CMakeLists.txt | 22 + .../test/stress/set/del3/set_del3.cpp | 133 + .../test/stress/set/del3/set_del3.h | 921 +++ .../test/stress/set/del3/set_del3_cuckoo.cpp | 38 + .../stress/set/del3/set_del3_ellentree.cpp | 38 + .../set/del3/set_del3_feldman_hashset.cpp | 50 + .../test/stress/set/del3/set_del3_michael.cpp | 39 + .../test/stress/set/del3/set_del3_skip.cpp | 38 + .../test/stress/set/del3/set_del3_split.cpp | 40 + .../test/stress/set/delodd/CMakeLists.txt | 22 + .../test/stress/set/delodd/set_delodd.cpp | 131 + .../test/stress/set/delodd/set_delodd.h | 921 +++ .../stress/set/delodd/set_delodd_cuckoo.cpp | 38 + .../set/delodd/set_delodd_ellentree.cpp | 38 + .../set/delodd/set_delodd_feldman_hashset.cpp | 50 + .../stress/set/delodd/set_delodd_michael.cpp | 39 + .../stress/set/delodd/set_delodd_skip.cpp | 38 + .../stress/set/delodd/set_delodd_split.cpp | 40 + .../stress/set/insdel_find/CMakeLists.txt | 56 + .../stress/set/insdel_find/set_insdelfind.cpp | 136 + .../stress/set/insdel_find/set_insdelfind.h | 233 + .../set/insdel_find/set_insdelfind_cuckoo.cpp | 38 + .../set_insdelfind_ellentree_hp.cpp | 38 + .../set_insdelfind_ellentree_rcu.cpp | 38 + .../set_insdelfind_feldman_hashset_hp.cpp | 38 + .../set_insdelfind_feldman_hashset_rcu.cpp | 38 + .../insdel_find/set_insdelfind_michael_hp.cpp | 39 + .../set_insdelfind_michael_rcu.cpp | 38 + .../insdel_find/set_insdelfind_skip_hp.cpp | 38 + .../insdel_find/set_insdelfind_skip_rcu.cpp | 38 + .../insdel_find/set_insdelfind_split_hp.cpp | 39 + .../insdel_find/set_insdelfind_split_rcu.cpp | 38 + .../set/insdel_find/set_insdelfind_std.cpp | 37 + .../insdel_find/set_insdelfind_striped.cpp | 39 + 
.../stress/set/insdel_func/CMakeLists.txt | 23 + .../set/insdel_func/set_insdel_func.cpp | 122 + .../stress/set/insdel_func/set_insdel_func.h | 564 ++ .../insdel_func/set_insdel_func_cuckoo.cpp | 38 + .../insdel_func/set_insdel_func_ellentree.cpp | 38 + .../set_insdel_func_feldman_hashset.cpp | 38 + .../insdel_func/set_insdel_func_michael.cpp | 39 + .../set/insdel_func/set_insdel_func_skip.cpp | 38 + .../set/insdel_func/set_insdel_func_split.cpp | 39 + .../insdel_func/set_insdel_func_striped.cpp | 39 + .../stress/set/insdel_string/CMakeLists.txt | 24 + .../set/insdel_string/set_insdel_string.cpp | 128 + .../set/insdel_string/set_insdel_string.h | 521 ++ .../set_insdel_string_cuckoo.cpp | 38 + .../set_insdel_string_ellentree.cpp | 38 + .../set_insdel_string_feldman_hashset.cpp | 39 + .../set_insdel_string_michael.cpp | 39 + .../insdel_string/set_insdel_string_skip.cpp | 38 + .../insdel_string/set_insdel_string_split.cpp | 39 + .../insdel_string/set_insdel_string_std.cpp | 37 + .../set_insdel_string_striped.cpp | 39 + .../test/stress/set/iter_erase/CMakeLists.txt | 19 + .../stress/set/iter_erase/set_iter_erase.cpp | 116 + .../stress/set/iter_erase/set_iter_erase.h | 885 +++ .../set_iter_erase_feldman_hashset.cpp | 64 + .../set/iter_erase/set_iter_erase_michael.cpp | 39 + .../set/iter_erase/set_iter_erase_split.cpp | 38 + .../test/stress/set/iteration/CMakeLists.txt | 18 + .../stress/set/iteration/set_iteration.cpp | 126 + .../test/stress/set/iteration/set_iteration.h | 681 ++ .../set_iteration_feldman_hashset.cpp | 39 + .../set/iteration/set_iteration_michael.cpp | 38 + .../set/iteration/set_iteration_split.cpp | 38 + .../libcds-2.3.2/test/stress/set/set_type.h | 325 + .../test/stress/set/set_type_cuckoo.h | 244 + .../test/stress/set/set_type_ellen_bintree.h | 353 + .../stress/set/set_type_feldman_hashset.h | 484 ++ .../test/stress/set/set_type_iterable_list.h | 99 + .../test/stress/set/set_type_lazy_list.h | 137 + .../test/stress/set/set_type_michael.h | 365 + 
.../test/stress/set/set_type_michael_list.h | 135 + .../test/stress/set/set_type_skip_list.h | 475 ++ .../test/stress/set/set_type_split_list.h | 738 ++ .../test/stress/set/set_type_std.h | 265 + .../test/stress/set/set_type_striped.h | 644 ++ .../test/stress/stack/CMakeLists.txt | 18 + .../test/stress/stack/intrusive_push_pop.cpp | 182 + .../stack/intrusive_push_pop_fcstack.cpp | 165 + .../stress/stack/intrusive_stack_push_pop.h | 297 + .../test/stress/stack/intrusive_stack_type.h | 485 ++ .../libcds-2.3.2/test/stress/stack/push.cpp | 209 + .../test/stress/stack/push_pop.cpp | 286 + .../test/stress/stack/stack_type.h | 569 ++ .../libcds-2.3.2/test/unit/CMakeLists.txt | 29 + .../test/unit/deque/CMakeLists.txt | 15 + .../libcds-2.3.2/test/unit/deque/fcdeque.cpp | 314 + .../test/unit/intrusive-list/CMakeLists.txt | 55 + .../intrusive-list/intrusive_iterable_dhp.cpp | 170 + .../intrusive-list/intrusive_iterable_hp.cpp | 192 + .../intrusive-list/intrusive_lazy_dhp.cpp | 345 + .../unit/intrusive-list/intrusive_lazy_hp.cpp | 343 + .../intrusive-list/intrusive_lazy_nogc.cpp | 309 + .../intrusive-list/intrusive_lazy_rcu_gpb.cpp | 43 + .../intrusive-list/intrusive_lazy_rcu_gpi.cpp | 43 + .../intrusive-list/intrusive_lazy_rcu_gpt.cpp | 43 + .../intrusive-list/intrusive_lazy_rcu_shb.cpp | 47 + .../intrusive-list/intrusive_michael_dhp.cpp | 319 + .../intrusive-list/intrusive_michael_hp.cpp | 320 + .../intrusive-list/intrusive_michael_nogc.cpp | 205 + .../intrusive_michael_rcu_gpb.cpp | 43 + .../intrusive_michael_rcu_gpi.cpp | 43 + .../intrusive_michael_rcu_gpt.cpp | 43 + .../intrusive_michael_rcu_shb.cpp | 47 + .../test_intrusive_iterable_list.h | 572 ++ .../test_intrusive_iterable_list_hp.h | 110 + .../intrusive-list/test_intrusive_lazy_rcu.h | 310 + .../unit/intrusive-list/test_intrusive_list.h | 506 ++ .../intrusive-list/test_intrusive_list_hp.h | 110 + .../intrusive-list/test_intrusive_list_nogc.h | 493 ++ .../intrusive-list/test_intrusive_list_rcu.h | 146 + 
.../test_intrusive_michael_rcu.h | 306 + .../test/unit/intrusive-set/CMakeLists.txt | 139 + .../intrusive_feldman_hashset_dhp.cpp | 203 + .../intrusive_feldman_hashset_hp.cpp | 204 + .../intrusive_feldman_hashset_rcu_gpb.cpp | 44 + .../intrusive_feldman_hashset_rcu_gpi.cpp | 43 + .../intrusive_feldman_hashset_rcu_gpt.cpp | 43 + .../intrusive_feldman_hashset_rcu_shb.cpp | 47 + .../intrusive_michael_iterable_dhp.cpp | 191 + .../intrusive_michael_iterable_hp.cpp | 194 + .../intrusive_michael_lazy_dhp.cpp | 345 + .../intrusive_michael_lazy_hp.cpp | 347 + .../intrusive_michael_lazy_nogc.cpp | 332 + .../intrusive_michael_lazy_rcu_gpb.cpp | 43 + .../intrusive_michael_lazy_rcu_gpi.cpp | 43 + .../intrusive_michael_lazy_rcu_gpt.cpp | 43 + .../intrusive_michael_lazy_rcu_shb.cpp | 48 + .../intrusive_michael_michael_dhp.cpp | 300 + .../intrusive_michael_michael_hp.cpp | 300 + .../intrusive_michael_michael_nogc.cpp | 283 + .../intrusive_michael_michael_rcu_gpb.cpp | 43 + .../intrusive_michael_michael_rcu_gpi.cpp | 43 + .../intrusive_michael_michael_rcu_gpt.cpp | 43 + .../intrusive_michael_michael_rcu_shb.cpp | 48 + .../intrusive-set/intrusive_skiplist_dhp.cpp | 359 + .../intrusive-set/intrusive_skiplist_hp.cpp | 360 + .../intrusive-set/intrusive_skiplist_nogc.cpp | 345 + .../intrusive_skiplist_rcu_gpb.cpp | 43 + .../intrusive_skiplist_rcu_gpi.cpp | 43 + .../intrusive_skiplist_rcu_gpt.cpp | 43 + .../intrusive_skiplist_rcu_shb.cpp | 47 + .../intrusive_split_iterable_dhp.cpp | 264 + .../intrusive_split_iterable_hp.cpp | 265 + .../intrusive_split_lazy_dhp.cpp | 400 + .../intrusive-set/intrusive_split_lazy_hp.cpp | 411 + .../intrusive_split_lazy_nogc.cpp | 392 + .../intrusive_split_lazy_rcu_gpb.cpp | 43 + .../intrusive_split_lazy_rcu_gpi.cpp | 43 + .../intrusive_split_lazy_rcu_gpt.cpp | 43 + .../intrusive_split_lazy_rcu_shb.cpp | 47 + .../intrusive_split_michael_dhp.cpp | 424 ++ .../intrusive_split_michael_hp.cpp | 425 ++ .../intrusive_split_michael_nogc.cpp | 412 + 
.../intrusive_split_michael_rcu_gpb.cpp | 43 + .../intrusive_split_michael_rcu_gpi.cpp | 43 + .../intrusive_split_michael_rcu_gpt.cpp | 43 + .../intrusive_split_michael_rcu_shb.cpp | 47 + .../test_intrusive_feldman_hashset.h | 376 + .../test_intrusive_feldman_hashset_hp.h | 165 + .../test_intrusive_feldman_hashset_rcu.h | 288 + .../test_intrusive_michael_iterable.h | 417 ++ .../test_intrusive_michael_iterable_hp.h | 178 + .../test_intrusive_michael_lazy_rcu.h | 408 + .../test_intrusive_michael_michael_rcu.h | 354 + .../unit/intrusive-set/test_intrusive_set.h | 450 ++ .../intrusive-set/test_intrusive_set_hp.h | 160 + .../intrusive-set/test_intrusive_set_nogc.h | 374 + .../intrusive-set/test_intrusive_set_rcu.h | 212 + .../test_intrusive_skiplist_rcu.h | 473 ++ .../test_intrusive_split_iterable_set.h | 421 ++ .../test_intrusive_split_iterable_set_hp.h | 160 + .../test_intrusive_split_lazy_rcu.h | 479 ++ .../test_intrusive_split_michael_rcu.h | 505 ++ .../test/unit/list/CMakeLists.txt | 72 + .../test/unit/list/iterable_dhp.cpp | 169 + .../test/unit/list/iterable_hp.cpp | 170 + .../test/unit/list/kv_iterable_dhp.cpp | 167 + .../test/unit/list/kv_iterable_hp.cpp | 168 + .../test/unit/list/kv_lazy_dhp.cpp | 195 + .../test/unit/list/kv_lazy_hp.cpp | 196 + .../test/unit/list/kv_lazy_nogc.cpp | 171 + .../test/unit/list/kv_lazy_rcu_gpb.cpp | 43 + .../test/unit/list/kv_lazy_rcu_gpi.cpp | 43 + .../test/unit/list/kv_lazy_rcu_gpt.cpp | 43 + .../test/unit/list/kv_lazy_rcu_shb.cpp | 47 + .../test/unit/list/kv_michael_dhp.cpp | 179 + .../test/unit/list/kv_michael_hp.cpp | 180 + .../test/unit/list/kv_michael_nogc.cpp | 156 + .../test/unit/list/kv_michael_rcu_gpb.cpp | 43 + .../test/unit/list/kv_michael_rcu_gpi.cpp | 43 + .../test/unit/list/kv_michael_rcu_gpt.cpp | 43 + .../test/unit/list/kv_michael_rcu_shb.cpp | 47 + .../libcds-2.3.2/test/unit/list/lazy_dhp.cpp | 196 + .../libcds-2.3.2/test/unit/list/lazy_hp.cpp | 196 + .../libcds-2.3.2/test/unit/list/lazy_nogc.cpp | 171 + 
.../test/unit/list/lazy_rcu_gpb.cpp | 43 + .../test/unit/list/lazy_rcu_gpi.cpp | 43 + .../test/unit/list/lazy_rcu_gpt.cpp | 43 + .../test/unit/list/lazy_rcu_shb.cpp | 47 + .../test/unit/list/michael_dhp.cpp | 181 + .../test/unit/list/michael_hp.cpp | 182 + .../test/unit/list/michael_nogc.cpp | 156 + .../test/unit/list/michael_rcu_gpb.cpp | 43 + .../test/unit/list/michael_rcu_gpi.cpp | 43 + .../test/unit/list/michael_rcu_gpt.cpp | 43 + .../test/unit/list/michael_rcu_shb.cpp | 47 + .../test/unit/list/test_iterable_list.h | 410 + .../test/unit/list/test_iterable_list_hp.h | 136 + .../test/unit/list/test_kv_iterable_list.h | 454 ++ .../test/unit/list/test_kv_iterable_list_hp.h | 135 + .../test/unit/list/test_kv_lazy_rcu.h | 207 + .../test/unit/list/test_kv_list.h | 419 ++ .../test/unit/list/test_kv_list_hp.h | 135 + .../test/unit/list/test_kv_list_nogc.h | 364 + .../test/unit/list/test_kv_list_rcu.h | 153 + .../test/unit/list/test_kv_michael_rcu.h | 190 + .../test/unit/list/test_lazy_rcu.h | 207 + .../libcds-2.3.2/test/unit/list/test_list.h | 373 + .../test/unit/list/test_list_hp.h | 136 + .../test/unit/list/test_list_nogc.h | 335 + .../test/unit/list/test_list_rcu.h | 150 + .../test/unit/list/test_michael_rcu.h | 190 + .../libcds-2.3.2/test/unit/main.cpp | 51 + .../libcds-2.3.2/test/unit/map/CMakeLists.txt | 137 + .../test/unit/map/feldman_hashmap_dhp.cpp | 185 + .../test/unit/map/feldman_hashmap_hp.cpp | 196 + .../test/unit/map/feldman_hashset_rcu_gpb.cpp | 43 + .../test/unit/map/feldman_hashset_rcu_gpi.cpp | 43 + .../test/unit/map/feldman_hashset_rcu_gpt.cpp | 43 + .../test/unit/map/feldman_hashset_rcu_shb.cpp | 47 + .../test/unit/map/michael_iterable_dhp.cpp | 199 + .../test/unit/map/michael_iterable_hp.cpp | 200 + .../test/unit/map/michael_lazy_dhp.cpp | 220 + .../test/unit/map/michael_lazy_hp.cpp | 221 + .../test/unit/map/michael_lazy_nogc.cpp | 213 + .../test/unit/map/michael_lazy_rcu_gpb.cpp | 43 + .../test/unit/map/michael_lazy_rcu_gpi.cpp | 43 + 
.../test/unit/map/michael_lazy_rcu_gpt.cpp | 43 + .../test/unit/map/michael_lazy_rcu_shb.cpp | 47 + .../test/unit/map/michael_michael_dhp.cpp | 199 + .../test/unit/map/michael_michael_hp.cpp | 201 + .../test/unit/map/michael_michael_nogc.cpp | 192 + .../test/unit/map/michael_michael_rcu_gpb.cpp | 43 + .../test/unit/map/michael_michael_rcu_gpi.cpp | 43 + .../test/unit/map/michael_michael_rcu_gpt.cpp | 43 + .../test/unit/map/michael_michael_rcu_shb.cpp | 47 + .../test/unit/map/skiplist_dhp.cpp | 61 + .../test/unit/map/skiplist_hp.cpp | 63 + .../test/unit/map/skiplist_hp_inl.h | 191 + .../test/unit/map/skiplist_nogc.cpp | 276 + .../test/unit/map/skiplist_rcu_gpb.cpp | 43 + .../test/unit/map/skiplist_rcu_gpi.cpp | 43 + .../test/unit/map/skiplist_rcu_gpt.cpp | 43 + .../test/unit/map/skiplist_rcu_shb.cpp | 47 + .../test/unit/map/split_iterable_dhp.cpp | 257 + .../test/unit/map/split_iterable_hp.cpp | 258 + .../test/unit/map/split_lazy_dhp.cpp | 277 + .../test/unit/map/split_lazy_hp.cpp | 278 + .../test/unit/map/split_lazy_nogc.cpp | 261 + .../test/unit/map/split_lazy_rcu_gpb.cpp | 43 + .../test/unit/map/split_lazy_rcu_gpi.cpp | 43 + .../test/unit/map/split_lazy_rcu_gpt.cpp | 43 + .../test/unit/map/split_lazy_rcu_shb.cpp | 47 + .../test/unit/map/split_michael_dhp.cpp | 311 + .../test/unit/map/split_michael_hp.cpp | 313 + .../test/unit/map/split_michael_nogc.cpp | 239 + .../test/unit/map/split_michael_rcu_gpb.cpp | 43 + .../test/unit/map/split_michael_rcu_gpi.cpp | 43 + .../test/unit/map/split_michael_rcu_gpt.cpp | 43 + .../test/unit/map/split_michael_rcu_shb.cpp | 47 + .../test/unit/map/test_feldman_hashmap.h | 482 ++ .../test/unit/map/test_feldman_hashmap_hp.h | 216 + .../test/unit/map/test_feldman_hashmap_rcu.h | 347 + .../libcds-2.3.2/test/unit/map/test_map.h | 391 + .../test/unit/map/test_map_data.h | 245 + .../libcds-2.3.2/test/unit/map/test_map_hp.h | 141 + .../test/unit/map/test_map_nogc.h | 259 + .../libcds-2.3.2/test/unit/map/test_map_rcu.h | 223 + 
.../test/unit/map/test_michael_iterable.h | 424 ++ .../test/unit/map/test_michael_iterable_hp.h | 154 + .../test/unit/map/test_michael_lazy_rcu.h | 265 + .../test/unit/map/test_michael_michael_rcu.h | 237 + .../test/unit/map/test_skiplist_hp.h | 106 + .../test/unit/map/test_skiplist_rcu.h | 334 + .../test/unit/map/test_split_lazy_rcu.h | 330 + .../test/unit/map/test_split_michael_rcu.h | 374 + .../test/unit/misc/CMakeLists.txt | 21 + .../test/unit/misc/asan_errors.cpp | 79 + .../test/unit/misc/bit_reversal.cpp | 96 + .../libcds-2.3.2/test/unit/misc/bitop.cpp | 160 + .../test/unit/misc/cxx11_atomic_class.cpp | 828 +++ .../test/unit/misc/cxx11_atomic_func.cpp | 754 ++ .../unit/misc/cxx11_convert_memory_order.h | 87 + .../test/unit/misc/find_option.cpp | 209 + .../test/unit/misc/hash_tuple.cpp | 142 + .../test/unit/misc/permutation_generator.cpp | 95 + .../test/unit/misc/split_bitstring.cpp | 862 +++ .../test/unit/pqueue/CMakeLists.txt | 19 + .../pqueue/fcpqueue_boost_stable_vector.cpp | 199 + .../test/unit/pqueue/fcpqueue_deque.cpp | 162 + .../test/unit/pqueue/fcpqueue_vector.cpp | 159 + .../test/unit/pqueue/intrusive_mspqueue.cpp | 270 + .../test/unit/pqueue/mspqueue.cpp | 285 + .../libcds-2.3.2/test/unit/pqueue/test_data.h | 131 + .../test/unit/pqueue/test_fcpqueue.h | 110 + .../test/unit/queue/CMakeLists.txt | 40 + .../test/unit/queue/basket_queue_dhp.cpp | 140 + .../test/unit/queue/basket_queue_hp.cpp | 140 + .../libcds-2.3.2/test/unit/queue/fcqueue.cpp | 464 ++ .../unit/queue/intrusive_basket_queue_dhp.cpp | 187 + .../unit/queue/intrusive_basket_queue_hp.cpp | 200 + .../test/unit/queue/intrusive_fcqueue.cpp | 450 ++ .../unit/queue/intrusive_moirqueue_dhp.cpp | 187 + .../unit/queue/intrusive_moirqueue_hp.cpp | 200 + .../test/unit/queue/intrusive_msqueue_dhp.cpp | 187 + .../test/unit/queue/intrusive_msqueue_hp.cpp | 200 + .../unit/queue/intrusive_optqueue_dhp.cpp | 187 + .../test/unit/queue/intrusive_optqueue_hp.cpp | 200 + 
.../queue/intrusive_segmented_queue_dhp.cpp | 166 + .../queue/intrusive_segmented_queue_hp.cpp | 166 + .../unit/queue/intrusive_vyukov_queue.cpp | 112 + .../test/unit/queue/moirqueue_dhp.cpp | 140 + .../test/unit/queue/moirqueue_hp.cpp | 140 + .../test/unit/queue/msqueue_dhp.cpp | 140 + .../test/unit/queue/msqueue_hp.cpp | 140 + .../test/unit/queue/optimistic_queue_dhp.cpp | 140 + .../test/unit/queue/optimistic_queue_hp.cpp | 140 + .../libcds-2.3.2/test/unit/queue/rwqueue.cpp | 107 + .../test/unit/queue/segmented_queue_dhp.cpp | 136 + .../test/unit/queue/segmented_queue_hp.cpp | 136 + .../test/unit/queue/test_bounded_queue.h | 222 + .../test/unit/queue/test_generic_queue.h | 206 + .../unit/queue/test_intrusive_bounded_queue.h | 151 + .../test/unit/queue/test_intrusive_msqueue.h | 159 + .../queue/test_intrusive_segmented_queue.h | 191 + .../test/unit/queue/test_segmented_queue.h | 231 + .../test/unit/queue/vyukov_mpmc_queue.cpp | 183 + .../test/unit/queue/weak_ringbuffer.cpp | 323 + .../libcds-2.3.2/test/unit/set/CMakeLists.txt | 134 + .../test/unit/set/feldman_hashset_dhp.cpp | 208 + .../test/unit/set/feldman_hashset_hp.cpp | 209 + .../test/unit/set/feldman_hashset_rcu_gpb.cpp | 43 + .../test/unit/set/feldman_hashset_rcu_gpi.cpp | 43 + .../test/unit/set/feldman_hashset_rcu_gpt.cpp | 43 + .../test/unit/set/feldman_hashset_rcu_shb.cpp | 47 + .../test/unit/set/michael_iterable_dhp.cpp | 220 + .../test/unit/set/michael_iterable_hp.cpp | 221 + .../test/unit/set/michael_lazy_dhp.cpp | 241 + .../test/unit/set/michael_lazy_hp.cpp | 242 + .../test/unit/set/michael_lazy_nogc.cpp | 232 + .../test/unit/set/michael_lazy_rcu_gpb.cpp | 43 + .../test/unit/set/michael_lazy_rcu_gpi.cpp | 43 + .../test/unit/set/michael_lazy_rcu_gpt.cpp | 43 + .../test/unit/set/michael_lazy_rcu_shb.cpp | 47 + .../test/unit/set/michael_michael_dhp.cpp | 220 + .../test/unit/set/michael_michael_hp.cpp | 221 + .../test/unit/set/michael_michael_nogc.cpp | 211 + .../test/unit/set/michael_michael_rcu_gpb.cpp 
| 43 + .../test/unit/set/michael_michael_rcu_gpi.cpp | 43 + .../test/unit/set/michael_michael_rcu_gpt.cpp | 43 + .../test/unit/set/michael_michael_rcu_shb.cpp | 47 + .../test/unit/set/skiplist_dhp.cpp | 62 + .../test/unit/set/skiplist_hp.cpp | 63 + .../test/unit/set/skiplist_hp_inl.h | 207 + .../test/unit/set/skiplist_nogc.cpp | 229 + .../test/unit/set/skiplist_rcu_gpb.cpp | 43 + .../test/unit/set/skiplist_rcu_gpi.cpp | 43 + .../test/unit/set/skiplist_rcu_gpt.cpp | 43 + .../test/unit/set/skiplist_rcu_shb.cpp | 47 + .../test/unit/set/split_iterable_dhp.cpp | 255 + .../test/unit/set/split_iterable_hp.cpp | 256 + .../test/unit/set/split_lazy_dhp.cpp | 278 + .../test/unit/set/split_lazy_hp.cpp | 258 + .../test/unit/set/split_lazy_nogc.cpp | 263 + .../test/unit/set/split_lazy_rcu_gpb.cpp | 43 + .../test/unit/set/split_lazy_rcu_gpi.cpp | 43 + .../test/unit/set/split_lazy_rcu_gpt.cpp | 43 + .../test/unit/set/split_lazy_rcu_shb.cpp | 47 + .../test/unit/set/split_michael_dhp.cpp | 320 + .../test/unit/set/split_michael_hp.cpp | 323 + .../test/unit/set/split_michael_nogc.cpp | 304 + .../test/unit/set/split_michael_rcu_gpb.cpp | 43 + .../test/unit/set/split_michael_rcu_gpi.cpp | 43 + .../test/unit/set/split_michael_rcu_gpt.cpp | 43 + .../test/unit/set/split_michael_rcu_shb.cpp | 47 + .../test/unit/set/test_feldman_hashset.h | 516 ++ .../test/unit/set/test_feldman_hashset_hp.h | 148 + .../test/unit/set/test_feldman_hashset_rcu.h | 351 + .../test/unit/set/test_michael_iterable.h | 379 + .../test/unit/set/test_michael_iterable_hp.h | 168 + .../test/unit/set/test_michael_lazy_rcu.h | 277 + .../test/unit/set/test_michael_michael_rcu.h | 253 + .../test/unit/set/test_ordered_set_hp.h | 117 + .../libcds-2.3.2/test/unit/set/test_set.h | 322 + .../test/unit/set/test_set_data.h | 229 + .../libcds-2.3.2/test/unit/set/test_set_hp.h | 153 + .../test/unit/set/test_set_nogc.h | 349 + .../libcds-2.3.2/test/unit/set/test_set_rcu.h | 199 + .../test/unit/set/test_skiplist_rcu.h | 350 + 
.../test/unit/set/test_split_iterable.h | 395 + .../test/unit/set/test_split_iterable_hp.h | 168 + .../test/unit/set/test_split_lazy_rcu.h | 321 + .../test/unit/set/test_split_michael_rcu.h | 369 + .../test/unit/stack/CMakeLists.txt | 20 + .../libcds-2.3.2/test/unit/stack/fcstack.cpp | 304 + .../test/unit/stack/intrusive_fcstack.cpp | 475 ++ .../stack/intrusive_treiber_stack_dhp.cpp | 288 + .../unit/stack/intrusive_treiber_stack_hp.cpp | 295 + .../unit/stack/test_intrusive_treiber_stack.h | 134 + .../test/unit/stack/test_treiber_stack.h | 87 + .../test/unit/stack/treiber_stack_dhp.cpp | 179 + .../test/unit/stack/treiber_stack_hp.cpp | 173 + .../test/unit/striped-map/CMakeLists.txt | 25 + .../test/unit/striped-map/cuckoo_map.cpp | 517 ++ .../unit/striped-map/map_boost_flat_map.cpp | 72 + .../test/unit/striped-map/map_boost_list.cpp | 68 + .../test/unit/striped-map/map_boost_map.cpp | 72 + .../test/unit/striped-map/map_boost_slist.cpp | 68 + .../striped-map/map_boost_unordered_map.cpp | 64 + .../test/unit/striped-map/map_std_list.cpp | 55 + .../test/unit/striped-map/map_std_map.cpp | 55 + .../striped-map/map_std_unordered_map.cpp | 60 + .../test/unit/striped-map/test_map.h | 394 + .../test/unit/striped-map/test_map_data.h | 302 + .../test/unit/striped-map/test_striped_map.h | 828 +++ .../test/unit/striped-set/CMakeLists.txt | 37 + .../test/unit/striped-set/cuckoo_set.cpp | 517 ++ .../striped-set/intrusive_boost_avl_set.cpp | 59 + .../unit/striped-set/intrusive_boost_list.cpp | 57 + .../unit/striped-set/intrusive_boost_set.cpp | 83 + .../striped-set/intrusive_boost_sg_set.cpp | 59 + .../striped-set/intrusive_boost_slist.cpp | 57 + .../striped-set/intrusive_boost_splay_set.cpp | 68 + .../striped-set/intrusive_boost_treap_set.cpp | 73 + .../intrusive_boost_unordered_set.cpp | 326 + .../unit/striped-set/intrusive_cuckoo_set.cpp | 1406 ++++ .../unit/striped-set/set_boost_flatset.cpp | 68 + .../test/unit/striped-set/set_boost_list.cpp | 68 + 
.../test/unit/striped-set/set_boost_set.cpp | 68 + .../test/unit/striped-set/set_boost_slist.cpp | 68 + .../striped-set/set_boost_stable_vector.cpp | 68 + .../striped-set/set_boost_unordered_set.cpp | 59 + .../unit/striped-set/set_boost_vector.cpp | 68 + .../test/unit/striped-set/set_std_list.cpp | 55 + .../test/unit/striped-set/set_std_set.cpp | 55 + .../striped-set/set_std_unordered_set.cpp | 55 + .../test/unit/striped-set/set_std_vector.cpp | 55 + .../unit/striped-set/test_intrusive_set.h | 509 ++ .../striped-set/test_intrusive_striped_set.h | 405 + .../test/unit/striped-set/test_set.h | 561 ++ .../test/unit/striped-set/test_striped_set.h | 768 ++ .../test/unit/tree/CMakeLists.txt | 43 + .../tree/bronson_avltree_map_ptr_rcu_gpb.cpp | 43 + .../tree/bronson_avltree_map_ptr_rcu_gpi.cpp | 43 + .../tree/bronson_avltree_map_ptr_rcu_gpt.cpp | 43 + .../tree/bronson_avltree_map_ptr_rcu_shb.cpp | 47 + .../unit/tree/bronson_avltree_map_rcu_gpb.cpp | 43 + .../unit/tree/bronson_avltree_map_rcu_gpi.cpp | 43 + .../unit/tree/bronson_avltree_map_rcu_gpt.cpp | 43 + .../unit/tree/bronson_avltree_map_rcu_shb.cpp | 47 + .../test/unit/tree/ellen_bintree_map_dhp.cpp | 202 + .../test/unit/tree/ellen_bintree_map_hp.cpp | 203 + .../unit/tree/ellen_bintree_map_rcu_gpb.cpp | 43 + .../unit/tree/ellen_bintree_map_rcu_gpi.cpp | 43 + .../unit/tree/ellen_bintree_map_rcu_gpt.cpp | 43 + .../unit/tree/ellen_bintree_map_rcu_shb.cpp | 47 + .../test/unit/tree/ellen_bintree_set_dhp.cpp | 180 + .../test/unit/tree/ellen_bintree_set_hp.cpp | 181 + .../unit/tree/ellen_bintree_set_rcu_gpb.cpp | 43 + .../unit/tree/ellen_bintree_set_rcu_gpi.cpp | 43 + .../unit/tree/ellen_bintree_set_rcu_gpt.cpp | 43 + .../unit/tree/ellen_bintree_set_rcu_shb.cpp | 47 + .../tree/ellen_bintree_update_desc_pool.cpp | 37 + .../unit/tree/intrusive_ellenbintree_dhp.cpp | 293 + .../unit/tree/intrusive_ellenbintree_hp.cpp | 295 + .../tree/intrusive_ellenbintree_rcu_gpb.cpp | 43 + .../tree/intrusive_ellenbintree_rcu_gpi.cpp | 43 + 
.../tree/intrusive_ellenbintree_rcu_gpt.cpp | 43 + .../tree/intrusive_ellenbintree_rcu_shb.cpp | 47 + .../test/unit/tree/test_bronson_avltree_map.h | 752 ++ .../unit/tree/test_bronson_avltree_map_ptr.h | 727 ++ .../unit/tree/test_ellen_bintree_map_rcu.h | 234 + .../unit/tree/test_ellen_bintree_set_rcu.h | 239 + .../test_ellen_bintree_update_desc_pool.h | 74 + .../tree/test_intrusive_ellen_bintree_rcu.h | 379 + .../test/unit/tree/test_intrusive_tree.h | 480 ++ .../test/unit/tree/test_intrusive_tree_hp.h | 219 + .../test/unit/tree/test_intrusive_tree_rcu.h | 227 + .../test/unit/tree/test_tree_map.h | 388 + .../test/unit/tree/test_tree_map_data.h | 254 + .../test/unit/tree/test_tree_map_hp.h | 170 + .../test/unit/tree/test_tree_map_rcu.h | 256 + .../test/unit/tree/test_tree_set.h | 519 ++ .../test/unit/tree/test_tree_set_hp.h | 188 + .../test/unit/tree/test_tree_set_rcu.h | 233 + .../demo/dependencies/libcds-2.3.2/thanks | 22 + .../libcds-2.3.2/tools/brush_cds.pl | 67 + .../libcds-2.3.2/tools/make_distrib.bat | 4 + .../libcds-2.3.2/tools/make_distrib.pl | 89 + .../libcds-2.3.2/tools/make_docs.bat | 7 + .../libcds-2.3.2/tools/tsan-suppression | 20 + .../rapidjson-1.1.0/.gitattributes | 22 + .../dependencies/rapidjson-1.1.0/.gitignore | 25 + .../dependencies/rapidjson-1.1.0/.gitmodules | 3 + .../dependencies/rapidjson-1.1.0/.travis.yml | 98 + .../dependencies/rapidjson-1.1.0/CHANGELOG.md | 158 + .../rapidjson-1.1.0/CMakeLists.txt | 173 + .../CMakeModules/FindGTestSrc.cmake | 30 + .../rapidjson-1.1.0/RapidJSON.pc.in | 7 + .../rapidjson-1.1.0/RapidJSONConfig.cmake.in | 3 + .../RapidJSONConfigVersion.cmake.in | 10 + .../dependencies/rapidjson-1.1.0/appveyor.yml | 41 + .../rapidjson-1.1.0/bin/data/glossary.json | 22 + .../rapidjson-1.1.0/bin/data/menu.json | 27 + .../rapidjson-1.1.0/bin/data/readme.txt | 1 + .../rapidjson-1.1.0/bin/data/sample.json | 3315 +++++++++ .../rapidjson-1.1.0/bin/data/webapp.json | 88 + .../rapidjson-1.1.0/bin/data/widget.json | 26 + 
.../rapidjson-1.1.0/bin/draft-04/schema | 150 + .../bin/encodings/utf16be.json | Bin 0 -> 368 bytes .../bin/encodings/utf16bebom.json | Bin 0 -> 370 bytes .../bin/encodings/utf16le.json | Bin 0 -> 368 bytes .../bin/encodings/utf16lebom.json | Bin 0 -> 370 bytes .../bin/encodings/utf32be.json | Bin 0 -> 736 bytes .../bin/encodings/utf32bebom.json | Bin 0 -> 740 bytes .../bin/encodings/utf32le.json | Bin 0 -> 736 bytes .../bin/encodings/utf32lebom.json | Bin 0 -> 740 bytes .../rapidjson-1.1.0/bin/encodings/utf8.json | 7 + .../bin/encodings/utf8bom.json | 7 + .../bin/jsonchecker/fail1.json | 1 + .../bin/jsonchecker/fail10.json | 1 + .../bin/jsonchecker/fail11.json | 1 + .../bin/jsonchecker/fail12.json | 1 + .../bin/jsonchecker/fail13.json | 1 + .../bin/jsonchecker/fail14.json | 1 + .../bin/jsonchecker/fail15.json | 1 + .../bin/jsonchecker/fail16.json | 1 + .../bin/jsonchecker/fail17.json | 1 + .../bin/jsonchecker/fail18.json | 1 + .../bin/jsonchecker/fail19.json | 1 + .../bin/jsonchecker/fail2.json | 1 + .../bin/jsonchecker/fail20.json | 1 + .../bin/jsonchecker/fail21.json | 1 + .../bin/jsonchecker/fail22.json | 1 + .../bin/jsonchecker/fail23.json | 1 + .../bin/jsonchecker/fail24.json | 1 + .../bin/jsonchecker/fail25.json | 1 + .../bin/jsonchecker/fail26.json | 1 + .../bin/jsonchecker/fail27.json | 2 + .../bin/jsonchecker/fail28.json | 2 + .../bin/jsonchecker/fail29.json | 1 + .../bin/jsonchecker/fail3.json | 1 + .../bin/jsonchecker/fail30.json | 1 + .../bin/jsonchecker/fail31.json | 1 + .../bin/jsonchecker/fail32.json | 1 + .../bin/jsonchecker/fail33.json | 1 + .../bin/jsonchecker/fail4.json | 1 + .../bin/jsonchecker/fail5.json | 1 + .../bin/jsonchecker/fail6.json | 1 + .../bin/jsonchecker/fail7.json | 1 + .../bin/jsonchecker/fail8.json | 1 + .../bin/jsonchecker/fail9.json | 1 + .../bin/jsonchecker/pass1.json | 58 + .../bin/jsonchecker/pass2.json | 1 + .../bin/jsonchecker/pass3.json | 6 + .../bin/jsonchecker/readme.txt | 3 + 
.../rapidjson-1.1.0/bin/jsonschema/.gitignore | 1 + .../bin/jsonschema/.travis.yml | 4 + .../rapidjson-1.1.0/bin/jsonschema/LICENSE | 19 + .../rapidjson-1.1.0/bin/jsonschema/README.md | 148 + .../bin/jsonschema/bin/jsonschema_suite | 283 + .../bin/jsonschema/remotes/.DS_Store | Bin 0 -> 6148 bytes .../remotes/folder/folderInteger.json | 3 + .../bin/jsonschema/remotes/integer.json | 3 + .../bin/jsonschema/remotes/subSchemas.json | 8 + .../bin/jsonschema/tests/.DS_Store | Bin 0 -> 6148 bytes .../tests/draft3/additionalItems.json | 82 + .../tests/draft3/additionalProperties.json | 88 + .../bin/jsonschema/tests/draft3/default.json | 49 + .../jsonschema/tests/draft3/dependencies.json | 108 + .../bin/jsonschema/tests/draft3/disallow.json | 80 + .../jsonschema/tests/draft3/divisibleBy.json | 60 + .../bin/jsonschema/tests/draft3/enum.json | 71 + .../bin/jsonschema/tests/draft3/extends.json | 94 + .../bin/jsonschema/tests/draft3/items.json | 46 + .../bin/jsonschema/tests/draft3/maxItems.json | 28 + .../jsonschema/tests/draft3/maxLength.json | 33 + .../bin/jsonschema/tests/draft3/maximum.json | 42 + .../bin/jsonschema/tests/draft3/minItems.json | 28 + .../jsonschema/tests/draft3/minLength.json | 33 + .../bin/jsonschema/tests/draft3/minimum.json | 42 + .../tests/draft3/optional/bignum.json | 107 + .../tests/draft3/optional/format.json | 222 + .../tests/draft3/optional/jsregex.json | 18 + .../draft3/optional/zeroTerminatedFloats.json | 15 + .../bin/jsonschema/tests/draft3/pattern.json | 34 + .../tests/draft3/patternProperties.json | 110 + .../jsonschema/tests/draft3/properties.json | 92 + .../bin/jsonschema/tests/draft3/ref.json | 159 + .../jsonschema/tests/draft3/refRemote.json | 74 + .../bin/jsonschema/tests/draft3/required.json | 53 + .../bin/jsonschema/tests/draft3/type.json | 474 ++ .../jsonschema/tests/draft3/uniqueItems.json | 79 + .../bin/jsonschema/tests/draft4/.DS_Store | Bin 0 -> 6148 bytes .../tests/draft4/additionalItems.json | 82 + 
.../tests/draft4/additionalProperties.json | 88 + .../bin/jsonschema/tests/draft4/allOf.json | 112 + .../bin/jsonschema/tests/draft4/anyOf.json | 68 + .../bin/jsonschema/tests/draft4/default.json | 49 + .../jsonschema/tests/draft4/definitions.json | 32 + .../jsonschema/tests/draft4/dependencies.json | 113 + .../bin/jsonschema/tests/draft4/enum.json | 72 + .../bin/jsonschema/tests/draft4/items.json | 46 + .../bin/jsonschema/tests/draft4/maxItems.json | 28 + .../jsonschema/tests/draft4/maxLength.json | 33 + .../tests/draft4/maxProperties.json | 28 + .../bin/jsonschema/tests/draft4/maximum.json | 42 + .../bin/jsonschema/tests/draft4/minItems.json | 28 + .../jsonschema/tests/draft4/minLength.json | 33 + .../tests/draft4/minProperties.json | 28 + .../bin/jsonschema/tests/draft4/minimum.json | 42 + .../jsonschema/tests/draft4/multipleOf.json | 60 + .../bin/jsonschema/tests/draft4/not.json | 96 + .../bin/jsonschema/tests/draft4/oneOf.json | 68 + .../tests/draft4/optional/bignum.json | 107 + .../tests/draft4/optional/format.json | 148 + .../draft4/optional/zeroTerminatedFloats.json | 15 + .../bin/jsonschema/tests/draft4/pattern.json | 34 + .../tests/draft4/patternProperties.json | 110 + .../jsonschema/tests/draft4/properties.json | 92 + .../bin/jsonschema/tests/draft4/ref.json | 159 + .../jsonschema/tests/draft4/refRemote.json | 74 + .../bin/jsonschema/tests/draft4/required.json | 39 + .../bin/jsonschema/tests/draft4/type.json | 330 + .../jsonschema/tests/draft4/uniqueItems.json | 79 + .../rapidjson-1.1.0/bin/jsonschema/tox.ini | 8 + .../rapidjson-1.1.0/bin/types/booleans.json | 102 + .../rapidjson-1.1.0/bin/types/floats.json | 102 + .../rapidjson-1.1.0/bin/types/guids.json | 102 + .../rapidjson-1.1.0/bin/types/integers.json | 102 + .../rapidjson-1.1.0/bin/types/mixed.json | 592 ++ .../rapidjson-1.1.0/bin/types/nulls.json | 102 + .../rapidjson-1.1.0/bin/types/paragraphs.json | 102 + .../rapidjson-1.1.0/bin/types/readme.txt | 1 + .../rapidjson-1.1.0/doc/CMakeLists.txt | 25 
+ .../rapidjson-1.1.0/doc/Doxyfile.in | 2369 ++++++ .../rapidjson-1.1.0/doc/Doxyfile.zh-cn.in | 2369 ++++++ .../doc/diagram/architecture.dot | 50 + .../doc/diagram/architecture.png | Bin 0 -> 16569 bytes .../doc/diagram/insituparsing.dot | 65 + .../doc/diagram/insituparsing.png | Bin 0 -> 37281 bytes .../iterative-parser-states-diagram.dot | 62 + .../iterative-parser-states-diagram.png | Bin 0 -> 92378 bytes .../rapidjson-1.1.0/doc/diagram/makefile | 8 + .../rapidjson-1.1.0/doc/diagram/move1.dot | 47 + .../rapidjson-1.1.0/doc/diagram/move1.png | Bin 0 -> 16081 bytes .../rapidjson-1.1.0/doc/diagram/move2.dot | 62 + .../rapidjson-1.1.0/doc/diagram/move2.png | Bin 0 -> 41517 bytes .../rapidjson-1.1.0/doc/diagram/move3.dot | 60 + .../rapidjson-1.1.0/doc/diagram/move3.png | Bin 0 -> 36371 bytes .../doc/diagram/normalparsing.dot | 56 + .../doc/diagram/normalparsing.png | Bin 0 -> 32887 bytes .../rapidjson-1.1.0/doc/diagram/simpledom.dot | 54 + .../rapidjson-1.1.0/doc/diagram/simpledom.png | Bin 0 -> 43670 bytes .../rapidjson-1.1.0/doc/diagram/tutorial.dot | 58 + .../rapidjson-1.1.0/doc/diagram/tutorial.png | Bin 0 -> 44634 bytes .../doc/diagram/utilityclass.dot | 73 + .../doc/diagram/utilityclass.png | Bin 0 -> 99993 bytes .../dependencies/rapidjson-1.1.0/doc/dom.md | 280 + .../rapidjson-1.1.0/doc/dom.zh-cn.md | 284 + .../rapidjson-1.1.0/doc/encoding.md | 146 + .../rapidjson-1.1.0/doc/encoding.zh-cn.md | 152 + .../dependencies/rapidjson-1.1.0/doc/faq.md | 289 + .../rapidjson-1.1.0/doc/faq.zh-cn.md | 290 + .../rapidjson-1.1.0/doc/features.md | 104 + .../rapidjson-1.1.0/doc/features.zh-cn.md | 103 + .../rapidjson-1.1.0/doc/internals.md | 365 + .../rapidjson-1.1.0/doc/logo/rapidjson.png | Bin 0 -> 5259 bytes .../rapidjson-1.1.0/doc/logo/rapidjson.svg | 119 + .../doc/misc/DoxygenLayout.xml | 194 + .../rapidjson-1.1.0/doc/misc/doxygenextra.css | 274 + .../rapidjson-1.1.0/doc/misc/footer.html | 11 + .../rapidjson-1.1.0/doc/misc/header.html | 24 + 
.../dependencies/rapidjson-1.1.0/doc/npm.md | 31 + .../rapidjson-1.1.0/doc/performance.md | 26 + .../rapidjson-1.1.0/doc/performance.zh-cn.md | 26 + .../rapidjson-1.1.0/doc/pointer.md | 234 + .../rapidjson-1.1.0/doc/pointer.zh-cn.md | 234 + .../dependencies/rapidjson-1.1.0/doc/sax.md | 486 ++ .../rapidjson-1.1.0/doc/sax.zh-cn.md | 487 ++ .../rapidjson-1.1.0/doc/schema.md | 237 + .../rapidjson-1.1.0/doc/schema.zh-cn.md | 237 + .../rapidjson-1.1.0/doc/stream.md | 426 ++ .../rapidjson-1.1.0/doc/stream.zh-cn.md | 426 ++ .../rapidjson-1.1.0/doc/tutorial.md | 536 ++ .../rapidjson-1.1.0/doc/tutorial.zh-cn.md | 534 ++ .../rapidjson-1.1.0/docker/debian/Dockerfile | 8 + .../rapidjson-1.1.0/example/CMakeLists.txt | 42 + .../example/capitalize/capitalize.cpp | 67 + .../example/condense/condense.cpp | 32 + .../example/filterkey/filterkey.cpp | 135 + .../example/filterkeydom/filterkeydom.cpp | 170 + .../rapidjson-1.1.0/example/jsonx/jsonx.cpp | 207 + .../example/messagereader/messagereader.cpp | 105 + .../example/parsebyparts/parsebyparts.cpp | 173 + .../rapidjson-1.1.0/example/pretty/pretty.cpp | 30 + .../example/prettyauto/prettyauto.cpp | 56 + .../schemavalidator/schemavalidator.cpp | 72 + .../example/serialize/serialize.cpp | 173 + .../example/simpledom/simpledom.cpp | 29 + .../example/simplereader/simplereader.cpp | 42 + .../example/simplewriter/simplewriter.cpp | 36 + .../example/tutorial/tutorial.cpp | 151 + .../include/rapidjson/allocators.h | 271 + .../include/rapidjson/document.h | 2575 +++++++ .../include/rapidjson/encodedstream.h | 299 + .../include/rapidjson/encodings.h | 716 ++ .../include/rapidjson/error/en.h | 74 + .../include/rapidjson/error/error.h | 155 + .../include/rapidjson/filereadstream.h | 99 + .../include/rapidjson/filewritestream.h | 104 + .../rapidjson-1.1.0/include/rapidjson/fwd.h | 151 + .../include/rapidjson/internal/biginteger.h | 290 + .../include/rapidjson/internal/diyfp.h | 258 + .../include/rapidjson/internal/dtoa.h | 245 + 
.../include/rapidjson/internal/ieee754.h | 78 + .../include/rapidjson/internal/itoa.h | 304 + .../include/rapidjson/internal/meta.h | 181 + .../include/rapidjson/internal/pow10.h | 55 + .../include/rapidjson/internal/regex.h | 701 ++ .../include/rapidjson/internal/stack.h | 230 + .../include/rapidjson/internal/strfunc.h | 55 + .../include/rapidjson/internal/strtod.h | 269 + .../include/rapidjson/internal/swap.h | 46 + .../include/rapidjson/istreamwrapper.h | 115 + .../include/rapidjson/memorybuffer.h | 70 + .../include/rapidjson/memorystream.h | 71 + .../include/rapidjson/msinttypes/inttypes.h | 316 + .../include/rapidjson/msinttypes/stdint.h | 300 + .../include/rapidjson/ostreamwrapper.h | 81 + .../include/rapidjson/pointer.h | 1358 ++++ .../include/rapidjson/prettywriter.h | 255 + .../include/rapidjson/rapidjson.h | 615 ++ .../include/rapidjson/reader.h | 1879 +++++ .../include/rapidjson/schema.h | 2006 +++++ .../include/rapidjson/stream.h | 179 + .../include/rapidjson/stringbuffer.h | 117 + .../include/rapidjson/writer.h | 610 ++ .../rapidjson-1.1.0/include_dirs.js | 2 + .../dependencies/rapidjson-1.1.0/library.json | 12 + .../dependencies/rapidjson-1.1.0/license.txt | 57 + .../dependencies/rapidjson-1.1.0/package.json | 24 + .../rapidjson-1.1.0/rapidjson.autopkg | 75 + .../dependencies/rapidjson-1.1.0/readme.md | 160 + .../rapidjson-1.1.0/readme.zh-cn.md | 152 + .../rapidjson-1.1.0/test/CMakeLists.txt | 20 + .../test/perftest/CMakeLists.txt | 26 + .../test/perftest/misctest.cpp | 974 +++ .../test/perftest/perftest.cpp | 24 + .../rapidjson-1.1.0/test/perftest/perftest.h | 182 + .../test/perftest/platformtest.cpp | 166 + .../test/perftest/rapidjsontest.cpp | 441 ++ .../test/perftest/schematest.cpp | 216 + .../test/unittest/CMakeLists.txt | 92 + .../test/unittest/allocatorstest.cpp | 102 + .../test/unittest/bigintegertest.cpp | 133 + .../test/unittest/documenttest.cpp | 652 ++ .../test/unittest/dtoatest.cpp | 98 + .../test/unittest/encodedstreamtest.cpp | 313 + 
.../test/unittest/encodingstest.cpp | 451 ++ .../test/unittest/filestreamtest.cpp | 112 + .../rapidjson-1.1.0/test/unittest/fwdtest.cpp | 227 + .../test/unittest/istreamwrappertest.cpp | 181 + .../test/unittest/itoatest.cpp | 160 + .../test/unittest/jsoncheckertest.cpp | 99 + .../test/unittest/namespacetest.cpp | 70 + .../test/unittest/ostreamwrappertest.cpp | 91 + .../test/unittest/pointertest.cpp | 1524 ++++ .../test/unittest/prettywritertest.cpp | 203 + .../test/unittest/readertest.cpp | 1844 +++++ .../test/unittest/regextest.cpp | 592 ++ .../test/unittest/schematest.cpp | 1313 ++++ .../test/unittest/simdtest.cpp | 215 + .../test/unittest/strfunctest.cpp | 30 + .../test/unittest/stringbuffertest.cpp | 170 + .../test/unittest/strtodtest.cpp | 132 + .../test/unittest/unittest.cpp | 51 + .../rapidjson-1.1.0/test/unittest/unittest.h | 135 + .../test/unittest/valuetest.cpp | 1792 +++++ .../test/unittest/writertest.cpp | 497 ++ .../rapidjson-1.1.0/travis-doxygen.sh | 122 + .../websocketpp-0.7.0/.gitattributes | 18 + .../dependencies/websocketpp-0.7.0/.gitignore | 94 + .../websocketpp-0.7.0/.travis.yml | 21 + .../websocketpp-0.7.0/CMakeLists.txt | 261 + .../dependencies/websocketpp-0.7.0/COPYING | 145 + .../dependencies/websocketpp-0.7.0/Doxyfile | 2355 ++++++ .../dependencies/websocketpp-0.7.0/SConstruct | 281 + .../websocketpp-0.7.0/changelog.md | 342 + .../cmake/CMakeHelpers.cmake | 109 + .../websocketpp-0.7.0/docs/faq.dox | 86 + .../docs/getting_started.dox | 27 + .../websocketpp-0.7.0/docs/handlers.dox | 165 + .../websocketpp-0.7.0/docs/manual.css | 22 + .../websocketpp-0.7.0/docs/manual.dox | 21 + .../docs/simple_broadcast_server.cpp | 52 + .../docs/simple_count_server_thread.cpp | 65 + .../websocketpp-0.7.0/docs/tutorials.dox | 10 + .../associative_storage/CMakeLists.txt | 12 + .../associative_storage.cpp | 88 + .../examples/broadcast_server/CMakeLists.txt | 12 + .../examples/broadcast_server/SConscript | 23 + .../broadcast_server/broadcast_server.cpp | 160 + 
.../examples/debug_client/CMakeLists.txt | 17 + .../examples/debug_client/SConscript | 24 + .../examples/debug_client/debug_client.cpp | 167 + .../examples/debug_server/CMakeLists.txt | 12 + .../examples/debug_server/SConscript | 23 + .../examples/debug_server/debug_server.cpp | 174 + .../examples/dev/CMakeLists.txt | 12 + .../websocketpp-0.7.0/examples/dev/SConscript | 18 + .../websocketpp-0.7.0/examples/dev/main.cpp | 200 + .../examples/echo_client/CMakeLists.txt | 12 + .../examples/echo_client/SConscript | 23 + .../examples/echo_client/echo_client.cpp | 97 + .../examples/echo_server/CMakeLists.txt | 12 + .../examples/echo_server/SConscript | 23 + .../examples/echo_server/echo_handler.hpp | 37 + .../examples/echo_server/echo_server.cpp | 65 + .../examples/echo_server_both/CMakeLists.txt | 18 + .../examples/echo_server_both/SConscript | 24 + .../echo_server_both/echo_server_both.cpp | 87 + .../examples/echo_server_both/server.pem | 58 + .../examples/echo_server_tls/CMakeLists.txt | 18 + .../examples/echo_server_tls/SConscript | 24 + .../examples/echo_server_tls/dh.pem | 8 + .../echo_server_tls/echo_server_tls.cpp | 154 + .../examples/echo_server_tls/server.pem | 55 + .../examples/enriched_storage/CMakeLists.txt | 12 + .../enriched_storage/enriched_storage.cpp | 87 + .../external_io_service/CMakeLists.txt | 12 + .../examples/external_io_service/SConscript | 23 + .../external_io_service.cpp | 85 + .../external_io_service/tcp_echo_server.hpp | 97 + .../examples/handler_switch/CMakeLists.txt | 12 + .../handler_switch/handler_switch.cpp | 42 + .../examples/iostream_server/CMakeLists.txt | 12 + .../examples/iostream_server/SConscript | 23 + .../iostream_server/iostream_server.cpp | 89 + .../examples/print_server/CMakeLists.txt | 12 + .../examples/print_server/SConscript | 23 + .../examples/print_server/print_server.cpp | 24 + .../examples/scratch_client/SConscript | 24 + .../scratch_client/scratch_client.cpp | 270 + .../examples/scratch_server/SConscript | 24 + 
.../scratch_server/scratch_server.cpp | 106 + .../simple_broadcast_server/CMakeLists.txt | 12 + .../simple_broadcast_server.cpp | 51 + .../examples/sip_client/CMakeLists.txt | 12 + .../examples/sip_client/README.txt | 22 + .../examples/sip_client/SConscript | 23 + .../examples/sip_client/sip_client.cpp | 84 + .../subprotocol_server/CMakeLists.txt | 12 + .../examples/subprotocol_server/SConscript | 23 + .../subprotocol_server/subprotocol_server.cpp | 48 + .../examples/telemetry_client/CMakeLists.txt | 12 + .../examples/telemetry_client/SConscript | 23 + .../telemetry_client/telemetry_client.cpp | 156 + .../examples/telemetry_server/CMakeLists.txt | 12 + .../examples/telemetry_server/SConscript | 23 + .../examples/telemetry_server/index.html | 85 + .../telemetry_server/telemetry_server.cpp | 203 + .../examples/testee_client/CMakeLists.txt | 17 + .../examples/testee_client/SConscript | 23 + .../examples/testee_client/testee_client.cpp | 145 + .../examples/testee_server/CMakeLists.txt | 17 + .../examples/testee_server/SConscript | 23 + .../examples/testee_server/testee_server.cpp | 145 + .../examples/utility_client/CMakeLists.txt | 13 + .../examples/utility_client/SConscript | 23 + .../utility_client/utility_client.cpp | 325 + .../dependencies/websocketpp-0.7.0/readme.md | 49 + .../dependencies/websocketpp-0.7.0/roadmap.md | 43 + .../test/connection/CMakeLists.txt | 12 + .../test/connection/SConscript | 25 + .../test/connection/connection.cpp | 530 ++ .../test/connection/connection_tu2.cpp | 62 + .../test/connection/connection_tu2.hpp | 51 + .../test/endpoint/CMakeLists.txt | 17 + .../test/endpoint/SConscript | 24 + .../test/endpoint/endpoint.cpp | 155 + .../test/extension/CMakeLists.txt | 22 + .../test/extension/SConscript | 27 + .../test/extension/extension.cpp | 37 + .../test/extension/permessage_deflate.cpp | 649 ++ .../test/http/CMakeLists.txt | 11 + .../websocketpp-0.7.0/test/http/SConscript | 23 + .../websocketpp-0.7.0/test/http/a.out | Bin 0 -> 120748 bytes 
.../websocketpp-0.7.0/test/http/parser.cpp | 1129 +++ .../test/http/parser_perf.cpp | 141 + .../websocketpp-0.7.0/test/http/perf.out | Bin 0 -> 60208 bytes .../websocketpp-0.7.0/test/http/test.out | Bin 0 -> 1599720 bytes .../test/logger/CMakeLists.txt | 12 + .../websocketpp-0.7.0/test/logger/SConscript | 23 + .../websocketpp-0.7.0/test/logger/basic.cpp | 145 + .../test/message_buffer/CMakeLists.txt | 17 + .../test/message_buffer/SConscript | 27 + .../test/message_buffer/alloc.cpp | 96 + .../test/message_buffer/message.cpp | 72 + .../test/message_buffer/pool.cpp | 156 + .../test/processors/CMakeLists.txt | 59 + .../test/processors/SConscript | 47 + .../extension_permessage_compress.cpp | 198 + .../test/processors/hybi00.cpp | 274 + .../test/processors/hybi07.cpp | 193 + .../test/processors/hybi08.cpp | 197 + .../test/processors/hybi13.cpp | 693 ++ .../test/processors/processor.cpp | 135 + .../test/random/CMakeLists.txt | 17 + .../websocketpp-0.7.0/test/random/SConscript | 27 + .../websocketpp-0.7.0/test/random/none.cpp | 40 + .../test/random/random_device.cpp | 50 + .../test/roles/CMakeLists.txt | 17 + .../websocketpp-0.7.0/test/roles/SConscript | 27 + .../websocketpp-0.7.0/test/roles/client.cpp | 194 + .../websocketpp-0.7.0/test/roles/server.cpp | 247 + .../test/transport/CMakeLists.txt | 71 + .../test/transport/SConscript | 24 + .../test/transport/asio/SConscript | 32 + .../test/transport/asio/base.cpp | 49 + .../test/transport/asio/security.cpp | 69 + .../test/transport/asio/timers.cpp | 187 + .../test/transport/hybi_util.cpp | 98 + .../test/transport/integration.cpp | 617 ++ .../test/transport/iostream/SConscript | 31 + .../test/transport/iostream/base.cpp | 33 + .../test/transport/iostream/connection.cpp | 609 ++ .../test/transport/iostream/endpoint.cpp | 41 + .../test/utility/CMakeLists.txt | 53 + .../websocketpp-0.7.0/test/utility/SConscript | 40 + .../websocketpp-0.7.0/test/utility/close.cpp | 125 + .../websocketpp-0.7.0/test/utility/error.cpp | 54 + 
.../websocketpp-0.7.0/test/utility/frame.cpp | 538 ++ .../websocketpp-0.7.0/test/utility/sha1.cpp | 81 + .../websocketpp-0.7.0/test/utility/uri.cpp | 246 + .../test/utility/utilities.cpp | 73 + .../broadcast_tutorial/broadcast_tutorial.md | 17 + .../tutorials/chat_tutorial/chat_tutorial.md | 13 + .../tutorials/utility_client/step1.cpp | 56 + .../tutorials/utility_client/step2.cpp | 61 + .../tutorials/utility_client/step3.cpp | 81 + .../tutorials/utility_client/step4.cpp | 202 + .../tutorials/utility_client/step5.cpp | 280 + .../tutorials/utility_client/step6.cpp | 335 + .../utility_client/utility_client.md | 862 +++ .../tutorials/utility_server/step1.cpp | 71 + .../tutorials/utility_server/step2.cpp | 82 + .../utility_server/utility_server.md | 181 + .../websocketpp-config.cmake.in | 7 + .../websocketpp-configVersion.cmake.in | 11 + .../websocketpp/CMakeLists.txt | 2 + .../websocketpp/base64/base64.hpp | 178 + .../websocketpp-0.7.0/websocketpp/client.hpp | 33 + .../websocketpp-0.7.0/websocketpp/close.hpp | 342 + .../websocketpp/common/asio.hpp | 131 + .../websocketpp/common/asio_ssl.hpp | 39 + .../websocketpp/common/chrono.hpp | 68 + .../websocketpp/common/connection_hdl.hpp | 52 + .../websocketpp/common/cpp11.hpp | 162 + .../websocketpp/common/functional.hpp | 105 + .../websocketpp/common/md5.hpp | 448 ++ .../websocketpp/common/memory.hpp | 89 + .../websocketpp/common/network.hpp | 106 + .../websocketpp/common/platforms.hpp | 46 + .../websocketpp/common/random.hpp | 82 + .../websocketpp/common/regex.hpp | 59 + .../websocketpp/common/stdint.hpp | 73 + .../websocketpp/common/system_error.hpp | 84 + .../websocketpp/common/thread.hpp | 84 + .../websocketpp/common/time.hpp | 56 + .../websocketpp/common/type_traits.hpp | 65 + .../websocketpp/concurrency/basic.hpp | 46 + .../websocketpp/concurrency/none.hpp | 80 + .../websocketpp/config/asio.hpp | 77 + .../websocketpp/config/asio_client.hpp | 77 + .../websocketpp/config/asio_no_tls.hpp | 73 + 
.../websocketpp/config/asio_no_tls_client.hpp | 73 + .../websocketpp/config/boost_config.hpp | 72 + .../websocketpp/config/core.hpp | 285 + .../websocketpp/config/core_client.hpp | 294 + .../websocketpp/config/debug.hpp | 286 + .../websocketpp/config/debug_asio.hpp | 77 + .../websocketpp/config/debug_asio_no_tls.hpp | 73 + .../websocketpp/config/minimal_client.hpp | 72 + .../websocketpp/config/minimal_server.hpp | 312 + .../websocketpp/connection.hpp | 1651 ++++ .../websocketpp/connection_base.hpp | 38 + .../websocketpp/endpoint.hpp | 700 ++ .../websocketpp/endpoint_base.hpp | 38 + .../websocketpp-0.7.0/websocketpp/error.hpp | 277 + .../websocketpp/extensions/extension.hpp | 102 + .../permessage_deflate/disabled.hpp | 128 + .../extensions/permessage_deflate/enabled.hpp | 752 ++ .../websocketpp-0.7.0/websocketpp/frame.hpp | 861 +++ .../websocketpp/http/constants.hpp | 308 + .../websocketpp/http/impl/parser.hpp | 196 + .../websocketpp/http/impl/request.hpp | 191 + .../websocketpp/http/impl/response.hpp | 266 + .../websocketpp/http/parser.hpp | 619 ++ .../websocketpp/http/request.hpp | 124 + .../websocketpp/http/response.hpp | 188 + .../websocketpp/impl/connection_impl.hpp | 2372 ++++++ .../websocketpp/impl/endpoint_impl.hpp | 269 + .../websocketpp/impl/utilities_impl.hpp | 87 + .../websocketpp/logger/basic.hpp | 199 + .../websocketpp/logger/levels.hpp | 203 + .../websocketpp/logger/stub.hpp | 119 + .../websocketpp/logger/syslog.hpp | 146 + .../websocketpp/message_buffer/alloc.hpp | 105 + .../websocketpp/message_buffer/message.hpp | 340 + .../websocketpp/message_buffer/pool.hpp | 229 + .../websocketpp/processors/base.hpp | 299 + .../websocketpp/processors/hybi00.hpp | 462 ++ .../websocketpp/processors/hybi07.hpp | 78 + .../websocketpp/processors/hybi08.hpp | 83 + .../websocketpp/processors/hybi13.hpp | 1056 +++ .../websocketpp/processors/processor.hpp | 407 + .../websocketpp/random/none.hpp | 60 + .../websocketpp/random/random_device.hpp | 80 + 
.../websocketpp/roles/client_endpoint.hpp | 173 + .../websocketpp/roles/server_endpoint.hpp | 190 + .../websocketpp-0.7.0/websocketpp/server.hpp | 33 + .../websocketpp/sha1/sha1.hpp | 189 + .../websocketpp/transport/asio/base.hpp | 232 + .../websocketpp/transport/asio/connection.hpp | 1204 +++ .../websocketpp/transport/asio/endpoint.hpp | 1147 +++ .../transport/asio/security/base.hpp | 159 + .../transport/asio/security/none.hpp | 370 + .../transport/asio/security/tls.hpp | 484 ++ .../websocketpp/transport/base/connection.hpp | 238 + .../websocketpp/transport/base/endpoint.hpp | 77 + .../websocketpp/transport/debug/base.hpp | 104 + .../transport/debug/connection.hpp | 412 + .../websocketpp/transport/debug/endpoint.hpp | 140 + .../websocketpp/transport/iostream/base.hpp | 133 + .../transport/iostream/connection.hpp | 714 ++ .../transport/iostream/endpoint.hpp | 222 + .../websocketpp/transport/stub/base.hpp | 95 + .../websocketpp/transport/stub/connection.hpp | 286 + .../websocketpp/transport/stub/endpoint.hpp | 140 + .../websocketpp-0.7.0/websocketpp/uri.hpp | 355 + .../websocketpp/utf8_validator.hpp | 154 + .../websocketpp/utilities.hpp | 182 + .../websocketpp-0.7.0/websocketpp/version.hpp | 61 + 1781 files changed, 398495 insertions(+) create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/.gitignore create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/.travis.yml create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/LICENSE create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeCache.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CMakeCCompiler.cmake create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CMakeCXXCompiler.cmake create mode 100755 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CMakeDetermineCompilerABI_C.bin create mode 100755 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CMakeDetermineCompilerABI_CXX.bin create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CMakeSystem.cmake create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CompilerIdC/CMakeCCompilerId.c create mode 100755 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CompilerIdC/a.out create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CompilerIdCXX/CMakeCXXCompilerId.cpp create mode 100755 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CompilerIdCXX/a.out create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeDirectoryInformation.cmake create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeError.log create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeOutput.log create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/Export/lib/cmake/LibCDS/LibCDSConfig-release.cmake create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/Export/lib/cmake/LibCDS/LibCDSConfig.cmake create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/Makefile.cmake create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/Makefile2 create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/TargetDirectories.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/CXX.includecache create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/DependInfo.cmake create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/build.make create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/cmake_clean.cmake create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/cmake_clean_target.cmake create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/depend.internal create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/depend.make create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/flags.make create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/link.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/progress.make create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/dhp.cpp.o create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/dllmain.cpp.o create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/hp.cpp.o create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/init.cpp.o create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/thread_data.cpp.o create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/topology_linux.cpp.o create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/topology_osx.cpp.o create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/CXX.includecache create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/DependInfo.cmake create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/build.make create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/cmake_clean.cmake create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/depend.internal create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/depend.make create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/flags.make create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/link.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/progress.make create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/dhp.cpp.o create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/dllmain.cpp.o create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/hp.cpp.o create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/init.cpp.o create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/thread_data.cpp.o create mode 
100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/topology_hpux.cpp.o create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/topology_linux.cpp.o create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/topology_osx.cpp.o create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/urcu_gp.cpp.o create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/urcu_sh.cpp.o create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cmake.check_cache create mode 100755 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/feature_tests.bin create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/feature_tests.c create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/feature_tests.cxx create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/progress.marks create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CPackConfig.cmake create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CPackSourceConfig.cmake create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/Makefile create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/arch.c create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/bin/libcds-s.a create mode 120000 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/bin/libcds.so create mode 100755 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/bin/libcds.so.2.3.2 create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/cmake_install.cmake create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/VASEx-CI-2/cds-libs create mode 100755 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/VASEx-CI-2/ci-build create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/VASEx-CI-2/ci-env create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/VASEx-CI/cds-libs create mode 100755 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/VASEx-CI/ci-build create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/VASEx-CI/ci-env create mode 100755 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/cmake-gen create mode 100755 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/gen-all create mode 100755 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/travis-ci/install.sh create mode 100755 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/travis-ci/run.sh create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/TargetArch.cmake create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/description.txt create mode 100755 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/post_install_script.sh create mode 100755 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/post_uninstall_script.sh create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/readme.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/atomic.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/backoff_strategy.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/base.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/bit_reversal.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/bitop.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/elimination.h create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/elimination_opt.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/elimination_tls.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/flat_combining.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/flat_combining/defs.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/flat_combining/kernel.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/flat_combining/wait_strategy.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/int_algo.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/split_bitstring.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/backoff.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/bitop.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/clang/defs.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/cxx11_atomic.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/defs.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/feature_tsan.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/amd64/backoff.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/amd64/bitop.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/amd64/cxx11_atomic.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/arm7/backoff.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/arm8/backoff.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/compiler_barriers.h create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/compiler_macro.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/defs.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/ia64/backoff.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/ia64/bitop.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/ia64/cxx11_atomic.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/ppc64/backoff.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/ppc64/bitop.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/sparc/backoff.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/sparc/bitop.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/sparc/cxx11_atomic.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/x86/backoff.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/x86/bitop.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/x86/cxx11_atomic.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/x86/cxx11_atomic32.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/icl/compiler_barriers.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/icl/defs.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/amd64/backoff.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/amd64/bitop.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/amd64/cxx11_atomic.h create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/compiler_barriers.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/defs.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/x86/backoff.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/x86/bitop.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/x86/cxx11_atomic.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/basket_queue.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/bronson_avltree_map_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/cuckoo_map.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/cuckoo_set.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/base.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/bronson_avltree_base.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/cuckoo_base.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/ellen_bintree_base.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/feldman_hashmap_base.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/feldman_hashset_base.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/guarded_ptr_cast.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/iterable_list_base.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/lazy_list_base.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_iterable_kvlist.h create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_iterable_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_lazy_kvlist.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_lazy_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_michael_kvlist.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_michael_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_skip_list_map.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_skip_list_set.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_split_list_set.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_split_list_set_iterable_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_split_list_set_lazy_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_split_list_set_michael_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/michael_list_base.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/michael_map_base.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/michael_set_base.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/skip_list_base.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/split_list_base.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/ellen_bintree_map_dhp.h create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/ellen_bintree_map_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/ellen_bintree_map_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/ellen_bintree_set_dhp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/ellen_bintree_set_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/ellen_bintree_set_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/fcdeque.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/fcpriority_queue.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/fcqueue.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/fcstack.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/feldman_hashmap_dhp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/feldman_hashmap_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/feldman_hashmap_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/feldman_hashset_dhp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/feldman_hashset_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/feldman_hashset_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/bronson_avltree_map_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/ellen_bintree_map.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/ellen_bintree_set.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/feldman_hashmap.h create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/feldman_hashset.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/iterable_kvlist.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/iterable_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/lazy_kvlist.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/lazy_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/michael_kvlist.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/michael_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/skip_list_map.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/skip_list_set.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/iterable_kvlist_dhp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/iterable_kvlist_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/iterable_list_dhp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/iterable_list_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_kvlist_dhp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_kvlist_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_kvlist_nogc.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_kvlist_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_list_dhp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_list_hp.h create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_list_nogc.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_list_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_kvlist_dhp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_kvlist_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_kvlist_nogc.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_kvlist_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_list_dhp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_list_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_list_nogc.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_list_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_map.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_map_nogc.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_map_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_set.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_set_nogc.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_set_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/moir_queue.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/mspriority_queue.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/msqueue.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/optimistic_queue.h 
create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/rwqueue.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/segmented_queue.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_map_dhp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_map_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_map_nogc.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_map_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_set_dhp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_set_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_set_nogc.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_set_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/split_list_map.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/split_list_map_nogc.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/split_list_map_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/split_list_set.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/split_list_set_nogc.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/split_list_set_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/boost_flat_map.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/boost_list.h create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/boost_map.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/boost_slist.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/boost_unordered_map.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/std_hash_map.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/std_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/std_map.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/adapter.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/boost_flat_set.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/boost_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/boost_set.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/boost_slist.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/boost_stable_vector.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/boost_unordered_set.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/boost_vector.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/std_hash_set.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/std_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/std_set.h create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/std_vector.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/treiber_stack.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/vyukov_mpmc_cycle_queue.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/weak_ringbuffer.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/aligned_allocator.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/aligned_type.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/allocator.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/binary_functor_wrapper.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/bit_reverse_counter.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/bitop_generic.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/bounded_array.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/bounded_container.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/defs.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/is_aligned.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/lib.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/make_const_type.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/marked_ptr.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/size_t_cast.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/static_functor.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/throw_exception.h create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/trivial_assign.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/type_padding.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/gc/default_gc.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/gc/details/hp_common.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/gc/details/retired_ptr.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/gc/dhp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/gc/hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/gc/nogc.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/init.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/basket_queue.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/cuckoo_set.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/base.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/ellen_bintree_base.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/feldman_hashset_base.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/iterable_list_base.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/lazy_list_base.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/michael_list_base.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/michael_set_base.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/node_traits.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/raw_ptr_disposer.h create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/single_link_struct.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/skip_list_base.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/split_list_base.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/ellen_bintree_dhp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/ellen_bintree_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/ellen_bintree_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/fcqueue.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/fcstack.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/feldman_hashset_dhp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/feldman_hashset_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/feldman_hashset_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/free_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/free_list_cached.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/free_list_selector.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/free_list_tagged.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/impl/ellen_bintree.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/impl/feldman_hashset.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/impl/iterable_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/impl/lazy_list.h create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/impl/michael_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/impl/skip_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/iterable_list_dhp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/iterable_list_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/lazy_list_dhp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/lazy_list_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/lazy_list_nogc.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/lazy_list_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/michael_list_dhp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/michael_list_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/michael_list_nogc.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/michael_list_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/michael_set.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/michael_set_nogc.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/michael_set_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/moir_queue.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/mspriority_queue.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/msqueue.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/optimistic_queue.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/options.h create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/segmented_queue.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/skip_list_dhp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/skip_list_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/skip_list_nogc.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/skip_list_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/split_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/split_list_nogc.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/split_list_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/adapter.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_avl_set.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_set.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_sg_set.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_slist.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_splay_set.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_treap_set.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_unordered_set.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/resizing_policy.h create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/striping_policy.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/treiber_stack.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/vyukov_mpmc_cycle_queue.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/lock/array.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/lock/spinlock.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/memory/pool_allocator.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/memory/vyukov_queue_pool.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/opt/buffer.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/opt/compare.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/opt/hash.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/opt/options.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/opt/permutation.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/opt/value_cleaner.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/aix/alloc_aligned.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/aix/timer.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/aix/topology.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/alloc_aligned.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/details/fake_topology.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/free_bsd/alloc_aligned.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/free_bsd/timer.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/free_bsd/topology.h create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/hpux/alloc_aligned.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/hpux/timer.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/hpux/topology.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/libc/alloc_aligned.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/linux/alloc_aligned.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/linux/timer.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/linux/topology.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/osx/timer.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/osx/topology.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/posix/alloc_aligned.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/posix/fake_topology.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/posix/thread.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/posix/timer.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/sunos/alloc_aligned.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/sunos/timer.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/sunos/topology.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/thread.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/timer.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/topology.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/win/alloc_aligned.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/win/thread.h create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/win/timer.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/os/win/topology.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/sync/injecting_monitor.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/sync/lock_array.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/sync/monitor.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/sync/pool_monitor.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/sync/spinlock.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/threading/details/_common.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/threading/details/auto_detect.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/threading/details/cxx11.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/threading/details/cxx11_manager.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/threading/details/gcc.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/threading/details/gcc_manager.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/threading/details/msvc.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/threading/details/msvc_manager.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/threading/details/pthread.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/threading/details/pthread_manager.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/threading/details/wintls.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/threading/details/wintls_manager.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/threading/model.h create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/urcu/details/base.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/urcu/details/check_deadlock.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/urcu/details/gp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/urcu/details/gp_decl.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/urcu/details/gpb.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/urcu/details/gpi.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/urcu/details/gpt.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/urcu/details/sh.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/urcu/details/sh_decl.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/urcu/details/sig_buffered.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/urcu/dispose_thread.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/urcu/exempt_ptr.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/urcu/general_buffered.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/urcu/general_instant.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/urcu/general_threaded.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/urcu/options.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/urcu/raw_ptr.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/urcu/signal_buffered.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/user_setup/allocator.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/user_setup/cache_line.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/user_setup/threading.h create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/version.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/change.log create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/conanfile.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/doxygen/cds.doxy create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/doxygen/footer.html create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/doxygen/image/feldman_hashset.png create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/doxygen/images.odp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/doxygen/index.html create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/build-msbuild.cmd create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/build-vc14.cmd create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/cds.sln create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/cds.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/cds.vcxproj.filters create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-deque.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-deque.vcxproj.filters create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-ilist-iterable.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-ilist-lazy.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-ilist-michael.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-iset-feldman.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-iset-michael-iterable.vcxproj create 
mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-iset-michael-lazy.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-iset-michael.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-iset-skip.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-iset-split-iterable.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-iset-split-lazy.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-iset-split-michael.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-list-iterable.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-list-lazy.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-list-michael.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-map-feldman.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-map-michael-iterable.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-map-michael-lazy.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-map-michael.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-map-skip.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-map-split-iterable.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-map-split-lazy.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-map-split-michael.vcxproj create mode 
100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-misc.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-misc.vcxproj.filters create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-pqueue.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-pqueue.vcxproj.filters create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-queue.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-queue.vcxproj.filters create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-set-feldman.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-set-michael-iterable.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-set-michael-lazy.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-set-michael.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-set-skip.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-set-split-iterable.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-set-split-lazy.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-set-split-michael.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-stack.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-stack.vcxproj.filters create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-striped-map-boost.vcxproj create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-striped-map-cuckoo.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-striped-map-std.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-striped-set-boost.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-striped-set-cuckoo.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-striped-set-std.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-tree-bronson.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-tree-ellen.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/gtest-tree-ellen.vcxproj.filters create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-framework.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-framework.vcxproj.filters create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-freelist.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-map-del3.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-map-delodd.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-map-find_string.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-map-insdel-func.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-map-insdel-item-int.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-map-insdel-string.vcxproj create mode 
100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-map-insdelfind.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-map-iter-erase.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-map-minmax.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-pqueue.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-pqueue.vcxproj.filters create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-queue-bounded.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-queue-pop.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-queue-push.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-queue-pushpop.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-queue-random.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-set-del3.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-set-delodd.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-set-insdel_func.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-set-insdel_string.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-set-insdelfind.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-set-iter-erase.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-set-iteration.vcxproj create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-spsc-queue.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-stack.vcxproj create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/projects/Win/vc141/stress-stack.vcxproj.filters create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/readme.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/dllmain.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/init.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/thread_data.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_hpux.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_linux.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_osx.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/urcu_gp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/urcu_sh.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/include/cds_test/check_size.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/include/cds_test/city.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/include/cds_test/citycrc.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/include/cds_test/ext_byteswap.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/include/cds_test/ext_gtest.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/include/cds_test/fc_hevy_value.h create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/include/cds_test/fixture.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/include/cds_test/hash_func.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/include/cds_test/stat_bronson_avltree_out.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/include/cds_test/stat_cuckoo_out.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/include/cds_test/stat_dhp_out.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/include/cds_test/stat_ellenbintree_out.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/include/cds_test/stat_feldman_hashset_out.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/include/cds_test/stat_flat_combining_out.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/include/cds_test/stat_hp_out.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/include/cds_test/stat_iterable_list_out.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/include/cds_test/stat_lazy_list_out.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/include/cds_test/stat_michael_list_out.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/include/cds_test/stat_skiplist_out.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/include/cds_test/stat_splitlist_out.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/include/cds_test/stat_sync_monitor_out.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/include/cds_test/stress_test.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/include/cds_test/thread.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/CMakeLists.txt create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/data/split.pl create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/data/test-debug-gccfarm.conf create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/data/test-debug.conf create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/data/test-express-gccfarm.conf create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/data/test-express-x86.conf create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/data/test-express.conf create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/data/test-gccfarm.conf create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/data/test.conf create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/data/text.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/framework/city.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/framework/city.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/framework/citycrc.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/framework/config.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/framework/ellen_bintree_update_desc_pool.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/framework/ellen_bintree_update_desc_pool.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/framework/stress_test.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/freelist/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/freelist/put_get.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/freelist/put_get_single.cpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/lock/win32_lock.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/main.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/del3/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/del3/map_del3.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/del3/map_del3.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/del3/map_del3_bronsonavltree.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/del3/map_del3_cuckoo.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/del3/map_del3_ellentree.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/del3/map_del3_feldman_hashmap.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/del3/map_del3_michael.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/del3/map_del3_skip.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/del3/map_del3_split.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/delodd/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/delodd/map_delodd.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/delodd/map_delodd.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/delodd/map_delodd_bronsonavltree.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/delodd/map_delodd_cuckoo.cpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/delodd/map_delodd_ellentree.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/delodd/map_delodd_feldman_hashmap.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/delodd/map_delodd_michael.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/delodd/map_delodd_skip.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/delodd/map_delodd_split.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/find_string/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/find_string/map_find_string.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/find_string/map_find_string.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/find_string/map_find_string_bronsonavltree.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/find_string/map_find_string_cuckoo.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/find_string/map_find_string_ellentree.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/find_string/map_find_string_feldman_hashset.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/find_string/map_find_string_michael.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/find_string/map_find_string_skip.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/find_string/map_find_string_split.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/find_string/map_find_string_std.cpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/find_string/map_find_string_striped.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_func/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_func/map_insdel_func.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_func/map_insdel_func.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_func/map_insdel_func_bronsonavltree.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_func/map_insdel_func_cuckoo.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_func/map_insdel_func_ellentree.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_func/map_insdel_func_feldman_hashset.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_func/map_insdel_func_michael.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_func/map_insdel_func_skip.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_func/map_insdel_func_split.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_func/map_insdel_func_striped.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_item_int/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_item_int/map_insdel_item_int.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_item_int/map_insdel_item_int.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_item_int/map_insdel_item_int_bronsonavltree.cpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_item_int/map_insdel_item_int_cuckoo.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_item_int/map_insdel_item_int_ellentree.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_item_int/map_insdel_item_int_feldman_hashset.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_item_int/map_insdel_item_int_michael.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_item_int/map_insdel_item_int_skip.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_item_int/map_insdel_item_int_split.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_item_int/map_insdel_item_int_striped.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_string/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_string/map_insdel_string.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_string/map_insdel_string.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_string/map_insdel_string_bronsonavltree.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_string/map_insdel_string_cuckoo.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_string/map_insdel_string_ellentree.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_string/map_insdel_string_feldman_hashset.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_string/map_insdel_string_michael.cpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_string/map_insdel_string_skip.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_string/map_insdel_string_split.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_string/map_insdel_string_std.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdel_string/map_insdel_string_striped.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdelfind/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdelfind/map_insdelfind.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdelfind/map_insdelfind.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdelfind/map_insdelfind_bronsonavltree.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdelfind/map_insdelfind_cuckoo.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdelfind/map_insdelfind_ellentree_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdelfind/map_insdelfind_ellentree_rcu.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdelfind/map_insdelfind_feldman_hashset_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdelfind/map_insdelfind_feldman_hashset_rcu.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdelfind/map_insdelfind_michael_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdelfind/map_insdelfind_michael_rcu.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdelfind/map_insdelfind_skip_hp.cpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdelfind/map_insdelfind_skip_rcu.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdelfind/map_insdelfind_split_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdelfind/map_insdelfind_split_rcu.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdelfind/map_insdelfind_std.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/insdelfind/map_insdelfind_striped.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/iter_erase/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/iter_erase/map_iter_erase.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/iter_erase/map_iter_erase.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/iter_erase/map_iter_erase_feldman_hashmap.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/iter_erase/map_iter_erase_michael.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/iter_erase/map_iter_erase_split.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/map_type.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/map_type_bronson_avltree.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/map_type_cuckoo.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/map_type_ellen_bintree.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/map_type_feldman_hashmap.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/map_type_iterable_list.h create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/map_type_lazy_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/map_type_michael.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/map_type_michael_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/map_type_skip_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/map_type_split_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/map_type_std.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/map_type_striped.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/minmax/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/minmax/map_minmax.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/minmax/map_minmax.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/minmax/map_minmax_bronsonavltree.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/minmax/map_minmax_ellentree.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/map/minmax/map_minmax_skip.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/pqueue/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/pqueue/item.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/pqueue/pop.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/pqueue/pqueue_type.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/pqueue/push.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/pqueue/push_pop.cpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/queue/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/queue/bounded_queue_fulness.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/queue/intrusive_push_pop.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/queue/intrusive_queue_type.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/queue/pop.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/queue/print_stat.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/queue/push.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/queue/push_pop.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/queue/queue_type.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/queue/random.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/queue/spsc_buffer.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/queue/spsc_queue.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/queue/std_queue.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/del3/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/del3/set_del3.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/del3/set_del3.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/del3/set_del3_cuckoo.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/del3/set_del3_ellentree.cpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/del3/set_del3_feldman_hashset.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/del3/set_del3_michael.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/del3/set_del3_skip.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/del3/set_del3_split.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/delodd/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/delodd/set_delodd.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/delodd/set_delodd.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/delodd/set_delodd_cuckoo.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/delodd/set_delodd_ellentree.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/delodd/set_delodd_feldman_hashset.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/delodd/set_delodd_michael.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/delodd/set_delodd_skip.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/delodd/set_delodd_split.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_find/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_find/set_insdelfind.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_find/set_insdelfind.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_find/set_insdelfind_cuckoo.cpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_find/set_insdelfind_ellentree_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_find/set_insdelfind_ellentree_rcu.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_find/set_insdelfind_feldman_hashset_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_find/set_insdelfind_feldman_hashset_rcu.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_find/set_insdelfind_michael_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_find/set_insdelfind_michael_rcu.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_find/set_insdelfind_skip_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_find/set_insdelfind_skip_rcu.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_find/set_insdelfind_split_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_find/set_insdelfind_split_rcu.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_find/set_insdelfind_std.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_find/set_insdelfind_striped.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_func/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_func/set_insdel_func.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_func/set_insdel_func.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_func/set_insdel_func_cuckoo.cpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_func/set_insdel_func_ellentree.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_func/set_insdel_func_feldman_hashset.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_func/set_insdel_func_michael.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_func/set_insdel_func_skip.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_func/set_insdel_func_split.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_func/set_insdel_func_striped.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_string/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_string/set_insdel_string.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_string/set_insdel_string.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_string/set_insdel_string_cuckoo.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_string/set_insdel_string_ellentree.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_string/set_insdel_string_feldman_hashset.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_string/set_insdel_string_michael.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_string/set_insdel_string_skip.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_string/set_insdel_string_split.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_string/set_insdel_string_std.cpp create 
mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/insdel_string/set_insdel_string_striped.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/iter_erase/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/iter_erase/set_iter_erase.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/iter_erase/set_iter_erase.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/iter_erase/set_iter_erase_feldman_hashset.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/iter_erase/set_iter_erase_michael.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/iter_erase/set_iter_erase_split.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/iteration/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/iteration/set_iteration.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/iteration/set_iteration.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/iteration/set_iteration_feldman_hashset.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/iteration/set_iteration_michael.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/iteration/set_iteration_split.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/set_type.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/set_type_cuckoo.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/set_type_ellen_bintree.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/set_type_feldman_hashset.h create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/set_type_iterable_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/set_type_lazy_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/set_type_michael.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/set_type_michael_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/set_type_skip_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/set_type_split_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/set_type_std.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/set/set_type_striped.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/stack/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/stack/intrusive_push_pop.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/stack/intrusive_push_pop_fcstack.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/stack/intrusive_stack_push_pop.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/stack/intrusive_stack_type.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/stack/push.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/stack/push_pop.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/stress/stack/stack_type.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/deque/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/deque/fcdeque.cpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-list/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-list/intrusive_iterable_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-list/intrusive_iterable_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-list/intrusive_lazy_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-list/intrusive_lazy_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-list/intrusive_lazy_nogc.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-list/intrusive_lazy_rcu_gpb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-list/intrusive_lazy_rcu_gpi.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-list/intrusive_lazy_rcu_gpt.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-list/intrusive_lazy_rcu_shb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-list/intrusive_michael_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-list/intrusive_michael_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-list/intrusive_michael_nogc.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-list/intrusive_michael_rcu_gpb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-list/intrusive_michael_rcu_gpi.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-list/intrusive_michael_rcu_gpt.cpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-list/intrusive_michael_rcu_shb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-list/test_intrusive_iterable_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-list/test_intrusive_iterable_list_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-list/test_intrusive_lazy_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-list/test_intrusive_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-list/test_intrusive_list_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-list/test_intrusive_list_nogc.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-list/test_intrusive_list_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-list/test_intrusive_michael_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_feldman_hashset_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_feldman_hashset_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_feldman_hashset_rcu_gpb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_feldman_hashset_rcu_gpi.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_feldman_hashset_rcu_gpt.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_feldman_hashset_rcu_shb.cpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_michael_iterable_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_michael_iterable_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_michael_lazy_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_michael_lazy_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_michael_lazy_nogc.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_michael_lazy_rcu_gpb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_michael_lazy_rcu_gpi.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_michael_lazy_rcu_gpt.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_michael_lazy_rcu_shb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_michael_michael_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_michael_michael_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_michael_michael_nogc.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_michael_michael_rcu_gpb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_michael_michael_rcu_gpi.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_michael_michael_rcu_gpt.cpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_michael_michael_rcu_shb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_skiplist_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_skiplist_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_skiplist_nogc.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_skiplist_rcu_gpb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_skiplist_rcu_gpi.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_skiplist_rcu_gpt.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_skiplist_rcu_shb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_split_iterable_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_split_iterable_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_split_lazy_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_split_lazy_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_split_lazy_nogc.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_split_lazy_rcu_gpb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_split_lazy_rcu_gpi.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_split_lazy_rcu_gpt.cpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_split_lazy_rcu_shb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_split_michael_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_split_michael_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_split_michael_nogc.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_split_michael_rcu_gpb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_split_michael_rcu_gpi.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_split_michael_rcu_gpt.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/intrusive_split_michael_rcu_shb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/test_intrusive_feldman_hashset.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/test_intrusive_feldman_hashset_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/test_intrusive_feldman_hashset_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/test_intrusive_michael_iterable.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/test_intrusive_michael_iterable_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/test_intrusive_michael_lazy_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/test_intrusive_michael_michael_rcu.h create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/test_intrusive_set.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/test_intrusive_set_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/test_intrusive_set_nogc.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/test_intrusive_set_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/test_intrusive_skiplist_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/test_intrusive_split_iterable_set.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/test_intrusive_split_iterable_set_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/test_intrusive_split_lazy_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/intrusive-set/test_intrusive_split_michael_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/iterable_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/iterable_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/kv_iterable_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/kv_iterable_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/kv_lazy_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/kv_lazy_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/kv_lazy_nogc.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/kv_lazy_rcu_gpb.cpp 
create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/kv_lazy_rcu_gpi.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/kv_lazy_rcu_gpt.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/kv_lazy_rcu_shb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/kv_michael_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/kv_michael_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/kv_michael_nogc.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/kv_michael_rcu_gpb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/kv_michael_rcu_gpi.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/kv_michael_rcu_gpt.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/kv_michael_rcu_shb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/lazy_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/lazy_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/lazy_nogc.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/lazy_rcu_gpb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/lazy_rcu_gpi.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/lazy_rcu_gpt.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/lazy_rcu_shb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/michael_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/michael_hp.cpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/michael_nogc.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/michael_rcu_gpb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/michael_rcu_gpi.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/michael_rcu_gpt.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/michael_rcu_shb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/test_iterable_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/test_iterable_list_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/test_kv_iterable_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/test_kv_iterable_list_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/test_kv_lazy_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/test_kv_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/test_kv_list_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/test_kv_list_nogc.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/test_kv_list_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/test_kv_michael_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/test_lazy_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/test_list.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/test_list_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/test_list_nogc.h create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/test_list_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/list/test_michael_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/main.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/feldman_hashmap_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/feldman_hashmap_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/feldman_hashset_rcu_gpb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/feldman_hashset_rcu_gpi.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/feldman_hashset_rcu_gpt.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/feldman_hashset_rcu_shb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/michael_iterable_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/michael_iterable_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/michael_lazy_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/michael_lazy_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/michael_lazy_nogc.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/michael_lazy_rcu_gpb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/michael_lazy_rcu_gpi.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/michael_lazy_rcu_gpt.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/michael_lazy_rcu_shb.cpp create mode 
100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/michael_michael_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/michael_michael_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/michael_michael_nogc.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/michael_michael_rcu_gpb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/michael_michael_rcu_gpi.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/michael_michael_rcu_gpt.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/michael_michael_rcu_shb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/skiplist_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/skiplist_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/skiplist_hp_inl.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/skiplist_nogc.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/skiplist_rcu_gpb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/skiplist_rcu_gpi.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/skiplist_rcu_gpt.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/skiplist_rcu_shb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/split_iterable_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/split_iterable_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/split_lazy_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/split_lazy_hp.cpp create mode 
100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/split_lazy_nogc.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/split_lazy_rcu_gpb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/split_lazy_rcu_gpi.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/split_lazy_rcu_gpt.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/split_lazy_rcu_shb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/split_michael_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/split_michael_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/split_michael_nogc.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/split_michael_rcu_gpb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/split_michael_rcu_gpi.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/split_michael_rcu_gpt.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/split_michael_rcu_shb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/test_feldman_hashmap.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/test_feldman_hashmap_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/test_feldman_hashmap_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/test_map.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/test_map_data.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/test_map_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/test_map_nogc.h create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/test_map_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/test_michael_iterable.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/test_michael_iterable_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/test_michael_lazy_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/test_michael_michael_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/test_skiplist_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/test_skiplist_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/test_split_lazy_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/map/test_split_michael_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/misc/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/misc/asan_errors.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/misc/bit_reversal.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/misc/bitop.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/misc/cxx11_atomic_class.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/misc/cxx11_atomic_func.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/misc/cxx11_convert_memory_order.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/misc/find_option.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/misc/hash_tuple.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/misc/permutation_generator.cpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/misc/split_bitstring.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/pqueue/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/pqueue/fcpqueue_boost_stable_vector.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/pqueue/fcpqueue_deque.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/pqueue/fcpqueue_vector.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/pqueue/intrusive_mspqueue.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/pqueue/mspqueue.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/pqueue/test_data.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/pqueue/test_fcpqueue.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/basket_queue_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/basket_queue_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/fcqueue.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/intrusive_basket_queue_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/intrusive_basket_queue_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/intrusive_fcqueue.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/intrusive_moirqueue_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/intrusive_moirqueue_hp.cpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/intrusive_msqueue_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/intrusive_msqueue_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/intrusive_optqueue_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/intrusive_optqueue_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/intrusive_segmented_queue_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/intrusive_segmented_queue_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/intrusive_vyukov_queue.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/moirqueue_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/moirqueue_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/msqueue_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/msqueue_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/optimistic_queue_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/optimistic_queue_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/rwqueue.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/segmented_queue_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/segmented_queue_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/test_bounded_queue.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/test_generic_queue.h create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/test_intrusive_bounded_queue.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/test_intrusive_msqueue.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/test_intrusive_segmented_queue.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/test_segmented_queue.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/vyukov_mpmc_queue.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/queue/weak_ringbuffer.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/feldman_hashset_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/feldman_hashset_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/feldman_hashset_rcu_gpb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/feldman_hashset_rcu_gpi.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/feldman_hashset_rcu_gpt.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/feldman_hashset_rcu_shb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/michael_iterable_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/michael_iterable_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/michael_lazy_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/michael_lazy_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/michael_lazy_nogc.cpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/michael_lazy_rcu_gpb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/michael_lazy_rcu_gpi.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/michael_lazy_rcu_gpt.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/michael_lazy_rcu_shb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/michael_michael_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/michael_michael_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/michael_michael_nogc.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/michael_michael_rcu_gpb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/michael_michael_rcu_gpi.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/michael_michael_rcu_gpt.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/michael_michael_rcu_shb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/skiplist_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/skiplist_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/skiplist_hp_inl.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/skiplist_nogc.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/skiplist_rcu_gpb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/skiplist_rcu_gpi.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/skiplist_rcu_gpt.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/skiplist_rcu_shb.cpp 
create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/split_iterable_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/split_iterable_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/split_lazy_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/split_lazy_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/split_lazy_nogc.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/split_lazy_rcu_gpb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/split_lazy_rcu_gpi.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/split_lazy_rcu_gpt.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/split_lazy_rcu_shb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/split_michael_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/split_michael_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/split_michael_nogc.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/split_michael_rcu_gpb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/split_michael_rcu_gpi.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/split_michael_rcu_gpt.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/split_michael_rcu_shb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/test_feldman_hashset.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/test_feldman_hashset_hp.h create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/test_feldman_hashset_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/test_michael_iterable.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/test_michael_iterable_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/test_michael_lazy_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/test_michael_michael_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/test_ordered_set_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/test_set.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/test_set_data.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/test_set_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/test_set_nogc.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/test_set_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/test_skiplist_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/test_split_iterable.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/test_split_iterable_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/test_split_lazy_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/set/test_split_michael_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/stack/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/stack/fcstack.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/stack/intrusive_fcstack.cpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/stack/intrusive_treiber_stack_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/stack/intrusive_treiber_stack_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/stack/test_intrusive_treiber_stack.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/stack/test_treiber_stack.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/stack/treiber_stack_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/stack/treiber_stack_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-map/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-map/cuckoo_map.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-map/map_boost_flat_map.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-map/map_boost_list.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-map/map_boost_map.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-map/map_boost_slist.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-map/map_boost_unordered_map.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-map/map_std_list.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-map/map_std_map.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-map/map_std_unordered_map.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-map/test_map.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-map/test_map_data.h create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-map/test_striped_map.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-set/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-set/cuckoo_set.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-set/intrusive_boost_avl_set.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-set/intrusive_boost_list.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-set/intrusive_boost_set.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-set/intrusive_boost_sg_set.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-set/intrusive_boost_slist.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-set/intrusive_boost_splay_set.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-set/intrusive_boost_treap_set.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-set/intrusive_boost_unordered_set.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-set/intrusive_cuckoo_set.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-set/set_boost_flatset.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-set/set_boost_list.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-set/set_boost_set.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-set/set_boost_slist.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-set/set_boost_stable_vector.cpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-set/set_boost_unordered_set.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-set/set_boost_vector.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-set/set_std_list.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-set/set_std_set.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-set/set_std_unordered_set.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-set/set_std_vector.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-set/test_intrusive_set.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-set/test_intrusive_striped_set.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-set/test_set.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/striped-set/test_striped_set.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/bronson_avltree_map_ptr_rcu_gpb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/bronson_avltree_map_ptr_rcu_gpi.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/bronson_avltree_map_ptr_rcu_gpt.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/bronson_avltree_map_ptr_rcu_shb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/bronson_avltree_map_rcu_gpb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/bronson_avltree_map_rcu_gpi.cpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/bronson_avltree_map_rcu_gpt.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/bronson_avltree_map_rcu_shb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/ellen_bintree_map_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/ellen_bintree_map_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/ellen_bintree_map_rcu_gpb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/ellen_bintree_map_rcu_gpi.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/ellen_bintree_map_rcu_gpt.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/ellen_bintree_map_rcu_shb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/ellen_bintree_set_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/ellen_bintree_set_hp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/ellen_bintree_set_rcu_gpb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/ellen_bintree_set_rcu_gpi.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/ellen_bintree_set_rcu_gpt.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/ellen_bintree_set_rcu_shb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/ellen_bintree_update_desc_pool.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/intrusive_ellenbintree_dhp.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/intrusive_ellenbintree_hp.cpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/intrusive_ellenbintree_rcu_gpb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/intrusive_ellenbintree_rcu_gpi.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/intrusive_ellenbintree_rcu_gpt.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/intrusive_ellenbintree_rcu_shb.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/test_bronson_avltree_map.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/test_bronson_avltree_map_ptr.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/test_ellen_bintree_map_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/test_ellen_bintree_set_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/test_ellen_bintree_update_desc_pool.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/test_intrusive_ellen_bintree_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/test_intrusive_tree.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/test_intrusive_tree_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/test_intrusive_tree_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/test_tree_map.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/test_tree_map_data.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/test_tree_map_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/test_tree_map_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/test_tree_set.h create mode 
100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/test_tree_set_hp.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/test/unit/tree/test_tree_set_rcu.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/thanks create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/tools/brush_cds.pl create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/tools/make_distrib.bat create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/tools/make_distrib.pl create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/tools/make_docs.bat create mode 100644 gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/tools/tsan-suppression create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/.gitattributes create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/.gitignore create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/.gitmodules create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/.travis.yml create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/CHANGELOG.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/CMakeModules/FindGTestSrc.cmake create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/RapidJSON.pc.in create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/RapidJSONConfig.cmake.in create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/RapidJSONConfigVersion.cmake.in create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/appveyor.yml create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/data/glossary.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/data/menu.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/data/readme.txt create 
mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/data/sample.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/data/webapp.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/data/widget.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/draft-04/schema create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/encodings/utf16be.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/encodings/utf16bebom.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/encodings/utf16le.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/encodings/utf16lebom.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/encodings/utf32be.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/encodings/utf32bebom.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/encodings/utf32le.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/encodings/utf32lebom.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/encodings/utf8.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/encodings/utf8bom.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail1.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail10.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail11.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail12.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail13.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail14.json create mode 100644 
gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail15.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail16.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail17.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail18.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail19.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail2.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail20.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail21.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail22.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail23.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail24.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail25.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail26.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail27.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail28.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail29.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail3.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail30.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail31.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail32.json create mode 100644 
gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail33.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail4.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail5.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail6.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail7.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail8.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/fail9.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/pass1.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/pass2.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/pass3.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonchecker/readme.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/.gitignore create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/.travis.yml create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/LICENSE create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/README.md create mode 100755 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/bin/jsonschema_suite create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/remotes/.DS_Store create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/remotes/folder/folderInteger.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/remotes/integer.json create mode 100644 
gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/remotes/subSchemas.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/.DS_Store create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft3/additionalItems.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft3/additionalProperties.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft3/default.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft3/dependencies.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft3/disallow.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft3/divisibleBy.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft3/enum.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft3/extends.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft3/items.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft3/maxItems.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft3/maxLength.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft3/maximum.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft3/minItems.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft3/minLength.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft3/minimum.json create mode 100644 
gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft3/optional/bignum.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft3/optional/format.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft3/optional/jsregex.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft3/optional/zeroTerminatedFloats.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft3/pattern.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft3/patternProperties.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft3/properties.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft3/ref.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft3/refRemote.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft3/required.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft3/type.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft3/uniqueItems.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/.DS_Store create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/additionalItems.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/additionalProperties.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/allOf.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/anyOf.json create mode 100644 
gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/default.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/definitions.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/dependencies.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/enum.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/items.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/maxItems.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/maxLength.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/maxProperties.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/maximum.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/minItems.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/minLength.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/minProperties.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/minimum.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/multipleOf.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/not.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/oneOf.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/optional/bignum.json create mode 100644 
gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/optional/format.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/optional/zeroTerminatedFloats.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/pattern.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/patternProperties.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/properties.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/ref.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/refRemote.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/required.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/type.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tests/draft4/uniqueItems.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/jsonschema/tox.ini create mode 100755 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/types/booleans.json create mode 100755 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/types/floats.json create mode 100755 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/types/guids.json create mode 100755 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/types/integers.json create mode 100755 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/types/mixed.json create mode 100755 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/types/nulls.json create mode 100755 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/types/paragraphs.json create mode 100644 
gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/bin/types/readme.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/Doxyfile.in create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/Doxyfile.zh-cn.in create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/diagram/architecture.dot create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/diagram/architecture.png create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/diagram/insituparsing.dot create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/diagram/insituparsing.png create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/diagram/iterative-parser-states-diagram.dot create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/diagram/iterative-parser-states-diagram.png create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/diagram/makefile create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/diagram/move1.dot create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/diagram/move1.png create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/diagram/move2.dot create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/diagram/move2.png create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/diagram/move3.dot create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/diagram/move3.png create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/diagram/normalparsing.dot create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/diagram/normalparsing.png create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/diagram/simpledom.dot create mode 100644 
gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/diagram/simpledom.png create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/diagram/tutorial.dot create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/diagram/tutorial.png create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/diagram/utilityclass.dot create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/diagram/utilityclass.png create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/dom.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/dom.zh-cn.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/encoding.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/encoding.zh-cn.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/faq.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/faq.zh-cn.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/features.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/features.zh-cn.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/internals.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/logo/rapidjson.png create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/logo/rapidjson.svg create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/misc/DoxygenLayout.xml create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/misc/doxygenextra.css create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/misc/footer.html create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/misc/header.html create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/npm.md create mode 100644 
gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/performance.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/performance.zh-cn.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/pointer.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/pointer.zh-cn.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/sax.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/sax.zh-cn.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/schema.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/schema.zh-cn.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/stream.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/stream.zh-cn.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/tutorial.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/doc/tutorial.zh-cn.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/docker/debian/Dockerfile create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/example/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/example/capitalize/capitalize.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/example/condense/condense.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/example/filterkey/filterkey.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/example/filterkeydom/filterkeydom.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/example/jsonx/jsonx.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/example/messagereader/messagereader.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/example/parsebyparts/parsebyparts.cpp create 
mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/example/pretty/pretty.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/example/prettyauto/prettyauto.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/example/schemavalidator/schemavalidator.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/example/serialize/serialize.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/example/simpledom/simpledom.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/example/simplereader/simplereader.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/example/simplewriter/simplewriter.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/example/tutorial/tutorial.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/allocators.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/document.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/encodedstream.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/encodings.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/error/en.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/error/error.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/filereadstream.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/filewritestream.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/fwd.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/internal/biginteger.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/internal/diyfp.h create 
mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/internal/dtoa.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/internal/ieee754.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/internal/itoa.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/internal/meta.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/internal/pow10.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/internal/regex.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/internal/stack.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/internal/strfunc.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/internal/strtod.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/internal/swap.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/istreamwrapper.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/memorybuffer.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/memorystream.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/msinttypes/inttypes.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/msinttypes/stdint.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/ostreamwrapper.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/pointer.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/prettywriter.h create mode 100644 
gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/rapidjson.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/reader.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/schema.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/stream.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/stringbuffer.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include/rapidjson/writer.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/include_dirs.js create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/library.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/license.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/package.json create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/rapidjson.autopkg create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/readme.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/readme.zh-cn.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/perftest/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/perftest/misctest.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/perftest/perftest.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/perftest/perftest.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/perftest/platformtest.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/perftest/rapidjsontest.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/perftest/schematest.cpp create mode 
100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/unittest/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/unittest/allocatorstest.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/unittest/bigintegertest.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/unittest/documenttest.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/unittest/dtoatest.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/unittest/encodedstreamtest.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/unittest/encodingstest.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/unittest/filestreamtest.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/unittest/fwdtest.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/unittest/istreamwrappertest.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/unittest/itoatest.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/unittest/jsoncheckertest.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/unittest/namespacetest.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/unittest/ostreamwrappertest.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/unittest/pointertest.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/unittest/prettywritertest.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/unittest/readertest.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/unittest/regextest.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/unittest/schematest.cpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/unittest/simdtest.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/unittest/strfunctest.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/unittest/stringbuffertest.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/unittest/strtodtest.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/unittest/unittest.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/unittest/unittest.h create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/unittest/valuetest.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/test/unittest/writertest.cpp create mode 100755 gdax-orderbook-hpp/demo/dependencies/rapidjson-1.1.0/travis-doxygen.sh create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/.gitattributes create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/.gitignore create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/.travis.yml create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/COPYING create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/Doxyfile create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/SConstruct create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/changelog.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/cmake/CMakeHelpers.cmake create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/docs/faq.dox create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/docs/getting_started.dox create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/docs/handlers.dox create mode 100644 
gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/docs/manual.css create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/docs/manual.dox create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/docs/simple_broadcast_server.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/docs/simple_count_server_thread.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/docs/tutorials.dox create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/associative_storage/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/associative_storage/associative_storage.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/broadcast_server/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/broadcast_server/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/broadcast_server/broadcast_server.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/debug_client/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/debug_client/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/debug_client/debug_client.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/debug_server/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/debug_server/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/debug_server/debug_server.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/dev/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/dev/SConscript create mode 100644 
gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/dev/main.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/echo_client/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/echo_client/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/echo_client/echo_client.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/echo_server/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/echo_server/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/echo_server/echo_handler.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/echo_server/echo_server.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/echo_server_both/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/echo_server_both/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/echo_server_both/echo_server_both.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/echo_server_both/server.pem create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/echo_server_tls/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/echo_server_tls/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/echo_server_tls/dh.pem create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/echo_server_tls/echo_server_tls.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/echo_server_tls/server.pem create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/enriched_storage/CMakeLists.txt create mode 100644 
gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/enriched_storage/enriched_storage.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/external_io_service/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/external_io_service/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/external_io_service/external_io_service.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/external_io_service/tcp_echo_server.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/handler_switch/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/handler_switch/handler_switch.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/iostream_server/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/iostream_server/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/iostream_server/iostream_server.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/print_server/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/print_server/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/print_server/print_server.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/scratch_client/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/scratch_client/scratch_client.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/scratch_server/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/scratch_server/scratch_server.cpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/simple_broadcast_server/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/simple_broadcast_server/simple_broadcast_server.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/sip_client/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/sip_client/README.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/sip_client/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/sip_client/sip_client.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/subprotocol_server/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/subprotocol_server/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/subprotocol_server/subprotocol_server.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/telemetry_client/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/telemetry_client/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/telemetry_client/telemetry_client.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/telemetry_server/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/telemetry_server/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/telemetry_server/index.html create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/telemetry_server/telemetry_server.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/testee_client/CMakeLists.txt create mode 100644 
gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/testee_client/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/testee_client/testee_client.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/testee_server/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/testee_server/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/testee_server/testee_server.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/utility_client/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/utility_client/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/examples/utility_client/utility_client.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/readme.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/roadmap.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/connection/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/connection/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/connection/connection.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/connection/connection_tu2.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/connection/connection_tu2.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/endpoint/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/endpoint/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/endpoint/endpoint.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/extension/CMakeLists.txt create mode 
100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/extension/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/extension/extension.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/extension/permessage_deflate.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/http/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/http/SConscript create mode 100755 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/http/a.out create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/http/parser.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/http/parser_perf.cpp create mode 100755 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/http/perf.out create mode 100755 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/http/test.out create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/logger/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/logger/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/logger/basic.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/message_buffer/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/message_buffer/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/message_buffer/alloc.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/message_buffer/message.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/message_buffer/pool.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/processors/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/processors/SConscript 
create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/processors/extension_permessage_compress.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/processors/hybi00.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/processors/hybi07.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/processors/hybi08.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/processors/hybi13.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/processors/processor.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/random/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/random/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/random/none.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/random/random_device.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/roles/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/roles/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/roles/client.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/roles/server.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/transport/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/transport/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/transport/asio/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/transport/asio/base.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/transport/asio/security.cpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/transport/asio/timers.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/transport/hybi_util.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/transport/integration.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/transport/iostream/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/transport/iostream/base.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/transport/iostream/connection.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/transport/iostream/endpoint.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/utility/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/utility/SConscript create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/utility/close.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/utility/error.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/utility/frame.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/utility/sha1.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/utility/uri.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/test/utility/utilities.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/tutorials/broadcast_tutorial/broadcast_tutorial.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/tutorials/chat_tutorial/chat_tutorial.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/tutorials/utility_client/step1.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/tutorials/utility_client/step2.cpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/tutorials/utility_client/step3.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/tutorials/utility_client/step4.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/tutorials/utility_client/step5.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/tutorials/utility_client/step6.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/tutorials/utility_client/utility_client.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/tutorials/utility_server/step1.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/tutorials/utility_server/step2.cpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/tutorials/utility_server/utility_server.md create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp-config.cmake.in create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp-configVersion.cmake.in create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/CMakeLists.txt create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/base64/base64.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/client.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/close.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/common/asio.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/common/asio_ssl.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/common/chrono.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/common/connection_hdl.hpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/common/cpp11.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/common/functional.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/common/md5.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/common/memory.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/common/network.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/common/platforms.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/common/random.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/common/regex.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/common/stdint.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/common/system_error.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/common/thread.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/common/time.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/common/type_traits.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/concurrency/basic.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/concurrency/none.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/config/asio.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/config/asio_client.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/config/asio_no_tls.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/config/asio_no_tls_client.hpp 
create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/config/boost_config.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/config/core.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/config/core_client.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/config/debug.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/config/debug_asio.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/config/debug_asio_no_tls.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/config/minimal_client.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/config/minimal_server.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/connection.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/connection_base.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/endpoint.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/endpoint_base.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/error.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/extensions/extension.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/extensions/permessage_deflate/disabled.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/extensions/permessage_deflate/enabled.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/frame.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/http/constants.hpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/http/impl/parser.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/http/impl/request.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/http/impl/response.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/http/parser.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/http/request.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/http/response.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/impl/connection_impl.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/impl/endpoint_impl.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/impl/utilities_impl.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/logger/basic.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/logger/levels.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/logger/stub.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/logger/syslog.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/message_buffer/alloc.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/message_buffer/message.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/message_buffer/pool.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/processors/base.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/processors/hybi00.hpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/processors/hybi07.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/processors/hybi08.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/processors/hybi13.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/processors/processor.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/random/none.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/random/random_device.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/roles/client_endpoint.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/roles/server_endpoint.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/server.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/sha1/sha1.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/transport/asio/base.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/transport/asio/connection.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/transport/asio/endpoint.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/transport/asio/security/base.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/transport/asio/security/none.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/transport/asio/security/tls.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/transport/base/connection.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/transport/base/endpoint.hpp create mode 100644 
gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/transport/debug/base.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/transport/debug/connection.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/transport/debug/endpoint.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/transport/iostream/base.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/transport/iostream/connection.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/transport/iostream/endpoint.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/transport/stub/base.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/transport/stub/connection.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/transport/stub/endpoint.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/uri.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/utf8_validator.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/utilities.hpp create mode 100644 gdax-orderbook-hpp/demo/dependencies/websocketpp-0.7.0/websocketpp/version.hpp diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/.gitignore b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/.gitignore new file mode 100644 index 0000000..75c0f0d --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/.gitignore @@ -0,0 +1,20 @@ +/doc +/sandbox +*.o +*.d +/bin +/obj +/projects/Win/vc14/cds.sdf +/projects/Win/vc14/cds.v14.suo +/projects/Win/vc14/*.user +/projects/Win/vc14/*.opensdf +/projects/Win/vc14/.vs/ +/projects/Win/vc141/.vs/ +/projects/Win/vc141/*.user +*.log +/.project +/projects/Win/vc14/*.opendb 
+/test/stress/data/dictionary.txt +/projects/Win/vc14/cds.VC.db +/.cproject +/.settings/ diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/.travis.yml b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/.travis.yml new file mode 100644 index 0000000..9102859 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/.travis.yml @@ -0,0 +1,265 @@ +language: cpp + +install: + - chmod +x ./build/CI/travis-ci/install.sh + - ./build/CI/travis-ci/install.sh + +script: + - chmod +x ./build/CI/travis-ci/run.sh + - ./build/CI/travis-ci/run.sh + +linux: &linux_gcc + os: linux + addons: + apt: + sources: + - ubuntu-toolchain-r-test + packages: + - g++-6 + compiler: + - g++-6 + before_install: + - eval "CC=gcc-6 && CXX=g++-6" + + +linux: &linux_clang + os: linux + addons: + apt: + sources: + - ubuntu-toolchain-r-test + - llvm-toolchain-trusty-4.0 + packages: + - clang-4.0 + compiler: + - clang-4.0 + before_install: + - eval "CC=clang-4.0 && CXX=clang++-4.0" + + +osx: &osx + os: osx + osx_image: xcode8.3 + compiler: + - clang + before_install: + - eval "CC=clang && CXX=clang++" + + +matrix: + include: +## BUILD_TYPE=Release CXX_COMPILER=g++-6 + - <<: *linux_gcc + env: TARGET=unit-deque BUILD_TYPE=Release + - <<: *linux_gcc + env: TARGET=unit-ilist BUILD_TYPE=Release + - <<: *linux_gcc + env: TARGET=unit-list BUILD_TYPE=Release + - <<: *linux_gcc + env: TARGET=unit-map BUILD_TYPE=Release + - <<: *linux_gcc + env: TARGET=unit-misc BUILD_TYPE=Release + - <<: *linux_gcc + env: TARGET=unit-pqueue BUILD_TYPE=Release + - <<: *linux_gcc + env: TARGET=unit-queue BUILD_TYPE=Release + - <<: *linux_gcc + env: TARGET=unit-iset-feldman BUILD_TYPE=Release + - <<: *linux_gcc + env: TARGET=unit-iset-michael-michael BUILD_TYPE=Release + - <<: *linux_gcc + env: TARGET=unit-iset-michael-lazy BUILD_TYPE=Release + - <<: *linux_gcc + env: TARGET=unit-iset-michael-iterable BUILD_TYPE=Release + - <<: *linux_gcc + env: TARGET=unit-iset-skip BUILD_TYPE=Release + - <<: *linux_gcc + 
env: TARGET=unit-iset-split-michael BUILD_TYPE=Release + - <<: *linux_gcc + env: TARGET=unit-iset-split-lazy BUILD_TYPE=Release + - <<: *linux_gcc + env: TARGET=unit-iset-split-iterable BUILD_TYPE=Release + - <<: *linux_gcc + env: TARGET=unit-set BUILD_TYPE=Release + - <<: *linux_gcc + env: TARGET=unit-striped-set BUILD_TYPE=Release + - <<: *linux_gcc + env: TARGET=unit-stack BUILD_TYPE=Release + - <<: *linux_gcc + env: TARGET=unit-tree BUILD_TYPE=Release + +## BUILD_TYPE=Debug CXX_COMPILER=g++-6 + - <<: *linux_gcc + env: TARGET=unit-deque BUILD_TYPE=Debug + - <<: *linux_gcc + env: TARGET=unit-ilist BUILD_TYPE=Debug + - <<: *linux_gcc + env: TARGET=unit-list BUILD_TYPE=Debug + - <<: *linux_gcc + env: TARGET=unit-map BUILD_TYPE=Debug + - <<: *linux_gcc + env: TARGET=unit-misc BUILD_TYPE=Debug + - <<: *linux_gcc + env: TARGET=unit-pqueue BUILD_TYPE=Debug + - <<: *linux_gcc + env: TARGET=unit-queue BUILD_TYPE=Debug + - <<: *linux_gcc + env: TARGET=unit-iset BUILD_TYPE=Debug + - <<: *linux_gcc + env: TARGET=unit-set BUILD_TYPE=Debug + - <<: *linux_gcc + env: TARGET=unit-striped-set BUILD_TYPE=Debug + - <<: *linux_gcc + env: TARGET=unit-stack BUILD_TYPE=Debug + - <<: *linux_gcc + env: TARGET=unit-tree BUILD_TYPE=Debug + +## BUILD_TYPE=Release CXX_COMPILER=clang-4.0 + - <<: *linux_clang + env: TARGET=unit-deque BUILD_TYPE=Release + - <<: *linux_clang + env: TARGET=unit-ilist BUILD_TYPE=Release + - <<: *linux_clang + env: TARGET=unit-list BUILD_TYPE=Release + - <<: *linux_clang + env: TARGET=unit-misc BUILD_TYPE=Release LINKER_FLAGS=-latomic + - <<: *linux_clang + env: TARGET=unit-pqueue BUILD_TYPE=Release + - <<: *linux_clang + env: TARGET=unit-queue BUILD_TYPE=Release + - <<: *linux_clang + env: TARGET=unit-set-feldman BUILD_TYPE=Release + - <<: *linux_clang + env: TARGET=unit-set-michael-michael BUILD_TYPE=Release + - <<: *linux_clang + env: TARGET=unit-set-michael-iterable BUILD_TYPE=Release + - <<: *linux_clang + env: TARGET=unit-set-michael-lazy BUILD_TYPE=Release + 
- <<: *linux_clang + env: TARGET=unit-set-skip BUILD_TYPE=Release + - <<: *linux_clang + env: TARGET=unit-set-split-iterable BUILD_TYPE=Release + - <<: *linux_clang + env: TARGET=unit-set-split-michael BUILD_TYPE=Release + - <<: *linux_clang + env: TARGET=unit-set-split-lazy BUILD_TYPE=Release + - <<: *linux_clang + env: TARGET=unit-striped-set BUILD_TYPE=Release + - <<: *linux_clang + env: TARGET=unit-stack BUILD_TYPE=Release +# FIXME: building too long. Travis-ci will stop building. +# - BUILD_TYPE=Release TARGET=unit-map +# - BUILD_TYPE=Release TARGET=unit-iset +# - BUILD_TYPE=Release TARGET=unit-tree + +## BUILD_TYPE=Debug CXX_COMPILER=clang-4.0 + - <<: *linux_clang + env: TARGET=unit-deque BUILD_TYPE=Debug + - <<: *linux_clang + env: TARGET=unit-ilist BUILD_TYPE=Debug + - <<: *linux_clang + env: TARGET=unit-list BUILD_TYPE=Debug + - <<: *linux_clang + env: TARGET=unit-map BUILD_TYPE=Debug + - <<: *linux_clang + env: TARGET=unit-misc BUILD_TYPE=Debug LINKER_FLAGS=-latomic + - <<: *linux_clang + env: TARGET=unit-pqueue BUILD_TYPE=Debug + - <<: *linux_clang + env: TARGET=unit-queue BUILD_TYPE=Debug + - <<: *linux_clang + env: TARGET=unit-iset BUILD_TYPE=Debug + - <<: *linux_clang + env: TARGET=unit-set BUILD_TYPE=Debug + - <<: *linux_clang + env: TARGET=unit-striped-set BUILD_TYPE=Debug + - <<: *linux_clang + env: TARGET=unit-stack BUILD_TYPE=Debug + - <<: *linux_clang + env: TARGET=unit-tree BUILD_TYPE=Debug + +# RELEASE + - <<: *osx + env: BUILD_TYPE=Release TARGET=unit-deque + - <<: *osx + env: BUILD_TYPE=Release TARGET=unit-ilist + - <<: *osx + env: BUILD_TYPE=Release TARGET=unit-list + - <<: *osx + env: BUILD_TYPE=Release TARGET=unit-misc + - <<: *osx + env: BUILD_TYPE=Release TARGET=unit-pqueue + - <<: *osx + env: BUILD_TYPE=Release TARGET=unit-queue + - <<: *osx + env: BUILD_TYPE=Release TARGET=unit-iset + - <<: *osx + env: BUILD_TYPE=Release TARGET=unit-iset-feldman + - <<: *osx + env: BUILD_TYPE=Release TARGET=unit-iset-michael-michael + - <<: *osx + 
env: BUILD_TYPE=Release TARGET=unit-iset-michael-lazy + - <<: *osx + env: BUILD_TYPE=Release TARGET=unit-iset-michael-iterable + - <<: *osx + env: BUILD_TYPE=Release TARGET=unit-iset-skip + - <<: *osx + env: BUILD_TYPE=Release TARGET=unit-iset-split-michael + - <<: *osx + env: BUILD_TYPE=Release TARGET=unit-iset-split-lazy + - <<: *osx + env: BUILD_TYPE=Release TARGET=unit-iset-split-iterable + - <<: *osx + env: BUILD_TYPE=Release TARGET=unit-striped-set + - <<: *osx + env: BUILD_TYPE=Release TARGET=unit-stack +# FIXME: building too long. Travis-ci will stop building. +# - <<: *osx +# env: BUILD_TYPE=Release TARGET=unit-map +# - <<: *osx +# env: BUILD_TYPE=Release TARGET=unit-set +# - <<: *osx +# env: BUILD_TYPE=Release TARGET=unit-tree + +# DEBUG + - <<: *osx + env: BUILD_TYPE=Debug TARGET=unit-deque + - <<: *osx + env: BUILD_TYPE=Debug TARGET=unit-ilist + - <<: *osx + env: BUILD_TYPE=Debug TARGET=unit-list + - <<: *osx + env: BUILD_TYPE=Debug TARGET=unit-map + - <<: *osx + env: BUILD_TYPE=Debug TARGET=unit-misc + - <<: *osx + env: BUILD_TYPE=Debug TARGET=unit-pqueue + - <<: *osx + env: BUILD_TYPE=Debug TARGET=unit-queue + - <<: *osx + env: BUILD_TYPE=Debug TARGET=unit-iset + - <<: *osx + env: BUILD_TYPE=Debug TARGET=unit-iset-feldman + - <<: *osx + env: BUILD_TYPE=Debug TARGET=unit-iset-michael-michael + - <<: *osx + env: BUILD_TYPE=Debug TARGET=unit-iset-michael-lazy + - <<: *osx + env: BUILD_TYPE=Debug TARGET=unit-iset-michael-iterable + - <<: *osx + env: BUILD_TYPE=Debug TARGET=unit-iset-skip + - <<: *osx + env: BUILD_TYPE=Debug TARGET=unit-iset-split-michael + - <<: *osx + env: BUILD_TYPE=Debug TARGET=unit-iset-split-lazy + - <<: *osx + env: BUILD_TYPE=Debug TARGET=unit-iset-split-iterable + - <<: *osx + env: BUILD_TYPE=Debug TARGET=unit-set + - <<: *osx + env: BUILD_TYPE=Debug TARGET=unit-striped-set + - <<: *osx + env: BUILD_TYPE=Debug TARGET=unit-stack + - <<: *osx + env: BUILD_TYPE=Debug TARGET=unit-tree + diff --git 
a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/CMakeLists.txt b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/CMakeLists.txt new file mode 100644 index 0000000..a198e26 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/CMakeLists.txt @@ -0,0 +1,255 @@ +cmake_minimum_required(VERSION 2.8.12) + +cmake_policy(SET CMP0016 NEW) +if(POLICY CMP0042) + cmake_policy(SET CMP0042 NEW) +endif() + +set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/build/cmake ${CMAKE_MODULE_PATH}) +include(TargetArch) + +project(cds) + +set(PROJECT_VERSION 2.3.2) + +# Options +option(WITH_TESTS "Build unit tests" OFF) +option(WITH_TESTS_COVERAGE "Analyze test coverage using gcov (only for gcc)" OFF) +option(WITH_BOOST_ATOMIC "Use boost atomics (only for boost >= 1.54)" OFF) +option(WITH_ASAN "Build ASan+UBSan instrumented code" OFF) +option(WITH_TSAN "Build TSan instrumented code" OFF) +option(ENABLE_UNIT_TEST "Enable unit test" ON) +option(ENABLE_STRESS_TEST "Enable stress test" ON) +set(CMAKE_TARGET_ARCHITECTURE "" CACHE string "Target build architecture") + +find_package(Threads) + +if(TARGET boost::system AND TARGET boost::thread) + link_libraries(boost::system boost::thread) +else() + find_package(Boost 1.50 COMPONENTS system thread) +endif() + +include_directories(${Boost_INCLUDE_DIRS}) + +if(NOT CMAKE_TARGET_ARCHITECTURE) + target_architecture(CMAKE_TARGET_ARCHITECTURE) +endif() + +if(APPLE) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D_DARWIN_C_SOURCE") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D_DARWIN_C_SOURCE") +endif() + +if(WITH_BOOST_ATOMIC) + if(Boost_FOUND) + if(${Boost_MINOR_VERSION} GREATER 53) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DCDS_USE_BOOST_ATOMIC") + message(STATUS "Boost version allows using of boost.atomic: activated") + endif() + else() + if(TARGET boost::atomic) + link_libraries(boost::atomic) + endif() + endif() +endif(WITH_BOOST_ATOMIC) + +if(WITH_ASAN) + if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang") + 
set(CMAKE_CXX_FLAGS_DEBUG "-D_DEBUG") + set(CMAKE_CXX_FLAGS_RELEASE "-DNDEBUG") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O1 -fPIC -fsanitize=address,undefined -g -DCDS_ADDRESS_SANITIZER_ENABLED -fno-omit-frame-pointer -fno-optimize-sibling-calls") + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -O1 -fsanitize=address,undefined -g -DCDS_ASAN_ENABLED -fno-omit-frame-pointer -fno-optimize-sibling-calls") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=address,undefined -pie") + else() + message(WARNING "Compiler does not support AddressSanitizer") + endif() +endif(WITH_ASAN) + +if(WITH_TSAN) + if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang") + set(CMAKE_CXX_FLAGS_DEBUG "-D_DEBUG") + set(CMAKE_CXX_FLAGS_RELEASE "-DNDEBUG") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O1 -fPIC -fsanitize=thread -g -DCDS_THREAD_SANITIZER_ENABLED -fno-omit-frame-pointer") + set(CMAKE_C_FLAGS "${CMAKE_CXX_FLAGS} -O1 -fPIC -fsanitize=thread -g -DCDS_THREAD_SANITIZER_ENABLED -fno-omit-frame-pointer") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fsanitize=thread -pie") + else() + message(WARNING "Compiler does not support ThreadSanitizer") + endif() +endif(WITH_TSAN) + +if(WITH_TESTS_COVERAGE) + if(CMAKE_COMPILER_IS_GNUCXX) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fprofile-arcs -ftest-coverage") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fprofile-arcs -ftest-coverage") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -fprofile-arcs -ftest-coverage") + message(STATUS "Test coverage analysis: activated") + else() + message(WARNING "Compiler is not GNU gcc! 
Test coverage couldn't be analyzed") + endif() +endif(WITH_TESTS_COVERAGE) + +set(CDS_SHARED_LIBRARY ${PROJECT_NAME}) +set(CDS_STATIC_LIBRARY ${PROJECT_NAME}-s) + +set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE) +set(CMAKE_INCLUDE_CURRENT_DIR ON) + +if(CDS_BIN_DIR) + set(EXECUTABLE_OUTPUT_PATH ${CDS_BIN_DIR}) + set(LIBRARY_OUTPUT_PATH ${CDS_BIN_DIR}) +else() + set(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/bin) + set(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/bin) +endif() +message(STATUS "Binary output path: ${EXECUTABLE_OUTPUT_PATH}") + +if(NOT CMAKE_BUILD_TYPE) + set(CMAKE_BUILD_TYPE Debug CACHE STRING "Default build type to Debug" FORCE) +endif() + +if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID STREQUAL "Clang") + string(REGEX MATCHALL "-std=[^ ]+" cxx_std_found ${CMAKE_CXX_FLAGS} " dummy@rg") + if(cxx_std_found) + message("C++ std: ${cxx_std_found}") + else() + list(APPEND LIBCDS_PUBLIC_CXX_FLAGS "-std=c++11") + message("C++ std: -std=c++11 (default)") + endif() + + list(APPEND LIBCDS_PRIVATE_CXX_FLAGS "-Wall" "-Wextra" "-pedantic") + + if(CMAKE_TARGET_ARCHITECTURE STREQUAL "x86_64") + list(APPEND LIBCDS_PUBLIC_CXX_FLAGS "-mcx16") + set(LIB_SUFFIX "64") + + # GCC-7: 128-bit atomics support is implemented via libatomic on amd64 + # see https://gcc.gnu.org/ml/gcc/2017-01/msg00167.html + # Maybe, it will be changed in future + if(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_GREATER "7.0.0" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS "8.0.0") + set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -latomic") + endif() + endif() + + if(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS "8.0.0") + # gcc 4.8 - 6: disable noise -Wunused-local-typedefs + list(APPEND LIBCDS_PRIVATE_CXX_FLAGS "-Wno-unused-local-typedefs") + endif() +endif() + +set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -D_DEBUG") + +message("Build type -- ${CMAKE_BUILD_TYPE}") +message("Compiler version: ${CMAKE_CXX_COMPILER_ID} 
${CMAKE_CXX_COMPILER_VERSION}") +message("System: ${CMAKE_SYSTEM_NAME} version: ${CMAKE_SYSTEM_VERSION}") +message("Target architecture: ${CMAKE_TARGET_ARCHITECTURE}") +if(CMAKE_BUILD_TYPE STREQUAL "Debug") + message("Compiler flags: ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_DEBUG} ${LIBCDS_PUBLIC_CXX_FLAGS} ${LIBCDS_PRIVATE_CXX_FLAGS}") +else() + message("Compiler flags: ${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_RELEASE} ${LIBCDS_PUBLIC_CXX_FLAGS} ${LIBCDS_PRIVATE_CXX_FLAGS}") +endif() +message("Exe flags: ${CMAKE_EXE_LINKER_FLAGS}") + +if(Boost_FOUND) + message("Boost: ${Boost_LIB_VERSION} in ${Boost_INCLUDE_DIRS}, lib ${Boost_LIBRARY_DIRS}") +else() + message("Boost: Using CMake-fied boost") +endif() + +if(CMAKE_SYSTEM_NAME STREQUAL "AIX") + set(CMAKE_CXX_ARCHIVE_CREATE " -q -c ${CMAKE_STATIC_LINKER_FLAGS} -o ") +endif() + +# Component names for separate distribution in rpms, debs etc. +set(LIBRARIES_COMPONENT lib) +set(HEADERS_COMPONENT devel) + +set(SOURCES src/init.cpp + src/hp.cpp + src/dhp.cpp + src/urcu_gp.cpp + src/urcu_sh.cpp + src/thread_data.cpp + src/topology_hpux.cpp + src/topology_linux.cpp + src/topology_osx.cpp + src/dllmain.cpp) + +add_library(${CDS_SHARED_LIBRARY} SHARED ${SOURCES}) +set_target_properties(${CDS_SHARED_LIBRARY} PROPERTIES VERSION ${PROJECT_VERSION} + DEBUG_POSTFIX "_d") +add_library(${CDS_STATIC_LIBRARY} STATIC ${SOURCES}) +set_target_properties(${CDS_STATIC_LIBRARY} PROPERTIES DEBUG_POSTFIX "_d") +target_link_libraries(${CDS_SHARED_LIBRARY} PRIVATE ${CMAKE_THREAD_LIBS_INIT}) +target_link_libraries(${CDS_STATIC_LIBRARY} PRIVATE ${CMAKE_THREAD_LIBS_INIT}) +target_include_directories(${CDS_SHARED_LIBRARY} INTERFACE "$" + $) +target_include_directories(${CDS_STATIC_LIBRARY} INTERFACE "$" + $) +target_compile_options(${CDS_SHARED_LIBRARY} PUBLIC "${LIBCDS_PUBLIC_CXX_FLAGS}") +target_compile_options(${CDS_STATIC_LIBRARY} PUBLIC "${LIBCDS_PUBLIC_CXX_FLAGS}") +target_compile_options(${CDS_SHARED_LIBRARY} PRIVATE "${LIBCDS_PRIVATE_CXX_FLAGS}") 
+target_compile_options(${CDS_STATIC_LIBRARY} PRIVATE "${LIBCDS_PRIVATE_CXX_FLAGS}") + +install(TARGETS ${CDS_SHARED_LIBRARY} EXPORT LibCDSConfig LIBRARY DESTINATION lib${LIB_SUFFIX} COMPONENT ${LIBRARIES_COMPONENT} NAMELINK_SKIP) +install(TARGETS ${CDS_SHARED_LIBRARY} EXPORT LibCDSConfig LIBRARY DESTINATION lib${LIB_SUFFIX} COMPONENT ${HEADERS_COMPONENT} NAMELINK_ONLY) +install(TARGETS ${CDS_STATIC_LIBRARY} EXPORT LibCDSConfig DESTINATION lib${LIB_SUFFIX} COMPONENT ${LIBRARIES_COMPONENT}) +install(EXPORT LibCDSConfig FILE LibCDSConfig.cmake NAMESPACE LibCDS:: DESTINATION lib/cmake/LibCDS) +install(DIRECTORY ${PROJECT_SOURCE_DIR}/cds DESTINATION include COMPONENT ${HEADERS_COMPONENT}) + +if(WITH_TESTS) + enable_testing() + add_subdirectory(${PROJECT_SOURCE_DIR}/test) + message(STATUS "Build tests: activated") +endif(WITH_TESTS) + +### FOR PACKAGING in RPM, TGZ, DEB, NSYS...############################################################################### +set(CPACK_PACKAGE_VERSION ${PROJECT_VERSION}) +set(CPACK_PACKAGE_NAME ${PROJECT_NAME}) +set(CPACK_PACKAGE_CONTACT "Max Khizhinsky ") +set(CPACK_PACKAGE_RELEASE 1) +set(CPACK_PACKAGE_INSTALL_DIRECTORY "cds") +set(CPACK_PACKAGE_DESCRIPTION_FILE "${PROJECT_SOURCE_DIR}/build/cmake/description.txt") +set(CPACK_PACKAGE_DESCRIPTION_SUMMARY "Library of concurrent data structures") +set(CPACK_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}-${CPACK_PACKAGE_RELEASE}") +set(DEPLOY_PACKAGE_FILE_NAME "${CPACK_PACKAGE_FILE_NAME}") + +# TGZ specific +set(CPACK_ARCHIVE_COMPONENT_INSTALL ON) + +# RPM specific +set(CPACK_RPM_COMPONENT_INSTALL ON) +set(CPACK_RPM_PACKAGE_RELEASE ${CPACK_PACKAGE_RELEASE}) +set(CPACK_RPM_POST_INSTALL_SCRIPT_FILE "${PROJECT_SOURCE_DIR}/build/cmake/post_install_script.sh") +set(CPACK_RPM_POST_UNINSTALL_SCRIPT_FILE "${PROJECT_SOURCE_DIR}/build/cmake/post_uninstall_script.sh") +set(CPACK_RPM_PACKAGE_URL https://github.com/khizmax/libcds) +set(CPACK_RPM_PACKAGE_LICENSE GPL) 
+set(CPACK_RPM_PACKAGE_GROUP "System Environment/Base") +set(CPACK_RPM_PACKAGE_REQUIRES "boost >= 1.50") +set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION ${CPACK_PACKAGING_INSTALL_PREFIX}) +set(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION /usr/local) +set(CPACK_RPM_devel_PACKAGE_REQUIRES "boost >= 1.50, cds-lib = ${PROJECT_VERSION}") + +# DEB specific +set(CPACK_DEB_COMPONENT_INSTALL ON) +set(CPACK_DEBIAN_PACKAGE_DEPENDS "boost (>= 1.50)") +set(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://github.com/khizmax/libcds") +set(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA "${PROJECT_SOURCE_DIR}/build/cmake/post_install_script.sh;;${PROJECT_SOURCE_DIR}/build/cmake/post_uninstall_script.sh;") + +# NSYS specific +set(CPACK_NSIS_PACKAGE_NAME "${CPACK_PACKAGE_NAME}") +set(CPACK_NSIS_DISPLAY_NAME "${CPACK_PACKAGE_NAME}") +set(CPACK_NSIS_CONTACT ${CPACK_PACKAGE_CONTACT}) +set(CPACK_NSIS_ENABLE_UNINSTALL_BEFORE_INSTALL ON) +set(CPACK_NSIS_MODIFY_PATH ON) + +# Components grouping for Mac OS X and Windows installers +set(CPACK_COMPONENT_${LIBRARIES_COMPONENT}_GROUP "Runtime") +set(CPACK_COMPONENT_${HEADERS_COMPONENT}_GROUP "Development") +set(CPACK_COMPONENT_${LIBRARIES_COMPONENT}_DISPLAY_NAME "Libraries") +set(CPACK_COMPONENT_${HEADERS_COMPONENT}_DISPLAY_NAME "C++ Headers") +set(CPACK_COMPONENT_${HEADERS_COMPONENT}_DEPENDS ${LIBRARIES_COMPONENT}) +set(CPACK_COMPONENT_GROUP_DEVELOPMENT_DESCRIPTION "All of the tools you'll ever need to develop lock-free oriented software with libcds") +set(CPACK_COMPONENT_GROUP_RUNTIME_DESCRIPTION "Only libcds library for runtime") + +include(CPack) diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/LICENSE b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/LICENSE new file mode 100644 index 0000000..2959fe8 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/LICENSE @@ -0,0 +1,24 @@ +Copyright (c) 2014, Maxim Khizhinsky +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeCache.txt b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeCache.txt new file mode 100644 index 0000000..a1410d4 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeCache.txt @@ -0,0 +1,501 @@ +# This is the CMakeCache file. +# For build in directory: /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release +# It was generated by CMake: /usr/bin/cmake +# You can edit this file to change values found and used by cmake. +# If you do not want to change any of the values, simply exit the editor. 
+# If you do want to change a value, simply edit, save, and exit the editor. +# The syntax for the file is as follows: +# KEY:TYPE=VALUE +# KEY is the name of a variable in the cache. +# TYPE is a hint to GUIs for the type of VALUE, DO NOT EDIT TYPE!. +# VALUE is the current value for the KEY. + +######################## +# EXTERNAL cache entries +######################## + +//The threading library used by boost-thread +BOOST_THREAD_LIBRARY:FILEPATH=/usr/lib/x86_64-linux-gnu/libpthread.so + +//Boost atomic library (debug) +Boost_ATOMIC_LIBRARY_DEBUG:FILEPATH=/usr/lib/x86_64-linux-gnu/libboost_atomic.so + +//Boost atomic library (release) +Boost_ATOMIC_LIBRARY_RELEASE:FILEPATH=/usr/lib/x86_64-linux-gnu/libboost_atomic.so + +//Boost chrono library (debug) +Boost_CHRONO_LIBRARY_DEBUG:FILEPATH=/usr/lib/x86_64-linux-gnu/libboost_chrono.so + +//Boost chrono library (release) +Boost_CHRONO_LIBRARY_RELEASE:FILEPATH=/usr/lib/x86_64-linux-gnu/libboost_chrono.so + +//Boost date_time library (debug) +Boost_DATE_TIME_LIBRARY_DEBUG:FILEPATH=/usr/lib/x86_64-linux-gnu/libboost_date_time.so + +//Boost date_time library (release) +Boost_DATE_TIME_LIBRARY_RELEASE:FILEPATH=/usr/lib/x86_64-linux-gnu/libboost_date_time.so + +//The directory containing a CMake configuration file for Boost. +Boost_DIR:PATH=Boost_DIR-NOTFOUND + +//Path to a file. 
+Boost_INCLUDE_DIR:PATH=/usr/include + +//Boost library directory DEBUG +Boost_LIBRARY_DIR_DEBUG:PATH=/usr/lib/x86_64-linux-gnu + +//Boost library directory RELEASE +Boost_LIBRARY_DIR_RELEASE:PATH=/usr/lib/x86_64-linux-gnu + +//Boost system library (debug) +Boost_SYSTEM_LIBRARY_DEBUG:FILEPATH=/usr/lib/x86_64-linux-gnu/libboost_system.so + +//Boost system library (release) +Boost_SYSTEM_LIBRARY_RELEASE:FILEPATH=/usr/lib/x86_64-linux-gnu/libboost_system.so + +//Boost thread library (debug) +Boost_THREAD_LIBRARY_DEBUG:FILEPATH=/usr/lib/x86_64-linux-gnu/libboost_thread.so + +//Boost thread library (release) +Boost_THREAD_LIBRARY_RELEASE:FILEPATH=/usr/lib/x86_64-linux-gnu/libboost_thread.so + +//Path to a program. +CMAKE_AR:FILEPATH=/usr/bin/ar + +//Choose the type of build, options are: None(CMAKE_CXX_FLAGS or +// CMAKE_C_FLAGS used) Debug Release RelWithDebInfo MinSizeRel. +CMAKE_BUILD_TYPE:STRING=RELEASE + +//Enable/Disable color output during build. +CMAKE_COLOR_MAKEFILE:BOOL=ON + +//CXX compiler +CMAKE_CXX_COMPILER:FILEPATH=/usr/bin/c++ + +//Flags used by the compiler during all build types. +CMAKE_CXX_FLAGS:STRING= + +//Flags used by the compiler during debug builds. +CMAKE_CXX_FLAGS_DEBUG:STRING=-g + +//Flags used by the compiler during release builds for minimum +// size. +CMAKE_CXX_FLAGS_MINSIZEREL:STRING=-Os -DNDEBUG + +//Flags used by the compiler during release builds. +CMAKE_CXX_FLAGS_RELEASE:STRING=-O3 -DNDEBUG + +//Flags used by the compiler during release builds with debug info. +CMAKE_CXX_FLAGS_RELWITHDEBINFO:STRING=-O2 -g -DNDEBUG + +//C compiler +CMAKE_C_COMPILER:FILEPATH=/usr/bin/cc + +//Flags used by the compiler during all build types. +CMAKE_C_FLAGS:STRING= + +//Flags used by the compiler during debug builds. +CMAKE_C_FLAGS_DEBUG:STRING=-g + +//Flags used by the compiler during release builds for minimum +// size. +CMAKE_C_FLAGS_MINSIZEREL:STRING=-Os -DNDEBUG + +//Flags used by the compiler during release builds. 
+CMAKE_C_FLAGS_RELEASE:STRING=-O3 -DNDEBUG + +//Flags used by the compiler during release builds with debug info. +CMAKE_C_FLAGS_RELWITHDEBINFO:STRING=-O2 -g -DNDEBUG + +//Flags used by the linker. +CMAKE_EXE_LINKER_FLAGS:STRING= + +//Flags used by the linker during debug builds. +CMAKE_EXE_LINKER_FLAGS_DEBUG:STRING= + +//Flags used by the linker during release minsize builds. +CMAKE_EXE_LINKER_FLAGS_MINSIZEREL:STRING= + +//Flags used by the linker during release builds. +CMAKE_EXE_LINKER_FLAGS_RELEASE:STRING= + +//Flags used by the linker during Release with Debug Info builds. +CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO:STRING= + +//Enable/Disable output of compile commands during generation. +CMAKE_EXPORT_COMPILE_COMMANDS:BOOL=OFF + +//Install path prefix, prepended onto install directories. +CMAKE_INSTALL_PREFIX:PATH=/usr/local + +//Path to a program. +CMAKE_LINKER:FILEPATH=/usr/bin/ld + +//Path to a program. +CMAKE_MAKE_PROGRAM:FILEPATH=/usr/bin/make + +//Flags used by the linker during the creation of modules. +CMAKE_MODULE_LINKER_FLAGS:STRING= + +//Flags used by the linker during debug builds. +CMAKE_MODULE_LINKER_FLAGS_DEBUG:STRING= + +//Flags used by the linker during release minsize builds. +CMAKE_MODULE_LINKER_FLAGS_MINSIZEREL:STRING= + +//Flags used by the linker during release builds. +CMAKE_MODULE_LINKER_FLAGS_RELEASE:STRING= + +//Flags used by the linker during Release with Debug Info builds. +CMAKE_MODULE_LINKER_FLAGS_RELWITHDEBINFO:STRING= + +//Path to a program. +CMAKE_NM:FILEPATH=/usr/bin/nm + +//Path to a program. +CMAKE_OBJCOPY:FILEPATH=/usr/bin/objcopy + +//Path to a program. +CMAKE_OBJDUMP:FILEPATH=/usr/bin/objdump + +//Value Computed by CMake +CMAKE_PROJECT_NAME:STATIC=cds + +//Path to a program. +CMAKE_RANLIB:FILEPATH=/usr/bin/ranlib + +//Flags used by the linker during the creation of dll's. +CMAKE_SHARED_LINKER_FLAGS:STRING= + +//Flags used by the linker during debug builds. 
+CMAKE_SHARED_LINKER_FLAGS_DEBUG:STRING= + +//Flags used by the linker during release minsize builds. +CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL:STRING= + +//Flags used by the linker during release builds. +CMAKE_SHARED_LINKER_FLAGS_RELEASE:STRING= + +//Flags used by the linker during Release with Debug Info builds. +CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO:STRING= + +//If set, runtime paths are not added when installing shared libraries, +// but are added when building. +CMAKE_SKIP_INSTALL_RPATH:BOOL=NO + +//If set, runtime paths are not added when using shared libraries. +CMAKE_SKIP_RPATH:BOOL=NO + +//Flags used by the linker during the creation of static libraries. +CMAKE_STATIC_LINKER_FLAGS:STRING= + +//Flags used by the linker during debug builds. +CMAKE_STATIC_LINKER_FLAGS_DEBUG:STRING= + +//Flags used by the linker during release minsize builds. +CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL:STRING= + +//Flags used by the linker during release builds. +CMAKE_STATIC_LINKER_FLAGS_RELEASE:STRING= + +//Flags used by the linker during Release with Debug Info builds. +CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO:STRING= + +//Path to a program. +CMAKE_STRIP:FILEPATH=/usr/bin/strip + +//Target build architecture +CMAKE_TARGET_ARCHITECTURE:STRING= + +//If this value is on, makefiles will be generated without the +// .SILENT directive, and all commands will be echoed to the console +// during the make. This is useful for debugging only. With Visual +// Studio IDE projects all commands are done without /nologo. 
+CMAKE_VERBOSE_MAKEFILE:BOOL=FALSE + +//Enable to build Debian packages +CPACK_BINARY_DEB:BOOL=OFF + +//Enable to build IFW packages +CPACK_BINARY_IFW:BOOL=OFF + +//Enable to build NSIS packages +CPACK_BINARY_NSIS:BOOL=OFF + +//Enable to build RPM packages +CPACK_BINARY_RPM:BOOL=OFF + +//Enable to build STGZ packages +CPACK_BINARY_STGZ:BOOL=ON + +//Enable to build TBZ2 packages +CPACK_BINARY_TBZ2:BOOL=OFF + +//Enable to build TGZ packages +CPACK_BINARY_TGZ:BOOL=ON + +//Enable to build TXZ packages +CPACK_BINARY_TXZ:BOOL=OFF + +//Enable to build TZ packages +CPACK_BINARY_TZ:BOOL=ON + +//Enable to build TBZ2 source packages +CPACK_SOURCE_TBZ2:BOOL=ON + +//Enable to build TGZ source packages +CPACK_SOURCE_TGZ:BOOL=ON + +//Enable to build TXZ source packages +CPACK_SOURCE_TXZ:BOOL=ON + +//Enable to build TZ source packages +CPACK_SOURCE_TZ:BOOL=ON + +//Enable to build ZIP source packages +CPACK_SOURCE_ZIP:BOOL=OFF + +//Enable stress test +ENABLE_STRESS_TEST:BOOL=ON + +//Enable unit test +ENABLE_UNIT_TEST:BOOL=ON + +//Build ASan+UBSan instrumented code +WITH_ASAN:BOOL=OFF + +//Use boost atomics (only for boost >= 1.54) +WITH_BOOST_ATOMIC:BOOL=OFF + +//Build unit tests +WITH_TESTS:BOOL=OFF + +//Analyze test coverage using gcov (only for gcc) +WITH_TESTS_COVERAGE:BOOL=OFF + +//Build TSan instrumented code +WITH_TSAN:BOOL=OFF + +//Dependencies for the target +cds-s_LIB_DEPENDS:STATIC=general;-lpthread; + +//Value Computed by CMake +cds_BINARY_DIR:STATIC=/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release + +//Dependencies for the target +cds_LIB_DEPENDS:STATIC=general;-lpthread; + +//Value Computed by CMake +cds_SOURCE_DIR:STATIC=/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2 + + +######################## +# INTERNAL cache entries +######################## + +//ADVANCED property for variable: Boost_ATOMIC_LIBRARY_DEBUG +Boost_ATOMIC_LIBRARY_DEBUG-ADVANCED:INTERNAL=1 
+//ADVANCED property for variable: Boost_ATOMIC_LIBRARY_RELEASE +Boost_ATOMIC_LIBRARY_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_CHRONO_LIBRARY_DEBUG +Boost_CHRONO_LIBRARY_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_CHRONO_LIBRARY_RELEASE +Boost_CHRONO_LIBRARY_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_DATE_TIME_LIBRARY_DEBUG +Boost_DATE_TIME_LIBRARY_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_DATE_TIME_LIBRARY_RELEASE +Boost_DATE_TIME_LIBRARY_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_DIR +Boost_DIR-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_INCLUDE_DIR +Boost_INCLUDE_DIR-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_LIBRARY_DIR_DEBUG +Boost_LIBRARY_DIR_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_LIBRARY_DIR_RELEASE +Boost_LIBRARY_DIR_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_SYSTEM_LIBRARY_DEBUG +Boost_SYSTEM_LIBRARY_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_SYSTEM_LIBRARY_RELEASE +Boost_SYSTEM_LIBRARY_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_THREAD_LIBRARY_DEBUG +Boost_THREAD_LIBRARY_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: Boost_THREAD_LIBRARY_RELEASE +Boost_THREAD_LIBRARY_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_AR +CMAKE_AR-ADVANCED:INTERNAL=1 +//This is the directory where this CMakeCache.txt was created +CMAKE_CACHEFILE_DIR:INTERNAL=/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release +//Major version of cmake used to create the current loaded cache +CMAKE_CACHE_MAJOR_VERSION:INTERNAL=3 +//Minor version of cmake used to create the current loaded cache +CMAKE_CACHE_MINOR_VERSION:INTERNAL=5 +//Patch version of cmake used to create the current loaded cache +CMAKE_CACHE_PATCH_VERSION:INTERNAL=1 
+//ADVANCED property for variable: CMAKE_COLOR_MAKEFILE +CMAKE_COLOR_MAKEFILE-ADVANCED:INTERNAL=1 +//Path to CMake executable. +CMAKE_COMMAND:INTERNAL=/usr/bin/cmake +//Path to cpack program executable. +CMAKE_CPACK_COMMAND:INTERNAL=/usr/bin/cpack +//Path to ctest program executable. +CMAKE_CTEST_COMMAND:INTERNAL=/usr/bin/ctest +//ADVANCED property for variable: CMAKE_CXX_COMPILER +CMAKE_CXX_COMPILER-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_CXX_FLAGS +CMAKE_CXX_FLAGS-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_CXX_FLAGS_DEBUG +CMAKE_CXX_FLAGS_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_CXX_FLAGS_MINSIZEREL +CMAKE_CXX_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_CXX_FLAGS_RELEASE +CMAKE_CXX_FLAGS_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_CXX_FLAGS_RELWITHDEBINFO +CMAKE_CXX_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_C_COMPILER +CMAKE_C_COMPILER-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_C_FLAGS +CMAKE_C_FLAGS-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_C_FLAGS_DEBUG +CMAKE_C_FLAGS_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_C_FLAGS_MINSIZEREL +CMAKE_C_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_C_FLAGS_RELEASE +CMAKE_C_FLAGS_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_C_FLAGS_RELWITHDEBINFO +CMAKE_C_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1 +//Executable file format +CMAKE_EXECUTABLE_FORMAT:INTERNAL=ELF +//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS +CMAKE_EXE_LINKER_FLAGS-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS_DEBUG +CMAKE_EXE_LINKER_FLAGS_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS_MINSIZEREL +CMAKE_EXE_LINKER_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: 
CMAKE_EXE_LINKER_FLAGS_RELEASE +CMAKE_EXE_LINKER_FLAGS_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO +CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_EXPORT_COMPILE_COMMANDS +CMAKE_EXPORT_COMPILE_COMMANDS-ADVANCED:INTERNAL=1 +//Name of external makefile project generator. +CMAKE_EXTRA_GENERATOR:INTERNAL= +//Name of generator. +CMAKE_GENERATOR:INTERNAL=Unix Makefiles +//Name of generator platform. +CMAKE_GENERATOR_PLATFORM:INTERNAL= +//Name of generator toolset. +CMAKE_GENERATOR_TOOLSET:INTERNAL= +//Have symbol pthread_create +CMAKE_HAVE_LIBC_CREATE:INTERNAL= +//Have library pthreads +CMAKE_HAVE_PTHREADS_CREATE:INTERNAL= +//Have library pthread +CMAKE_HAVE_PTHREAD_CREATE:INTERNAL=1 +//Have include pthread.h +CMAKE_HAVE_PTHREAD_H:INTERNAL=1 +//Source directory with the top level CMakeLists.txt file for this +// project +CMAKE_HOME_DIRECTORY:INTERNAL=/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2 +//Install .so files without execute permission. 
+CMAKE_INSTALL_SO_NO_EXE:INTERNAL=1 +//ADVANCED property for variable: CMAKE_LINKER +CMAKE_LINKER-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_MAKE_PROGRAM +CMAKE_MAKE_PROGRAM-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS +CMAKE_MODULE_LINKER_FLAGS-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS_DEBUG +CMAKE_MODULE_LINKER_FLAGS_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS_MINSIZEREL +CMAKE_MODULE_LINKER_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS_RELEASE +CMAKE_MODULE_LINKER_FLAGS_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_MODULE_LINKER_FLAGS_RELWITHDEBINFO +CMAKE_MODULE_LINKER_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_NM +CMAKE_NM-ADVANCED:INTERNAL=1 +//number of local generators +CMAKE_NUMBER_OF_MAKEFILES:INTERNAL=1 +//ADVANCED property for variable: CMAKE_OBJCOPY +CMAKE_OBJCOPY-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_OBJDUMP +CMAKE_OBJDUMP-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_RANLIB +CMAKE_RANLIB-ADVANCED:INTERNAL=1 +//Path to CMake installation. 
+CMAKE_ROOT:INTERNAL=/usr/share/cmake-3.5 +//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS +CMAKE_SHARED_LINKER_FLAGS-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS_DEBUG +CMAKE_SHARED_LINKER_FLAGS_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL +CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS_RELEASE +CMAKE_SHARED_LINKER_FLAGS_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO +CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_SKIP_INSTALL_RPATH +CMAKE_SKIP_INSTALL_RPATH-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_SKIP_RPATH +CMAKE_SKIP_RPATH-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_STATIC_LINKER_FLAGS +CMAKE_STATIC_LINKER_FLAGS-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_STATIC_LINKER_FLAGS_DEBUG +CMAKE_STATIC_LINKER_FLAGS_DEBUG-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL +CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_STATIC_LINKER_FLAGS_RELEASE +CMAKE_STATIC_LINKER_FLAGS_RELEASE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO +CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CMAKE_STRIP +CMAKE_STRIP-ADVANCED:INTERNAL=1 +//uname command +CMAKE_UNAME:INTERNAL=/bin/uname +//ADVANCED property for variable: CMAKE_VERBOSE_MAKEFILE +CMAKE_VERBOSE_MAKEFILE-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CPACK_BINARY_DEB +CPACK_BINARY_DEB-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CPACK_BINARY_IFW +CPACK_BINARY_IFW-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CPACK_BINARY_NSIS +CPACK_BINARY_NSIS-ADVANCED:INTERNAL=1 +//ADVANCED 
property for variable: CPACK_BINARY_RPM +CPACK_BINARY_RPM-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CPACK_BINARY_STGZ +CPACK_BINARY_STGZ-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CPACK_BINARY_TBZ2 +CPACK_BINARY_TBZ2-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CPACK_BINARY_TGZ +CPACK_BINARY_TGZ-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CPACK_BINARY_TXZ +CPACK_BINARY_TXZ-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CPACK_BINARY_TZ +CPACK_BINARY_TZ-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CPACK_SOURCE_TBZ2 +CPACK_SOURCE_TBZ2-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CPACK_SOURCE_TGZ +CPACK_SOURCE_TGZ-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CPACK_SOURCE_TXZ +CPACK_SOURCE_TXZ-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CPACK_SOURCE_TZ +CPACK_SOURCE_TZ-ADVANCED:INTERNAL=1 +//ADVANCED property for variable: CPACK_SOURCE_ZIP +CPACK_SOURCE_ZIP-ADVANCED:INTERNAL=1 +//Details about finding Threads +FIND_PACKAGE_MESSAGE_DETAILS_Threads:INTERNAL=[TRUE][v()] +//Components requested for this build tree. +_Boost_COMPONENTS_SEARCHED:INTERNAL=atomic;chrono;date_time;system;thread +//Last used Boost_INCLUDE_DIR value. +_Boost_INCLUDE_DIR_LAST:INTERNAL=/usr/include +//Last used Boost_LIBRARY_DIR_DEBUG value. +_Boost_LIBRARY_DIR_DEBUG_LAST:INTERNAL=/usr/lib/x86_64-linux-gnu +//Last used Boost_LIBRARY_DIR_RELEASE value. +_Boost_LIBRARY_DIR_RELEASE_LAST:INTERNAL=/usr/lib/x86_64-linux-gnu +//Last used Boost_NAMESPACE value. +_Boost_NAMESPACE_LAST:INTERNAL=boost +//Last used Boost_USE_MULTITHREADED value. 
+_Boost_USE_MULTITHREADED_LAST:INTERNAL=TRUE +//Result of TRY_COMPILE +compile_result_unused:INTERNAL=FALSE + diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CMakeCCompiler.cmake b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CMakeCCompiler.cmake new file mode 100644 index 0000000..f40522e --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CMakeCCompiler.cmake @@ -0,0 +1,67 @@ +set(CMAKE_C_COMPILER "/usr/bin/cc") +set(CMAKE_C_COMPILER_ARG1 "") +set(CMAKE_C_COMPILER_ID "GNU") +set(CMAKE_C_COMPILER_VERSION "5.4.0") +set(CMAKE_C_COMPILER_WRAPPER "") +set(CMAKE_C_STANDARD_COMPUTED_DEFAULT "11") +set(CMAKE_C_COMPILE_FEATURES "c_function_prototypes;c_restrict;c_variadic_macros;c_static_assert") +set(CMAKE_C90_COMPILE_FEATURES "c_function_prototypes") +set(CMAKE_C99_COMPILE_FEATURES "c_restrict;c_variadic_macros") +set(CMAKE_C11_COMPILE_FEATURES "c_static_assert") + +set(CMAKE_C_PLATFORM_ID "Linux") +set(CMAKE_C_SIMULATE_ID "") +set(CMAKE_C_SIMULATE_VERSION "") + +set(CMAKE_AR "/usr/bin/ar") +set(CMAKE_RANLIB "/usr/bin/ranlib") +set(CMAKE_LINKER "/usr/bin/ld") +set(CMAKE_COMPILER_IS_GNUCC 1) +set(CMAKE_C_COMPILER_LOADED 1) +set(CMAKE_C_COMPILER_WORKS TRUE) +set(CMAKE_C_ABI_COMPILED TRUE) +set(CMAKE_COMPILER_IS_MINGW ) +set(CMAKE_COMPILER_IS_CYGWIN ) +if(CMAKE_COMPILER_IS_CYGWIN) + set(CYGWIN 1) + set(UNIX 1) +endif() + +set(CMAKE_C_COMPILER_ENV_VAR "CC") + +if(CMAKE_COMPILER_IS_MINGW) + set(MINGW 1) +endif() +set(CMAKE_C_COMPILER_ID_RUN 1) +set(CMAKE_C_SOURCE_FILE_EXTENSIONS c;m) +set(CMAKE_C_IGNORE_EXTENSIONS h;H;o;O;obj;OBJ;def;DEF;rc;RC) +set(CMAKE_C_LINKER_PREFERENCE 10) + +# Save compiler ABI information. 
+set(CMAKE_C_SIZEOF_DATA_PTR "8") +set(CMAKE_C_COMPILER_ABI "ELF") +set(CMAKE_C_LIBRARY_ARCHITECTURE "x86_64-linux-gnu") + +if(CMAKE_C_SIZEOF_DATA_PTR) + set(CMAKE_SIZEOF_VOID_P "${CMAKE_C_SIZEOF_DATA_PTR}") +endif() + +if(CMAKE_C_COMPILER_ABI) + set(CMAKE_INTERNAL_PLATFORM_ABI "${CMAKE_C_COMPILER_ABI}") +endif() + +if(CMAKE_C_LIBRARY_ARCHITECTURE) + set(CMAKE_LIBRARY_ARCHITECTURE "x86_64-linux-gnu") +endif() + +set(CMAKE_C_CL_SHOWINCLUDES_PREFIX "") +if(CMAKE_C_CL_SHOWINCLUDES_PREFIX) + set(CMAKE_CL_SHOWINCLUDES_PREFIX "${CMAKE_C_CL_SHOWINCLUDES_PREFIX}") +endif() + + + + +set(CMAKE_C_IMPLICIT_LINK_LIBRARIES "c") +set(CMAKE_C_IMPLICIT_LINK_DIRECTORIES "/usr/lib/gcc/x86_64-linux-gnu/5;/usr/lib/x86_64-linux-gnu;/usr/lib;/lib/x86_64-linux-gnu;/lib") +set(CMAKE_C_IMPLICIT_LINK_FRAMEWORK_DIRECTORIES "") diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CMakeCXXCompiler.cmake b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CMakeCXXCompiler.cmake new file mode 100644 index 0000000..013ee92 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CMakeCXXCompiler.cmake @@ -0,0 +1,68 @@ +set(CMAKE_CXX_COMPILER "/usr/bin/c++") +set(CMAKE_CXX_COMPILER_ARG1 "") +set(CMAKE_CXX_COMPILER_ID "GNU") +set(CMAKE_CXX_COMPILER_VERSION "5.4.0") +set(CMAKE_CXX_COMPILER_WRAPPER "") +set(CMAKE_CXX_STANDARD_COMPUTED_DEFAULT "98") +set(CMAKE_CXX_COMPILE_FEATURES 
"cxx_template_template_parameters;cxx_alias_templates;cxx_alignas;cxx_alignof;cxx_attributes;cxx_auto_type;cxx_constexpr;cxx_decltype;cxx_decltype_incomplete_return_types;cxx_default_function_template_args;cxx_defaulted_functions;cxx_defaulted_move_initializers;cxx_delegating_constructors;cxx_deleted_functions;cxx_enum_forward_declarations;cxx_explicit_conversions;cxx_extended_friend_declarations;cxx_extern_templates;cxx_final;cxx_func_identifier;cxx_generalized_initializers;cxx_inheriting_constructors;cxx_inline_namespaces;cxx_lambdas;cxx_local_type_template_args;cxx_long_long_type;cxx_noexcept;cxx_nonstatic_member_init;cxx_nullptr;cxx_override;cxx_range_for;cxx_raw_string_literals;cxx_reference_qualified_functions;cxx_right_angle_brackets;cxx_rvalue_references;cxx_sizeof_member;cxx_static_assert;cxx_strong_enums;cxx_thread_local;cxx_trailing_return_types;cxx_unicode_literals;cxx_uniform_initialization;cxx_unrestricted_unions;cxx_user_literals;cxx_variadic_macros;cxx_variadic_templates;cxx_aggregate_default_initializers;cxx_attribute_deprecated;cxx_binary_literals;cxx_contextual_conversions;cxx_decltype_auto;cxx_digit_separators;cxx_generic_lambdas;cxx_lambda_init_captures;cxx_relaxed_constexpr;cxx_return_type_deduction;cxx_variable_templates") +set(CMAKE_CXX98_COMPILE_FEATURES "cxx_template_template_parameters") +set(CMAKE_CXX11_COMPILE_FEATURES 
"cxx_alias_templates;cxx_alignas;cxx_alignof;cxx_attributes;cxx_auto_type;cxx_constexpr;cxx_decltype;cxx_decltype_incomplete_return_types;cxx_default_function_template_args;cxx_defaulted_functions;cxx_defaulted_move_initializers;cxx_delegating_constructors;cxx_deleted_functions;cxx_enum_forward_declarations;cxx_explicit_conversions;cxx_extended_friend_declarations;cxx_extern_templates;cxx_final;cxx_func_identifier;cxx_generalized_initializers;cxx_inheriting_constructors;cxx_inline_namespaces;cxx_lambdas;cxx_local_type_template_args;cxx_long_long_type;cxx_noexcept;cxx_nonstatic_member_init;cxx_nullptr;cxx_override;cxx_range_for;cxx_raw_string_literals;cxx_reference_qualified_functions;cxx_right_angle_brackets;cxx_rvalue_references;cxx_sizeof_member;cxx_static_assert;cxx_strong_enums;cxx_thread_local;cxx_trailing_return_types;cxx_unicode_literals;cxx_uniform_initialization;cxx_unrestricted_unions;cxx_user_literals;cxx_variadic_macros;cxx_variadic_templates") +set(CMAKE_CXX14_COMPILE_FEATURES "cxx_aggregate_default_initializers;cxx_attribute_deprecated;cxx_binary_literals;cxx_contextual_conversions;cxx_decltype_auto;cxx_digit_separators;cxx_generic_lambdas;cxx_lambda_init_captures;cxx_relaxed_constexpr;cxx_return_type_deduction;cxx_variable_templates") + +set(CMAKE_CXX_PLATFORM_ID "Linux") +set(CMAKE_CXX_SIMULATE_ID "") +set(CMAKE_CXX_SIMULATE_VERSION "") + +set(CMAKE_AR "/usr/bin/ar") +set(CMAKE_RANLIB "/usr/bin/ranlib") +set(CMAKE_LINKER "/usr/bin/ld") +set(CMAKE_COMPILER_IS_GNUCXX 1) +set(CMAKE_CXX_COMPILER_LOADED 1) +set(CMAKE_CXX_COMPILER_WORKS TRUE) +set(CMAKE_CXX_ABI_COMPILED TRUE) +set(CMAKE_COMPILER_IS_MINGW ) +set(CMAKE_COMPILER_IS_CYGWIN ) +if(CMAKE_COMPILER_IS_CYGWIN) + set(CYGWIN 1) + set(UNIX 1) +endif() + +set(CMAKE_CXX_COMPILER_ENV_VAR "CXX") + +if(CMAKE_COMPILER_IS_MINGW) + set(MINGW 1) +endif() +set(CMAKE_CXX_COMPILER_ID_RUN 1) +set(CMAKE_CXX_IGNORE_EXTENSIONS inl;h;hpp;HPP;H;o;O;obj;OBJ;def;DEF;rc;RC) +set(CMAKE_CXX_SOURCE_FILE_EXTENSIONS 
C;M;c++;cc;cpp;cxx;mm;CPP) +set(CMAKE_CXX_LINKER_PREFERENCE 30) +set(CMAKE_CXX_LINKER_PREFERENCE_PROPAGATES 1) + +# Save compiler ABI information. +set(CMAKE_CXX_SIZEOF_DATA_PTR "8") +set(CMAKE_CXX_COMPILER_ABI "ELF") +set(CMAKE_CXX_LIBRARY_ARCHITECTURE "x86_64-linux-gnu") + +if(CMAKE_CXX_SIZEOF_DATA_PTR) + set(CMAKE_SIZEOF_VOID_P "${CMAKE_CXX_SIZEOF_DATA_PTR}") +endif() + +if(CMAKE_CXX_COMPILER_ABI) + set(CMAKE_INTERNAL_PLATFORM_ABI "${CMAKE_CXX_COMPILER_ABI}") +endif() + +if(CMAKE_CXX_LIBRARY_ARCHITECTURE) + set(CMAKE_LIBRARY_ARCHITECTURE "x86_64-linux-gnu") +endif() + +set(CMAKE_CXX_CL_SHOWINCLUDES_PREFIX "") +if(CMAKE_CXX_CL_SHOWINCLUDES_PREFIX) + set(CMAKE_CL_SHOWINCLUDES_PREFIX "${CMAKE_CXX_CL_SHOWINCLUDES_PREFIX}") +endif() + + + + +set(CMAKE_CXX_IMPLICIT_LINK_LIBRARIES "stdc++;m;c") +set(CMAKE_CXX_IMPLICIT_LINK_DIRECTORIES "/usr/lib/gcc/x86_64-linux-gnu/5;/usr/lib/x86_64-linux-gnu;/usr/lib;/lib/x86_64-linux-gnu;/lib") +set(CMAKE_CXX_IMPLICIT_LINK_FRAMEWORK_DIRECTORIES "") diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CMakeDetermineCompilerABI_C.bin b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CMakeDetermineCompilerABI_C.bin new file mode 100755 index 0000000000000000000000000000000000000000..8fadb3a4377be9080de10797e815ab49faade975 GIT binary patch literal 8640 zcmeHNeQXrR6`#BF$2m6Vg8(rfE!!3vL*>ocHI5-_+4I?EO>9GrP191dKKsu0k@KB< zw?_<08Y(9!rv!qMs6|y9QB{?wlveaZEm7KF0t&QhBu5o(8uAAXC71w>Q4(ZD#Mk#` z=iSG6krG8!|6#1#H}5y^_vT|~XJ_8}(U$gBr^CT0Ioa14YNI6@hpa1PmvU8LjjW7m zYz14+N&qQ_pTK{+v@dj6=WaJkW@rFs)ZyTs>rJrylTNCIVl=OlKO{z!(X@X*X`6mLtR6` zn)x*+(H7H|i)IM(!@|+9TSFLhOV)FZF_Ogl2Jo<*pEP-eVV9`yR^u`)`bScz?4D?( zzoGt~Xs9L{iKWJB#u^)H8tT<#T&?5%M*nELv9pKQMB}J{hHYL5Kb!+t{>S!r{&8cI z^UX^$m!Hl2_U;1{fBjPzmSKO8R|>y_-o0-2g~jFWqM~_E6fq6U@K$3zO)Y1y?ko%Z z{iO@fJY2EX&qQ2@SlS8!eSt|vr5c(=MY+R33;1 zlgV(>YLjh?Hnz7l1&lhiPF-vDmd3~dPnP{3K6ChIJr~GIEselq;$-J67i^}okLU3D 
z!Doz8m6%8PtWiB!rjK8B>rWQHR;Doh@d>kd>MI;PkN*At za0e%5UEn}ZPwOY9Ht3F5_19<3MOkm<f4>*! zdF9I!j+UE*JdlUngl1&B!pFn^P5(B3I^*y0cen3f)mfrI=l0j2+}f7D(Uv~jzWO4+ zXD2>(O>LOXWc0_cnM&HBb>xj(SLTka^B9gMYgwr*aZ_tWfi6FgN{-!;QTMaC-!9kHG(91T=X6Vp4?U3S7i;oRt-#fcACs zgiiajB|@k3)iR;u8j6XIlU4Bg#DwcECMy4MHWNqvkW)j!-y0|=iZm4bY=Cmcsi9zZ z@GE1Ngv9+irYcJ?+D8gCxA%eV0VZ05$)4ULxF5oFmnE3HMB_Qar;9C}+D&{sn_wax zmU-TvNnsDKIIh#UP7D2rD7V=E`zxJe<(jsMe#-d9Kw!O6-P50nnJHzhTCe(Q8d6-V z{a$T@>Z@04eXFekrOsE|;A`-$lYPj#3uaZLQ4!AaD^jTHxUo~2E;aYF*0tmuhRLREn%}xGxV{>ck z)|PIg+uzjQf>rZ4+GmWKNwGLh%MWLLmY`Z*Jk1L7eX5`uQr#si1YZP8XgvtYB&T=NVBNc?6Me?-#Gwg zP)%?oG5~Feo9G@83}&hji)1Ry^AVW4Hmk#@hwyy|k7Razq>q@3Je!bC``bxDPy}CYzknI;N3f~Hr+x6W;8Q=Te!2gT z34W7sNargzRtPDdk9nDY05BXAVV?cYpa_7@nP9?`7*$G? h_R~7>c_^%dAN5DL=ijjH>OZsC@|OF51WT9e|1S#bm#zQ+ literal 0 HcmV?d00001 diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CMakeDetermineCompilerABI_CXX.bin b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CMakeDetermineCompilerABI_CXX.bin new file mode 100755 index 0000000000000000000000000000000000000000..f89cba0f5e50283f60499d801dbc6711babc578c GIT binary patch literal 8656 zcmeHMeQZZ(c8CMNX{);~I?ss%(<*rZ8R6$Kl!(s?*TmgI&jq{;etxF`_?-$b5MO4%(1JnKL*&?AffP|RxL#$5 z#2J$~fOgnduZN4k_woX9N%W&ZaI!-Y?HYt#gRmohQFM$rjSt6$zmV`3aw;I9tRQ1; z{C|O?E>iDRJ%VLbI8xmT!XO*GnM;fjC%bQf9rp9{l2<79iT3U{FSo?_hzpUeiN^+8 zn%Bf5jqz9_GtoHVZ)t34meWajEgv`fN8PPG{k$cbM+qeC^8)zc9>Dt5&+e?-|Kr8K zuX?Ha-PHqo`qwP*OIU~FLAy%$9rI4P*`o`p++}6+o-Shw*5OiPJFV@PZtSZHzV-5z zpFUBu>6g_neDlhgfBfyr^J@p+x%u|@b}ifYR$ngy4izy9XOVkN=Qh8h8r zZ18Hp>TK`|x%=QJ?}K*&?q!cKpLZ{m3e*cl#3c)UgyTz?--7o6F4^eE0iO?^ zykZ~GzWD**bHICP^GJ)rfV80FF_G)38hGi|Shf}&X7LFwd8A@lg;aDQZw9Zhs z7Kx^!BeAp|O@+FH@nj+z3J=DkOp8XsdKgTQ8BRbL8A~RFM-9xkLpc&lXqj|0V)hQs zb?}fta4?-F^KJXJKG9Y%9!{sDX`>6a9oX8{*&5W=%4_BId~h^!2Rtu!f5r?LHO78N zB(o$2+uzClYE-k?+P%Dl&jmg^MD=2S;4?%mVVydC!>t}E|Ex-4>Y+1w`L%T%e9e6h 
zYYUWHe;HJAE8m0vwaXPKp*)HTAI#){R=x=GFm4M@O*cEUZ|qUCm(}U_ZuN#b&!52+ zR?nZs4W^zykNe}=pTO?Z;o9YGjGfv59qd;dao?-cvmP~jb^p>+=nA_vf8aD$@D@|wJoaSb@h!oePP~PHF>LD&IZmI?N5Du z74)BF=J$i)wFGo*;PvuF069M4EuEI&z*ncYuuS>&pMhQ8*=;n)T^#t}0N7wT`@V#Zt7CgJ2W3%*$=z2ciO;1Zoxm*>2nTs zhx>w-2;Kg0t10P0qxweLZhF+dUJoEflKcw+9iy2_H=CDl#U&UCWp1ixSrbW z^W%|c;Nij8Zn!@n;8^O~jPIm!_`Q8Emzx6ahVJ3{H3$3za200i0`OD7y$&b<;d=nd zaiEVmCcKWNHI?pTj!G}$`0hUmzE-1xAbWkrRzN-K_kzBJx*l(br+!;)_4nOVY|E0( ztJkgg3c{!Z+m7`>pZFOS@OTe9gYzo(L&Ksj0(=7Mqrh3K$NQ|a)l>hBtKB0VEo=2O z{G?p<_zqX79{+S@w?|2N`~i}NT3J$tsXZY*9&0&8}MU(?$G4}dN2bIX5hgL z{6Efs1n*}~#6>5?aSiVWgK7k)d)+)C)4lBxAya&{RLFS7B2hS5FXAaYmyxLcv$O8TSw7}FQD(?^&q11XD#+MF{6AxF36A%5qC<}#?@dUH z4aR256<#9zf{>}-WG}+deEaYk;Nv+V^x+lAWtz`hLOv+!MRWgiMscjQp_U583UPUB zFt|x-=pW1^^o+D#ZkBzGEg3E~9c*fmea&){Z&L7;-(Lm?4N3A*a7*B2FtqFbG&r_Jjy4qbbmH;ymX&$75*`NG}8`_m+rS1;FuR- zVt(koF$uV2b|A{HaJ+PX{sM5jd`}wiI-BSBIgXb$@Urk@%@17w+^#==2iz_X^giHr zd7uiwG2dg&Go2Os*8I>Np>Lf}CrlLjx8{c`0k`Yt()-|;hpb~Z=jTvLPwScCVR;C0 zGuwiFT36?ekfyOh8c5T}v>{9cL3T!qB(;%vaxfg%B6>2F*20+yHk2G2k4N=rMBcD| zW3#;yPcUiWR4P0Pc_2MC$%a$mv8WcwjEzl#iAmBhTU4YP><&L34er_%goa}AXe!X! 
zDG!a0Gwr)wO)as+a8ffx+KBo&2w^sO?&u42w`=V^Z7?Wfgj6nS2UAm#bW$4)CnA{c zYWq%4pt~~&s7a0I&H#e!tJbax*J@iIK#(k>Mz(fsZw+*5+dDdTw1>1%ptY+Vo95ZK zFP3YwBAom@MFd3!MaC(%3(`bbd=&7L)-8{W#oI{hp`3O(>?ZJxuHel7h=90 z(tjM(FjgU%?z<<1fn>Ph-)7AH7cj&733iq2>0W$6*wZ+vee3w2685daA;nuWHumVt zYX1yi*spwiDB`JEVNY?Va9^^2vKD)(LgA{Ca42!9@XsrX#D5O5xqU_0^AA=~NBt!a zR{7T!dwRe2iuPYb=O&_d3HmJ*%aqg?)=z8Oql7 z|7fwN@0RpEv)N*AZU3~zp5l)w(f+3vd+YrD1?u5;#-AZnrThbZ-?ZvmG)^|328_Qj=F1~Sf$WWMZiPy+Cq5slsO^FlWB0_tRYQuR(jxM<>De*ckBa{96$|%es9Xm> X8jo<#ziHdGe_@f~ZN0_0wf%nsz9@%^ literal 0 HcmV?d00001 diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CMakeSystem.cmake b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CMakeSystem.cmake new file mode 100644 index 0000000..1a20aeb --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CMakeSystem.cmake @@ -0,0 +1,15 @@ +set(CMAKE_HOST_SYSTEM "Linux-4.4.0-119-generic") +set(CMAKE_HOST_SYSTEM_NAME "Linux") +set(CMAKE_HOST_SYSTEM_VERSION "4.4.0-119-generic") +set(CMAKE_HOST_SYSTEM_PROCESSOR "x86_64") + + + +set(CMAKE_SYSTEM "Linux-4.4.0-119-generic") +set(CMAKE_SYSTEM_NAME "Linux") +set(CMAKE_SYSTEM_VERSION "4.4.0-119-generic") +set(CMAKE_SYSTEM_PROCESSOR "x86_64") + +set(CMAKE_CROSSCOMPILING "FALSE") + +set(CMAKE_SYSTEM_LOADED 1) diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CompilerIdC/CMakeCCompilerId.c b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CompilerIdC/CMakeCCompilerId.c new file mode 100644 index 0000000..570a15e --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CompilerIdC/CMakeCCompilerId.c @@ -0,0 +1,544 @@ +#ifdef __cplusplus +# error "A C++ compiler has been selected for C." 
+#endif + +#if defined(__18CXX) +# define ID_VOID_MAIN +#endif + + +/* Version number components: V=Version, R=Revision, P=Patch + Version date components: YYYY=Year, MM=Month, DD=Day */ + +#if defined(__INTEL_COMPILER) || defined(__ICC) +# define COMPILER_ID "Intel" +# if defined(_MSC_VER) +# define SIMULATE_ID "MSVC" +# endif + /* __INTEL_COMPILER = VRP */ +# define COMPILER_VERSION_MAJOR DEC(__INTEL_COMPILER/100) +# define COMPILER_VERSION_MINOR DEC(__INTEL_COMPILER/10 % 10) +# if defined(__INTEL_COMPILER_UPDATE) +# define COMPILER_VERSION_PATCH DEC(__INTEL_COMPILER_UPDATE) +# else +# define COMPILER_VERSION_PATCH DEC(__INTEL_COMPILER % 10) +# endif +# if defined(__INTEL_COMPILER_BUILD_DATE) + /* __INTEL_COMPILER_BUILD_DATE = YYYYMMDD */ +# define COMPILER_VERSION_TWEAK DEC(__INTEL_COMPILER_BUILD_DATE) +# endif +# if defined(_MSC_VER) + /* _MSC_VER = VVRR */ +# define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100) +# define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100) +# endif + +#elif defined(__PATHCC__) +# define COMPILER_ID "PathScale" +# define COMPILER_VERSION_MAJOR DEC(__PATHCC__) +# define COMPILER_VERSION_MINOR DEC(__PATHCC_MINOR__) +# if defined(__PATHCC_PATCHLEVEL__) +# define COMPILER_VERSION_PATCH DEC(__PATHCC_PATCHLEVEL__) +# endif + +#elif defined(__BORLANDC__) && defined(__CODEGEARC_VERSION__) +# define COMPILER_ID "Embarcadero" +# define COMPILER_VERSION_MAJOR HEX(__CODEGEARC_VERSION__>>24 & 0x00FF) +# define COMPILER_VERSION_MINOR HEX(__CODEGEARC_VERSION__>>16 & 0x00FF) +# define COMPILER_VERSION_PATCH DEC(__CODEGEARC_VERSION__ & 0xFFFF) + +#elif defined(__BORLANDC__) +# define COMPILER_ID "Borland" + /* __BORLANDC__ = 0xVRR */ +# define COMPILER_VERSION_MAJOR HEX(__BORLANDC__>>8) +# define COMPILER_VERSION_MINOR HEX(__BORLANDC__ & 0xFF) + +#elif defined(__WATCOMC__) && __WATCOMC__ < 1200 +# define COMPILER_ID "Watcom" + /* __WATCOMC__ = VVRR */ +# define COMPILER_VERSION_MAJOR DEC(__WATCOMC__ / 100) +# define COMPILER_VERSION_MINOR DEC((__WATCOMC__ / 
10) % 10) +# if (__WATCOMC__ % 10) > 0 +# define COMPILER_VERSION_PATCH DEC(__WATCOMC__ % 10) +# endif + +#elif defined(__WATCOMC__) +# define COMPILER_ID "OpenWatcom" + /* __WATCOMC__ = VVRP + 1100 */ +# define COMPILER_VERSION_MAJOR DEC((__WATCOMC__ - 1100) / 100) +# define COMPILER_VERSION_MINOR DEC((__WATCOMC__ / 10) % 10) +# if (__WATCOMC__ % 10) > 0 +# define COMPILER_VERSION_PATCH DEC(__WATCOMC__ % 10) +# endif + +#elif defined(__SUNPRO_C) +# define COMPILER_ID "SunPro" +# if __SUNPRO_C >= 0x5100 + /* __SUNPRO_C = 0xVRRP */ +# define COMPILER_VERSION_MAJOR HEX(__SUNPRO_C>>12) +# define COMPILER_VERSION_MINOR HEX(__SUNPRO_C>>4 & 0xFF) +# define COMPILER_VERSION_PATCH HEX(__SUNPRO_C & 0xF) +# else + /* __SUNPRO_CC = 0xVRP */ +# define COMPILER_VERSION_MAJOR HEX(__SUNPRO_C>>8) +# define COMPILER_VERSION_MINOR HEX(__SUNPRO_C>>4 & 0xF) +# define COMPILER_VERSION_PATCH HEX(__SUNPRO_C & 0xF) +# endif + +#elif defined(__HP_cc) +# define COMPILER_ID "HP" + /* __HP_cc = VVRRPP */ +# define COMPILER_VERSION_MAJOR DEC(__HP_cc/10000) +# define COMPILER_VERSION_MINOR DEC(__HP_cc/100 % 100) +# define COMPILER_VERSION_PATCH DEC(__HP_cc % 100) + +#elif defined(__DECC) +# define COMPILER_ID "Compaq" + /* __DECC_VER = VVRRTPPPP */ +# define COMPILER_VERSION_MAJOR DEC(__DECC_VER/10000000) +# define COMPILER_VERSION_MINOR DEC(__DECC_VER/100000 % 100) +# define COMPILER_VERSION_PATCH DEC(__DECC_VER % 10000) + +#elif defined(__IBMC__) && defined(__COMPILER_VER__) +# define COMPILER_ID "zOS" + /* __IBMC__ = VRP */ +# define COMPILER_VERSION_MAJOR DEC(__IBMC__/100) +# define COMPILER_VERSION_MINOR DEC(__IBMC__/10 % 10) +# define COMPILER_VERSION_PATCH DEC(__IBMC__ % 10) + +#elif defined(__IBMC__) && !defined(__COMPILER_VER__) && __IBMC__ >= 800 +# define COMPILER_ID "XL" + /* __IBMC__ = VRP */ +# define COMPILER_VERSION_MAJOR DEC(__IBMC__/100) +# define COMPILER_VERSION_MINOR DEC(__IBMC__/10 % 10) +# define COMPILER_VERSION_PATCH DEC(__IBMC__ % 10) + +#elif defined(__IBMC__) && 
!defined(__COMPILER_VER__) && __IBMC__ < 800 +# define COMPILER_ID "VisualAge" + /* __IBMC__ = VRP */ +# define COMPILER_VERSION_MAJOR DEC(__IBMC__/100) +# define COMPILER_VERSION_MINOR DEC(__IBMC__/10 % 10) +# define COMPILER_VERSION_PATCH DEC(__IBMC__ % 10) + +#elif defined(__PGI) +# define COMPILER_ID "PGI" +# define COMPILER_VERSION_MAJOR DEC(__PGIC__) +# define COMPILER_VERSION_MINOR DEC(__PGIC_MINOR__) +# if defined(__PGIC_PATCHLEVEL__) +# define COMPILER_VERSION_PATCH DEC(__PGIC_PATCHLEVEL__) +# endif + +#elif defined(_CRAYC) +# define COMPILER_ID "Cray" +# define COMPILER_VERSION_MAJOR DEC(_RELEASE_MAJOR) +# define COMPILER_VERSION_MINOR DEC(_RELEASE_MINOR) + +#elif defined(__TI_COMPILER_VERSION__) +# define COMPILER_ID "TI" + /* __TI_COMPILER_VERSION__ = VVVRRRPPP */ +# define COMPILER_VERSION_MAJOR DEC(__TI_COMPILER_VERSION__/1000000) +# define COMPILER_VERSION_MINOR DEC(__TI_COMPILER_VERSION__/1000 % 1000) +# define COMPILER_VERSION_PATCH DEC(__TI_COMPILER_VERSION__ % 1000) + +#elif defined(__FUJITSU) || defined(__FCC_VERSION) || defined(__fcc_version) +# define COMPILER_ID "Fujitsu" + +#elif defined(__TINYC__) +# define COMPILER_ID "TinyCC" + +#elif defined(__SCO_VERSION__) +# define COMPILER_ID "SCO" + +#elif defined(__clang__) && defined(__apple_build_version__) +# define COMPILER_ID "AppleClang" +# if defined(_MSC_VER) +# define SIMULATE_ID "MSVC" +# endif +# define COMPILER_VERSION_MAJOR DEC(__clang_major__) +# define COMPILER_VERSION_MINOR DEC(__clang_minor__) +# define COMPILER_VERSION_PATCH DEC(__clang_patchlevel__) +# if defined(_MSC_VER) + /* _MSC_VER = VVRR */ +# define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100) +# define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100) +# endif +# define COMPILER_VERSION_TWEAK DEC(__apple_build_version__) + +#elif defined(__clang__) +# define COMPILER_ID "Clang" +# if defined(_MSC_VER) +# define SIMULATE_ID "MSVC" +# endif +# define COMPILER_VERSION_MAJOR DEC(__clang_major__) +# define COMPILER_VERSION_MINOR 
DEC(__clang_minor__) +# define COMPILER_VERSION_PATCH DEC(__clang_patchlevel__) +# if defined(_MSC_VER) + /* _MSC_VER = VVRR */ +# define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100) +# define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100) +# endif + +#elif defined(__GNUC__) +# define COMPILER_ID "GNU" +# define COMPILER_VERSION_MAJOR DEC(__GNUC__) +# if defined(__GNUC_MINOR__) +# define COMPILER_VERSION_MINOR DEC(__GNUC_MINOR__) +# endif +# if defined(__GNUC_PATCHLEVEL__) +# define COMPILER_VERSION_PATCH DEC(__GNUC_PATCHLEVEL__) +# endif + +#elif defined(_MSC_VER) +# define COMPILER_ID "MSVC" + /* _MSC_VER = VVRR */ +# define COMPILER_VERSION_MAJOR DEC(_MSC_VER / 100) +# define COMPILER_VERSION_MINOR DEC(_MSC_VER % 100) +# if defined(_MSC_FULL_VER) +# if _MSC_VER >= 1400 + /* _MSC_FULL_VER = VVRRPPPPP */ +# define COMPILER_VERSION_PATCH DEC(_MSC_FULL_VER % 100000) +# else + /* _MSC_FULL_VER = VVRRPPPP */ +# define COMPILER_VERSION_PATCH DEC(_MSC_FULL_VER % 10000) +# endif +# endif +# if defined(_MSC_BUILD) +# define COMPILER_VERSION_TWEAK DEC(_MSC_BUILD) +# endif + +#elif defined(__VISUALDSPVERSION__) || defined(__ADSPBLACKFIN__) || defined(__ADSPTS__) || defined(__ADSP21000__) +# define COMPILER_ID "ADSP" +#if defined(__VISUALDSPVERSION__) + /* __VISUALDSPVERSION__ = 0xVVRRPP00 */ +# define COMPILER_VERSION_MAJOR HEX(__VISUALDSPVERSION__>>24) +# define COMPILER_VERSION_MINOR HEX(__VISUALDSPVERSION__>>16 & 0xFF) +# define COMPILER_VERSION_PATCH HEX(__VISUALDSPVERSION__>>8 & 0xFF) +#endif + +#elif defined(__IAR_SYSTEMS_ICC__ ) || defined(__IAR_SYSTEMS_ICC) +# define COMPILER_ID "IAR" + +#elif defined(__ARMCC_VERSION) +# define COMPILER_ID "ARMCC" +#if __ARMCC_VERSION >= 1000000 + /* __ARMCC_VERSION = VRRPPPP */ + # define COMPILER_VERSION_MAJOR DEC(__ARMCC_VERSION/1000000) + # define COMPILER_VERSION_MINOR DEC(__ARMCC_VERSION/10000 % 100) + # define COMPILER_VERSION_PATCH DEC(__ARMCC_VERSION % 10000) +#else + /* __ARMCC_VERSION = VRPPPP */ + # define 
COMPILER_VERSION_MAJOR DEC(__ARMCC_VERSION/100000) + # define COMPILER_VERSION_MINOR DEC(__ARMCC_VERSION/10000 % 10) + # define COMPILER_VERSION_PATCH DEC(__ARMCC_VERSION % 10000) +#endif + + +#elif defined(SDCC) +# define COMPILER_ID "SDCC" + /* SDCC = VRP */ +# define COMPILER_VERSION_MAJOR DEC(SDCC/100) +# define COMPILER_VERSION_MINOR DEC(SDCC/10 % 10) +# define COMPILER_VERSION_PATCH DEC(SDCC % 10) + +#elif defined(_SGI_COMPILER_VERSION) || defined(_COMPILER_VERSION) +# define COMPILER_ID "MIPSpro" +# if defined(_SGI_COMPILER_VERSION) + /* _SGI_COMPILER_VERSION = VRP */ +# define COMPILER_VERSION_MAJOR DEC(_SGI_COMPILER_VERSION/100) +# define COMPILER_VERSION_MINOR DEC(_SGI_COMPILER_VERSION/10 % 10) +# define COMPILER_VERSION_PATCH DEC(_SGI_COMPILER_VERSION % 10) +# else + /* _COMPILER_VERSION = VRP */ +# define COMPILER_VERSION_MAJOR DEC(_COMPILER_VERSION/100) +# define COMPILER_VERSION_MINOR DEC(_COMPILER_VERSION/10 % 10) +# define COMPILER_VERSION_PATCH DEC(_COMPILER_VERSION % 10) +# endif + + +/* These compilers are either not known or too old to define an + identification macro. Try to identify the platform and guess that + it is the native compiler. */ +#elif defined(__sgi) +# define COMPILER_ID "MIPSpro" + +#elif defined(__hpux) || defined(__hpua) +# define COMPILER_ID "HP" + +#else /* unknown compiler */ +# define COMPILER_ID "" +#endif + +/* Construct the string literal in pieces to prevent the source from + getting matched. Store it in a pointer rather than an array + because some compilers will just produce instructions to fill the + array rather than assigning a pointer to a static array. 
*/ +char const* info_compiler = "INFO" ":" "compiler[" COMPILER_ID "]"; +#ifdef SIMULATE_ID +char const* info_simulate = "INFO" ":" "simulate[" SIMULATE_ID "]"; +#endif + +#ifdef __QNXNTO__ +char const* qnxnto = "INFO" ":" "qnxnto[]"; +#endif + +#if defined(__CRAYXE) || defined(__CRAYXC) +char const *info_cray = "INFO" ":" "compiler_wrapper[CrayPrgEnv]"; +#endif + +#define STRINGIFY_HELPER(X) #X +#define STRINGIFY(X) STRINGIFY_HELPER(X) + +/* Identify known platforms by name. */ +#if defined(__linux) || defined(__linux__) || defined(linux) +# define PLATFORM_ID "Linux" + +#elif defined(__CYGWIN__) +# define PLATFORM_ID "Cygwin" + +#elif defined(__MINGW32__) +# define PLATFORM_ID "MinGW" + +#elif defined(__APPLE__) +# define PLATFORM_ID "Darwin" + +#elif defined(_WIN32) || defined(__WIN32__) || defined(WIN32) +# define PLATFORM_ID "Windows" + +#elif defined(__FreeBSD__) || defined(__FreeBSD) +# define PLATFORM_ID "FreeBSD" + +#elif defined(__NetBSD__) || defined(__NetBSD) +# define PLATFORM_ID "NetBSD" + +#elif defined(__OpenBSD__) || defined(__OPENBSD) +# define PLATFORM_ID "OpenBSD" + +#elif defined(__sun) || defined(sun) +# define PLATFORM_ID "SunOS" + +#elif defined(_AIX) || defined(__AIX) || defined(__AIX__) || defined(__aix) || defined(__aix__) +# define PLATFORM_ID "AIX" + +#elif defined(__sgi) || defined(__sgi__) || defined(_SGI) +# define PLATFORM_ID "IRIX" + +#elif defined(__hpux) || defined(__hpux__) +# define PLATFORM_ID "HP-UX" + +#elif defined(__HAIKU__) +# define PLATFORM_ID "Haiku" + +#elif defined(__BeOS) || defined(__BEOS__) || defined(_BEOS) +# define PLATFORM_ID "BeOS" + +#elif defined(__QNX__) || defined(__QNXNTO__) +# define PLATFORM_ID "QNX" + +#elif defined(__tru64) || defined(_tru64) || defined(__TRU64__) +# define PLATFORM_ID "Tru64" + +#elif defined(__riscos) || defined(__riscos__) +# define PLATFORM_ID "RISCos" + +#elif defined(__sinix) || defined(__sinix__) || defined(__SINIX__) +# define PLATFORM_ID "SINIX" + +#elif defined(__UNIX_SV__) 
+# define PLATFORM_ID "UNIX_SV" + +#elif defined(__bsdos__) +# define PLATFORM_ID "BSDOS" + +#elif defined(_MPRAS) || defined(MPRAS) +# define PLATFORM_ID "MP-RAS" + +#elif defined(__osf) || defined(__osf__) +# define PLATFORM_ID "OSF1" + +#elif defined(_SCO_SV) || defined(SCO_SV) || defined(sco_sv) +# define PLATFORM_ID "SCO_SV" + +#elif defined(__ultrix) || defined(__ultrix__) || defined(_ULTRIX) +# define PLATFORM_ID "ULTRIX" + +#elif defined(__XENIX__) || defined(_XENIX) || defined(XENIX) +# define PLATFORM_ID "Xenix" + +#elif defined(__WATCOMC__) +# if defined(__LINUX__) +# define PLATFORM_ID "Linux" + +# elif defined(__DOS__) +# define PLATFORM_ID "DOS" + +# elif defined(__OS2__) +# define PLATFORM_ID "OS2" + +# elif defined(__WINDOWS__) +# define PLATFORM_ID "Windows3x" + +# else /* unknown platform */ +# define PLATFORM_ID "" +# endif + +#else /* unknown platform */ +# define PLATFORM_ID "" + +#endif + +/* For windows compilers MSVC and Intel we can determine + the architecture of the compiler being used. 
This is because + the compilers do not have flags that can change the architecture, + but rather depend on which compiler is being used +*/ +#if defined(_WIN32) && defined(_MSC_VER) +# if defined(_M_IA64) +# define ARCHITECTURE_ID "IA64" + +# elif defined(_M_X64) || defined(_M_AMD64) +# define ARCHITECTURE_ID "x64" + +# elif defined(_M_IX86) +# define ARCHITECTURE_ID "X86" + +# elif defined(_M_ARM) +# if _M_ARM == 4 +# define ARCHITECTURE_ID "ARMV4I" +# elif _M_ARM == 5 +# define ARCHITECTURE_ID "ARMV5I" +# else +# define ARCHITECTURE_ID "ARMV" STRINGIFY(_M_ARM) +# endif + +# elif defined(_M_MIPS) +# define ARCHITECTURE_ID "MIPS" + +# elif defined(_M_SH) +# define ARCHITECTURE_ID "SHx" + +# else /* unknown architecture */ +# define ARCHITECTURE_ID "" +# endif + +#elif defined(__WATCOMC__) +# if defined(_M_I86) +# define ARCHITECTURE_ID "I86" + +# elif defined(_M_IX86) +# define ARCHITECTURE_ID "X86" + +# else /* unknown architecture */ +# define ARCHITECTURE_ID "" +# endif + +#else +# define ARCHITECTURE_ID "" +#endif + +/* Convert integer to decimal digit literals. */ +#define DEC(n) \ + ('0' + (((n) / 10000000)%10)), \ + ('0' + (((n) / 1000000)%10)), \ + ('0' + (((n) / 100000)%10)), \ + ('0' + (((n) / 10000)%10)), \ + ('0' + (((n) / 1000)%10)), \ + ('0' + (((n) / 100)%10)), \ + ('0' + (((n) / 10)%10)), \ + ('0' + ((n) % 10)) + +/* Convert integer to hex digit literals. */ +#define HEX(n) \ + ('0' + ((n)>>28 & 0xF)), \ + ('0' + ((n)>>24 & 0xF)), \ + ('0' + ((n)>>20 & 0xF)), \ + ('0' + ((n)>>16 & 0xF)), \ + ('0' + ((n)>>12 & 0xF)), \ + ('0' + ((n)>>8 & 0xF)), \ + ('0' + ((n)>>4 & 0xF)), \ + ('0' + ((n) & 0xF)) + +/* Construct a string literal encoding the version number components. 
*/ +#ifdef COMPILER_VERSION_MAJOR +char const info_version[] = { + 'I', 'N', 'F', 'O', ':', + 'c','o','m','p','i','l','e','r','_','v','e','r','s','i','o','n','[', + COMPILER_VERSION_MAJOR, +# ifdef COMPILER_VERSION_MINOR + '.', COMPILER_VERSION_MINOR, +# ifdef COMPILER_VERSION_PATCH + '.', COMPILER_VERSION_PATCH, +# ifdef COMPILER_VERSION_TWEAK + '.', COMPILER_VERSION_TWEAK, +# endif +# endif +# endif + ']','\0'}; +#endif + +/* Construct a string literal encoding the version number components. */ +#ifdef SIMULATE_VERSION_MAJOR +char const info_simulate_version[] = { + 'I', 'N', 'F', 'O', ':', + 's','i','m','u','l','a','t','e','_','v','e','r','s','i','o','n','[', + SIMULATE_VERSION_MAJOR, +# ifdef SIMULATE_VERSION_MINOR + '.', SIMULATE_VERSION_MINOR, +# ifdef SIMULATE_VERSION_PATCH + '.', SIMULATE_VERSION_PATCH, +# ifdef SIMULATE_VERSION_TWEAK + '.', SIMULATE_VERSION_TWEAK, +# endif +# endif +# endif + ']','\0'}; +#endif + +/* Construct the string literal in pieces to prevent the source from + getting matched. Store it in a pointer rather than an array + because some compilers will just produce instructions to fill the + array rather than assigning a pointer to a static array. 
*/ +char const* info_platform = "INFO" ":" "platform[" PLATFORM_ID "]"; +char const* info_arch = "INFO" ":" "arch[" ARCHITECTURE_ID "]"; + + + + +const char* info_language_dialect_default = "INFO" ":" "dialect_default[" +#if !defined(__STDC_VERSION__) + "90" +#elif __STDC_VERSION__ >= 201000L + "11" +#elif __STDC_VERSION__ >= 199901L + "99" +#else +#endif +"]"; + +/*--------------------------------------------------------------------------*/ + +#ifdef ID_VOID_MAIN +void main() {} +#else +int main(int argc, char* argv[]) +{ + int require = 0; + require += info_compiler[argc]; + require += info_platform[argc]; + require += info_arch[argc]; +#ifdef COMPILER_VERSION_MAJOR + require += info_version[argc]; +#endif +#ifdef SIMULATE_ID + require += info_simulate[argc]; +#endif +#ifdef SIMULATE_VERSION_MAJOR + require += info_simulate_version[argc]; +#endif +#if defined(__CRAYXE) || defined(__CRAYXC) + require += info_cray[argc]; +#endif + require += info_language_dialect_default[argc]; + (void)argv; + return require; +} +#endif diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CompilerIdC/a.out b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CompilerIdC/a.out new file mode 100755 index 0000000000000000000000000000000000000000..afc42a94bbc7371ac5a573a3b9eb6b0812ecca21 GIT binary patch literal 8800 zcmeHMZ){W76~DIgkAIFs%Nn2?j}D}?TOLjpQqyYXIZnbea)1SgZp+NeOY9_G9UIxt zETC>@7#0l`JHp_pLgFo_kFu3((87)1Q)k>N+7r3(WJ<}3vqsvWJwkftAr-D zh=)ZTNNVBIWD8Qe#`G?kX4*h{9-uXF1>CkE;MQb?>BF`dJlKUJM2h}OMWd=se_3y{ zOodRS0C5_y!gaL8*&M7O+` zDC%N4aLtNi{Y7*tsa8QdBqsz@#(fEJ1R;(8OnlVbPyUu0m)q1oro@UJspN2H$BtCI zHI+=~CtD{2ovoc6YA&O;%YLJM)ZMdhKzibFRDi>nAA}3n0JcB=>b3K4fA!Sc=QqE! 
z_12-E95nuL>M6wX1=UYn)_~^vD%zh0nNmeApdTX`cBe25tQ$GY%v#2TnM{LZY$B5; zi6Qnx!d)SwU2Ru)f+c%mT@WS4$>W*dlw5^vpC?~;8;4{K=Z2PoRX5<_l;I!)c0L#D?MmC_g7%#O5%HQI+}Ev3)@Q&OfAqOZPosKrh_ZFJ0fQ zyDsZ*-nTX^T3f@`8n=kpobCU_7k|A*5hC9-0Kn^M2rPEF_QxP{eJDL$RJ<72ZjrBj z@4J9oH1+`kiu18|j{^qlg>T-}3;BgBJ@;hvr#A$_%ITh)`Z?V--}stne0|o{b5B3L zp=}xY)@pKTpz*a6N}X1+P+VQb^68#?vAOBs)4@Z*LNPcHj7H9Ef4M<{!8y~4^kBGf zCtP?d^5``==a)YATrYlHEb7nSvXr(z@_0rHHzI{Q-C&{k;GguVIhVfcz5ESa5QDMc z*MhO&;h-@$T^j!nVVy{Ov}9Zrt4>Z?#Vx$AcmES3nTaFGR3bYFPh+g2SyZ$eM-$mx zGLs(k+hwOpl@6}>V>aSQ%Ctr^*@?jj?hYh0vm@h!RF5akRAR(3;)zi+pRxwq+G4`B z+4H0y+~fYf4Tj=bpi!U^pnLBZi$4T<73dp4yJ42s!h?hR2o~4ze&L$*xi+t@Z5dcDni8`pfJ;e^<|>B+~wuA#lkU*EI`?zn*=~!2xB=o!V0)L>_maEI+kDp3<5kYoba`7|tku2#lXbc`Fje2@ z)w13|(CZI+Te`do=z)Hhw?X#n=K%MEo$`GHO|F!c5m*_4l@VAOft3;XUyr~Hd=Fxo zCMp@Y@i!{dwbU@}(PV8M@mJxq63a$;$9!u@);1H*`4Oj+&D1X6)u6-e|GHnyAmp4| zLsH85$RB=BW4Ykgkcidt4*4We%mc7A+eB<2$^>%{S>SUO3%_GopT86E`y9(dHW8EL zpXEcubKH!FBtRDC;WaM%qme#*zDV9hdbf!WP&}Rey)t=6}>~P}==%o&HY$E~jrN?}619aBEI6 z$TPbgX^9Ze6B@LpuasP7*ukbncQM8Qm0}i=La_7F=E$x@$#5k}Vgv(WBJdcAM z?aw&upOpGb=jBHruhy^S`g=jzSubYlHK>)!WvEsg2h^x$|92Alz>@XwS7~SIyx$~! 
zexFDdZuW#MU7zcr1FkCTp#|jYVSMlu1Pkx`0gx+FQh9q;%9pO6D9C;AmUFJ3c95?Z zOP{YnDX+AR+;OtwT%V^vt}Im--v@b>le9c3%XaU7iXTbI%Qt@OKeU)_UtAX0_)v3Bx<=eA8SznUW+F_N`%R(^C; z9f9M)y`g?15_gfWuJq!T&Nh-ZwkRA$&r8F4F<%^7BXQs8k* zDq$t!>f<|ibyQa}Mw98JVP>=DF(Z+-vd6?|)|^Nf@%+TZF@ThD254Fps-ZsfnM5c= zr#j)djJ3Bv*wV}Eb&!JeoQ4R%F((1Sco9m(ZoPlOSU_Q7Efk5RNQgd-F+hf@h*B;uxJGNg3= zs%hmXt}vFMWgo_ZZpFzgwB<6!xS5Wlx$u6_iYL=XK9`7#WO_7X@OczF!MLzf^jO{;gTb#nv=YYNqeef?dMIV)a&pR{Z#>M{>8f3pcIhW_+(tD$c>x1aiPYK zrNN#}t*qU`PNf<#2DD`pDH8!yJCd@5Do03#sydbd)=Esm9Tz4jW#pWxiE&zr<8d&; zoDH?tq>bSE*GE7N+?W%|5eO(_p?Q$t0#IS_g$k1~0W03|y#Kc>zHi~tFYWVg4Y<;O zC(b*h5N@bjUOo#9e(S?_AWPJ(5tU@g-o*Q^-3sULP$|drcd11Nx3MBoC%zlxcOW-l z|L5GA)I3K5j=xq)OZgMAVjom67h?Pcw+0;MO_B)RCh{K__MP7k@FgY7A-H3%#CYM+ zq+AN27O=U+yL0>wLks3QjOXv>03~2`gyi^HkLfeej=2-_8u`}pV{1D-}pX}e+|K|wbMFuAcKUoEjww(C0Aj7!0pVNe&COqfCWM0O~b)@)yEXU6f ze#X&%#_)eArr&em=LtVgc#fCxPW}fD`~u+@2>(wEuB@X{wiGPpYfc>@c-2ghrZMQMF)PiPLqgP>d;3HymS11 z1MT?hn1*XHFXI2LPJJi;7GN98@SHz2qfsbMeJB5C(82z*f6h~sPl*3JV6m%==X{L+ zhtH#NC_KOHpVz@TXe;;6dD~s=6cpCCxf0Lv_o1O2znI^V1BHP^8OQXl1JB>t3gP|B z;8}bhDrk%Q&)=H|2+w|4pN4~3;Q;~4oPHROf0D}!@e>1|8HvZeBuBA literal 0 HcmV?d00001 diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CompilerIdCXX/CMakeCXXCompilerId.cpp b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CompilerIdCXX/CMakeCXXCompilerId.cpp new file mode 100644 index 0000000..e6d8536 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CompilerIdCXX/CMakeCXXCompilerId.cpp @@ -0,0 +1,533 @@ +/* This source file must have a .cpp extension so that all C++ compilers + recognize the extension without flags. Borland does not know .cxx for + example. */ +#ifndef __cplusplus +# error "A C compiler has been selected for C++." 
+#endif + + +/* Version number components: V=Version, R=Revision, P=Patch + Version date components: YYYY=Year, MM=Month, DD=Day */ + +#if defined(__COMO__) +# define COMPILER_ID "Comeau" + /* __COMO_VERSION__ = VRR */ +# define COMPILER_VERSION_MAJOR DEC(__COMO_VERSION__ / 100) +# define COMPILER_VERSION_MINOR DEC(__COMO_VERSION__ % 100) + +#elif defined(__INTEL_COMPILER) || defined(__ICC) +# define COMPILER_ID "Intel" +# if defined(_MSC_VER) +# define SIMULATE_ID "MSVC" +# endif + /* __INTEL_COMPILER = VRP */ +# define COMPILER_VERSION_MAJOR DEC(__INTEL_COMPILER/100) +# define COMPILER_VERSION_MINOR DEC(__INTEL_COMPILER/10 % 10) +# if defined(__INTEL_COMPILER_UPDATE) +# define COMPILER_VERSION_PATCH DEC(__INTEL_COMPILER_UPDATE) +# else +# define COMPILER_VERSION_PATCH DEC(__INTEL_COMPILER % 10) +# endif +# if defined(__INTEL_COMPILER_BUILD_DATE) + /* __INTEL_COMPILER_BUILD_DATE = YYYYMMDD */ +# define COMPILER_VERSION_TWEAK DEC(__INTEL_COMPILER_BUILD_DATE) +# endif +# if defined(_MSC_VER) + /* _MSC_VER = VVRR */ +# define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100) +# define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100) +# endif + +#elif defined(__PATHCC__) +# define COMPILER_ID "PathScale" +# define COMPILER_VERSION_MAJOR DEC(__PATHCC__) +# define COMPILER_VERSION_MINOR DEC(__PATHCC_MINOR__) +# if defined(__PATHCC_PATCHLEVEL__) +# define COMPILER_VERSION_PATCH DEC(__PATHCC_PATCHLEVEL__) +# endif + +#elif defined(__BORLANDC__) && defined(__CODEGEARC_VERSION__) +# define COMPILER_ID "Embarcadero" +# define COMPILER_VERSION_MAJOR HEX(__CODEGEARC_VERSION__>>24 & 0x00FF) +# define COMPILER_VERSION_MINOR HEX(__CODEGEARC_VERSION__>>16 & 0x00FF) +# define COMPILER_VERSION_PATCH DEC(__CODEGEARC_VERSION__ & 0xFFFF) + +#elif defined(__BORLANDC__) +# define COMPILER_ID "Borland" + /* __BORLANDC__ = 0xVRR */ +# define COMPILER_VERSION_MAJOR HEX(__BORLANDC__>>8) +# define COMPILER_VERSION_MINOR HEX(__BORLANDC__ & 0xFF) + +#elif defined(__WATCOMC__) && __WATCOMC__ < 1200 +# define 
COMPILER_ID "Watcom" + /* __WATCOMC__ = VVRR */ +# define COMPILER_VERSION_MAJOR DEC(__WATCOMC__ / 100) +# define COMPILER_VERSION_MINOR DEC((__WATCOMC__ / 10) % 10) +# if (__WATCOMC__ % 10) > 0 +# define COMPILER_VERSION_PATCH DEC(__WATCOMC__ % 10) +# endif + +#elif defined(__WATCOMC__) +# define COMPILER_ID "OpenWatcom" + /* __WATCOMC__ = VVRP + 1100 */ +# define COMPILER_VERSION_MAJOR DEC((__WATCOMC__ - 1100) / 100) +# define COMPILER_VERSION_MINOR DEC((__WATCOMC__ / 10) % 10) +# if (__WATCOMC__ % 10) > 0 +# define COMPILER_VERSION_PATCH DEC(__WATCOMC__ % 10) +# endif + +#elif defined(__SUNPRO_CC) +# define COMPILER_ID "SunPro" +# if __SUNPRO_CC >= 0x5100 + /* __SUNPRO_CC = 0xVRRP */ +# define COMPILER_VERSION_MAJOR HEX(__SUNPRO_CC>>12) +# define COMPILER_VERSION_MINOR HEX(__SUNPRO_CC>>4 & 0xFF) +# define COMPILER_VERSION_PATCH HEX(__SUNPRO_CC & 0xF) +# else + /* __SUNPRO_CC = 0xVRP */ +# define COMPILER_VERSION_MAJOR HEX(__SUNPRO_CC>>8) +# define COMPILER_VERSION_MINOR HEX(__SUNPRO_CC>>4 & 0xF) +# define COMPILER_VERSION_PATCH HEX(__SUNPRO_CC & 0xF) +# endif + +#elif defined(__HP_aCC) +# define COMPILER_ID "HP" + /* __HP_aCC = VVRRPP */ +# define COMPILER_VERSION_MAJOR DEC(__HP_aCC/10000) +# define COMPILER_VERSION_MINOR DEC(__HP_aCC/100 % 100) +# define COMPILER_VERSION_PATCH DEC(__HP_aCC % 100) + +#elif defined(__DECCXX) +# define COMPILER_ID "Compaq" + /* __DECCXX_VER = VVRRTPPPP */ +# define COMPILER_VERSION_MAJOR DEC(__DECCXX_VER/10000000) +# define COMPILER_VERSION_MINOR DEC(__DECCXX_VER/100000 % 100) +# define COMPILER_VERSION_PATCH DEC(__DECCXX_VER % 10000) + +#elif defined(__IBMCPP__) && defined(__COMPILER_VER__) +# define COMPILER_ID "zOS" + /* __IBMCPP__ = VRP */ +# define COMPILER_VERSION_MAJOR DEC(__IBMCPP__/100) +# define COMPILER_VERSION_MINOR DEC(__IBMCPP__/10 % 10) +# define COMPILER_VERSION_PATCH DEC(__IBMCPP__ % 10) + +#elif defined(__IBMCPP__) && !defined(__COMPILER_VER__) && __IBMCPP__ >= 800 +# define COMPILER_ID "XL" + /* __IBMCPP__ = VRP 
*/ +# define COMPILER_VERSION_MAJOR DEC(__IBMCPP__/100) +# define COMPILER_VERSION_MINOR DEC(__IBMCPP__/10 % 10) +# define COMPILER_VERSION_PATCH DEC(__IBMCPP__ % 10) + +#elif defined(__IBMCPP__) && !defined(__COMPILER_VER__) && __IBMCPP__ < 800 +# define COMPILER_ID "VisualAge" + /* __IBMCPP__ = VRP */ +# define COMPILER_VERSION_MAJOR DEC(__IBMCPP__/100) +# define COMPILER_VERSION_MINOR DEC(__IBMCPP__/10 % 10) +# define COMPILER_VERSION_PATCH DEC(__IBMCPP__ % 10) + +#elif defined(__PGI) +# define COMPILER_ID "PGI" +# define COMPILER_VERSION_MAJOR DEC(__PGIC__) +# define COMPILER_VERSION_MINOR DEC(__PGIC_MINOR__) +# if defined(__PGIC_PATCHLEVEL__) +# define COMPILER_VERSION_PATCH DEC(__PGIC_PATCHLEVEL__) +# endif + +#elif defined(_CRAYC) +# define COMPILER_ID "Cray" +# define COMPILER_VERSION_MAJOR DEC(_RELEASE_MAJOR) +# define COMPILER_VERSION_MINOR DEC(_RELEASE_MINOR) + +#elif defined(__TI_COMPILER_VERSION__) +# define COMPILER_ID "TI" + /* __TI_COMPILER_VERSION__ = VVVRRRPPP */ +# define COMPILER_VERSION_MAJOR DEC(__TI_COMPILER_VERSION__/1000000) +# define COMPILER_VERSION_MINOR DEC(__TI_COMPILER_VERSION__/1000 % 1000) +# define COMPILER_VERSION_PATCH DEC(__TI_COMPILER_VERSION__ % 1000) + +#elif defined(__FUJITSU) || defined(__FCC_VERSION) || defined(__fcc_version) +# define COMPILER_ID "Fujitsu" + +#elif defined(__SCO_VERSION__) +# define COMPILER_ID "SCO" + +#elif defined(__clang__) && defined(__apple_build_version__) +# define COMPILER_ID "AppleClang" +# if defined(_MSC_VER) +# define SIMULATE_ID "MSVC" +# endif +# define COMPILER_VERSION_MAJOR DEC(__clang_major__) +# define COMPILER_VERSION_MINOR DEC(__clang_minor__) +# define COMPILER_VERSION_PATCH DEC(__clang_patchlevel__) +# if defined(_MSC_VER) + /* _MSC_VER = VVRR */ +# define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100) +# define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100) +# endif +# define COMPILER_VERSION_TWEAK DEC(__apple_build_version__) + +#elif defined(__clang__) +# define COMPILER_ID "Clang" +# if 
defined(_MSC_VER) +# define SIMULATE_ID "MSVC" +# endif +# define COMPILER_VERSION_MAJOR DEC(__clang_major__) +# define COMPILER_VERSION_MINOR DEC(__clang_minor__) +# define COMPILER_VERSION_PATCH DEC(__clang_patchlevel__) +# if defined(_MSC_VER) + /* _MSC_VER = VVRR */ +# define SIMULATE_VERSION_MAJOR DEC(_MSC_VER / 100) +# define SIMULATE_VERSION_MINOR DEC(_MSC_VER % 100) +# endif + +#elif defined(__GNUC__) +# define COMPILER_ID "GNU" +# define COMPILER_VERSION_MAJOR DEC(__GNUC__) +# if defined(__GNUC_MINOR__) +# define COMPILER_VERSION_MINOR DEC(__GNUC_MINOR__) +# endif +# if defined(__GNUC_PATCHLEVEL__) +# define COMPILER_VERSION_PATCH DEC(__GNUC_PATCHLEVEL__) +# endif + +#elif defined(_MSC_VER) +# define COMPILER_ID "MSVC" + /* _MSC_VER = VVRR */ +# define COMPILER_VERSION_MAJOR DEC(_MSC_VER / 100) +# define COMPILER_VERSION_MINOR DEC(_MSC_VER % 100) +# if defined(_MSC_FULL_VER) +# if _MSC_VER >= 1400 + /* _MSC_FULL_VER = VVRRPPPPP */ +# define COMPILER_VERSION_PATCH DEC(_MSC_FULL_VER % 100000) +# else + /* _MSC_FULL_VER = VVRRPPPP */ +# define COMPILER_VERSION_PATCH DEC(_MSC_FULL_VER % 10000) +# endif +# endif +# if defined(_MSC_BUILD) +# define COMPILER_VERSION_TWEAK DEC(_MSC_BUILD) +# endif + +#elif defined(__VISUALDSPVERSION__) || defined(__ADSPBLACKFIN__) || defined(__ADSPTS__) || defined(__ADSP21000__) +# define COMPILER_ID "ADSP" +#if defined(__VISUALDSPVERSION__) + /* __VISUALDSPVERSION__ = 0xVVRRPP00 */ +# define COMPILER_VERSION_MAJOR HEX(__VISUALDSPVERSION__>>24) +# define COMPILER_VERSION_MINOR HEX(__VISUALDSPVERSION__>>16 & 0xFF) +# define COMPILER_VERSION_PATCH HEX(__VISUALDSPVERSION__>>8 & 0xFF) +#endif + +#elif defined(__IAR_SYSTEMS_ICC__ ) || defined(__IAR_SYSTEMS_ICC) +# define COMPILER_ID "IAR" + +#elif defined(__ARMCC_VERSION) +# define COMPILER_ID "ARMCC" +#if __ARMCC_VERSION >= 1000000 + /* __ARMCC_VERSION = VRRPPPP */ + # define COMPILER_VERSION_MAJOR DEC(__ARMCC_VERSION/1000000) + # define COMPILER_VERSION_MINOR 
DEC(__ARMCC_VERSION/10000 % 100) + # define COMPILER_VERSION_PATCH DEC(__ARMCC_VERSION % 10000) +#else + /* __ARMCC_VERSION = VRPPPP */ + # define COMPILER_VERSION_MAJOR DEC(__ARMCC_VERSION/100000) + # define COMPILER_VERSION_MINOR DEC(__ARMCC_VERSION/10000 % 10) + # define COMPILER_VERSION_PATCH DEC(__ARMCC_VERSION % 10000) +#endif + + +#elif defined(_SGI_COMPILER_VERSION) || defined(_COMPILER_VERSION) +# define COMPILER_ID "MIPSpro" +# if defined(_SGI_COMPILER_VERSION) + /* _SGI_COMPILER_VERSION = VRP */ +# define COMPILER_VERSION_MAJOR DEC(_SGI_COMPILER_VERSION/100) +# define COMPILER_VERSION_MINOR DEC(_SGI_COMPILER_VERSION/10 % 10) +# define COMPILER_VERSION_PATCH DEC(_SGI_COMPILER_VERSION % 10) +# else + /* _COMPILER_VERSION = VRP */ +# define COMPILER_VERSION_MAJOR DEC(_COMPILER_VERSION/100) +# define COMPILER_VERSION_MINOR DEC(_COMPILER_VERSION/10 % 10) +# define COMPILER_VERSION_PATCH DEC(_COMPILER_VERSION % 10) +# endif + + +/* These compilers are either not known or too old to define an + identification macro. Try to identify the platform and guess that + it is the native compiler. */ +#elif defined(__sgi) +# define COMPILER_ID "MIPSpro" + +#elif defined(__hpux) || defined(__hpua) +# define COMPILER_ID "HP" + +#else /* unknown compiler */ +# define COMPILER_ID "" +#endif + +/* Construct the string literal in pieces to prevent the source from + getting matched. Store it in a pointer rather than an array + because some compilers will just produce instructions to fill the + array rather than assigning a pointer to a static array. 
*/ +char const* info_compiler = "INFO" ":" "compiler[" COMPILER_ID "]"; +#ifdef SIMULATE_ID +char const* info_simulate = "INFO" ":" "simulate[" SIMULATE_ID "]"; +#endif + +#ifdef __QNXNTO__ +char const* qnxnto = "INFO" ":" "qnxnto[]"; +#endif + +#if defined(__CRAYXE) || defined(__CRAYXC) +char const *info_cray = "INFO" ":" "compiler_wrapper[CrayPrgEnv]"; +#endif + +#define STRINGIFY_HELPER(X) #X +#define STRINGIFY(X) STRINGIFY_HELPER(X) + +/* Identify known platforms by name. */ +#if defined(__linux) || defined(__linux__) || defined(linux) +# define PLATFORM_ID "Linux" + +#elif defined(__CYGWIN__) +# define PLATFORM_ID "Cygwin" + +#elif defined(__MINGW32__) +# define PLATFORM_ID "MinGW" + +#elif defined(__APPLE__) +# define PLATFORM_ID "Darwin" + +#elif defined(_WIN32) || defined(__WIN32__) || defined(WIN32) +# define PLATFORM_ID "Windows" + +#elif defined(__FreeBSD__) || defined(__FreeBSD) +# define PLATFORM_ID "FreeBSD" + +#elif defined(__NetBSD__) || defined(__NetBSD) +# define PLATFORM_ID "NetBSD" + +#elif defined(__OpenBSD__) || defined(__OPENBSD) +# define PLATFORM_ID "OpenBSD" + +#elif defined(__sun) || defined(sun) +# define PLATFORM_ID "SunOS" + +#elif defined(_AIX) || defined(__AIX) || defined(__AIX__) || defined(__aix) || defined(__aix__) +# define PLATFORM_ID "AIX" + +#elif defined(__sgi) || defined(__sgi__) || defined(_SGI) +# define PLATFORM_ID "IRIX" + +#elif defined(__hpux) || defined(__hpux__) +# define PLATFORM_ID "HP-UX" + +#elif defined(__HAIKU__) +# define PLATFORM_ID "Haiku" + +#elif defined(__BeOS) || defined(__BEOS__) || defined(_BEOS) +# define PLATFORM_ID "BeOS" + +#elif defined(__QNX__) || defined(__QNXNTO__) +# define PLATFORM_ID "QNX" + +#elif defined(__tru64) || defined(_tru64) || defined(__TRU64__) +# define PLATFORM_ID "Tru64" + +#elif defined(__riscos) || defined(__riscos__) +# define PLATFORM_ID "RISCos" + +#elif defined(__sinix) || defined(__sinix__) || defined(__SINIX__) +# define PLATFORM_ID "SINIX" + +#elif defined(__UNIX_SV__) 
+# define PLATFORM_ID "UNIX_SV" + +#elif defined(__bsdos__) +# define PLATFORM_ID "BSDOS" + +#elif defined(_MPRAS) || defined(MPRAS) +# define PLATFORM_ID "MP-RAS" + +#elif defined(__osf) || defined(__osf__) +# define PLATFORM_ID "OSF1" + +#elif defined(_SCO_SV) || defined(SCO_SV) || defined(sco_sv) +# define PLATFORM_ID "SCO_SV" + +#elif defined(__ultrix) || defined(__ultrix__) || defined(_ULTRIX) +# define PLATFORM_ID "ULTRIX" + +#elif defined(__XENIX__) || defined(_XENIX) || defined(XENIX) +# define PLATFORM_ID "Xenix" + +#elif defined(__WATCOMC__) +# if defined(__LINUX__) +# define PLATFORM_ID "Linux" + +# elif defined(__DOS__) +# define PLATFORM_ID "DOS" + +# elif defined(__OS2__) +# define PLATFORM_ID "OS2" + +# elif defined(__WINDOWS__) +# define PLATFORM_ID "Windows3x" + +# else /* unknown platform */ +# define PLATFORM_ID "" +# endif + +#else /* unknown platform */ +# define PLATFORM_ID "" + +#endif + +/* For windows compilers MSVC and Intel we can determine + the architecture of the compiler being used. 
This is because + the compilers do not have flags that can change the architecture, + but rather depend on which compiler is being used +*/ +#if defined(_WIN32) && defined(_MSC_VER) +# if defined(_M_IA64) +# define ARCHITECTURE_ID "IA64" + +# elif defined(_M_X64) || defined(_M_AMD64) +# define ARCHITECTURE_ID "x64" + +# elif defined(_M_IX86) +# define ARCHITECTURE_ID "X86" + +# elif defined(_M_ARM) +# if _M_ARM == 4 +# define ARCHITECTURE_ID "ARMV4I" +# elif _M_ARM == 5 +# define ARCHITECTURE_ID "ARMV5I" +# else +# define ARCHITECTURE_ID "ARMV" STRINGIFY(_M_ARM) +# endif + +# elif defined(_M_MIPS) +# define ARCHITECTURE_ID "MIPS" + +# elif defined(_M_SH) +# define ARCHITECTURE_ID "SHx" + +# else /* unknown architecture */ +# define ARCHITECTURE_ID "" +# endif + +#elif defined(__WATCOMC__) +# if defined(_M_I86) +# define ARCHITECTURE_ID "I86" + +# elif defined(_M_IX86) +# define ARCHITECTURE_ID "X86" + +# else /* unknown architecture */ +# define ARCHITECTURE_ID "" +# endif + +#else +# define ARCHITECTURE_ID "" +#endif + +/* Convert integer to decimal digit literals. */ +#define DEC(n) \ + ('0' + (((n) / 10000000)%10)), \ + ('0' + (((n) / 1000000)%10)), \ + ('0' + (((n) / 100000)%10)), \ + ('0' + (((n) / 10000)%10)), \ + ('0' + (((n) / 1000)%10)), \ + ('0' + (((n) / 100)%10)), \ + ('0' + (((n) / 10)%10)), \ + ('0' + ((n) % 10)) + +/* Convert integer to hex digit literals. */ +#define HEX(n) \ + ('0' + ((n)>>28 & 0xF)), \ + ('0' + ((n)>>24 & 0xF)), \ + ('0' + ((n)>>20 & 0xF)), \ + ('0' + ((n)>>16 & 0xF)), \ + ('0' + ((n)>>12 & 0xF)), \ + ('0' + ((n)>>8 & 0xF)), \ + ('0' + ((n)>>4 & 0xF)), \ + ('0' + ((n) & 0xF)) + +/* Construct a string literal encoding the version number components. 
*/ +#ifdef COMPILER_VERSION_MAJOR +char const info_version[] = { + 'I', 'N', 'F', 'O', ':', + 'c','o','m','p','i','l','e','r','_','v','e','r','s','i','o','n','[', + COMPILER_VERSION_MAJOR, +# ifdef COMPILER_VERSION_MINOR + '.', COMPILER_VERSION_MINOR, +# ifdef COMPILER_VERSION_PATCH + '.', COMPILER_VERSION_PATCH, +# ifdef COMPILER_VERSION_TWEAK + '.', COMPILER_VERSION_TWEAK, +# endif +# endif +# endif + ']','\0'}; +#endif + +/* Construct a string literal encoding the version number components. */ +#ifdef SIMULATE_VERSION_MAJOR +char const info_simulate_version[] = { + 'I', 'N', 'F', 'O', ':', + 's','i','m','u','l','a','t','e','_','v','e','r','s','i','o','n','[', + SIMULATE_VERSION_MAJOR, +# ifdef SIMULATE_VERSION_MINOR + '.', SIMULATE_VERSION_MINOR, +# ifdef SIMULATE_VERSION_PATCH + '.', SIMULATE_VERSION_PATCH, +# ifdef SIMULATE_VERSION_TWEAK + '.', SIMULATE_VERSION_TWEAK, +# endif +# endif +# endif + ']','\0'}; +#endif + +/* Construct the string literal in pieces to prevent the source from + getting matched. Store it in a pointer rather than an array + because some compilers will just produce instructions to fill the + array rather than assigning a pointer to a static array. 
*/ +char const* info_platform = "INFO" ":" "platform[" PLATFORM_ID "]"; +char const* info_arch = "INFO" ":" "arch[" ARCHITECTURE_ID "]"; + + + + +const char* info_language_dialect_default = "INFO" ":" "dialect_default[" +#if __cplusplus >= 201402L + "14" +#elif __cplusplus >= 201103L + "11" +#else + "98" +#endif +"]"; + +/*--------------------------------------------------------------------------*/ + +int main(int argc, char* argv[]) +{ + int require = 0; + require += info_compiler[argc]; + require += info_platform[argc]; +#ifdef COMPILER_VERSION_MAJOR + require += info_version[argc]; +#endif +#ifdef SIMULATE_ID + require += info_simulate[argc]; +#endif +#ifdef SIMULATE_VERSION_MAJOR + require += info_simulate_version[argc]; +#endif +#if defined(__CRAYXE) || defined(__CRAYXC) + require += info_cray[argc]; +#endif + require += info_language_dialect_default[argc]; + (void)argv; + return require; +} diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CompilerIdCXX/a.out b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CompilerIdCXX/a.out new file mode 100755 index 0000000000000000000000000000000000000000..648b86701f00871e7148b5f6fcce25d049005826 GIT binary patch literal 8808 zcmeHMeQZN9lDd>ZEGQq^YCQRDwXT{xhY2(0^77vkluk|mi>tP`5p zF76j~AgP5*lNzK}kLfD8X4*h@E}%7V`J9%(=hS3}>HU@%B3O+_h!o?MnnqoV{-;GjmDu9WgN1IYb9JO;g~Mbz?kyIv-BP z+LDP>ZnkaK*VWe5sb>r622g)xxMn%?=i_gz%>C=e z4^s6{?fB`Y6Nu#tx}UnO2g^-WtUn7frHWj@JjQX@UBWQ1Z)DAA#x$m*i4;gCr_(8t z7-C;2*c~uB)DCqwc(NX>14fDC6^47`b`5&SPoeU?;poD31UfTNgDnSUMi!}_=XCLNr=?E(jS{c$${f+Ct)^J?x-TK%=U);F^#ptsKt{;QnXHnpH zdrmq78|$1&h-~R%AYKK19QEC9&v%^N?v@u_y>8`nO}D%Cg<9S1Jz1x_eRK8wZY|^X z`Q2W>yS3Y`fF0O(yBp-ZUIDlQ^DX~wpv$}MZU*jV;BE%)X5elH{?{2;fc+r0d7_em z8}Ct>ZXm_HOOw5g#9xGMCAKZ{j`h~4>}?~S>mx2FTSzZ|tHFl#|F~U9Bjk)zLsF{w zC?4*ov7L2lNW?wz4)r8atOKyMSVS}vWrDSbY_MI$#=o&_&-(=2pJUr>5iv{gSw2cU zkDKvO1jxob{EW-_Xk-uD7s)%w?mF>4(v$fAPPmS>2fB2panxzjbcpCA(Jaw@fxx3m 
z>+o1EW#*LKDr^kfx^j|h|7Lra>g`n9y$@OnrNi6ay=JXPp?W>gS%~daATvFZjKx zly49VRsC!fOI79W;yF&GpGLv&X{FpFmg)VilsCb7gjOz+eHT>Pi&f?K7T^1FkPCLT z{JvMpn+2A2_U00wfpeE?^38(l>T-+=-*$SRE9F~+QdPcHV3}%fF2P@`lEwku8u3w~ zB(Zw<9dy9g#Xf%g{yBwPcx>NM^o;UBn_MNi{kz*E{g=jxNT>o$lFrBHb1DX=4VC1?p?D^ z{we*eT@Ox~khSw_mh!dhwiV=#D(mENkZ*##;pqrA{yi7~xgsT%x9>{%+I96b$UX4u zXJ1!3$Tx|#@7=RfUg;aTdGcdlw=aQQS!*s{0eO>ii)gj2=hsjUBjWeEhOmE-@-4zg z_Wb+x7RWIVcsztn*04Ztz~vEJ#UY#jACmpDO9KlbxYmA`{CjQwZ-5;A*zl%|!LVV7SUeM-Ok~Y?#xSRi@nkv`&w@oPZA>Q9W6`7$Gt-%@5zWmCJZ?$G z&3H_GWcQxV>PE&yB9$qemDZ+%!T^{ zn743rEEyL@JQg*h3@M$;YFha@EMyq0tRq^mtvIg*T{dk@MN=_!7d!w~u|&$qW#cg+ z)4-=x90bROle4sZDhArIY?eaEb72WiMpKiy=p;On%A+h{>_1`*(OL*3quH!n2rxHJ zGfukdl%-!hB@;r;9-TI$V?fP}MW?uzNWmhT5o#)J##MiJu+5B45;vL3sbjeWoXRI+ zLPc&Wnw=7A>}U%7S=7u}8am6XU<}Y@;>jois5g@|g({PxLRXzk18c@-;f^a5jMDNk zsqraVlT$Hp!kh)Q7N>>a{?p@N24SMpiE$W1+C=vt!Dp^QzJ&_;pN6Gxd%^!p7Jql) zRWE+mc|XbjKXLuR?$NfqJpc^u`Qfu5TiB@)m1NDgh`+Oz7S7?JQ;z5TsYwoZvLR7B zz6aztp+?~NXPla}JVOE=f31|3>L_Bx0Ccb>V*FXB1{~H;k_g=*^1m*eJO3WQj!L#C z;EpvD|) z<02)T>t(jLum4{Wo}VA@XO;<%XXWKhDQBSn8@Mn&t^bd^tX^gEOEjkI(hnDozR-+gn_TXZhbjQI0Rxc@#ilAW_CKUA5tP zU#$?{dj~v=Z$k%t@%(vzIY@Yp!}gR8W`zp`D6_|5JpQ9x-Wboc36vbW-0nwo(1!no ql{XtJWAoZCw|kz(|1v$e&w&!h$n)XvBEFy1;(O^}Y|`f39{=B25Op{J literal 0 HcmV?d00001 diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeDirectoryInformation.cmake b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeDirectoryInformation.cmake new file mode 100644 index 0000000..f8c4e69 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeDirectoryInformation.cmake @@ -0,0 +1,16 @@ +# CMAKE generated file: DO NOT EDIT! +# Generated by "Unix Makefiles" Generator, CMake Version 3.5 + +# Relative path conversion top directories. 
+set(CMAKE_RELATIVE_PATH_TOP_SOURCE "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2") +set(CMAKE_RELATIVE_PATH_TOP_BINARY "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release") + +# Force unix paths in dependencies. +set(CMAKE_FORCE_UNIX_PATHS 1) + + +# The C and CXX include file regular expressions for this directory. +set(CMAKE_C_INCLUDE_REGEX_SCAN "^.*$") +set(CMAKE_C_INCLUDE_REGEX_COMPLAIN "^$") +set(CMAKE_CXX_INCLUDE_REGEX_SCAN ${CMAKE_C_INCLUDE_REGEX_SCAN}) +set(CMAKE_CXX_INCLUDE_REGEX_COMPLAIN ${CMAKE_C_INCLUDE_REGEX_COMPLAIN}) diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeError.log b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeError.log new file mode 100644 index 0000000..b53845d --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeError.log @@ -0,0 +1,59 @@ +Determining if the pthread_create exist failed with the following output: +Change Dir: /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp + +Run Build Command:"/usr/bin/make" "cmTC_4bf38/fast" +make[1]: Entering directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +/usr/bin/make -f CMakeFiles/cmTC_4bf38.dir/build.make CMakeFiles/cmTC_4bf38.dir/build +make[2]: Entering directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +Building C object CMakeFiles/cmTC_4bf38.dir/CheckSymbolExists.c.o +/usr/bin/cc -o CMakeFiles/cmTC_4bf38.dir/CheckSymbolExists.c.o -c /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp/CheckSymbolExists.c +Linking C 
executable cmTC_4bf38 +/usr/bin/cmake -E cmake_link_script CMakeFiles/cmTC_4bf38.dir/link.txt --verbose=1 +/usr/bin/cc CMakeFiles/cmTC_4bf38.dir/CheckSymbolExists.c.o -o cmTC_4bf38 -rdynamic +CMakeFiles/cmTC_4bf38.dir/CheckSymbolExists.c.o: In function `main': +CheckSymbolExists.c:(.text+0x16): undefined reference to `pthread_create' +collect2: error: ld returned 1 exit status +CMakeFiles/cmTC_4bf38.dir/build.make:97: recipe for target 'cmTC_4bf38' failed +make[2]: *** [cmTC_4bf38] Error 1 +make[2]: Leaving directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +Makefile:126: recipe for target 'cmTC_4bf38/fast' failed +make[1]: *** [cmTC_4bf38/fast] Error 2 +make[1]: Leaving directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' + +File /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp/CheckSymbolExists.c: +/* */ +#include + +int main(int argc, char** argv) +{ + (void)argv; +#ifndef pthread_create + return ((int*)(&pthread_create))[argc]; +#else + (void)argc; + return 0; +#endif +} + +Determining if the function pthread_create exists in the pthreads failed with the following output: +Change Dir: /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp + +Run Build Command:"/usr/bin/make" "cmTC_6de54/fast" +make[1]: Entering directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +/usr/bin/make -f CMakeFiles/cmTC_6de54.dir/build.make CMakeFiles/cmTC_6de54.dir/build +make[2]: Entering directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +Building C object 
CMakeFiles/cmTC_6de54.dir/CheckFunctionExists.c.o +/usr/bin/cc -DCHECK_FUNCTION_EXISTS=pthread_create -o CMakeFiles/cmTC_6de54.dir/CheckFunctionExists.c.o -c /usr/share/cmake-3.5/Modules/CheckFunctionExists.c +Linking C executable cmTC_6de54 +/usr/bin/cmake -E cmake_link_script CMakeFiles/cmTC_6de54.dir/link.txt --verbose=1 +/usr/bin/cc -DCHECK_FUNCTION_EXISTS=pthread_create CMakeFiles/cmTC_6de54.dir/CheckFunctionExists.c.o -o cmTC_6de54 -rdynamic -lpthreads +/usr/bin/ld: cannot find -lpthreads +collect2: error: ld returned 1 exit status +CMakeFiles/cmTC_6de54.dir/build.make:97: recipe for target 'cmTC_6de54' failed +make[2]: *** [cmTC_6de54] Error 1 +make[2]: Leaving directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +Makefile:126: recipe for target 'cmTC_6de54/fast' failed +make[1]: *** [cmTC_6de54/fast] Error 2 +make[1]: Leaving directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' + + diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeOutput.log b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeOutput.log new file mode 100644 index 0000000..3d5531d --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeOutput.log @@ -0,0 +1,608 @@ +The system is: Linux - 4.4.0-119-generic - x86_64 +Compiling the C compiler identification source file "CMakeCCompilerId.c" succeeded. 
+Compiler: /usr/bin/cc +Build flags: +Id flags: + +The output was: +0 + + +Compilation of the C compiler identification source "CMakeCCompilerId.c" produced "a.out" + +The C compiler identification is GNU, found in "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CompilerIdC/a.out" + +Compiling the CXX compiler identification source file "CMakeCXXCompilerId.cpp" succeeded. +Compiler: /usr/bin/c++ +Build flags: +Id flags: + +The output was: +0 + + +Compilation of the CXX compiler identification source "CMakeCXXCompilerId.cpp" produced "a.out" + +The CXX compiler identification is GNU, found in "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/3.5.1/CompilerIdCXX/a.out" + +Determining if the C compiler works passed with the following output: +Change Dir: /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp + +Run Build Command:"/usr/bin/make" "cmTC_70c9a/fast" +make[1]: Entering directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +/usr/bin/make -f CMakeFiles/cmTC_70c9a.dir/build.make CMakeFiles/cmTC_70c9a.dir/build +make[2]: Entering directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +Building C object CMakeFiles/cmTC_70c9a.dir/testCCompiler.c.o +/usr/bin/cc -o CMakeFiles/cmTC_70c9a.dir/testCCompiler.c.o -c /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp/testCCompiler.c +Linking C executable cmTC_70c9a +/usr/bin/cmake -E cmake_link_script CMakeFiles/cmTC_70c9a.dir/link.txt --verbose=1 +/usr/bin/cc CMakeFiles/cmTC_70c9a.dir/testCCompiler.c.o -o cmTC_70c9a -rdynamic +make[2]: Leaving 
directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +make[1]: Leaving directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' + + +Detecting C compiler ABI info compiled with the following output: +Change Dir: /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp + +Run Build Command:"/usr/bin/make" "cmTC_9812f/fast" +make[1]: Entering directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +/usr/bin/make -f CMakeFiles/cmTC_9812f.dir/build.make CMakeFiles/cmTC_9812f.dir/build +make[2]: Entering directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +Building C object CMakeFiles/cmTC_9812f.dir/CMakeCCompilerABI.c.o +/usr/bin/cc -o CMakeFiles/cmTC_9812f.dir/CMakeCCompilerABI.c.o -c /usr/share/cmake-3.5/Modules/CMakeCCompilerABI.c +Linking C executable cmTC_9812f +/usr/bin/cmake -E cmake_link_script CMakeFiles/cmTC_9812f.dir/link.txt --verbose=1 +/usr/bin/cc -v CMakeFiles/cmTC_9812f.dir/CMakeCCompilerABI.c.o -o cmTC_9812f -rdynamic +Using built-in specs. 
+COLLECT_GCC=/usr/bin/cc +COLLECT_LTO_WRAPPER=/usr/lib/gcc/x86_64-linux-gnu/5/lto-wrapper +Target: x86_64-linux-gnu +Configured with: ../src/configure -v --with-pkgversion='Ubuntu 5.4.0-6ubuntu1~16.04.10' --with-bugurl=file:///usr/share/doc/gcc-5/README.Bugs --enable-languages=c,ada,c++,java,go,d,fortran,objc,obj-c++ --prefix=/usr --program-suffix=-5 --enable-shared --enable-linker-build-id --libexecdir=/usr/lib --without-included-gettext --enable-threads=posix --libdir=/usr/lib --enable-nls --with-sysroot=/ --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --enable-libmpx --enable-plugin --with-system-zlib --disable-browser-plugin --enable-java-awt=gtk --enable-gtk-cairo --with-java-home=/usr/lib/jvm/java-1.5.0-gcj-5-amd64/jre --enable-java-home --with-jvm-root-dir=/usr/lib/jvm/java-1.5.0-gcj-5-amd64 --with-jvm-jar-dir=/usr/lib/jvm-exports/java-1.5.0-gcj-5-amd64 --with-arch-directory=amd64 --with-ecj-jar=/usr/share/java/eclipse-ecj.jar --enable-objc-gc --enable-multiarch --disable-werror --with-arch-32=i686 --with-abi=m64 --with-multilib-list=m32,m64,mx32 --enable-multilib --with-tune=generic --enable-checking=release --build=x86_64-linux-gnu --host=x86_64-linux-gnu --target=x86_64-linux-gnu +Thread model: posix +gcc version 5.4.0 20160609 (Ubuntu 5.4.0-6ubuntu1~16.04.10) +COMPILER_PATH=/usr/lib/gcc/x86_64-linux-gnu/5/:/usr/lib/gcc/x86_64-linux-gnu/5/:/usr/lib/gcc/x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/5/:/usr/lib/gcc/x86_64-linux-gnu/ +LIBRARY_PATH=/usr/lib/gcc/x86_64-linux-gnu/5/:/usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/5/../../../../lib/:/lib/x86_64-linux-gnu/:/lib/../lib/:/usr/lib/x86_64-linux-gnu/:/usr/lib/../lib/:/usr/lib/gcc/x86_64-linux-gnu/5/../../../:/lib/:/usr/lib/ +COLLECT_GCC_OPTIONS='-v' '-o' 'cmTC_9812f' '-rdynamic' '-mtune=generic' '-march=x86-64' + 
/usr/lib/gcc/x86_64-linux-gnu/5/collect2 -plugin /usr/lib/gcc/x86_64-linux-gnu/5/liblto_plugin.so -plugin-opt=/usr/lib/gcc/x86_64-linux-gnu/5/lto-wrapper -plugin-opt=-fresolution=/tmp/ccjWUUXi.res -plugin-opt=-pass-through=-lgcc -plugin-opt=-pass-through=-lgcc_s -plugin-opt=-pass-through=-lc -plugin-opt=-pass-through=-lgcc -plugin-opt=-pass-through=-lgcc_s --sysroot=/ --build-id --eh-frame-hdr -m elf_x86_64 --hash-style=gnu --as-needed -export-dynamic -dynamic-linker /lib64/ld-linux-x86-64.so.2 -z relro -o cmTC_9812f /usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu/crt1.o /usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu/crti.o /usr/lib/gcc/x86_64-linux-gnu/5/crtbegin.o -L/usr/lib/gcc/x86_64-linux-gnu/5 -L/usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu -L/usr/lib/gcc/x86_64-linux-gnu/5/../../../../lib -L/lib/x86_64-linux-gnu -L/lib/../lib -L/usr/lib/x86_64-linux-gnu -L/usr/lib/../lib -L/usr/lib/gcc/x86_64-linux-gnu/5/../../.. CMakeFiles/cmTC_9812f.dir/CMakeCCompilerABI.c.o -lgcc --as-needed -lgcc_s --no-as-needed -lc -lgcc --as-needed -lgcc_s --no-as-needed /usr/lib/gcc/x86_64-linux-gnu/5/crtend.o /usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu/crtn.o +make[2]: Leaving directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +make[1]: Leaving directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' + + +Parsed C implicit link information from above output: + link line regex: [^( *|.*[/\])(ld|([^/\]+-)?ld|collect2)[^/\]*( |$)] + ignore line: [Change Dir: /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp] + ignore line: [] + ignore line: [Run Build Command:"/usr/bin/make" "cmTC_9812f/fast"] + ignore line: [make[1]: Entering directory 
'/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp'] + ignore line: [/usr/bin/make -f CMakeFiles/cmTC_9812f.dir/build.make CMakeFiles/cmTC_9812f.dir/build] + ignore line: [make[2]: Entering directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp'] + ignore line: [Building C object CMakeFiles/cmTC_9812f.dir/CMakeCCompilerABI.c.o] + ignore line: [/usr/bin/cc -o CMakeFiles/cmTC_9812f.dir/CMakeCCompilerABI.c.o -c /usr/share/cmake-3.5/Modules/CMakeCCompilerABI.c] + ignore line: [Linking C executable cmTC_9812f] + ignore line: [/usr/bin/cmake -E cmake_link_script CMakeFiles/cmTC_9812f.dir/link.txt --verbose=1] + ignore line: [/usr/bin/cc -v CMakeFiles/cmTC_9812f.dir/CMakeCCompilerABI.c.o -o cmTC_9812f -rdynamic ] + ignore line: [Using built-in specs.] + ignore line: [COLLECT_GCC=/usr/bin/cc] + ignore line: [COLLECT_LTO_WRAPPER=/usr/lib/gcc/x86_64-linux-gnu/5/lto-wrapper] + ignore line: [Target: x86_64-linux-gnu] + ignore line: [Configured with: ../src/configure -v --with-pkgversion='Ubuntu 5.4.0-6ubuntu1~16.04.10' --with-bugurl=file:///usr/share/doc/gcc-5/README.Bugs --enable-languages=c,ada,c++,java,go,d,fortran,objc,obj-c++ --prefix=/usr --program-suffix=-5 --enable-shared --enable-linker-build-id --libexecdir=/usr/lib --without-included-gettext --enable-threads=posix --libdir=/usr/lib --enable-nls --with-sysroot=/ --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --enable-libmpx --enable-plugin --with-system-zlib --disable-browser-plugin --enable-java-awt=gtk --enable-gtk-cairo --with-java-home=/usr/lib/jvm/java-1.5.0-gcj-5-amd64/jre --enable-java-home --with-jvm-root-dir=/usr/lib/jvm/java-1.5.0-gcj-5-amd64 --with-jvm-jar-dir=/usr/lib/jvm-exports/java-1.5.0-gcj-5-amd64 --with-arch-directory=amd64 
--with-ecj-jar=/usr/share/java/eclipse-ecj.jar --enable-objc-gc --enable-multiarch --disable-werror --with-arch-32=i686 --with-abi=m64 --with-multilib-list=m32,m64,mx32 --enable-multilib --with-tune=generic --enable-checking=release --build=x86_64-linux-gnu --host=x86_64-linux-gnu --target=x86_64-linux-gnu] + ignore line: [Thread model: posix] + ignore line: [gcc version 5.4.0 20160609 (Ubuntu 5.4.0-6ubuntu1~16.04.10) ] + ignore line: [COMPILER_PATH=/usr/lib/gcc/x86_64-linux-gnu/5/:/usr/lib/gcc/x86_64-linux-gnu/5/:/usr/lib/gcc/x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/5/:/usr/lib/gcc/x86_64-linux-gnu/] + ignore line: [LIBRARY_PATH=/usr/lib/gcc/x86_64-linux-gnu/5/:/usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/5/../../../../lib/:/lib/x86_64-linux-gnu/:/lib/../lib/:/usr/lib/x86_64-linux-gnu/:/usr/lib/../lib/:/usr/lib/gcc/x86_64-linux-gnu/5/../../../:/lib/:/usr/lib/] + ignore line: [COLLECT_GCC_OPTIONS='-v' '-o' 'cmTC_9812f' '-rdynamic' '-mtune=generic' '-march=x86-64'] + link line: [ /usr/lib/gcc/x86_64-linux-gnu/5/collect2 -plugin /usr/lib/gcc/x86_64-linux-gnu/5/liblto_plugin.so -plugin-opt=/usr/lib/gcc/x86_64-linux-gnu/5/lto-wrapper -plugin-opt=-fresolution=/tmp/ccjWUUXi.res -plugin-opt=-pass-through=-lgcc -plugin-opt=-pass-through=-lgcc_s -plugin-opt=-pass-through=-lc -plugin-opt=-pass-through=-lgcc -plugin-opt=-pass-through=-lgcc_s --sysroot=/ --build-id --eh-frame-hdr -m elf_x86_64 --hash-style=gnu --as-needed -export-dynamic -dynamic-linker /lib64/ld-linux-x86-64.so.2 -z relro -o cmTC_9812f /usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu/crt1.o /usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu/crti.o /usr/lib/gcc/x86_64-linux-gnu/5/crtbegin.o -L/usr/lib/gcc/x86_64-linux-gnu/5 -L/usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu -L/usr/lib/gcc/x86_64-linux-gnu/5/../../../../lib -L/lib/x86_64-linux-gnu -L/lib/../lib -L/usr/lib/x86_64-linux-gnu -L/usr/lib/../lib 
-L/usr/lib/gcc/x86_64-linux-gnu/5/../../.. CMakeFiles/cmTC_9812f.dir/CMakeCCompilerABI.c.o -lgcc --as-needed -lgcc_s --no-as-needed -lc -lgcc --as-needed -lgcc_s --no-as-needed /usr/lib/gcc/x86_64-linux-gnu/5/crtend.o /usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu/crtn.o] + arg [/usr/lib/gcc/x86_64-linux-gnu/5/collect2] ==> ignore + arg [-plugin] ==> ignore + arg [/usr/lib/gcc/x86_64-linux-gnu/5/liblto_plugin.so] ==> ignore + arg [-plugin-opt=/usr/lib/gcc/x86_64-linux-gnu/5/lto-wrapper] ==> ignore + arg [-plugin-opt=-fresolution=/tmp/ccjWUUXi.res] ==> ignore + arg [-plugin-opt=-pass-through=-lgcc] ==> ignore + arg [-plugin-opt=-pass-through=-lgcc_s] ==> ignore + arg [-plugin-opt=-pass-through=-lc] ==> ignore + arg [-plugin-opt=-pass-through=-lgcc] ==> ignore + arg [-plugin-opt=-pass-through=-lgcc_s] ==> ignore + arg [--sysroot=/] ==> ignore + arg [--build-id] ==> ignore + arg [--eh-frame-hdr] ==> ignore + arg [-m] ==> ignore + arg [elf_x86_64] ==> ignore + arg [--hash-style=gnu] ==> ignore + arg [--as-needed] ==> ignore + arg [-export-dynamic] ==> ignore + arg [-dynamic-linker] ==> ignore + arg [/lib64/ld-linux-x86-64.so.2] ==> ignore + arg [-zrelro] ==> ignore + arg [-o] ==> ignore + arg [cmTC_9812f] ==> ignore + arg [/usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu/crt1.o] ==> ignore + arg [/usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu/crti.o] ==> ignore + arg [/usr/lib/gcc/x86_64-linux-gnu/5/crtbegin.o] ==> ignore + arg [-L/usr/lib/gcc/x86_64-linux-gnu/5] ==> dir [/usr/lib/gcc/x86_64-linux-gnu/5] + arg [-L/usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu] ==> dir [/usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu] + arg [-L/usr/lib/gcc/x86_64-linux-gnu/5/../../../../lib] ==> dir [/usr/lib/gcc/x86_64-linux-gnu/5/../../../../lib] + arg [-L/lib/x86_64-linux-gnu] ==> dir [/lib/x86_64-linux-gnu] + arg [-L/lib/../lib] ==> dir [/lib/../lib] + arg [-L/usr/lib/x86_64-linux-gnu] ==> dir [/usr/lib/x86_64-linux-gnu] + arg 
[-L/usr/lib/../lib] ==> dir [/usr/lib/../lib] + arg [-L/usr/lib/gcc/x86_64-linux-gnu/5/../../..] ==> dir [/usr/lib/gcc/x86_64-linux-gnu/5/../../..] + arg [CMakeFiles/cmTC_9812f.dir/CMakeCCompilerABI.c.o] ==> ignore + arg [-lgcc] ==> lib [gcc] + arg [--as-needed] ==> ignore + arg [-lgcc_s] ==> lib [gcc_s] + arg [--no-as-needed] ==> ignore + arg [-lc] ==> lib [c] + arg [-lgcc] ==> lib [gcc] + arg [--as-needed] ==> ignore + arg [-lgcc_s] ==> lib [gcc_s] + arg [--no-as-needed] ==> ignore + arg [/usr/lib/gcc/x86_64-linux-gnu/5/crtend.o] ==> ignore + arg [/usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu/crtn.o] ==> ignore + remove lib [gcc] + remove lib [gcc_s] + remove lib [gcc] + remove lib [gcc_s] + collapse library dir [/usr/lib/gcc/x86_64-linux-gnu/5] ==> [/usr/lib/gcc/x86_64-linux-gnu/5] + collapse library dir [/usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu] ==> [/usr/lib/x86_64-linux-gnu] + collapse library dir [/usr/lib/gcc/x86_64-linux-gnu/5/../../../../lib] ==> [/usr/lib] + collapse library dir [/lib/x86_64-linux-gnu] ==> [/lib/x86_64-linux-gnu] + collapse library dir [/lib/../lib] ==> [/lib] + collapse library dir [/usr/lib/x86_64-linux-gnu] ==> [/usr/lib/x86_64-linux-gnu] + collapse library dir [/usr/lib/../lib] ==> [/usr/lib] + collapse library dir [/usr/lib/gcc/x86_64-linux-gnu/5/../../..] 
==> [/usr/lib] + implicit libs: [c] + implicit dirs: [/usr/lib/gcc/x86_64-linux-gnu/5;/usr/lib/x86_64-linux-gnu;/usr/lib;/lib/x86_64-linux-gnu;/lib] + implicit fwks: [] + + + + +Detecting C [-std=c11] compiler features compiled with the following output: +Change Dir: /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp + +Run Build Command:"/usr/bin/make" "cmTC_3640d/fast" +make[1]: Entering directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +/usr/bin/make -f CMakeFiles/cmTC_3640d.dir/build.make CMakeFiles/cmTC_3640d.dir/build +make[2]: Entering directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +Building C object CMakeFiles/cmTC_3640d.dir/feature_tests.c.o +/usr/bin/cc -std=c11 -o CMakeFiles/cmTC_3640d.dir/feature_tests.c.o -c /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/feature_tests.c +Linking C executable cmTC_3640d +/usr/bin/cmake -E cmake_link_script CMakeFiles/cmTC_3640d.dir/link.txt --verbose=1 +/usr/bin/cc CMakeFiles/cmTC_3640d.dir/feature_tests.c.o -o cmTC_3640d -rdynamic +make[2]: Leaving directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +make[1]: Leaving directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' + + + Feature record: C_FEATURE:1c_function_prototypes + Feature record: C_FEATURE:1c_restrict + Feature record: C_FEATURE:1c_static_assert + Feature record: C_FEATURE:1c_variadic_macros + + +Detecting C [-std=c99] compiler features compiled with the following output: +Change Dir: 
/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp + +Run Build Command:"/usr/bin/make" "cmTC_8a646/fast" +make[1]: Entering directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +/usr/bin/make -f CMakeFiles/cmTC_8a646.dir/build.make CMakeFiles/cmTC_8a646.dir/build +make[2]: Entering directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +Building C object CMakeFiles/cmTC_8a646.dir/feature_tests.c.o +/usr/bin/cc -std=c99 -o CMakeFiles/cmTC_8a646.dir/feature_tests.c.o -c /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/feature_tests.c +Linking C executable cmTC_8a646 +/usr/bin/cmake -E cmake_link_script CMakeFiles/cmTC_8a646.dir/link.txt --verbose=1 +/usr/bin/cc CMakeFiles/cmTC_8a646.dir/feature_tests.c.o -o cmTC_8a646 -rdynamic +make[2]: Leaving directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +make[1]: Leaving directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' + + + Feature record: C_FEATURE:1c_function_prototypes + Feature record: C_FEATURE:1c_restrict + Feature record: C_FEATURE:0c_static_assert + Feature record: C_FEATURE:1c_variadic_macros + + +Detecting C [-std=c90] compiler features compiled with the following output: +Change Dir: /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp + +Run Build Command:"/usr/bin/make" "cmTC_9c12a/fast" +make[1]: Entering directory 
'/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +/usr/bin/make -f CMakeFiles/cmTC_9c12a.dir/build.make CMakeFiles/cmTC_9c12a.dir/build +make[2]: Entering directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +Building C object CMakeFiles/cmTC_9c12a.dir/feature_tests.c.o +/usr/bin/cc -std=c90 -o CMakeFiles/cmTC_9c12a.dir/feature_tests.c.o -c /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/feature_tests.c +Linking C executable cmTC_9c12a +/usr/bin/cmake -E cmake_link_script CMakeFiles/cmTC_9c12a.dir/link.txt --verbose=1 +/usr/bin/cc CMakeFiles/cmTC_9c12a.dir/feature_tests.c.o -o cmTC_9c12a -rdynamic +make[2]: Leaving directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +make[1]: Leaving directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' + + + Feature record: C_FEATURE:1c_function_prototypes + Feature record: C_FEATURE:0c_restrict + Feature record: C_FEATURE:0c_static_assert + Feature record: C_FEATURE:0c_variadic_macros +Determining if the CXX compiler works passed with the following output: +Change Dir: /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp + +Run Build Command:"/usr/bin/make" "cmTC_8afc5/fast" +make[1]: Entering directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +/usr/bin/make -f CMakeFiles/cmTC_8afc5.dir/build.make CMakeFiles/cmTC_8afc5.dir/build +make[2]: Entering directory 
'/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +Building CXX object CMakeFiles/cmTC_8afc5.dir/testCXXCompiler.cxx.o +/usr/bin/c++ -o CMakeFiles/cmTC_8afc5.dir/testCXXCompiler.cxx.o -c /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp/testCXXCompiler.cxx +Linking CXX executable cmTC_8afc5 +/usr/bin/cmake -E cmake_link_script CMakeFiles/cmTC_8afc5.dir/link.txt --verbose=1 +/usr/bin/c++ CMakeFiles/cmTC_8afc5.dir/testCXXCompiler.cxx.o -o cmTC_8afc5 -rdynamic +make[2]: Leaving directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +make[1]: Leaving directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' + + +Detecting CXX compiler ABI info compiled with the following output: +Change Dir: /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp + +Run Build Command:"/usr/bin/make" "cmTC_54547/fast" +make[1]: Entering directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +/usr/bin/make -f CMakeFiles/cmTC_54547.dir/build.make CMakeFiles/cmTC_54547.dir/build +make[2]: Entering directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +Building CXX object CMakeFiles/cmTC_54547.dir/CMakeCXXCompilerABI.cpp.o +/usr/bin/c++ -o CMakeFiles/cmTC_54547.dir/CMakeCXXCompilerABI.cpp.o -c /usr/share/cmake-3.5/Modules/CMakeCXXCompilerABI.cpp +Linking CXX executable cmTC_54547 +/usr/bin/cmake -E cmake_link_script CMakeFiles/cmTC_54547.dir/link.txt --verbose=1 +/usr/bin/c++ -v 
CMakeFiles/cmTC_54547.dir/CMakeCXXCompilerABI.cpp.o -o cmTC_54547 -rdynamic +Using built-in specs. +COLLECT_GCC=/usr/bin/c++ +COLLECT_LTO_WRAPPER=/usr/lib/gcc/x86_64-linux-gnu/5/lto-wrapper +Target: x86_64-linux-gnu +Configured with: ../src/configure -v --with-pkgversion='Ubuntu 5.4.0-6ubuntu1~16.04.10' --with-bugurl=file:///usr/share/doc/gcc-5/README.Bugs --enable-languages=c,ada,c++,java,go,d,fortran,objc,obj-c++ --prefix=/usr --program-suffix=-5 --enable-shared --enable-linker-build-id --libexecdir=/usr/lib --without-included-gettext --enable-threads=posix --libdir=/usr/lib --enable-nls --with-sysroot=/ --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --enable-libmpx --enable-plugin --with-system-zlib --disable-browser-plugin --enable-java-awt=gtk --enable-gtk-cairo --with-java-home=/usr/lib/jvm/java-1.5.0-gcj-5-amd64/jre --enable-java-home --with-jvm-root-dir=/usr/lib/jvm/java-1.5.0-gcj-5-amd64 --with-jvm-jar-dir=/usr/lib/jvm-exports/java-1.5.0-gcj-5-amd64 --with-arch-directory=amd64 --with-ecj-jar=/usr/share/java/eclipse-ecj.jar --enable-objc-gc --enable-multiarch --disable-werror --with-arch-32=i686 --with-abi=m64 --with-multilib-list=m32,m64,mx32 --enable-multilib --with-tune=generic --enable-checking=release --build=x86_64-linux-gnu --host=x86_64-linux-gnu --target=x86_64-linux-gnu +Thread model: posix +gcc version 5.4.0 20160609 (Ubuntu 5.4.0-6ubuntu1~16.04.10) +COMPILER_PATH=/usr/lib/gcc/x86_64-linux-gnu/5/:/usr/lib/gcc/x86_64-linux-gnu/5/:/usr/lib/gcc/x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/5/:/usr/lib/gcc/x86_64-linux-gnu/ +LIBRARY_PATH=/usr/lib/gcc/x86_64-linux-gnu/5/:/usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/5/../../../../lib/:/lib/x86_64-linux-gnu/:/lib/../lib/:/usr/lib/x86_64-linux-gnu/:/usr/lib/../lib/:/usr/lib/gcc/x86_64-linux-gnu/5/../../../:/lib/:/usr/lib/ +COLLECT_GCC_OPTIONS='-v' 
'-o' 'cmTC_54547' '-rdynamic' '-shared-libgcc' '-mtune=generic' '-march=x86-64' + /usr/lib/gcc/x86_64-linux-gnu/5/collect2 -plugin /usr/lib/gcc/x86_64-linux-gnu/5/liblto_plugin.so -plugin-opt=/usr/lib/gcc/x86_64-linux-gnu/5/lto-wrapper -plugin-opt=-fresolution=/tmp/ccy01wkG.res -plugin-opt=-pass-through=-lgcc_s -plugin-opt=-pass-through=-lgcc -plugin-opt=-pass-through=-lc -plugin-opt=-pass-through=-lgcc_s -plugin-opt=-pass-through=-lgcc --sysroot=/ --build-id --eh-frame-hdr -m elf_x86_64 --hash-style=gnu --as-needed -export-dynamic -dynamic-linker /lib64/ld-linux-x86-64.so.2 -z relro -o cmTC_54547 /usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu/crt1.o /usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu/crti.o /usr/lib/gcc/x86_64-linux-gnu/5/crtbegin.o -L/usr/lib/gcc/x86_64-linux-gnu/5 -L/usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu -L/usr/lib/gcc/x86_64-linux-gnu/5/../../../../lib -L/lib/x86_64-linux-gnu -L/lib/../lib -L/usr/lib/x86_64-linux-gnu -L/usr/lib/../lib -L/usr/lib/gcc/x86_64-linux-gnu/5/../../.. 
CMakeFiles/cmTC_54547.dir/CMakeCXXCompilerABI.cpp.o -lstdc++ -lm -lgcc_s -lgcc -lc -lgcc_s -lgcc /usr/lib/gcc/x86_64-linux-gnu/5/crtend.o /usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu/crtn.o +make[2]: Leaving directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +make[1]: Leaving directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' + + +Parsed CXX implicit link information from above output: + link line regex: [^( *|.*[/\])(ld|([^/\]+-)?ld|collect2)[^/\]*( |$)] + ignore line: [Change Dir: /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp] + ignore line: [] + ignore line: [Run Build Command:"/usr/bin/make" "cmTC_54547/fast"] + ignore line: [make[1]: Entering directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp'] + ignore line: [/usr/bin/make -f CMakeFiles/cmTC_54547.dir/build.make CMakeFiles/cmTC_54547.dir/build] + ignore line: [make[2]: Entering directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp'] + ignore line: [Building CXX object CMakeFiles/cmTC_54547.dir/CMakeCXXCompilerABI.cpp.o] + ignore line: [/usr/bin/c++ -o CMakeFiles/cmTC_54547.dir/CMakeCXXCompilerABI.cpp.o -c /usr/share/cmake-3.5/Modules/CMakeCXXCompilerABI.cpp] + ignore line: [Linking CXX executable cmTC_54547] + ignore line: [/usr/bin/cmake -E cmake_link_script CMakeFiles/cmTC_54547.dir/link.txt --verbose=1] + ignore line: [/usr/bin/c++ -v CMakeFiles/cmTC_54547.dir/CMakeCXXCompilerABI.cpp.o -o cmTC_54547 -rdynamic ] + ignore line: [Using built-in specs.] 
+ ignore line: [COLLECT_GCC=/usr/bin/c++] + ignore line: [COLLECT_LTO_WRAPPER=/usr/lib/gcc/x86_64-linux-gnu/5/lto-wrapper] + ignore line: [Target: x86_64-linux-gnu] + ignore line: [Configured with: ../src/configure -v --with-pkgversion='Ubuntu 5.4.0-6ubuntu1~16.04.10' --with-bugurl=file:///usr/share/doc/gcc-5/README.Bugs --enable-languages=c,ada,c++,java,go,d,fortran,objc,obj-c++ --prefix=/usr --program-suffix=-5 --enable-shared --enable-linker-build-id --libexecdir=/usr/lib --without-included-gettext --enable-threads=posix --libdir=/usr/lib --enable-nls --with-sysroot=/ --enable-clocale=gnu --enable-libstdcxx-debug --enable-libstdcxx-time=yes --with-default-libstdcxx-abi=new --enable-gnu-unique-object --disable-vtable-verify --enable-libmpx --enable-plugin --with-system-zlib --disable-browser-plugin --enable-java-awt=gtk --enable-gtk-cairo --with-java-home=/usr/lib/jvm/java-1.5.0-gcj-5-amd64/jre --enable-java-home --with-jvm-root-dir=/usr/lib/jvm/java-1.5.0-gcj-5-amd64 --with-jvm-jar-dir=/usr/lib/jvm-exports/java-1.5.0-gcj-5-amd64 --with-arch-directory=amd64 --with-ecj-jar=/usr/share/java/eclipse-ecj.jar --enable-objc-gc --enable-multiarch --disable-werror --with-arch-32=i686 --with-abi=m64 --with-multilib-list=m32,m64,mx32 --enable-multilib --with-tune=generic --enable-checking=release --build=x86_64-linux-gnu --host=x86_64-linux-gnu --target=x86_64-linux-gnu] + ignore line: [Thread model: posix] + ignore line: [gcc version 5.4.0 20160609 (Ubuntu 5.4.0-6ubuntu1~16.04.10) ] + ignore line: [COMPILER_PATH=/usr/lib/gcc/x86_64-linux-gnu/5/:/usr/lib/gcc/x86_64-linux-gnu/5/:/usr/lib/gcc/x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/5/:/usr/lib/gcc/x86_64-linux-gnu/] + ignore line: 
[LIBRARY_PATH=/usr/lib/gcc/x86_64-linux-gnu/5/:/usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu/:/usr/lib/gcc/x86_64-linux-gnu/5/../../../../lib/:/lib/x86_64-linux-gnu/:/lib/../lib/:/usr/lib/x86_64-linux-gnu/:/usr/lib/../lib/:/usr/lib/gcc/x86_64-linux-gnu/5/../../../:/lib/:/usr/lib/] + ignore line: [COLLECT_GCC_OPTIONS='-v' '-o' 'cmTC_54547' '-rdynamic' '-shared-libgcc' '-mtune=generic' '-march=x86-64'] + link line: [ /usr/lib/gcc/x86_64-linux-gnu/5/collect2 -plugin /usr/lib/gcc/x86_64-linux-gnu/5/liblto_plugin.so -plugin-opt=/usr/lib/gcc/x86_64-linux-gnu/5/lto-wrapper -plugin-opt=-fresolution=/tmp/ccy01wkG.res -plugin-opt=-pass-through=-lgcc_s -plugin-opt=-pass-through=-lgcc -plugin-opt=-pass-through=-lc -plugin-opt=-pass-through=-lgcc_s -plugin-opt=-pass-through=-lgcc --sysroot=/ --build-id --eh-frame-hdr -m elf_x86_64 --hash-style=gnu --as-needed -export-dynamic -dynamic-linker /lib64/ld-linux-x86-64.so.2 -z relro -o cmTC_54547 /usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu/crt1.o /usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu/crti.o /usr/lib/gcc/x86_64-linux-gnu/5/crtbegin.o -L/usr/lib/gcc/x86_64-linux-gnu/5 -L/usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu -L/usr/lib/gcc/x86_64-linux-gnu/5/../../../../lib -L/lib/x86_64-linux-gnu -L/lib/../lib -L/usr/lib/x86_64-linux-gnu -L/usr/lib/../lib -L/usr/lib/gcc/x86_64-linux-gnu/5/../../.. 
CMakeFiles/cmTC_54547.dir/CMakeCXXCompilerABI.cpp.o -lstdc++ -lm -lgcc_s -lgcc -lc -lgcc_s -lgcc /usr/lib/gcc/x86_64-linux-gnu/5/crtend.o /usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu/crtn.o] + arg [/usr/lib/gcc/x86_64-linux-gnu/5/collect2] ==> ignore + arg [-plugin] ==> ignore + arg [/usr/lib/gcc/x86_64-linux-gnu/5/liblto_plugin.so] ==> ignore + arg [-plugin-opt=/usr/lib/gcc/x86_64-linux-gnu/5/lto-wrapper] ==> ignore + arg [-plugin-opt=-fresolution=/tmp/ccy01wkG.res] ==> ignore + arg [-plugin-opt=-pass-through=-lgcc_s] ==> ignore + arg [-plugin-opt=-pass-through=-lgcc] ==> ignore + arg [-plugin-opt=-pass-through=-lc] ==> ignore + arg [-plugin-opt=-pass-through=-lgcc_s] ==> ignore + arg [-plugin-opt=-pass-through=-lgcc] ==> ignore + arg [--sysroot=/] ==> ignore + arg [--build-id] ==> ignore + arg [--eh-frame-hdr] ==> ignore + arg [-m] ==> ignore + arg [elf_x86_64] ==> ignore + arg [--hash-style=gnu] ==> ignore + arg [--as-needed] ==> ignore + arg [-export-dynamic] ==> ignore + arg [-dynamic-linker] ==> ignore + arg [/lib64/ld-linux-x86-64.so.2] ==> ignore + arg [-zrelro] ==> ignore + arg [-o] ==> ignore + arg [cmTC_54547] ==> ignore + arg [/usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu/crt1.o] ==> ignore + arg [/usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu/crti.o] ==> ignore + arg [/usr/lib/gcc/x86_64-linux-gnu/5/crtbegin.o] ==> ignore + arg [-L/usr/lib/gcc/x86_64-linux-gnu/5] ==> dir [/usr/lib/gcc/x86_64-linux-gnu/5] + arg [-L/usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu] ==> dir [/usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu] + arg [-L/usr/lib/gcc/x86_64-linux-gnu/5/../../../../lib] ==> dir [/usr/lib/gcc/x86_64-linux-gnu/5/../../../../lib] + arg [-L/lib/x86_64-linux-gnu] ==> dir [/lib/x86_64-linux-gnu] + arg [-L/lib/../lib] ==> dir [/lib/../lib] + arg [-L/usr/lib/x86_64-linux-gnu] ==> dir [/usr/lib/x86_64-linux-gnu] + arg [-L/usr/lib/../lib] ==> dir [/usr/lib/../lib] + arg 
[-L/usr/lib/gcc/x86_64-linux-gnu/5/../../..] ==> dir [/usr/lib/gcc/x86_64-linux-gnu/5/../../..] + arg [CMakeFiles/cmTC_54547.dir/CMakeCXXCompilerABI.cpp.o] ==> ignore + arg [-lstdc++] ==> lib [stdc++] + arg [-lm] ==> lib [m] + arg [-lgcc_s] ==> lib [gcc_s] + arg [-lgcc] ==> lib [gcc] + arg [-lc] ==> lib [c] + arg [-lgcc_s] ==> lib [gcc_s] + arg [-lgcc] ==> lib [gcc] + arg [/usr/lib/gcc/x86_64-linux-gnu/5/crtend.o] ==> ignore + arg [/usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu/crtn.o] ==> ignore + remove lib [gcc_s] + remove lib [gcc] + remove lib [gcc_s] + remove lib [gcc] + collapse library dir [/usr/lib/gcc/x86_64-linux-gnu/5] ==> [/usr/lib/gcc/x86_64-linux-gnu/5] + collapse library dir [/usr/lib/gcc/x86_64-linux-gnu/5/../../../x86_64-linux-gnu] ==> [/usr/lib/x86_64-linux-gnu] + collapse library dir [/usr/lib/gcc/x86_64-linux-gnu/5/../../../../lib] ==> [/usr/lib] + collapse library dir [/lib/x86_64-linux-gnu] ==> [/lib/x86_64-linux-gnu] + collapse library dir [/lib/../lib] ==> [/lib] + collapse library dir [/usr/lib/x86_64-linux-gnu] ==> [/usr/lib/x86_64-linux-gnu] + collapse library dir [/usr/lib/../lib] ==> [/usr/lib] + collapse library dir [/usr/lib/gcc/x86_64-linux-gnu/5/../../..] 
==> [/usr/lib] + implicit libs: [stdc++;m;c] + implicit dirs: [/usr/lib/gcc/x86_64-linux-gnu/5;/usr/lib/x86_64-linux-gnu;/usr/lib;/lib/x86_64-linux-gnu;/lib] + implicit fwks: [] + + + + +Detecting CXX [-std=c++14] compiler features compiled with the following output: +Change Dir: /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp + +Run Build Command:"/usr/bin/make" "cmTC_8f563/fast" +make[1]: Entering directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +/usr/bin/make -f CMakeFiles/cmTC_8f563.dir/build.make CMakeFiles/cmTC_8f563.dir/build +make[2]: Entering directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +Building CXX object CMakeFiles/cmTC_8f563.dir/feature_tests.cxx.o +/usr/bin/c++ -std=c++14 -o CMakeFiles/cmTC_8f563.dir/feature_tests.cxx.o -c /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/feature_tests.cxx +Linking CXX executable cmTC_8f563 +/usr/bin/cmake -E cmake_link_script CMakeFiles/cmTC_8f563.dir/link.txt --verbose=1 +/usr/bin/c++ CMakeFiles/cmTC_8f563.dir/feature_tests.cxx.o -o cmTC_8f563 -rdynamic +make[2]: Leaving directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +make[1]: Leaving directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' + + + Feature record: CXX_FEATURE:1cxx_aggregate_default_initializers + Feature record: CXX_FEATURE:1cxx_alias_templates + Feature record: CXX_FEATURE:1cxx_alignas + Feature record: CXX_FEATURE:1cxx_alignof + Feature record: CXX_FEATURE:1cxx_attributes + Feature record: CXX_FEATURE:1cxx_attribute_deprecated + Feature 
record: CXX_FEATURE:1cxx_auto_type + Feature record: CXX_FEATURE:1cxx_binary_literals + Feature record: CXX_FEATURE:1cxx_constexpr + Feature record: CXX_FEATURE:1cxx_contextual_conversions + Feature record: CXX_FEATURE:1cxx_decltype + Feature record: CXX_FEATURE:1cxx_decltype_auto + Feature record: CXX_FEATURE:1cxx_decltype_incomplete_return_types + Feature record: CXX_FEATURE:1cxx_default_function_template_args + Feature record: CXX_FEATURE:1cxx_defaulted_functions + Feature record: CXX_FEATURE:1cxx_defaulted_move_initializers + Feature record: CXX_FEATURE:1cxx_delegating_constructors + Feature record: CXX_FEATURE:1cxx_deleted_functions + Feature record: CXX_FEATURE:1cxx_digit_separators + Feature record: CXX_FEATURE:1cxx_enum_forward_declarations + Feature record: CXX_FEATURE:1cxx_explicit_conversions + Feature record: CXX_FEATURE:1cxx_extended_friend_declarations + Feature record: CXX_FEATURE:1cxx_extern_templates + Feature record: CXX_FEATURE:1cxx_final + Feature record: CXX_FEATURE:1cxx_func_identifier + Feature record: CXX_FEATURE:1cxx_generalized_initializers + Feature record: CXX_FEATURE:1cxx_generic_lambdas + Feature record: CXX_FEATURE:1cxx_inheriting_constructors + Feature record: CXX_FEATURE:1cxx_inline_namespaces + Feature record: CXX_FEATURE:1cxx_lambdas + Feature record: CXX_FEATURE:1cxx_lambda_init_captures + Feature record: CXX_FEATURE:1cxx_local_type_template_args + Feature record: CXX_FEATURE:1cxx_long_long_type + Feature record: CXX_FEATURE:1cxx_noexcept + Feature record: CXX_FEATURE:1cxx_nonstatic_member_init + Feature record: CXX_FEATURE:1cxx_nullptr + Feature record: CXX_FEATURE:1cxx_override + Feature record: CXX_FEATURE:1cxx_range_for + Feature record: CXX_FEATURE:1cxx_raw_string_literals + Feature record: CXX_FEATURE:1cxx_reference_qualified_functions + Feature record: CXX_FEATURE:1cxx_relaxed_constexpr + Feature record: CXX_FEATURE:1cxx_return_type_deduction + Feature record: CXX_FEATURE:1cxx_right_angle_brackets + Feature record: 
CXX_FEATURE:1cxx_rvalue_references + Feature record: CXX_FEATURE:1cxx_sizeof_member + Feature record: CXX_FEATURE:1cxx_static_assert + Feature record: CXX_FEATURE:1cxx_strong_enums + Feature record: CXX_FEATURE:1cxx_template_template_parameters + Feature record: CXX_FEATURE:1cxx_thread_local + Feature record: CXX_FEATURE:1cxx_trailing_return_types + Feature record: CXX_FEATURE:1cxx_unicode_literals + Feature record: CXX_FEATURE:1cxx_uniform_initialization + Feature record: CXX_FEATURE:1cxx_unrestricted_unions + Feature record: CXX_FEATURE:1cxx_user_literals + Feature record: CXX_FEATURE:1cxx_variable_templates + Feature record: CXX_FEATURE:1cxx_variadic_macros + Feature record: CXX_FEATURE:1cxx_variadic_templates + + +Detecting CXX [-std=c++11] compiler features compiled with the following output: +Change Dir: /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp + +Run Build Command:"/usr/bin/make" "cmTC_6e6e1/fast" +make[1]: Entering directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +/usr/bin/make -f CMakeFiles/cmTC_6e6e1.dir/build.make CMakeFiles/cmTC_6e6e1.dir/build +make[2]: Entering directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +Building CXX object CMakeFiles/cmTC_6e6e1.dir/feature_tests.cxx.o +/usr/bin/c++ -std=c++11 -o CMakeFiles/cmTC_6e6e1.dir/feature_tests.cxx.o -c /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/feature_tests.cxx +Linking CXX executable cmTC_6e6e1 +/usr/bin/cmake -E cmake_link_script CMakeFiles/cmTC_6e6e1.dir/link.txt --verbose=1 +/usr/bin/c++ CMakeFiles/cmTC_6e6e1.dir/feature_tests.cxx.o -o cmTC_6e6e1 -rdynamic +make[2]: Leaving directory 
'/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +make[1]: Leaving directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' + + + Feature record: CXX_FEATURE:0cxx_aggregate_default_initializers + Feature record: CXX_FEATURE:1cxx_alias_templates + Feature record: CXX_FEATURE:1cxx_alignas + Feature record: CXX_FEATURE:1cxx_alignof + Feature record: CXX_FEATURE:1cxx_attributes + Feature record: CXX_FEATURE:0cxx_attribute_deprecated + Feature record: CXX_FEATURE:1cxx_auto_type + Feature record: CXX_FEATURE:0cxx_binary_literals + Feature record: CXX_FEATURE:1cxx_constexpr + Feature record: CXX_FEATURE:0cxx_contextual_conversions + Feature record: CXX_FEATURE:1cxx_decltype + Feature record: CXX_FEATURE:0cxx_decltype_auto + Feature record: CXX_FEATURE:1cxx_decltype_incomplete_return_types + Feature record: CXX_FEATURE:1cxx_default_function_template_args + Feature record: CXX_FEATURE:1cxx_defaulted_functions + Feature record: CXX_FEATURE:1cxx_defaulted_move_initializers + Feature record: CXX_FEATURE:1cxx_delegating_constructors + Feature record: CXX_FEATURE:1cxx_deleted_functions + Feature record: CXX_FEATURE:0cxx_digit_separators + Feature record: CXX_FEATURE:1cxx_enum_forward_declarations + Feature record: CXX_FEATURE:1cxx_explicit_conversions + Feature record: CXX_FEATURE:1cxx_extended_friend_declarations + Feature record: CXX_FEATURE:1cxx_extern_templates + Feature record: CXX_FEATURE:1cxx_final + Feature record: CXX_FEATURE:1cxx_func_identifier + Feature record: CXX_FEATURE:1cxx_generalized_initializers + Feature record: CXX_FEATURE:0cxx_generic_lambdas + Feature record: CXX_FEATURE:1cxx_inheriting_constructors + Feature record: CXX_FEATURE:1cxx_inline_namespaces + Feature record: CXX_FEATURE:1cxx_lambdas + Feature record: CXX_FEATURE:0cxx_lambda_init_captures + Feature record: 
CXX_FEATURE:1cxx_local_type_template_args + Feature record: CXX_FEATURE:1cxx_long_long_type + Feature record: CXX_FEATURE:1cxx_noexcept + Feature record: CXX_FEATURE:1cxx_nonstatic_member_init + Feature record: CXX_FEATURE:1cxx_nullptr + Feature record: CXX_FEATURE:1cxx_override + Feature record: CXX_FEATURE:1cxx_range_for + Feature record: CXX_FEATURE:1cxx_raw_string_literals + Feature record: CXX_FEATURE:1cxx_reference_qualified_functions + Feature record: CXX_FEATURE:0cxx_relaxed_constexpr + Feature record: CXX_FEATURE:0cxx_return_type_deduction + Feature record: CXX_FEATURE:1cxx_right_angle_brackets + Feature record: CXX_FEATURE:1cxx_rvalue_references + Feature record: CXX_FEATURE:1cxx_sizeof_member + Feature record: CXX_FEATURE:1cxx_static_assert + Feature record: CXX_FEATURE:1cxx_strong_enums + Feature record: CXX_FEATURE:1cxx_template_template_parameters + Feature record: CXX_FEATURE:1cxx_thread_local + Feature record: CXX_FEATURE:1cxx_trailing_return_types + Feature record: CXX_FEATURE:1cxx_unicode_literals + Feature record: CXX_FEATURE:1cxx_uniform_initialization + Feature record: CXX_FEATURE:1cxx_unrestricted_unions + Feature record: CXX_FEATURE:1cxx_user_literals + Feature record: CXX_FEATURE:0cxx_variable_templates + Feature record: CXX_FEATURE:1cxx_variadic_macros + Feature record: CXX_FEATURE:1cxx_variadic_templates + + +Detecting CXX [-std=c++98] compiler features compiled with the following output: +Change Dir: /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp + +Run Build Command:"/usr/bin/make" "cmTC_059a0/fast" +make[1]: Entering directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +/usr/bin/make -f CMakeFiles/cmTC_059a0.dir/build.make CMakeFiles/cmTC_059a0.dir/build +make[2]: Entering directory 
'/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +Building CXX object CMakeFiles/cmTC_059a0.dir/feature_tests.cxx.o +/usr/bin/c++ -std=c++98 -o CMakeFiles/cmTC_059a0.dir/feature_tests.cxx.o -c /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/feature_tests.cxx +Linking CXX executable cmTC_059a0 +/usr/bin/cmake -E cmake_link_script CMakeFiles/cmTC_059a0.dir/link.txt --verbose=1 +/usr/bin/c++ CMakeFiles/cmTC_059a0.dir/feature_tests.cxx.o -o cmTC_059a0 -rdynamic +make[2]: Leaving directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +make[1]: Leaving directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' + + + Feature record: CXX_FEATURE:0cxx_aggregate_default_initializers + Feature record: CXX_FEATURE:0cxx_alias_templates + Feature record: CXX_FEATURE:0cxx_alignas + Feature record: CXX_FEATURE:0cxx_alignof + Feature record: CXX_FEATURE:0cxx_attributes + Feature record: CXX_FEATURE:0cxx_attribute_deprecated + Feature record: CXX_FEATURE:0cxx_auto_type + Feature record: CXX_FEATURE:0cxx_binary_literals + Feature record: CXX_FEATURE:0cxx_constexpr + Feature record: CXX_FEATURE:0cxx_contextual_conversions + Feature record: CXX_FEATURE:0cxx_decltype + Feature record: CXX_FEATURE:0cxx_decltype_auto + Feature record: CXX_FEATURE:0cxx_decltype_incomplete_return_types + Feature record: CXX_FEATURE:0cxx_default_function_template_args + Feature record: CXX_FEATURE:0cxx_defaulted_functions + Feature record: CXX_FEATURE:0cxx_defaulted_move_initializers + Feature record: CXX_FEATURE:0cxx_delegating_constructors + Feature record: CXX_FEATURE:0cxx_deleted_functions + Feature record: CXX_FEATURE:0cxx_digit_separators + Feature record: 
CXX_FEATURE:0cxx_enum_forward_declarations + Feature record: CXX_FEATURE:0cxx_explicit_conversions + Feature record: CXX_FEATURE:0cxx_extended_friend_declarations + Feature record: CXX_FEATURE:0cxx_extern_templates + Feature record: CXX_FEATURE:0cxx_final + Feature record: CXX_FEATURE:0cxx_func_identifier + Feature record: CXX_FEATURE:0cxx_generalized_initializers + Feature record: CXX_FEATURE:0cxx_generic_lambdas + Feature record: CXX_FEATURE:0cxx_inheriting_constructors + Feature record: CXX_FEATURE:0cxx_inline_namespaces + Feature record: CXX_FEATURE:0cxx_lambdas + Feature record: CXX_FEATURE:0cxx_lambda_init_captures + Feature record: CXX_FEATURE:0cxx_local_type_template_args + Feature record: CXX_FEATURE:0cxx_long_long_type + Feature record: CXX_FEATURE:0cxx_noexcept + Feature record: CXX_FEATURE:0cxx_nonstatic_member_init + Feature record: CXX_FEATURE:0cxx_nullptr + Feature record: CXX_FEATURE:0cxx_override + Feature record: CXX_FEATURE:0cxx_range_for + Feature record: CXX_FEATURE:0cxx_raw_string_literals + Feature record: CXX_FEATURE:0cxx_reference_qualified_functions + Feature record: CXX_FEATURE:0cxx_relaxed_constexpr + Feature record: CXX_FEATURE:0cxx_return_type_deduction + Feature record: CXX_FEATURE:0cxx_right_angle_brackets + Feature record: CXX_FEATURE:0cxx_rvalue_references + Feature record: CXX_FEATURE:0cxx_sizeof_member + Feature record: CXX_FEATURE:0cxx_static_assert + Feature record: CXX_FEATURE:0cxx_strong_enums + Feature record: CXX_FEATURE:1cxx_template_template_parameters + Feature record: CXX_FEATURE:0cxx_thread_local + Feature record: CXX_FEATURE:0cxx_trailing_return_types + Feature record: CXX_FEATURE:0cxx_unicode_literals + Feature record: CXX_FEATURE:0cxx_uniform_initialization + Feature record: CXX_FEATURE:0cxx_unrestricted_unions + Feature record: CXX_FEATURE:0cxx_user_literals + Feature record: CXX_FEATURE:0cxx_variable_templates + Feature record: CXX_FEATURE:0cxx_variadic_macros + Feature record: CXX_FEATURE:0cxx_variadic_templates 
+Determining if the include file pthread.h exists passed with the following output: +Change Dir: /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp + +Run Build Command:"/usr/bin/make" "cmTC_4a616/fast" +make[1]: Entering directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +/usr/bin/make -f CMakeFiles/cmTC_4a616.dir/build.make CMakeFiles/cmTC_4a616.dir/build +make[2]: Entering directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +Building C object CMakeFiles/cmTC_4a616.dir/CheckIncludeFile.c.o +/usr/bin/cc -o CMakeFiles/cmTC_4a616.dir/CheckIncludeFile.c.o -c /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp/CheckIncludeFile.c +Linking C executable cmTC_4a616 +/usr/bin/cmake -E cmake_link_script CMakeFiles/cmTC_4a616.dir/link.txt --verbose=1 +/usr/bin/cc CMakeFiles/cmTC_4a616.dir/CheckIncludeFile.c.o -o cmTC_4a616 -rdynamic +make[2]: Leaving directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +make[1]: Leaving directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' + + +Determining if the function pthread_create exists in the pthread passed with the following output: +Change Dir: /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp + +Run Build Command:"/usr/bin/make" "cmTC_a0c53/fast" +make[1]: Entering directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +/usr/bin/make -f 
CMakeFiles/cmTC_a0c53.dir/build.make CMakeFiles/cmTC_a0c53.dir/build +make[2]: Entering directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +Building C object CMakeFiles/cmTC_a0c53.dir/CheckFunctionExists.c.o +/usr/bin/cc -DCHECK_FUNCTION_EXISTS=pthread_create -o CMakeFiles/cmTC_a0c53.dir/CheckFunctionExists.c.o -c /usr/share/cmake-3.5/Modules/CheckFunctionExists.c +Linking C executable cmTC_a0c53 +/usr/bin/cmake -E cmake_link_script CMakeFiles/cmTC_a0c53.dir/link.txt --verbose=1 +/usr/bin/cc -DCHECK_FUNCTION_EXISTS=pthread_create CMakeFiles/cmTC_a0c53.dir/CheckFunctionExists.c.o -o cmTC_a0c53 -rdynamic -lpthread +make[2]: Leaving directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' +make[1]: Leaving directory '/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/CMakeTmp' + + diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/Export/lib/cmake/LibCDS/LibCDSConfig-release.cmake b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/Export/lib/cmake/LibCDS/LibCDSConfig-release.cmake new file mode 100644 index 0000000..08ddde6 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/Export/lib/cmake/LibCDS/LibCDSConfig-release.cmake @@ -0,0 +1,29 @@ +#---------------------------------------------------------------- +# Generated CMake target import file for configuration "RELEASE". +#---------------------------------------------------------------- + +# Commands may need to know the format version. 
+set(CMAKE_IMPORT_FILE_VERSION 1) + +# Import target "LibCDS::cds" for configuration "RELEASE" +set_property(TARGET LibCDS::cds APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) +set_target_properties(LibCDS::cds PROPERTIES + IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib64/libcds.so.2.3.2" + IMPORTED_SONAME_RELEASE "libcds.so.2.3.2" + ) + +list(APPEND _IMPORT_CHECK_TARGETS LibCDS::cds ) +list(APPEND _IMPORT_CHECK_FILES_FOR_LibCDS::cds "${_IMPORT_PREFIX}/lib64/libcds.so.2.3.2" ) + +# Import target "LibCDS::cds-s" for configuration "RELEASE" +set_property(TARGET LibCDS::cds-s APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE) +set_target_properties(LibCDS::cds-s PROPERTIES + IMPORTED_LINK_INTERFACE_LANGUAGES_RELEASE "CXX" + IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/lib64/libcds-s.a" + ) + +list(APPEND _IMPORT_CHECK_TARGETS LibCDS::cds-s ) +list(APPEND _IMPORT_CHECK_FILES_FOR_LibCDS::cds-s "${_IMPORT_PREFIX}/lib64/libcds-s.a" ) + +# Commands beyond this point should not need to know the version. +set(CMAKE_IMPORT_FILE_VERSION) diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/Export/lib/cmake/LibCDS/LibCDSConfig.cmake b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/Export/lib/cmake/LibCDS/LibCDSConfig.cmake new file mode 100644 index 0000000..f58eb6f --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/Export/lib/cmake/LibCDS/LibCDSConfig.cmake @@ -0,0 +1,102 @@ +# Generated by CMake 3.5.1 + +if("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.5) + message(FATAL_ERROR "CMake >= 2.6.0 required") +endif() +cmake_policy(PUSH) +cmake_policy(VERSION 2.6) +#---------------------------------------------------------------- +# Generated CMake target import file. +#---------------------------------------------------------------- + +# Commands may need to know the format version. 
+set(CMAKE_IMPORT_FILE_VERSION 1) + +# Protect against multiple inclusion, which would fail when already imported targets are added once more. +set(_targetsDefined) +set(_targetsNotDefined) +set(_expectedTargets) +foreach(_expectedTarget LibCDS::cds LibCDS::cds-s) + list(APPEND _expectedTargets ${_expectedTarget}) + if(NOT TARGET ${_expectedTarget}) + list(APPEND _targetsNotDefined ${_expectedTarget}) + endif() + if(TARGET ${_expectedTarget}) + list(APPEND _targetsDefined ${_expectedTarget}) + endif() +endforeach() +if("${_targetsDefined}" STREQUAL "${_expectedTargets}") + set(CMAKE_IMPORT_FILE_VERSION) + cmake_policy(POP) + return() +endif() +if(NOT "${_targetsDefined}" STREQUAL "") + message(FATAL_ERROR "Some (but not all) targets in this export set were already defined.\nTargets Defined: ${_targetsDefined}\nTargets not yet defined: ${_targetsNotDefined}\n") +endif() +unset(_targetsDefined) +unset(_targetsNotDefined) +unset(_expectedTargets) + + +# Compute the installation prefix relative to this file. 
+get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) +get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH) + +# Create imported target LibCDS::cds +add_library(LibCDS::cds SHARED IMPORTED) + +set_target_properties(LibCDS::cds PROPERTIES + INTERFACE_COMPILE_OPTIONS "-std=c++11;-mcx16" + INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include" +) + +# Create imported target LibCDS::cds-s +add_library(LibCDS::cds-s STATIC IMPORTED) + +set_target_properties(LibCDS::cds-s PROPERTIES + INTERFACE_COMPILE_OPTIONS "-std=c++11;-mcx16" + INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/include" + INTERFACE_LINK_LIBRARIES "\$" +) + +if(CMAKE_VERSION VERSION_LESS 2.8.12) + message(FATAL_ERROR "This file relies on consumers using CMake 2.8.12 or greater.") +endif() + +# Load information for each installed configuration. +get_filename_component(_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH) +file(GLOB CONFIG_FILES "${_DIR}/LibCDSConfig-*.cmake") +foreach(f ${CONFIG_FILES}) + include(${f}) +endforeach() + +# Cleanup temporary variables. +set(_IMPORT_PREFIX) + +# Loop over all imported files and verify that they actually exist +foreach(target ${_IMPORT_CHECK_TARGETS} ) + foreach(file ${_IMPORT_CHECK_FILES_FOR_${target}} ) + if(NOT EXISTS "${file}" ) + message(FATAL_ERROR "The imported target \"${target}\" references the file + \"${file}\" +but this file does not exist. Possible reasons include: +* The file was deleted, renamed, or moved to another location. +* An install or uninstall procedure did not complete successfully. +* The installation package was faulty and contained + \"${CMAKE_CURRENT_LIST_FILE}\" +but not all the files it references. 
+") + endif() + endforeach() + unset(_IMPORT_CHECK_FILES_FOR_${target}) +endforeach() +unset(_IMPORT_CHECK_TARGETS) + +# This file does not depend on other imported targets which have +# been exported from the same project but in a separate export set. + +# Commands beyond this point should not need to know the version. +set(CMAKE_IMPORT_FILE_VERSION) +cmake_policy(POP) diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/Makefile.cmake b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/Makefile.cmake new file mode 100644 index 0000000..1895511 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/Makefile.cmake @@ -0,0 +1,134 @@ +# CMAKE generated file: DO NOT EDIT! +# Generated by "Unix Makefiles" Generator, CMake Version 3.5 + +# The generator used is: +set(CMAKE_DEPENDS_GENERATOR "Unix Makefiles") + +# The top level Makefile was generated from the following files: +set(CMAKE_MAKEFILE_DEPENDS + "CMakeCache.txt" + "../CMakeLists.txt" + "CMakeFiles/3.5.1/CMakeCCompiler.cmake" + "CMakeFiles/3.5.1/CMakeCXXCompiler.cmake" + "CMakeFiles/3.5.1/CMakeSystem.cmake" + "CMakeFiles/feature_tests.c" + "CMakeFiles/feature_tests.cxx" + "arch.c" + "../build/cmake/TargetArch.cmake" + "/usr/share/cmake-3.5/Modules/CMakeCCompiler.cmake.in" + "/usr/share/cmake-3.5/Modules/CMakeCCompilerABI.c" + "/usr/share/cmake-3.5/Modules/CMakeCInformation.cmake" + "/usr/share/cmake-3.5/Modules/CMakeCXXCompiler.cmake.in" + "/usr/share/cmake-3.5/Modules/CMakeCXXCompilerABI.cpp" + "/usr/share/cmake-3.5/Modules/CMakeCXXInformation.cmake" + "/usr/share/cmake-3.5/Modules/CMakeCommonLanguageInclude.cmake" + "/usr/share/cmake-3.5/Modules/CMakeCompilerIdDetection.cmake" + "/usr/share/cmake-3.5/Modules/CMakeConfigurableFile.in" + "/usr/share/cmake-3.5/Modules/CMakeDetermineCCompiler.cmake" + "/usr/share/cmake-3.5/Modules/CMakeDetermineCXXCompiler.cmake" + 
"/usr/share/cmake-3.5/Modules/CMakeDetermineCompileFeatures.cmake" + "/usr/share/cmake-3.5/Modules/CMakeDetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/CMakeDetermineCompilerABI.cmake" + "/usr/share/cmake-3.5/Modules/CMakeDetermineCompilerId.cmake" + "/usr/share/cmake-3.5/Modules/CMakeDetermineSystem.cmake" + "/usr/share/cmake-3.5/Modules/CMakeFindBinUtils.cmake" + "/usr/share/cmake-3.5/Modules/CMakeFindDependencyMacro.cmake" + "/usr/share/cmake-3.5/Modules/CMakeGenericSystem.cmake" + "/usr/share/cmake-3.5/Modules/CMakeLanguageInformation.cmake" + "/usr/share/cmake-3.5/Modules/CMakeParseArguments.cmake" + "/usr/share/cmake-3.5/Modules/CMakeParseImplicitLinkInfo.cmake" + "/usr/share/cmake-3.5/Modules/CMakeSystem.cmake.in" + "/usr/share/cmake-3.5/Modules/CMakeSystemSpecificInformation.cmake" + "/usr/share/cmake-3.5/Modules/CMakeSystemSpecificInitialize.cmake" + "/usr/share/cmake-3.5/Modules/CMakeTestCCompiler.cmake" + "/usr/share/cmake-3.5/Modules/CMakeTestCXXCompiler.cmake" + "/usr/share/cmake-3.5/Modules/CMakeTestCompilerCommon.cmake" + "/usr/share/cmake-3.5/Modules/CMakeUnixFindMake.cmake" + "/usr/share/cmake-3.5/Modules/CPack.cmake" + "/usr/share/cmake-3.5/Modules/CPackComponent.cmake" + "/usr/share/cmake-3.5/Modules/CheckFunctionExists.c" + "/usr/share/cmake-3.5/Modules/CheckIncludeFile.c.in" + "/usr/share/cmake-3.5/Modules/CheckIncludeFile.cmake" + "/usr/share/cmake-3.5/Modules/CheckLibraryExists.cmake" + "/usr/share/cmake-3.5/Modules/CheckSymbolExists.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/ADSP-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/ARMCC-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/AppleClang-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/Borland-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/Clang-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/Clang-DetermineCompilerInternal.cmake" + 
"/usr/share/cmake-3.5/Modules/Compiler/Comeau-CXX-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/Compaq-C-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/Compaq-CXX-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/Cray-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/Embarcadero-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/Fujitsu-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/GHS-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/GNU-C-FeatureTests.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/GNU-C.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/GNU-CXX-FeatureTests.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/GNU-CXX.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/GNU-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/GNU.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/HP-C-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/HP-CXX-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/IAR-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/IBMCPP-C-DetermineVersionInternal.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/IBMCPP-CXX-DetermineVersionInternal.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/Intel-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/MIPSpro-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/MSVC-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/OpenWatcom-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/PGI-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/PathScale-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/SCO-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/SDCC-C-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/SunPro-C-DetermineCompiler.cmake" + 
"/usr/share/cmake-3.5/Modules/Compiler/SunPro-CXX-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/TI-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/TinyCC-C-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/VisualAge-C-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/VisualAge-CXX-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/Watcom-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/XL-C-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/XL-CXX-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/zOS-C-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/Compiler/zOS-CXX-DetermineCompiler.cmake" + "/usr/share/cmake-3.5/Modules/FindBoost.cmake" + "/usr/share/cmake-3.5/Modules/FindPackageHandleStandardArgs.cmake" + "/usr/share/cmake-3.5/Modules/FindPackageMessage.cmake" + "/usr/share/cmake-3.5/Modules/FindThreads.cmake" + "/usr/share/cmake-3.5/Modules/Internal/FeatureTesting.cmake" + "/usr/share/cmake-3.5/Modules/MultiArchCross.cmake" + "/usr/share/cmake-3.5/Modules/Platform/Linux-CXX.cmake" + "/usr/share/cmake-3.5/Modules/Platform/Linux-GNU-C.cmake" + "/usr/share/cmake-3.5/Modules/Platform/Linux-GNU-CXX.cmake" + "/usr/share/cmake-3.5/Modules/Platform/Linux-GNU.cmake" + "/usr/share/cmake-3.5/Modules/Platform/Linux.cmake" + "/usr/share/cmake-3.5/Modules/Platform/UnixPaths.cmake" + "/usr/share/cmake-3.5/Templates/CPackConfig.cmake.in" + ) + +# The corresponding makefile is: +set(CMAKE_MAKEFILE_OUTPUTS + "Makefile" + "CMakeFiles/cmake.check_cache" + ) + +# Byproducts of CMake generate step: +set(CMAKE_MAKEFILE_PRODUCTS + "CMakeFiles/3.5.1/CMakeSystem.cmake" + "CMakeFiles/3.5.1/CMakeCCompiler.cmake" + "CMakeFiles/3.5.1/CMakeCXXCompiler.cmake" + "CMakeFiles/3.5.1/CMakeCCompiler.cmake" + "CMakeFiles/3.5.1/CMakeCXXCompiler.cmake" + "CPackConfig.cmake" + "CPackSourceConfig.cmake" + "CMakeFiles/CMakeDirectoryInformation.cmake" + ) + +# 
Dependency information for all targets: +set(CMAKE_DEPEND_INFO_FILES + "CMakeFiles/cds.dir/DependInfo.cmake" + "CMakeFiles/cds-s.dir/DependInfo.cmake" + ) diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/Makefile2 b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/Makefile2 new file mode 100644 index 0000000..1704ad2 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/Makefile2 @@ -0,0 +1,145 @@ +# CMAKE generated file: DO NOT EDIT! +# Generated by "Unix Makefiles" Generator, CMake Version 3.5 + +# Default target executed when no arguments are given to make. +default_target: all + +.PHONY : default_target + +# The main recursive all target +all: + +.PHONY : all + +# The main recursive preinstall target +preinstall: + +.PHONY : preinstall + +#============================================================================= +# Special targets provided by cmake. + +# Disable implicit rules so canonical targets will work. +.SUFFIXES: + + +# Remove some rules from gmake that .SUFFIXES does not remove. +SUFFIXES = + +.SUFFIXES: .hpux_make_needs_suffix_list + + +# Suppress display of executed commands. +$(VERBOSE).SILENT: + + +# A target that is always out of date. +cmake_force: + +.PHONY : cmake_force + +#============================================================================= +# Set environment variables for the build. + +# The shell in which to execute make rules. +SHELL = /bin/sh + +# The CMake executable. +CMAKE_COMMAND = /usr/bin/cmake + +# The command to remove a file. +RM = /usr/bin/cmake -E remove -f + +# Escaping for special characters. +EQUALS = = + +# The top-level source directory on which CMake was run. +CMAKE_SOURCE_DIR = /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2 + +# The top-level build directory on which CMake was run. 
+CMAKE_BINARY_DIR = /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release + +#============================================================================= +# Target rules for target CMakeFiles/cds.dir + +# All Build rule for target. +CMakeFiles/cds.dir/all: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/depend + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/build + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --progress-dir=/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles --progress-num=1,2,3,4,5,6,7,8,9,10,11 "Built target cds" +.PHONY : CMakeFiles/cds.dir/all + +# Include target in all. +all: CMakeFiles/cds.dir/all + +.PHONY : all + +# Build rule for subdir invocation for target. +CMakeFiles/cds.dir/rule: cmake_check_build_system + $(CMAKE_COMMAND) -E cmake_progress_start /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles 11 + $(MAKE) -f CMakeFiles/Makefile2 CMakeFiles/cds.dir/all + $(CMAKE_COMMAND) -E cmake_progress_start /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles 0 +.PHONY : CMakeFiles/cds.dir/rule + +# Convenience name for target. +cds: CMakeFiles/cds.dir/rule + +.PHONY : cds + +# clean rule for target. +CMakeFiles/cds.dir/clean: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/clean +.PHONY : CMakeFiles/cds.dir/clean + +# clean rule for target. +clean: CMakeFiles/cds.dir/clean + +.PHONY : clean + +#============================================================================= +# Target rules for target CMakeFiles/cds-s.dir + +# All Build rule for target. 
+CMakeFiles/cds-s.dir/all: + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/depend + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/build + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --progress-dir=/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles --progress-num=12,13,14,15,16,17,18,19,20,21,22 "Built target cds-s" +.PHONY : CMakeFiles/cds-s.dir/all + +# Include target in all. +all: CMakeFiles/cds-s.dir/all + +.PHONY : all + +# Build rule for subdir invocation for target. +CMakeFiles/cds-s.dir/rule: cmake_check_build_system + $(CMAKE_COMMAND) -E cmake_progress_start /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles 11 + $(MAKE) -f CMakeFiles/Makefile2 CMakeFiles/cds-s.dir/all + $(CMAKE_COMMAND) -E cmake_progress_start /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles 0 +.PHONY : CMakeFiles/cds-s.dir/rule + +# Convenience name for target. +cds-s: CMakeFiles/cds-s.dir/rule + +.PHONY : cds-s + +# clean rule for target. +CMakeFiles/cds-s.dir/clean: + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/clean +.PHONY : CMakeFiles/cds-s.dir/clean + +# clean rule for target. +clean: CMakeFiles/cds-s.dir/clean + +.PHONY : clean + +#============================================================================= +# Special targets to cleanup operation of make. + +# Special rule to run CMake to check the build system integrity. +# No rule that depends on this can have commands that come from listfiles +# because they might be regenerated. 
+cmake_check_build_system: + $(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 0 +.PHONY : cmake_check_build_system + diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/TargetDirectories.txt b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/TargetDirectories.txt new file mode 100644 index 0000000..6982794 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/TargetDirectories.txt @@ -0,0 +1,10 @@ +/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/edit_cache.dir +/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/package_source.dir +/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/rebuild_cache.dir +/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/list_install_components.dir +/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/install.dir +/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/package.dir +/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir +/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir +/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/install/strip.dir +/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/install/local.dir diff --git 
a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/CXX.includecache b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/CXX.includecache new file mode 100644 index 0000000..e5872a2 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/CXX.includecache @@ -0,0 +1,888 @@ +#IncludeRegexLine: ^[ ]*#[ ]*(include|import)[ ]*[<"]([^">]+)([">]) + +#IncludeRegexScan: ^.*$ + +#IncludeRegexComplain: ^$ + +#IncludeRegexTransform: + +..//cds/algo/atomic.h +cds/details/defs.h +- +cds/user_setup/cache_line.h +- +boost/version.hpp +- +boost/atomic.hpp +- +cds/compiler/cxx11_atomic.h +- +atomic +- + +..//cds/algo/backoff_strategy.h +utility +- +thread +- +chrono +- +cds/compiler/backoff.h +- +cds/algo/backoff_strategy.h +- +cds/algo/backoff_strategy.h +- + +..//cds/algo/base.h +cds/details/defs.h +- + +..//cds/algo/bitop.h +cds/details/defs.h +- +cds/compiler/bitop.h +- + +..//cds/algo/elimination_tls.h +cds/algo/base.h +- + +..//cds/algo/int_algo.h +cds/algo/bitop.h +- + +..//cds/compiler/backoff.h +cds/details/defs.h +- +cds/compiler/vc/x86/backoff.h +- +cds/compiler/vc/amd64/backoff.h +- +cds/compiler/gcc/x86/backoff.h +- +cds/compiler/gcc/amd64/backoff.h +- +cds/compiler/gcc/ia64/backoff.h +- +cds/compiler/gcc/sparc/backoff.h +- +cds/compiler/gcc/ppc64/backoff.h +- +cds/compiler/gcc/arm7/backoff.h +- +cds/compiler/gcc/arm8/backoff.h +- + +..//cds/compiler/bitop.h +cds/compiler/vc/x86/bitop.h +- +cds/compiler/vc/amd64/bitop.h +- +cds/compiler/gcc/x86/bitop.h +- +cds/compiler/gcc/amd64/bitop.h +- +cds/compiler/gcc/sparc/bitop.h +- +cds/compiler/gcc/ia64/bitop.h +- +cds/compiler/gcc/ppc64/bitop.h +- +cds/details/bitop_generic.h +- + +..//cds/compiler/clang/defs.h +cds/compiler/gcc/compiler_macro.h +- +cds/compiler/gcc/compiler_barriers.h +- + +..//cds/compiler/cxx11_atomic.h +type_traits +- +cds/details/defs.h +- +cds/details/aligned_type.h +- 
+cds/compiler/vc/x86/cxx11_atomic.h +- +cds/compiler/vc/amd64/cxx11_atomic.h +- +cds/compiler/gcc/x86/cxx11_atomic.h +- +cds/compiler/gcc/amd64/cxx11_atomic.h +- +cds/compiler/gcc/ia64/cxx11_atomic.h +- +cds/compiler/gcc/sparc/cxx11_atomic.h +- +cds/compiler/gcc/ppc64/cxx11_atomic.h +- + +..//cds/compiler/defs.h +cds/compiler/vc/defs.h +- +cds/compiler/gcc/defs.h +- +cds/compiler/icl/defs.h +- +cds/compiler/clang/defs.h +- +cds/compiler/feature_tsan.h +- + +..//cds/compiler/feature_tsan.h + +..//cds/compiler/gcc/amd64/backoff.h + +..//cds/compiler/gcc/amd64/bitop.h + +..//cds/compiler/gcc/amd64/cxx11_atomic.h +cstdint +- +cds/compiler/gcc/x86/cxx11_atomic32.h +- + +..//cds/compiler/gcc/arm7/backoff.h + +..//cds/compiler/gcc/arm8/backoff.h + +..//cds/compiler/gcc/compiler_barriers.h + +..//cds/compiler/gcc/compiler_macro.h + +..//cds/compiler/gcc/defs.h +cds/compiler/gcc/compiler_macro.h +- +cds/compiler/gcc/compiler_barriers.h +- + +..//cds/compiler/gcc/ia64/backoff.h + +..//cds/compiler/gcc/ia64/bitop.h + +..//cds/compiler/gcc/ia64/cxx11_atomic.h +cstdint +- + +..//cds/compiler/gcc/ppc64/backoff.h + +..//cds/compiler/gcc/ppc64/bitop.h + +..//cds/compiler/gcc/sparc/backoff.h + +..//cds/compiler/gcc/sparc/bitop.h + +..//cds/compiler/gcc/sparc/cxx11_atomic.h +cstdint +- + +..//cds/compiler/gcc/x86/backoff.h + +..//cds/compiler/gcc/x86/bitop.h + +..//cds/compiler/gcc/x86/cxx11_atomic.h +cstdint +- +cds/compiler/gcc/x86/cxx11_atomic32.h +- + +..//cds/compiler/gcc/x86/cxx11_atomic32.h +cstdint +- +cds/details/is_aligned.h +- + +..//cds/compiler/icl/compiler_barriers.h +intrin.h +- +atomic +- + +..//cds/compiler/icl/defs.h +cds/compiler/icl/compiler_barriers.h +- + +..//cds/compiler/vc/amd64/backoff.h +intrin.h +- + +..//cds/compiler/vc/amd64/bitop.h +intrin.h +- +intrin.h +- + +..//cds/compiler/vc/amd64/cxx11_atomic.h +intrin.h +- +emmintrin.h +- +cds/details/is_aligned.h +- + +..//cds/compiler/vc/compiler_barriers.h +intrin.h +- +atomic +- + +..//cds/compiler/vc/defs.h 
+stdlib.h +- +crtdbg.h +- +cds/compiler/vc/compiler_barriers.h +- + +..//cds/compiler/vc/x86/backoff.h +intrin.h +- + +..//cds/compiler/vc/x86/bitop.h +intrin.h +- + +..//cds/compiler/vc/x86/cxx11_atomic.h +intrin.h +- +emmintrin.h +- +cds/details/is_aligned.h +- + +..//cds/container/details/base.h +cds/intrusive/details/base.h +- + +..//cds/container/vyukov_mpmc_cycle_queue.h +cds/container/details/base.h +- +cds/opt/buffer.h +- +cds/opt/value_cleaner.h +- +cds/algo/atomic.h +- +cds/details/bounded_container.h +- + +..//cds/details/aligned_type.h +cds/details/defs.h +- + +..//cds/details/allocator.h +type_traits +- +memory +- +cds/details/defs.h +- +cds/user_setup/allocator.h +- + +..//cds/details/bitop_generic.h +cstdlib +- + +..//cds/details/bounded_container.h + +..//cds/details/defs.h +stddef.h +- +stdlib.h +- +assert.h +- +cstdint +- +exception +- +stdexcept +- +string +- +memory +- +cds/version.h +- +cds/init.h +- +cds/gc/hp.h +- +cds/gc/hp.h +- +cds/gc/hp.h +- +cds/compiler/defs.h +- + +..//cds/details/is_aligned.h +cds/details/defs.h +- + +..//cds/details/lib.h +cds/details/defs.h +- + +..//cds/details/marked_ptr.h +cds/algo/atomic.h +- + +..//cds/details/static_functor.h + +..//cds/details/throw_exception.h +cds/details/defs.h +- +stdio.h +- + +..//cds/gc/details/hp_common.h +cds/algo/atomic.h +- +cds/gc/details/retired_ptr.h +- + +..//cds/gc/details/retired_ptr.h +cds/details/defs.h +- +cds/details/static_functor.h +- + +..//cds/gc/dhp.h +exception +- +cds/gc/details/hp_common.h +- +cds/details/lib.h +- +cds/threading/model.h +- +cds/intrusive/free_list_selector.h +- +cds/details/throw_exception.h +- +cds/details/static_functor.h +- +cds/details/marked_ptr.h +- +cds/user_setup/cache_line.h +- + +..//cds/gc/hp.h +exception +- +cds/gc/details/hp_common.h +- +cds/details/lib.h +- +cds/threading/model.h +- +cds/details/throw_exception.h +- +cds/details/static_functor.h +- +cds/details/marked_ptr.h +- +cds/user_setup/cache_line.h +- + +..//cds/init.h 
+cds/details/defs.h +- +cds/os/topology.h +- +cds/threading/model.h +- +cds/details/lib.h +- + +..//cds/intrusive/details/base.h +cds/intrusive/details/node_traits.h +- +cds/details/allocator.h +- +cds/algo/backoff_strategy.h +- + +..//cds/intrusive/details/node_traits.h +cds/intrusive/options.h +- + +..//cds/intrusive/free_list.h +cds/algo/atomic.h +- +cds/intrusive/free_list.h +- + +..//cds/intrusive/free_list_selector.h +cds/details/defs.h +- +cds/intrusive/free_list_tagged.h +- +cds/intrusive/free_list.h +- + +..//cds/intrusive/free_list_tagged.h +cds/algo/atomic.h +- +cds/intrusive/free_list_tagged.h +- + +..//cds/intrusive/options.h +cds/opt/options.h +- +cds/details/allocator.h +- + +..//cds/opt/buffer.h +memory.h +- +cds/details/defs.h +- +cds/user_setup/allocator.h +- +cds/details/allocator.h +- +cds/algo/int_algo.h +- + +..//cds/opt/options.h +cstdlib +- +cds/details/aligned_type.h +- +cds/user_setup/allocator.h +- +cds/user_setup/cache_line.h +- +cds/algo/atomic.h +- +cds/opt/options.h +- +type_traits +- +cds/opt/options.h +- + +..//cds/opt/value_cleaner.h +cds/details/defs.h +- + +..//cds/os/aix/alloc_aligned.h +cds/os/posix/alloc_aligned.h +- + +..//cds/os/aix/topology.h +cds/os/details/fake_topology.h +- +unistd.h +- +sys/processor.h +- + +..//cds/os/alloc_aligned.h +cds/details/defs.h +- +cds/os/win/alloc_aligned.h +- +cds/os/linux/alloc_aligned.h +- +cds/os/sunos/alloc_aligned.h +- +cds/os/hpux/alloc_aligned.h +- +cds/os/aix/alloc_aligned.h +- +cds/os/free_bsd/alloc_aligned.h +- +cds/os/posix/alloc_aligned.h +- +memory +- +cds/details/is_aligned.h +- +cds/algo/int_algo.h +- +cds/details/throw_exception.h +- + +..//cds/os/details/fake_topology.h +cds/details/defs.h +- +cds/threading/model.h +- + +..//cds/os/free_bsd/alloc_aligned.h +cds/os/posix/alloc_aligned.h +- + +..//cds/os/free_bsd/topology.h +cds/os/details/fake_topology.h +- +sys/types.h +- +sys/sysctl.h +- + +..//cds/os/hpux/alloc_aligned.h +cds/os/libc/alloc_aligned.h +- + 
+..//cds/os/hpux/topology.h +sys/mpctl.h +- + +..//cds/os/libc/alloc_aligned.h +stdlib.h +- + +..//cds/os/linux/alloc_aligned.h +cds/os/libc/alloc_aligned.h +- +cds/os/posix/alloc_aligned.h +- + +..//cds/os/linux/topology.h +cds/details/defs.h +- +cds/threading/model.h +- +sys/syscall.h +- +sched.h +- + +..//cds/os/osx/topology.h +cds/os/details/fake_topology.h +- + +..//cds/os/posix/alloc_aligned.h +stdlib.h +- + +..//cds/os/posix/fake_topology.h +cds/os/details/fake_topology.h +- +unistd.h +- + +..//cds/os/posix/thread.h +pthread.h +- +signal.h +- + +..//cds/os/sunos/alloc_aligned.h +cds/os/libc/alloc_aligned.h +- + +..//cds/os/sunos/topology.h +sys/processor.h +- +unistd.h +- + +..//cds/os/thread.h +thread +- +cds/details/defs.h +- +cds/os/win/thread.h +- +cds/os/posix/thread.h +- + +..//cds/os/topology.h +cds/details/defs.h +- +cds/os/win/topology.h +- +cds/os/linux/topology.h +- +cds/os/sunos/topology.h +- +cds/os/hpux/topology.h +- +cds/os/aix/topology.h +- +cds/os/free_bsd/topology.h +- +cds/os/osx/topology.h +- +cds/os/posix/fake_topology.h +- + +..//cds/os/win/alloc_aligned.h +malloc.h +- + +..//cds/os/win/thread.h +windows.h +- + +..//cds/os/win/topology.h +cds/details/defs.h +- +windows.h +- + +..//cds/threading/details/_common.h +cds/urcu/details/gp_decl.h +- +cds/urcu/details/sh_decl.h +- +cds/algo/elimination_tls.h +- + +..//cds/threading/details/auto_detect.h +cds/threading/details/msvc.h +- +cds/threading/details/wintls.h +- +cds/threading/details/pthread.h +- +cds/threading/details/gcc.h +- +cds/threading/details/cxx11.h +- + +..//cds/threading/details/cxx11.h +cds/threading/details/cxx11_manager.h +- + +..//cds/threading/details/cxx11_manager.h +cds/threading/details/_common.h +- + +..//cds/threading/details/gcc.h +cds/threading/details/gcc_manager.h +- + +..//cds/threading/details/gcc_manager.h +cds/threading/details/_common.h +- + +..//cds/threading/details/msvc.h +cds/threading/details/msvc_manager.h +- + 
+..//cds/threading/details/msvc_manager.h +cds/threading/details/_common.h +- + +..//cds/threading/details/pthread.h +cds/threading/details/pthread_manager.h +- + +..//cds/threading/details/pthread_manager.h +system_error +- +stdio.h +- +pthread.h +- +cds/threading/details/_common.h +- +cds/details/throw_exception.h +- + +..//cds/threading/details/wintls.h +stdio.h +- +cds/threading/details/wintls_manager.h +- + +..//cds/threading/details/wintls_manager.h +system_error +- +stdio.h +- +cds/threading/details/_common.h +- +cds/details/throw_exception.h +- + +..//cds/threading/model.h +cds/threading/details/_common.h +- +cds/user_setup/threading.h +- +cds/threading/details/auto_detect.h +- + +..//cds/urcu/details/base.h +cds/algo/atomic.h +- +cds/gc/details/retired_ptr.h +- +cds/details/allocator.h +- +cds/os/thread.h +- +cds/details/marked_ptr.h +- +cds/urcu/general_buffered.h +- +cds/urcu/general_buffered.h +- + +..//cds/urcu/details/gp.h +cds/urcu/details/gp_decl.h +- +cds/threading/model.h +- + +..//cds/urcu/details/gp_decl.h +cds/urcu/details/base.h +- +cds/details/static_functor.h +- +cds/details/lib.h +- +cds/user_setup/cache_line.h +- + +..//cds/urcu/details/gpb.h +mutex +- +limits +- +cds/urcu/details/gp.h +- +cds/algo/backoff_strategy.h +- +cds/container/vyukov_mpmc_cycle_queue.h +- + +..//cds/urcu/details/sh.h +memory.h +- +cds/urcu/details/sh_decl.h +- +cds/threading/model.h +- + +..//cds/urcu/details/sh_decl.h +cds/urcu/details/base.h +- +cds/details/static_functor.h +- +cds/details/lib.h +- +cds/user_setup/cache_line.h +- +signal.h +- + +..//cds/urcu/general_buffered.h +cds/urcu/details/gpb.h +- + +..//cds/user_setup/allocator.h +memory +- +cds/os/alloc_aligned.h +- + +..//cds/user_setup/cache_line.h + +..//cds/user_setup/threading.h + +..//cds/version.h + +/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/dhp.cpp +algorithm +- +vector +- +cds/gc/dhp.h +- +cds/os/thread.h +- + 
+/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/dllmain.cpp +cds/details/defs.h +- +cds/os/thread.h +- +vld.h +- +cds/os/topology.h +- +cds/algo/bitop.h +- +cds/os/win/topology.h +- + +/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/hp.cpp +algorithm +- +vector +- +cds/gc/hp.h +- +cds/os/thread.h +- + +/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/init.cpp +cds/init.h +- +cds/algo/atomic.h +- +cds/algo/backoff_strategy.h +- +cds/threading/details/msvc_manager.h +- +cds/threading/details/wintls_manager.h +- +cds/threading/details/gcc_manager.h +- +cds/threading/details/pthread_manager.h +- +cds/threading/details/cxx11_manager.h +- + +/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/thread_data.cpp +cds/threading/details/_common.h +- +cds/gc/hp.h +- +cds/gc/dhp.h +- + +/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_hpux.cpp +cds/os/topology.h +- +cds/algo/atomic.h +- +limits +- + +/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_linux.cpp +cds/os/topology.h +- +thread +- +unistd.h +- +fstream +- + +/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_osx.cpp +cds/os/topology.h +- +sys/types.h +- +sys/sysctl.h +- + +/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/urcu_gp.cpp +cds/urcu/details/gp.h +- + +/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/urcu_sh.cpp +cds/urcu/details/sh.h +- + diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/DependInfo.cmake 
b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/DependInfo.cmake new file mode 100644 index 0000000..4794dcc --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/DependInfo.cmake @@ -0,0 +1,31 @@ +# The set of languages for which implicit dependencies are needed: +set(CMAKE_DEPENDS_LANGUAGES + "CXX" + ) +# The set of files for implicit dependencies of each language: +set(CMAKE_DEPENDS_CHECK_CXX + "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/dhp.cpp" "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/dhp.cpp.o" + "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/dllmain.cpp" "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/dllmain.cpp.o" + "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/hp.cpp" "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/hp.cpp.o" + "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/init.cpp" "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/init.cpp.o" + "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/thread_data.cpp" "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/thread_data.cpp.o" + "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_hpux.cpp" 
"/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o" + "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_linux.cpp" "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/topology_linux.cpp.o" + "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_osx.cpp" "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/topology_osx.cpp.o" + "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/urcu_gp.cpp" "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o" + "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/urcu_sh.cpp" "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o" + ) +set(CMAKE_CXX_COMPILER_ID "GNU") + +# The include file search paths: +set(CMAKE_CXX_TARGET_INCLUDE_PATH + "." + "../" + ) + +# Targets to which this target links. +set(CMAKE_TARGET_LINKED_INFO_FILES + ) + +# Fortran module output directory. +set(CMAKE_Fortran_TARGET_MODULE_DIR "") diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/build.make b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/build.make new file mode 100644 index 0000000..5db1743 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/build.make @@ -0,0 +1,357 @@ +# CMAKE generated file: DO NOT EDIT! 
+# Generated by "Unix Makefiles" Generator, CMake Version 3.5 + +# Delete rule output on recipe failure. +.DELETE_ON_ERROR: + + +#============================================================================= +# Special targets provided by cmake. + +# Disable implicit rules so canonical targets will work. +.SUFFIXES: + + +# Remove some rules from gmake that .SUFFIXES does not remove. +SUFFIXES = + +.SUFFIXES: .hpux_make_needs_suffix_list + + +# Suppress display of executed commands. +$(VERBOSE).SILENT: + + +# A target that is always out of date. +cmake_force: + +.PHONY : cmake_force + +#============================================================================= +# Set environment variables for the build. + +# The shell in which to execute make rules. +SHELL = /bin/sh + +# The CMake executable. +CMAKE_COMMAND = /usr/bin/cmake + +# The command to remove a file. +RM = /usr/bin/cmake -E remove -f + +# Escaping for special characters. +EQUALS = = + +# The top-level source directory on which CMake was run. +CMAKE_SOURCE_DIR = /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2 + +# The top-level build directory on which CMake was run. +CMAKE_BINARY_DIR = /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release + +# Include any dependencies generated for this target. +include CMakeFiles/cds-s.dir/depend.make + +# Include the progress variables for this target. +include CMakeFiles/cds-s.dir/progress.make + +# Include the compile flags for this target's objects. 
+include CMakeFiles/cds-s.dir/flags.make + +CMakeFiles/cds-s.dir/src/init.cpp.o: CMakeFiles/cds-s.dir/flags.make +CMakeFiles/cds-s.dir/src/init.cpp.o: ../src/init.cpp + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --progress-dir=/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles --progress-num=$(CMAKE_PROGRESS_1) "Building CXX object CMakeFiles/cds-s.dir/src/init.cpp.o" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o CMakeFiles/cds-s.dir/src/init.cpp.o -c /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/init.cpp + +CMakeFiles/cds-s.dir/src/init.cpp.i: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Preprocessing CXX source to CMakeFiles/cds-s.dir/src/init.cpp.i" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -E /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/init.cpp > CMakeFiles/cds-s.dir/src/init.cpp.i + +CMakeFiles/cds-s.dir/src/init.cpp.s: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/cds-s.dir/src/init.cpp.s" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/init.cpp -o CMakeFiles/cds-s.dir/src/init.cpp.s + +CMakeFiles/cds-s.dir/src/init.cpp.o.requires: + +.PHONY : CMakeFiles/cds-s.dir/src/init.cpp.o.requires + +CMakeFiles/cds-s.dir/src/init.cpp.o.provides: CMakeFiles/cds-s.dir/src/init.cpp.o.requires + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/init.cpp.o.provides.build +.PHONY : CMakeFiles/cds-s.dir/src/init.cpp.o.provides + +CMakeFiles/cds-s.dir/src/init.cpp.o.provides.build: CMakeFiles/cds-s.dir/src/init.cpp.o + + +CMakeFiles/cds-s.dir/src/hp.cpp.o: CMakeFiles/cds-s.dir/flags.make 
+CMakeFiles/cds-s.dir/src/hp.cpp.o: ../src/hp.cpp + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --progress-dir=/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles --progress-num=$(CMAKE_PROGRESS_2) "Building CXX object CMakeFiles/cds-s.dir/src/hp.cpp.o" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o CMakeFiles/cds-s.dir/src/hp.cpp.o -c /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/hp.cpp + +CMakeFiles/cds-s.dir/src/hp.cpp.i: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Preprocessing CXX source to CMakeFiles/cds-s.dir/src/hp.cpp.i" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -E /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/hp.cpp > CMakeFiles/cds-s.dir/src/hp.cpp.i + +CMakeFiles/cds-s.dir/src/hp.cpp.s: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/cds-s.dir/src/hp.cpp.s" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/hp.cpp -o CMakeFiles/cds-s.dir/src/hp.cpp.s + +CMakeFiles/cds-s.dir/src/hp.cpp.o.requires: + +.PHONY : CMakeFiles/cds-s.dir/src/hp.cpp.o.requires + +CMakeFiles/cds-s.dir/src/hp.cpp.o.provides: CMakeFiles/cds-s.dir/src/hp.cpp.o.requires + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/hp.cpp.o.provides.build +.PHONY : CMakeFiles/cds-s.dir/src/hp.cpp.o.provides + +CMakeFiles/cds-s.dir/src/hp.cpp.o.provides.build: CMakeFiles/cds-s.dir/src/hp.cpp.o + + +CMakeFiles/cds-s.dir/src/dhp.cpp.o: CMakeFiles/cds-s.dir/flags.make +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../src/dhp.cpp + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green 
--progress-dir=/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles --progress-num=$(CMAKE_PROGRESS_3) "Building CXX object CMakeFiles/cds-s.dir/src/dhp.cpp.o" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o CMakeFiles/cds-s.dir/src/dhp.cpp.o -c /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/dhp.cpp + +CMakeFiles/cds-s.dir/src/dhp.cpp.i: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Preprocessing CXX source to CMakeFiles/cds-s.dir/src/dhp.cpp.i" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -E /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/dhp.cpp > CMakeFiles/cds-s.dir/src/dhp.cpp.i + +CMakeFiles/cds-s.dir/src/dhp.cpp.s: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/cds-s.dir/src/dhp.cpp.s" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/dhp.cpp -o CMakeFiles/cds-s.dir/src/dhp.cpp.s + +CMakeFiles/cds-s.dir/src/dhp.cpp.o.requires: + +.PHONY : CMakeFiles/cds-s.dir/src/dhp.cpp.o.requires + +CMakeFiles/cds-s.dir/src/dhp.cpp.o.provides: CMakeFiles/cds-s.dir/src/dhp.cpp.o.requires + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/dhp.cpp.o.provides.build +.PHONY : CMakeFiles/cds-s.dir/src/dhp.cpp.o.provides + +CMakeFiles/cds-s.dir/src/dhp.cpp.o.provides.build: CMakeFiles/cds-s.dir/src/dhp.cpp.o + + +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: CMakeFiles/cds-s.dir/flags.make +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../src/urcu_gp.cpp + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --progress-dir=/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles 
--progress-num=$(CMAKE_PROGRESS_4) "Building CXX object CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o -c /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/urcu_gp.cpp + +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.i: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Preprocessing CXX source to CMakeFiles/cds-s.dir/src/urcu_gp.cpp.i" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -E /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/urcu_gp.cpp > CMakeFiles/cds-s.dir/src/urcu_gp.cpp.i + +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.s: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/cds-s.dir/src/urcu_gp.cpp.s" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/urcu_gp.cpp -o CMakeFiles/cds-s.dir/src/urcu_gp.cpp.s + +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o.requires: + +.PHONY : CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o.requires + +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o.provides: CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o.requires + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o.provides.build +.PHONY : CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o.provides + +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o.provides.build: CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o + + +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: CMakeFiles/cds-s.dir/flags.make +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../src/urcu_sh.cpp + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --progress-dir=/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles --progress-num=$(CMAKE_PROGRESS_5) "Building CXX object 
CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o -c /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/urcu_sh.cpp + +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.i: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Preprocessing CXX source to CMakeFiles/cds-s.dir/src/urcu_sh.cpp.i" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -E /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/urcu_sh.cpp > CMakeFiles/cds-s.dir/src/urcu_sh.cpp.i + +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.s: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/cds-s.dir/src/urcu_sh.cpp.s" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/urcu_sh.cpp -o CMakeFiles/cds-s.dir/src/urcu_sh.cpp.s + +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o.requires: + +.PHONY : CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o.requires + +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o.provides: CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o.requires + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o.provides.build +.PHONY : CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o.provides + +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o.provides.build: CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o + + +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: CMakeFiles/cds-s.dir/flags.make +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../src/thread_data.cpp + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --progress-dir=/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles --progress-num=$(CMAKE_PROGRESS_6) "Building CXX object CMakeFiles/cds-s.dir/src/thread_data.cpp.o" + 
/usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o CMakeFiles/cds-s.dir/src/thread_data.cpp.o -c /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/thread_data.cpp + +CMakeFiles/cds-s.dir/src/thread_data.cpp.i: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Preprocessing CXX source to CMakeFiles/cds-s.dir/src/thread_data.cpp.i" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -E /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/thread_data.cpp > CMakeFiles/cds-s.dir/src/thread_data.cpp.i + +CMakeFiles/cds-s.dir/src/thread_data.cpp.s: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/cds-s.dir/src/thread_data.cpp.s" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/thread_data.cpp -o CMakeFiles/cds-s.dir/src/thread_data.cpp.s + +CMakeFiles/cds-s.dir/src/thread_data.cpp.o.requires: + +.PHONY : CMakeFiles/cds-s.dir/src/thread_data.cpp.o.requires + +CMakeFiles/cds-s.dir/src/thread_data.cpp.o.provides: CMakeFiles/cds-s.dir/src/thread_data.cpp.o.requires + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/thread_data.cpp.o.provides.build +.PHONY : CMakeFiles/cds-s.dir/src/thread_data.cpp.o.provides + +CMakeFiles/cds-s.dir/src/thread_data.cpp.o.provides.build: CMakeFiles/cds-s.dir/src/thread_data.cpp.o + + +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: CMakeFiles/cds-s.dir/flags.make +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../src/topology_hpux.cpp + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --progress-dir=/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles --progress-num=$(CMAKE_PROGRESS_7) "Building CXX object 
CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o -c /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_hpux.cpp + +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.i: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Preprocessing CXX source to CMakeFiles/cds-s.dir/src/topology_hpux.cpp.i" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -E /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_hpux.cpp > CMakeFiles/cds-s.dir/src/topology_hpux.cpp.i + +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.s: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/cds-s.dir/src/topology_hpux.cpp.s" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_hpux.cpp -o CMakeFiles/cds-s.dir/src/topology_hpux.cpp.s + +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o.requires: + +.PHONY : CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o.requires + +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o.provides: CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o.requires + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o.provides.build +.PHONY : CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o.provides + +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o.provides.build: CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o + + +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: CMakeFiles/cds-s.dir/flags.make +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../src/topology_linux.cpp + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green 
--progress-dir=/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles --progress-num=$(CMAKE_PROGRESS_8) "Building CXX object CMakeFiles/cds-s.dir/src/topology_linux.cpp.o" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o CMakeFiles/cds-s.dir/src/topology_linux.cpp.o -c /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_linux.cpp + +CMakeFiles/cds-s.dir/src/topology_linux.cpp.i: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Preprocessing CXX source to CMakeFiles/cds-s.dir/src/topology_linux.cpp.i" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -E /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_linux.cpp > CMakeFiles/cds-s.dir/src/topology_linux.cpp.i + +CMakeFiles/cds-s.dir/src/topology_linux.cpp.s: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/cds-s.dir/src/topology_linux.cpp.s" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_linux.cpp -o CMakeFiles/cds-s.dir/src/topology_linux.cpp.s + +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o.requires: + +.PHONY : CMakeFiles/cds-s.dir/src/topology_linux.cpp.o.requires + +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o.provides: CMakeFiles/cds-s.dir/src/topology_linux.cpp.o.requires + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/topology_linux.cpp.o.provides.build +.PHONY : CMakeFiles/cds-s.dir/src/topology_linux.cpp.o.provides + +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o.provides.build: CMakeFiles/cds-s.dir/src/topology_linux.cpp.o + + +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: CMakeFiles/cds-s.dir/flags.make +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: 
../src/topology_osx.cpp + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --progress-dir=/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles --progress-num=$(CMAKE_PROGRESS_9) "Building CXX object CMakeFiles/cds-s.dir/src/topology_osx.cpp.o" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o CMakeFiles/cds-s.dir/src/topology_osx.cpp.o -c /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_osx.cpp + +CMakeFiles/cds-s.dir/src/topology_osx.cpp.i: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Preprocessing CXX source to CMakeFiles/cds-s.dir/src/topology_osx.cpp.i" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -E /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_osx.cpp > CMakeFiles/cds-s.dir/src/topology_osx.cpp.i + +CMakeFiles/cds-s.dir/src/topology_osx.cpp.s: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/cds-s.dir/src/topology_osx.cpp.s" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_osx.cpp -o CMakeFiles/cds-s.dir/src/topology_osx.cpp.s + +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o.requires: + +.PHONY : CMakeFiles/cds-s.dir/src/topology_osx.cpp.o.requires + +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o.provides: CMakeFiles/cds-s.dir/src/topology_osx.cpp.o.requires + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/topology_osx.cpp.o.provides.build +.PHONY : CMakeFiles/cds-s.dir/src/topology_osx.cpp.o.provides + +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o.provides.build: CMakeFiles/cds-s.dir/src/topology_osx.cpp.o + + +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: CMakeFiles/cds-s.dir/flags.make 
+CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../src/dllmain.cpp + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --progress-dir=/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles --progress-num=$(CMAKE_PROGRESS_10) "Building CXX object CMakeFiles/cds-s.dir/src/dllmain.cpp.o" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o CMakeFiles/cds-s.dir/src/dllmain.cpp.o -c /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/dllmain.cpp + +CMakeFiles/cds-s.dir/src/dllmain.cpp.i: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Preprocessing CXX source to CMakeFiles/cds-s.dir/src/dllmain.cpp.i" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -E /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/dllmain.cpp > CMakeFiles/cds-s.dir/src/dllmain.cpp.i + +CMakeFiles/cds-s.dir/src/dllmain.cpp.s: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/cds-s.dir/src/dllmain.cpp.s" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/dllmain.cpp -o CMakeFiles/cds-s.dir/src/dllmain.cpp.s + +CMakeFiles/cds-s.dir/src/dllmain.cpp.o.requires: + +.PHONY : CMakeFiles/cds-s.dir/src/dllmain.cpp.o.requires + +CMakeFiles/cds-s.dir/src/dllmain.cpp.o.provides: CMakeFiles/cds-s.dir/src/dllmain.cpp.o.requires + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/dllmain.cpp.o.provides.build +.PHONY : CMakeFiles/cds-s.dir/src/dllmain.cpp.o.provides + +CMakeFiles/cds-s.dir/src/dllmain.cpp.o.provides.build: CMakeFiles/cds-s.dir/src/dllmain.cpp.o + + +# Object files for target cds-s +cds__s_OBJECTS = \ +"CMakeFiles/cds-s.dir/src/init.cpp.o" \ +"CMakeFiles/cds-s.dir/src/hp.cpp.o" \ 
+"CMakeFiles/cds-s.dir/src/dhp.cpp.o" \ +"CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o" \ +"CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o" \ +"CMakeFiles/cds-s.dir/src/thread_data.cpp.o" \ +"CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o" \ +"CMakeFiles/cds-s.dir/src/topology_linux.cpp.o" \ +"CMakeFiles/cds-s.dir/src/topology_osx.cpp.o" \ +"CMakeFiles/cds-s.dir/src/dllmain.cpp.o" + +# External object files for target cds-s +cds__s_EXTERNAL_OBJECTS = + +bin/libcds-s.a: CMakeFiles/cds-s.dir/src/init.cpp.o +bin/libcds-s.a: CMakeFiles/cds-s.dir/src/hp.cpp.o +bin/libcds-s.a: CMakeFiles/cds-s.dir/src/dhp.cpp.o +bin/libcds-s.a: CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o +bin/libcds-s.a: CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o +bin/libcds-s.a: CMakeFiles/cds-s.dir/src/thread_data.cpp.o +bin/libcds-s.a: CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o +bin/libcds-s.a: CMakeFiles/cds-s.dir/src/topology_linux.cpp.o +bin/libcds-s.a: CMakeFiles/cds-s.dir/src/topology_osx.cpp.o +bin/libcds-s.a: CMakeFiles/cds-s.dir/src/dllmain.cpp.o +bin/libcds-s.a: CMakeFiles/cds-s.dir/build.make +bin/libcds-s.a: CMakeFiles/cds-s.dir/link.txt + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --bold --progress-dir=/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles --progress-num=$(CMAKE_PROGRESS_11) "Linking CXX static library bin/libcds-s.a" + $(CMAKE_COMMAND) -P CMakeFiles/cds-s.dir/cmake_clean_target.cmake + $(CMAKE_COMMAND) -E cmake_link_script CMakeFiles/cds-s.dir/link.txt --verbose=$(VERBOSE) + +# Rule to build all files generated by this target. 
+CMakeFiles/cds-s.dir/build: bin/libcds-s.a + +.PHONY : CMakeFiles/cds-s.dir/build + +CMakeFiles/cds-s.dir/requires: CMakeFiles/cds-s.dir/src/init.cpp.o.requires +CMakeFiles/cds-s.dir/requires: CMakeFiles/cds-s.dir/src/hp.cpp.o.requires +CMakeFiles/cds-s.dir/requires: CMakeFiles/cds-s.dir/src/dhp.cpp.o.requires +CMakeFiles/cds-s.dir/requires: CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o.requires +CMakeFiles/cds-s.dir/requires: CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o.requires +CMakeFiles/cds-s.dir/requires: CMakeFiles/cds-s.dir/src/thread_data.cpp.o.requires +CMakeFiles/cds-s.dir/requires: CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o.requires +CMakeFiles/cds-s.dir/requires: CMakeFiles/cds-s.dir/src/topology_linux.cpp.o.requires +CMakeFiles/cds-s.dir/requires: CMakeFiles/cds-s.dir/src/topology_osx.cpp.o.requires +CMakeFiles/cds-s.dir/requires: CMakeFiles/cds-s.dir/src/dllmain.cpp.o.requires + +.PHONY : CMakeFiles/cds-s.dir/requires + +CMakeFiles/cds-s.dir/clean: + $(CMAKE_COMMAND) -P CMakeFiles/cds-s.dir/cmake_clean.cmake +.PHONY : CMakeFiles/cds-s.dir/clean + +CMakeFiles/cds-s.dir/depend: + cd /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release && $(CMAKE_COMMAND) -E cmake_depends "Unix Makefiles" /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2 /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2 /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/DependInfo.cmake --color=$(COLOR) +.PHONY : CMakeFiles/cds-s.dir/depend + diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/cmake_clean.cmake 
b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/cmake_clean.cmake new file mode 100644 index 0000000..eeffc68 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/cmake_clean.cmake @@ -0,0 +1,19 @@ +file(REMOVE_RECURSE + "CMakeFiles/cds-s.dir/src/init.cpp.o" + "CMakeFiles/cds-s.dir/src/hp.cpp.o" + "CMakeFiles/cds-s.dir/src/dhp.cpp.o" + "CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o" + "CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o" + "CMakeFiles/cds-s.dir/src/thread_data.cpp.o" + "CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o" + "CMakeFiles/cds-s.dir/src/topology_linux.cpp.o" + "CMakeFiles/cds-s.dir/src/topology_osx.cpp.o" + "CMakeFiles/cds-s.dir/src/dllmain.cpp.o" + "bin/libcds-s.pdb" + "bin/libcds-s.a" +) + +# Per-language clean rules from dependency scanning. +foreach(lang CXX) + include(CMakeFiles/cds-s.dir/cmake_clean_${lang}.cmake OPTIONAL) +endforeach() diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/cmake_clean_target.cmake b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/cmake_clean_target.cmake new file mode 100644 index 0000000..b0854c1 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/cmake_clean_target.cmake @@ -0,0 +1,3 @@ +file(REMOVE_RECURSE + "bin/libcds-s.a" +) diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/depend.internal b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/depend.internal new file mode 100644 index 0000000..bed128f --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/depend.internal @@ -0,0 +1,1122 @@ +# CMAKE generated file: DO NOT EDIT! 
+# Generated by "Unix Makefiles" Generator, CMake Version 3.5 + +CMakeFiles/cds-s.dir/src/dhp.cpp.o + ..//cds/algo/atomic.h + ..//cds/algo/backoff_strategy.h + ..//cds/algo/base.h + ..//cds/algo/bitop.h + ..//cds/algo/elimination_tls.h + ..//cds/algo/int_algo.h + ..//cds/compiler/backoff.h + ..//cds/compiler/bitop.h + ..//cds/compiler/clang/defs.h + ..//cds/compiler/cxx11_atomic.h + ..//cds/compiler/defs.h + ..//cds/compiler/feature_tsan.h + ..//cds/compiler/gcc/amd64/backoff.h + ..//cds/compiler/gcc/amd64/bitop.h + ..//cds/compiler/gcc/amd64/cxx11_atomic.h + ..//cds/compiler/gcc/arm7/backoff.h + ..//cds/compiler/gcc/arm8/backoff.h + ..//cds/compiler/gcc/compiler_barriers.h + ..//cds/compiler/gcc/compiler_macro.h + ..//cds/compiler/gcc/defs.h + ..//cds/compiler/gcc/ia64/backoff.h + ..//cds/compiler/gcc/ia64/bitop.h + ..//cds/compiler/gcc/ia64/cxx11_atomic.h + ..//cds/compiler/gcc/ppc64/backoff.h + ..//cds/compiler/gcc/ppc64/bitop.h + ..//cds/compiler/gcc/sparc/backoff.h + ..//cds/compiler/gcc/sparc/bitop.h + ..//cds/compiler/gcc/sparc/cxx11_atomic.h + ..//cds/compiler/gcc/x86/backoff.h + ..//cds/compiler/gcc/x86/bitop.h + ..//cds/compiler/gcc/x86/cxx11_atomic.h + ..//cds/compiler/gcc/x86/cxx11_atomic32.h + ..//cds/compiler/icl/compiler_barriers.h + ..//cds/compiler/icl/defs.h + ..//cds/compiler/vc/amd64/backoff.h + ..//cds/compiler/vc/amd64/bitop.h + ..//cds/compiler/vc/amd64/cxx11_atomic.h + ..//cds/compiler/vc/compiler_barriers.h + ..//cds/compiler/vc/defs.h + ..//cds/compiler/vc/x86/backoff.h + ..//cds/compiler/vc/x86/bitop.h + ..//cds/compiler/vc/x86/cxx11_atomic.h + ..//cds/container/details/base.h + ..//cds/container/vyukov_mpmc_cycle_queue.h + ..//cds/details/aligned_type.h + ..//cds/details/allocator.h + ..//cds/details/bitop_generic.h + ..//cds/details/bounded_container.h + ..//cds/details/defs.h + ..//cds/details/is_aligned.h + ..//cds/details/lib.h + ..//cds/details/marked_ptr.h + ..//cds/details/static_functor.h + ..//cds/details/throw_exception.h + 
..//cds/gc/details/hp_common.h + ..//cds/gc/details/retired_ptr.h + ..//cds/gc/dhp.h + ..//cds/gc/hp.h + ..//cds/init.h + ..//cds/intrusive/details/base.h + ..//cds/intrusive/details/node_traits.h + ..//cds/intrusive/free_list.h + ..//cds/intrusive/free_list_selector.h + ..//cds/intrusive/free_list_tagged.h + ..//cds/intrusive/options.h + ..//cds/opt/buffer.h + ..//cds/opt/options.h + ..//cds/opt/value_cleaner.h + ..//cds/os/aix/alloc_aligned.h + ..//cds/os/aix/topology.h + ..//cds/os/alloc_aligned.h + ..//cds/os/details/fake_topology.h + ..//cds/os/free_bsd/alloc_aligned.h + ..//cds/os/free_bsd/topology.h + ..//cds/os/hpux/alloc_aligned.h + ..//cds/os/hpux/topology.h + ..//cds/os/libc/alloc_aligned.h + ..//cds/os/linux/alloc_aligned.h + ..//cds/os/linux/topology.h + ..//cds/os/osx/topology.h + ..//cds/os/posix/alloc_aligned.h + ..//cds/os/posix/fake_topology.h + ..//cds/os/posix/thread.h + ..//cds/os/sunos/alloc_aligned.h + ..//cds/os/sunos/topology.h + ..//cds/os/thread.h + ..//cds/os/topology.h + ..//cds/os/win/alloc_aligned.h + ..//cds/os/win/thread.h + ..//cds/os/win/topology.h + ..//cds/threading/details/_common.h + ..//cds/threading/details/auto_detect.h + ..//cds/threading/details/cxx11.h + ..//cds/threading/details/cxx11_manager.h + ..//cds/threading/details/gcc.h + ..//cds/threading/details/gcc_manager.h + ..//cds/threading/details/msvc.h + ..//cds/threading/details/msvc_manager.h + ..//cds/threading/details/pthread.h + ..//cds/threading/details/pthread_manager.h + ..//cds/threading/details/wintls.h + ..//cds/threading/details/wintls_manager.h + ..//cds/threading/model.h + ..//cds/urcu/details/base.h + ..//cds/urcu/details/gp.h + ..//cds/urcu/details/gp_decl.h + ..//cds/urcu/details/gpb.h + ..//cds/urcu/details/sh_decl.h + ..//cds/urcu/general_buffered.h + ..//cds/user_setup/allocator.h + ..//cds/user_setup/cache_line.h + ..//cds/user_setup/threading.h + ..//cds/version.h + 
/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/dhp.cpp +CMakeFiles/cds-s.dir/src/dllmain.cpp.o + ..//cds/algo/atomic.h + ..//cds/algo/backoff_strategy.h + ..//cds/algo/base.h + ..//cds/algo/bitop.h + ..//cds/algo/elimination_tls.h + ..//cds/algo/int_algo.h + ..//cds/compiler/backoff.h + ..//cds/compiler/bitop.h + ..//cds/compiler/clang/defs.h + ..//cds/compiler/cxx11_atomic.h + ..//cds/compiler/defs.h + ..//cds/compiler/feature_tsan.h + ..//cds/compiler/gcc/amd64/backoff.h + ..//cds/compiler/gcc/amd64/bitop.h + ..//cds/compiler/gcc/amd64/cxx11_atomic.h + ..//cds/compiler/gcc/arm7/backoff.h + ..//cds/compiler/gcc/arm8/backoff.h + ..//cds/compiler/gcc/compiler_barriers.h + ..//cds/compiler/gcc/compiler_macro.h + ..//cds/compiler/gcc/defs.h + ..//cds/compiler/gcc/ia64/backoff.h + ..//cds/compiler/gcc/ia64/bitop.h + ..//cds/compiler/gcc/ia64/cxx11_atomic.h + ..//cds/compiler/gcc/ppc64/backoff.h + ..//cds/compiler/gcc/ppc64/bitop.h + ..//cds/compiler/gcc/sparc/backoff.h + ..//cds/compiler/gcc/sparc/bitop.h + ..//cds/compiler/gcc/sparc/cxx11_atomic.h + ..//cds/compiler/gcc/x86/backoff.h + ..//cds/compiler/gcc/x86/bitop.h + ..//cds/compiler/gcc/x86/cxx11_atomic.h + ..//cds/compiler/gcc/x86/cxx11_atomic32.h + ..//cds/compiler/icl/compiler_barriers.h + ..//cds/compiler/icl/defs.h + ..//cds/compiler/vc/amd64/backoff.h + ..//cds/compiler/vc/amd64/bitop.h + ..//cds/compiler/vc/amd64/cxx11_atomic.h + ..//cds/compiler/vc/compiler_barriers.h + ..//cds/compiler/vc/defs.h + ..//cds/compiler/vc/x86/backoff.h + ..//cds/compiler/vc/x86/bitop.h + ..//cds/compiler/vc/x86/cxx11_atomic.h + ..//cds/container/details/base.h + ..//cds/container/vyukov_mpmc_cycle_queue.h + ..//cds/details/aligned_type.h + ..//cds/details/allocator.h + ..//cds/details/bitop_generic.h + ..//cds/details/bounded_container.h + ..//cds/details/defs.h + ..//cds/details/is_aligned.h + ..//cds/details/lib.h + ..//cds/details/marked_ptr.h + 
..//cds/details/static_functor.h + ..//cds/details/throw_exception.h + ..//cds/gc/details/hp_common.h + ..//cds/gc/details/retired_ptr.h + ..//cds/gc/hp.h + ..//cds/init.h + ..//cds/intrusive/details/base.h + ..//cds/intrusive/details/node_traits.h + ..//cds/intrusive/options.h + ..//cds/opt/buffer.h + ..//cds/opt/options.h + ..//cds/opt/value_cleaner.h + ..//cds/os/aix/alloc_aligned.h + ..//cds/os/aix/topology.h + ..//cds/os/alloc_aligned.h + ..//cds/os/details/fake_topology.h + ..//cds/os/free_bsd/alloc_aligned.h + ..//cds/os/free_bsd/topology.h + ..//cds/os/hpux/alloc_aligned.h + ..//cds/os/hpux/topology.h + ..//cds/os/libc/alloc_aligned.h + ..//cds/os/linux/alloc_aligned.h + ..//cds/os/linux/topology.h + ..//cds/os/osx/topology.h + ..//cds/os/posix/alloc_aligned.h + ..//cds/os/posix/fake_topology.h + ..//cds/os/posix/thread.h + ..//cds/os/sunos/alloc_aligned.h + ..//cds/os/sunos/topology.h + ..//cds/os/thread.h + ..//cds/os/topology.h + ..//cds/os/win/alloc_aligned.h + ..//cds/os/win/thread.h + ..//cds/os/win/topology.h + ..//cds/threading/details/_common.h + ..//cds/threading/details/auto_detect.h + ..//cds/threading/details/cxx11.h + ..//cds/threading/details/cxx11_manager.h + ..//cds/threading/details/gcc.h + ..//cds/threading/details/gcc_manager.h + ..//cds/threading/details/msvc.h + ..//cds/threading/details/msvc_manager.h + ..//cds/threading/details/pthread.h + ..//cds/threading/details/pthread_manager.h + ..//cds/threading/details/wintls.h + ..//cds/threading/details/wintls_manager.h + ..//cds/threading/model.h + ..//cds/urcu/details/base.h + ..//cds/urcu/details/gp.h + ..//cds/urcu/details/gp_decl.h + ..//cds/urcu/details/gpb.h + ..//cds/urcu/details/sh_decl.h + ..//cds/urcu/general_buffered.h + ..//cds/user_setup/allocator.h + ..//cds/user_setup/cache_line.h + ..//cds/user_setup/threading.h + ..//cds/version.h + /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/dllmain.cpp 
+CMakeFiles/cds-s.dir/src/hp.cpp.o + ..//cds/algo/atomic.h + ..//cds/algo/backoff_strategy.h + ..//cds/algo/base.h + ..//cds/algo/bitop.h + ..//cds/algo/elimination_tls.h + ..//cds/algo/int_algo.h + ..//cds/compiler/backoff.h + ..//cds/compiler/bitop.h + ..//cds/compiler/clang/defs.h + ..//cds/compiler/cxx11_atomic.h + ..//cds/compiler/defs.h + ..//cds/compiler/feature_tsan.h + ..//cds/compiler/gcc/amd64/backoff.h + ..//cds/compiler/gcc/amd64/bitop.h + ..//cds/compiler/gcc/amd64/cxx11_atomic.h + ..//cds/compiler/gcc/arm7/backoff.h + ..//cds/compiler/gcc/arm8/backoff.h + ..//cds/compiler/gcc/compiler_barriers.h + ..//cds/compiler/gcc/compiler_macro.h + ..//cds/compiler/gcc/defs.h + ..//cds/compiler/gcc/ia64/backoff.h + ..//cds/compiler/gcc/ia64/bitop.h + ..//cds/compiler/gcc/ia64/cxx11_atomic.h + ..//cds/compiler/gcc/ppc64/backoff.h + ..//cds/compiler/gcc/ppc64/bitop.h + ..//cds/compiler/gcc/sparc/backoff.h + ..//cds/compiler/gcc/sparc/bitop.h + ..//cds/compiler/gcc/sparc/cxx11_atomic.h + ..//cds/compiler/gcc/x86/backoff.h + ..//cds/compiler/gcc/x86/bitop.h + ..//cds/compiler/gcc/x86/cxx11_atomic.h + ..//cds/compiler/gcc/x86/cxx11_atomic32.h + ..//cds/compiler/icl/compiler_barriers.h + ..//cds/compiler/icl/defs.h + ..//cds/compiler/vc/amd64/backoff.h + ..//cds/compiler/vc/amd64/bitop.h + ..//cds/compiler/vc/amd64/cxx11_atomic.h + ..//cds/compiler/vc/compiler_barriers.h + ..//cds/compiler/vc/defs.h + ..//cds/compiler/vc/x86/backoff.h + ..//cds/compiler/vc/x86/bitop.h + ..//cds/compiler/vc/x86/cxx11_atomic.h + ..//cds/container/details/base.h + ..//cds/container/vyukov_mpmc_cycle_queue.h + ..//cds/details/aligned_type.h + ..//cds/details/allocator.h + ..//cds/details/bitop_generic.h + ..//cds/details/bounded_container.h + ..//cds/details/defs.h + ..//cds/details/is_aligned.h + ..//cds/details/lib.h + ..//cds/details/marked_ptr.h + ..//cds/details/static_functor.h + ..//cds/details/throw_exception.h + ..//cds/gc/details/hp_common.h + ..//cds/gc/details/retired_ptr.h + 
..//cds/gc/hp.h + ..//cds/init.h + ..//cds/intrusive/details/base.h + ..//cds/intrusive/details/node_traits.h + ..//cds/intrusive/options.h + ..//cds/opt/buffer.h + ..//cds/opt/options.h + ..//cds/opt/value_cleaner.h + ..//cds/os/aix/alloc_aligned.h + ..//cds/os/aix/topology.h + ..//cds/os/alloc_aligned.h + ..//cds/os/details/fake_topology.h + ..//cds/os/free_bsd/alloc_aligned.h + ..//cds/os/free_bsd/topology.h + ..//cds/os/hpux/alloc_aligned.h + ..//cds/os/hpux/topology.h + ..//cds/os/libc/alloc_aligned.h + ..//cds/os/linux/alloc_aligned.h + ..//cds/os/linux/topology.h + ..//cds/os/osx/topology.h + ..//cds/os/posix/alloc_aligned.h + ..//cds/os/posix/fake_topology.h + ..//cds/os/posix/thread.h + ..//cds/os/sunos/alloc_aligned.h + ..//cds/os/sunos/topology.h + ..//cds/os/thread.h + ..//cds/os/topology.h + ..//cds/os/win/alloc_aligned.h + ..//cds/os/win/thread.h + ..//cds/os/win/topology.h + ..//cds/threading/details/_common.h + ..//cds/threading/details/auto_detect.h + ..//cds/threading/details/cxx11.h + ..//cds/threading/details/cxx11_manager.h + ..//cds/threading/details/gcc.h + ..//cds/threading/details/gcc_manager.h + ..//cds/threading/details/msvc.h + ..//cds/threading/details/msvc_manager.h + ..//cds/threading/details/pthread.h + ..//cds/threading/details/pthread_manager.h + ..//cds/threading/details/wintls.h + ..//cds/threading/details/wintls_manager.h + ..//cds/threading/model.h + ..//cds/urcu/details/base.h + ..//cds/urcu/details/gp.h + ..//cds/urcu/details/gp_decl.h + ..//cds/urcu/details/gpb.h + ..//cds/urcu/details/sh_decl.h + ..//cds/urcu/general_buffered.h + ..//cds/user_setup/allocator.h + ..//cds/user_setup/cache_line.h + ..//cds/user_setup/threading.h + ..//cds/version.h + /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/hp.cpp +CMakeFiles/cds-s.dir/src/init.cpp.o + ..//cds/algo/atomic.h + ..//cds/algo/backoff_strategy.h + ..//cds/algo/base.h + ..//cds/algo/bitop.h + ..//cds/algo/elimination_tls.h + 
..//cds/algo/int_algo.h + ..//cds/compiler/backoff.h + ..//cds/compiler/bitop.h + ..//cds/compiler/clang/defs.h + ..//cds/compiler/cxx11_atomic.h + ..//cds/compiler/defs.h + ..//cds/compiler/feature_tsan.h + ..//cds/compiler/gcc/amd64/backoff.h + ..//cds/compiler/gcc/amd64/bitop.h + ..//cds/compiler/gcc/amd64/cxx11_atomic.h + ..//cds/compiler/gcc/arm7/backoff.h + ..//cds/compiler/gcc/arm8/backoff.h + ..//cds/compiler/gcc/compiler_barriers.h + ..//cds/compiler/gcc/compiler_macro.h + ..//cds/compiler/gcc/defs.h + ..//cds/compiler/gcc/ia64/backoff.h + ..//cds/compiler/gcc/ia64/bitop.h + ..//cds/compiler/gcc/ia64/cxx11_atomic.h + ..//cds/compiler/gcc/ppc64/backoff.h + ..//cds/compiler/gcc/ppc64/bitop.h + ..//cds/compiler/gcc/sparc/backoff.h + ..//cds/compiler/gcc/sparc/bitop.h + ..//cds/compiler/gcc/sparc/cxx11_atomic.h + ..//cds/compiler/gcc/x86/backoff.h + ..//cds/compiler/gcc/x86/bitop.h + ..//cds/compiler/gcc/x86/cxx11_atomic.h + ..//cds/compiler/gcc/x86/cxx11_atomic32.h + ..//cds/compiler/icl/compiler_barriers.h + ..//cds/compiler/icl/defs.h + ..//cds/compiler/vc/amd64/backoff.h + ..//cds/compiler/vc/amd64/bitop.h + ..//cds/compiler/vc/amd64/cxx11_atomic.h + ..//cds/compiler/vc/compiler_barriers.h + ..//cds/compiler/vc/defs.h + ..//cds/compiler/vc/x86/backoff.h + ..//cds/compiler/vc/x86/bitop.h + ..//cds/compiler/vc/x86/cxx11_atomic.h + ..//cds/container/details/base.h + ..//cds/container/vyukov_mpmc_cycle_queue.h + ..//cds/details/aligned_type.h + ..//cds/details/allocator.h + ..//cds/details/bitop_generic.h + ..//cds/details/bounded_container.h + ..//cds/details/defs.h + ..//cds/details/is_aligned.h + ..//cds/details/lib.h + ..//cds/details/marked_ptr.h + ..//cds/details/static_functor.h + ..//cds/details/throw_exception.h + ..//cds/gc/details/hp_common.h + ..//cds/gc/details/retired_ptr.h + ..//cds/gc/hp.h + ..//cds/init.h + ..//cds/intrusive/details/base.h + ..//cds/intrusive/details/node_traits.h + ..//cds/intrusive/options.h + ..//cds/opt/buffer.h + 
..//cds/opt/options.h + ..//cds/opt/value_cleaner.h + ..//cds/os/aix/alloc_aligned.h + ..//cds/os/aix/topology.h + ..//cds/os/alloc_aligned.h + ..//cds/os/details/fake_topology.h + ..//cds/os/free_bsd/alloc_aligned.h + ..//cds/os/free_bsd/topology.h + ..//cds/os/hpux/alloc_aligned.h + ..//cds/os/hpux/topology.h + ..//cds/os/libc/alloc_aligned.h + ..//cds/os/linux/alloc_aligned.h + ..//cds/os/linux/topology.h + ..//cds/os/osx/topology.h + ..//cds/os/posix/alloc_aligned.h + ..//cds/os/posix/fake_topology.h + ..//cds/os/posix/thread.h + ..//cds/os/sunos/alloc_aligned.h + ..//cds/os/sunos/topology.h + ..//cds/os/thread.h + ..//cds/os/topology.h + ..//cds/os/win/alloc_aligned.h + ..//cds/os/win/thread.h + ..//cds/os/win/topology.h + ..//cds/threading/details/_common.h + ..//cds/threading/details/auto_detect.h + ..//cds/threading/details/cxx11.h + ..//cds/threading/details/cxx11_manager.h + ..//cds/threading/details/gcc.h + ..//cds/threading/details/gcc_manager.h + ..//cds/threading/details/msvc.h + ..//cds/threading/details/msvc_manager.h + ..//cds/threading/details/pthread.h + ..//cds/threading/details/pthread_manager.h + ..//cds/threading/details/wintls.h + ..//cds/threading/details/wintls_manager.h + ..//cds/threading/model.h + ..//cds/urcu/details/base.h + ..//cds/urcu/details/gp.h + ..//cds/urcu/details/gp_decl.h + ..//cds/urcu/details/gpb.h + ..//cds/urcu/details/sh_decl.h + ..//cds/urcu/general_buffered.h + ..//cds/user_setup/allocator.h + ..//cds/user_setup/cache_line.h + ..//cds/user_setup/threading.h + ..//cds/version.h + /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/init.cpp +CMakeFiles/cds-s.dir/src/thread_data.cpp.o + ..//cds/algo/atomic.h + ..//cds/algo/backoff_strategy.h + ..//cds/algo/base.h + ..//cds/algo/bitop.h + ..//cds/algo/elimination_tls.h + ..//cds/algo/int_algo.h + ..//cds/compiler/backoff.h + ..//cds/compiler/bitop.h + ..//cds/compiler/clang/defs.h + ..//cds/compiler/cxx11_atomic.h + 
..//cds/compiler/defs.h + ..//cds/compiler/feature_tsan.h + ..//cds/compiler/gcc/amd64/backoff.h + ..//cds/compiler/gcc/amd64/bitop.h + ..//cds/compiler/gcc/amd64/cxx11_atomic.h + ..//cds/compiler/gcc/arm7/backoff.h + ..//cds/compiler/gcc/arm8/backoff.h + ..//cds/compiler/gcc/compiler_barriers.h + ..//cds/compiler/gcc/compiler_macro.h + ..//cds/compiler/gcc/defs.h + ..//cds/compiler/gcc/ia64/backoff.h + ..//cds/compiler/gcc/ia64/bitop.h + ..//cds/compiler/gcc/ia64/cxx11_atomic.h + ..//cds/compiler/gcc/ppc64/backoff.h + ..//cds/compiler/gcc/ppc64/bitop.h + ..//cds/compiler/gcc/sparc/backoff.h + ..//cds/compiler/gcc/sparc/bitop.h + ..//cds/compiler/gcc/sparc/cxx11_atomic.h + ..//cds/compiler/gcc/x86/backoff.h + ..//cds/compiler/gcc/x86/bitop.h + ..//cds/compiler/gcc/x86/cxx11_atomic.h + ..//cds/compiler/gcc/x86/cxx11_atomic32.h + ..//cds/compiler/icl/compiler_barriers.h + ..//cds/compiler/icl/defs.h + ..//cds/compiler/vc/amd64/backoff.h + ..//cds/compiler/vc/amd64/bitop.h + ..//cds/compiler/vc/amd64/cxx11_atomic.h + ..//cds/compiler/vc/compiler_barriers.h + ..//cds/compiler/vc/defs.h + ..//cds/compiler/vc/x86/backoff.h + ..//cds/compiler/vc/x86/bitop.h + ..//cds/compiler/vc/x86/cxx11_atomic.h + ..//cds/container/details/base.h + ..//cds/container/vyukov_mpmc_cycle_queue.h + ..//cds/details/aligned_type.h + ..//cds/details/allocator.h + ..//cds/details/bitop_generic.h + ..//cds/details/bounded_container.h + ..//cds/details/defs.h + ..//cds/details/is_aligned.h + ..//cds/details/lib.h + ..//cds/details/marked_ptr.h + ..//cds/details/static_functor.h + ..//cds/details/throw_exception.h + ..//cds/gc/details/hp_common.h + ..//cds/gc/details/retired_ptr.h + ..//cds/gc/dhp.h + ..//cds/gc/hp.h + ..//cds/init.h + ..//cds/intrusive/details/base.h + ..//cds/intrusive/details/node_traits.h + ..//cds/intrusive/free_list.h + ..//cds/intrusive/free_list_selector.h + ..//cds/intrusive/free_list_tagged.h + ..//cds/intrusive/options.h + ..//cds/opt/buffer.h + ..//cds/opt/options.h + 
..//cds/opt/value_cleaner.h + ..//cds/os/aix/alloc_aligned.h + ..//cds/os/aix/topology.h + ..//cds/os/alloc_aligned.h + ..//cds/os/details/fake_topology.h + ..//cds/os/free_bsd/alloc_aligned.h + ..//cds/os/free_bsd/topology.h + ..//cds/os/hpux/alloc_aligned.h + ..//cds/os/hpux/topology.h + ..//cds/os/libc/alloc_aligned.h + ..//cds/os/linux/alloc_aligned.h + ..//cds/os/linux/topology.h + ..//cds/os/osx/topology.h + ..//cds/os/posix/alloc_aligned.h + ..//cds/os/posix/fake_topology.h + ..//cds/os/posix/thread.h + ..//cds/os/sunos/alloc_aligned.h + ..//cds/os/sunos/topology.h + ..//cds/os/thread.h + ..//cds/os/topology.h + ..//cds/os/win/alloc_aligned.h + ..//cds/os/win/thread.h + ..//cds/os/win/topology.h + ..//cds/threading/details/_common.h + ..//cds/threading/details/auto_detect.h + ..//cds/threading/details/cxx11.h + ..//cds/threading/details/cxx11_manager.h + ..//cds/threading/details/gcc.h + ..//cds/threading/details/gcc_manager.h + ..//cds/threading/details/msvc.h + ..//cds/threading/details/msvc_manager.h + ..//cds/threading/details/pthread.h + ..//cds/threading/details/pthread_manager.h + ..//cds/threading/details/wintls.h + ..//cds/threading/details/wintls_manager.h + ..//cds/threading/model.h + ..//cds/urcu/details/base.h + ..//cds/urcu/details/gp.h + ..//cds/urcu/details/gp_decl.h + ..//cds/urcu/details/gpb.h + ..//cds/urcu/details/sh_decl.h + ..//cds/urcu/general_buffered.h + ..//cds/user_setup/allocator.h + ..//cds/user_setup/cache_line.h + ..//cds/user_setup/threading.h + ..//cds/version.h + /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/thread_data.cpp +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o + ..//cds/algo/atomic.h + ..//cds/algo/backoff_strategy.h + ..//cds/algo/base.h + ..//cds/algo/bitop.h + ..//cds/algo/elimination_tls.h + ..//cds/algo/int_algo.h + ..//cds/compiler/backoff.h + ..//cds/compiler/bitop.h + ..//cds/compiler/clang/defs.h + ..//cds/compiler/cxx11_atomic.h + 
..//cds/compiler/defs.h + ..//cds/compiler/feature_tsan.h + ..//cds/compiler/gcc/amd64/backoff.h + ..//cds/compiler/gcc/amd64/bitop.h + ..//cds/compiler/gcc/amd64/cxx11_atomic.h + ..//cds/compiler/gcc/arm7/backoff.h + ..//cds/compiler/gcc/arm8/backoff.h + ..//cds/compiler/gcc/compiler_barriers.h + ..//cds/compiler/gcc/compiler_macro.h + ..//cds/compiler/gcc/defs.h + ..//cds/compiler/gcc/ia64/backoff.h + ..//cds/compiler/gcc/ia64/bitop.h + ..//cds/compiler/gcc/ia64/cxx11_atomic.h + ..//cds/compiler/gcc/ppc64/backoff.h + ..//cds/compiler/gcc/ppc64/bitop.h + ..//cds/compiler/gcc/sparc/backoff.h + ..//cds/compiler/gcc/sparc/bitop.h + ..//cds/compiler/gcc/sparc/cxx11_atomic.h + ..//cds/compiler/gcc/x86/backoff.h + ..//cds/compiler/gcc/x86/bitop.h + ..//cds/compiler/gcc/x86/cxx11_atomic.h + ..//cds/compiler/gcc/x86/cxx11_atomic32.h + ..//cds/compiler/icl/compiler_barriers.h + ..//cds/compiler/icl/defs.h + ..//cds/compiler/vc/amd64/backoff.h + ..//cds/compiler/vc/amd64/bitop.h + ..//cds/compiler/vc/amd64/cxx11_atomic.h + ..//cds/compiler/vc/compiler_barriers.h + ..//cds/compiler/vc/defs.h + ..//cds/compiler/vc/x86/backoff.h + ..//cds/compiler/vc/x86/bitop.h + ..//cds/compiler/vc/x86/cxx11_atomic.h + ..//cds/container/details/base.h + ..//cds/container/vyukov_mpmc_cycle_queue.h + ..//cds/details/aligned_type.h + ..//cds/details/allocator.h + ..//cds/details/bitop_generic.h + ..//cds/details/bounded_container.h + ..//cds/details/defs.h + ..//cds/details/is_aligned.h + ..//cds/details/lib.h + ..//cds/details/marked_ptr.h + ..//cds/details/static_functor.h + ..//cds/details/throw_exception.h + ..//cds/gc/details/hp_common.h + ..//cds/gc/details/retired_ptr.h + ..//cds/gc/hp.h + ..//cds/init.h + ..//cds/intrusive/details/base.h + ..//cds/intrusive/details/node_traits.h + ..//cds/intrusive/options.h + ..//cds/opt/buffer.h + ..//cds/opt/options.h + ..//cds/opt/value_cleaner.h + ..//cds/os/aix/alloc_aligned.h + ..//cds/os/aix/topology.h + ..//cds/os/alloc_aligned.h + 
..//cds/os/details/fake_topology.h + ..//cds/os/free_bsd/alloc_aligned.h + ..//cds/os/free_bsd/topology.h + ..//cds/os/hpux/alloc_aligned.h + ..//cds/os/hpux/topology.h + ..//cds/os/libc/alloc_aligned.h + ..//cds/os/linux/alloc_aligned.h + ..//cds/os/linux/topology.h + ..//cds/os/osx/topology.h + ..//cds/os/posix/alloc_aligned.h + ..//cds/os/posix/fake_topology.h + ..//cds/os/posix/thread.h + ..//cds/os/sunos/alloc_aligned.h + ..//cds/os/sunos/topology.h + ..//cds/os/thread.h + ..//cds/os/topology.h + ..//cds/os/win/alloc_aligned.h + ..//cds/os/win/thread.h + ..//cds/os/win/topology.h + ..//cds/threading/details/_common.h + ..//cds/threading/details/auto_detect.h + ..//cds/threading/details/cxx11.h + ..//cds/threading/details/cxx11_manager.h + ..//cds/threading/details/gcc.h + ..//cds/threading/details/gcc_manager.h + ..//cds/threading/details/msvc.h + ..//cds/threading/details/msvc_manager.h + ..//cds/threading/details/pthread.h + ..//cds/threading/details/pthread_manager.h + ..//cds/threading/details/wintls.h + ..//cds/threading/details/wintls_manager.h + ..//cds/threading/model.h + ..//cds/urcu/details/base.h + ..//cds/urcu/details/gp.h + ..//cds/urcu/details/gp_decl.h + ..//cds/urcu/details/gpb.h + ..//cds/urcu/details/sh_decl.h + ..//cds/urcu/general_buffered.h + ..//cds/user_setup/allocator.h + ..//cds/user_setup/cache_line.h + ..//cds/user_setup/threading.h + ..//cds/version.h + /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_hpux.cpp +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o + ..//cds/algo/atomic.h + ..//cds/algo/backoff_strategy.h + ..//cds/algo/base.h + ..//cds/algo/bitop.h + ..//cds/algo/elimination_tls.h + ..//cds/algo/int_algo.h + ..//cds/compiler/backoff.h + ..//cds/compiler/bitop.h + ..//cds/compiler/clang/defs.h + ..//cds/compiler/cxx11_atomic.h + ..//cds/compiler/defs.h + ..//cds/compiler/feature_tsan.h + ..//cds/compiler/gcc/amd64/backoff.h + ..//cds/compiler/gcc/amd64/bitop.h + 
..//cds/compiler/gcc/amd64/cxx11_atomic.h + ..//cds/compiler/gcc/arm7/backoff.h + ..//cds/compiler/gcc/arm8/backoff.h + ..//cds/compiler/gcc/compiler_barriers.h + ..//cds/compiler/gcc/compiler_macro.h + ..//cds/compiler/gcc/defs.h + ..//cds/compiler/gcc/ia64/backoff.h + ..//cds/compiler/gcc/ia64/bitop.h + ..//cds/compiler/gcc/ia64/cxx11_atomic.h + ..//cds/compiler/gcc/ppc64/backoff.h + ..//cds/compiler/gcc/ppc64/bitop.h + ..//cds/compiler/gcc/sparc/backoff.h + ..//cds/compiler/gcc/sparc/bitop.h + ..//cds/compiler/gcc/sparc/cxx11_atomic.h + ..//cds/compiler/gcc/x86/backoff.h + ..//cds/compiler/gcc/x86/bitop.h + ..//cds/compiler/gcc/x86/cxx11_atomic.h + ..//cds/compiler/gcc/x86/cxx11_atomic32.h + ..//cds/compiler/icl/compiler_barriers.h + ..//cds/compiler/icl/defs.h + ..//cds/compiler/vc/amd64/backoff.h + ..//cds/compiler/vc/amd64/bitop.h + ..//cds/compiler/vc/amd64/cxx11_atomic.h + ..//cds/compiler/vc/compiler_barriers.h + ..//cds/compiler/vc/defs.h + ..//cds/compiler/vc/x86/backoff.h + ..//cds/compiler/vc/x86/bitop.h + ..//cds/compiler/vc/x86/cxx11_atomic.h + ..//cds/container/details/base.h + ..//cds/container/vyukov_mpmc_cycle_queue.h + ..//cds/details/aligned_type.h + ..//cds/details/allocator.h + ..//cds/details/bitop_generic.h + ..//cds/details/bounded_container.h + ..//cds/details/defs.h + ..//cds/details/is_aligned.h + ..//cds/details/lib.h + ..//cds/details/marked_ptr.h + ..//cds/details/static_functor.h + ..//cds/details/throw_exception.h + ..//cds/gc/details/hp_common.h + ..//cds/gc/details/retired_ptr.h + ..//cds/gc/hp.h + ..//cds/init.h + ..//cds/intrusive/details/base.h + ..//cds/intrusive/details/node_traits.h + ..//cds/intrusive/options.h + ..//cds/opt/buffer.h + ..//cds/opt/options.h + ..//cds/opt/value_cleaner.h + ..//cds/os/aix/alloc_aligned.h + ..//cds/os/aix/topology.h + ..//cds/os/alloc_aligned.h + ..//cds/os/details/fake_topology.h + ..//cds/os/free_bsd/alloc_aligned.h + ..//cds/os/free_bsd/topology.h + ..//cds/os/hpux/alloc_aligned.h + 
..//cds/os/hpux/topology.h + ..//cds/os/libc/alloc_aligned.h + ..//cds/os/linux/alloc_aligned.h + ..//cds/os/linux/topology.h + ..//cds/os/osx/topology.h + ..//cds/os/posix/alloc_aligned.h + ..//cds/os/posix/fake_topology.h + ..//cds/os/posix/thread.h + ..//cds/os/sunos/alloc_aligned.h + ..//cds/os/sunos/topology.h + ..//cds/os/thread.h + ..//cds/os/topology.h + ..//cds/os/win/alloc_aligned.h + ..//cds/os/win/thread.h + ..//cds/os/win/topology.h + ..//cds/threading/details/_common.h + ..//cds/threading/details/auto_detect.h + ..//cds/threading/details/cxx11.h + ..//cds/threading/details/cxx11_manager.h + ..//cds/threading/details/gcc.h + ..//cds/threading/details/gcc_manager.h + ..//cds/threading/details/msvc.h + ..//cds/threading/details/msvc_manager.h + ..//cds/threading/details/pthread.h + ..//cds/threading/details/pthread_manager.h + ..//cds/threading/details/wintls.h + ..//cds/threading/details/wintls_manager.h + ..//cds/threading/model.h + ..//cds/urcu/details/base.h + ..//cds/urcu/details/gp.h + ..//cds/urcu/details/gp_decl.h + ..//cds/urcu/details/gpb.h + ..//cds/urcu/details/sh_decl.h + ..//cds/urcu/general_buffered.h + ..//cds/user_setup/allocator.h + ..//cds/user_setup/cache_line.h + ..//cds/user_setup/threading.h + ..//cds/version.h + /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_linux.cpp +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o + ..//cds/algo/atomic.h + ..//cds/algo/backoff_strategy.h + ..//cds/algo/base.h + ..//cds/algo/bitop.h + ..//cds/algo/elimination_tls.h + ..//cds/algo/int_algo.h + ..//cds/compiler/backoff.h + ..//cds/compiler/bitop.h + ..//cds/compiler/clang/defs.h + ..//cds/compiler/cxx11_atomic.h + ..//cds/compiler/defs.h + ..//cds/compiler/feature_tsan.h + ..//cds/compiler/gcc/amd64/backoff.h + ..//cds/compiler/gcc/amd64/bitop.h + ..//cds/compiler/gcc/amd64/cxx11_atomic.h + ..//cds/compiler/gcc/arm7/backoff.h + ..//cds/compiler/gcc/arm8/backoff.h + 
..//cds/compiler/gcc/compiler_barriers.h + ..//cds/compiler/gcc/compiler_macro.h + ..//cds/compiler/gcc/defs.h + ..//cds/compiler/gcc/ia64/backoff.h + ..//cds/compiler/gcc/ia64/bitop.h + ..//cds/compiler/gcc/ia64/cxx11_atomic.h + ..//cds/compiler/gcc/ppc64/backoff.h + ..//cds/compiler/gcc/ppc64/bitop.h + ..//cds/compiler/gcc/sparc/backoff.h + ..//cds/compiler/gcc/sparc/bitop.h + ..//cds/compiler/gcc/sparc/cxx11_atomic.h + ..//cds/compiler/gcc/x86/backoff.h + ..//cds/compiler/gcc/x86/bitop.h + ..//cds/compiler/gcc/x86/cxx11_atomic.h + ..//cds/compiler/gcc/x86/cxx11_atomic32.h + ..//cds/compiler/icl/compiler_barriers.h + ..//cds/compiler/icl/defs.h + ..//cds/compiler/vc/amd64/backoff.h + ..//cds/compiler/vc/amd64/bitop.h + ..//cds/compiler/vc/amd64/cxx11_atomic.h + ..//cds/compiler/vc/compiler_barriers.h + ..//cds/compiler/vc/defs.h + ..//cds/compiler/vc/x86/backoff.h + ..//cds/compiler/vc/x86/bitop.h + ..//cds/compiler/vc/x86/cxx11_atomic.h + ..//cds/container/details/base.h + ..//cds/container/vyukov_mpmc_cycle_queue.h + ..//cds/details/aligned_type.h + ..//cds/details/allocator.h + ..//cds/details/bitop_generic.h + ..//cds/details/bounded_container.h + ..//cds/details/defs.h + ..//cds/details/is_aligned.h + ..//cds/details/lib.h + ..//cds/details/marked_ptr.h + ..//cds/details/static_functor.h + ..//cds/details/throw_exception.h + ..//cds/gc/details/hp_common.h + ..//cds/gc/details/retired_ptr.h + ..//cds/gc/hp.h + ..//cds/init.h + ..//cds/intrusive/details/base.h + ..//cds/intrusive/details/node_traits.h + ..//cds/intrusive/options.h + ..//cds/opt/buffer.h + ..//cds/opt/options.h + ..//cds/opt/value_cleaner.h + ..//cds/os/aix/alloc_aligned.h + ..//cds/os/aix/topology.h + ..//cds/os/alloc_aligned.h + ..//cds/os/details/fake_topology.h + ..//cds/os/free_bsd/alloc_aligned.h + ..//cds/os/free_bsd/topology.h + ..//cds/os/hpux/alloc_aligned.h + ..//cds/os/hpux/topology.h + ..//cds/os/libc/alloc_aligned.h + ..//cds/os/linux/alloc_aligned.h + ..//cds/os/linux/topology.h 
+ ..//cds/os/osx/topology.h + ..//cds/os/posix/alloc_aligned.h + ..//cds/os/posix/fake_topology.h + ..//cds/os/posix/thread.h + ..//cds/os/sunos/alloc_aligned.h + ..//cds/os/sunos/topology.h + ..//cds/os/thread.h + ..//cds/os/topology.h + ..//cds/os/win/alloc_aligned.h + ..//cds/os/win/thread.h + ..//cds/os/win/topology.h + ..//cds/threading/details/_common.h + ..//cds/threading/details/auto_detect.h + ..//cds/threading/details/cxx11.h + ..//cds/threading/details/cxx11_manager.h + ..//cds/threading/details/gcc.h + ..//cds/threading/details/gcc_manager.h + ..//cds/threading/details/msvc.h + ..//cds/threading/details/msvc_manager.h + ..//cds/threading/details/pthread.h + ..//cds/threading/details/pthread_manager.h + ..//cds/threading/details/wintls.h + ..//cds/threading/details/wintls_manager.h + ..//cds/threading/model.h + ..//cds/urcu/details/base.h + ..//cds/urcu/details/gp.h + ..//cds/urcu/details/gp_decl.h + ..//cds/urcu/details/gpb.h + ..//cds/urcu/details/sh_decl.h + ..//cds/urcu/general_buffered.h + ..//cds/user_setup/allocator.h + ..//cds/user_setup/cache_line.h + ..//cds/user_setup/threading.h + ..//cds/version.h + /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_osx.cpp +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o + ..//cds/algo/atomic.h + ..//cds/algo/backoff_strategy.h + ..//cds/algo/base.h + ..//cds/algo/bitop.h + ..//cds/algo/elimination_tls.h + ..//cds/algo/int_algo.h + ..//cds/compiler/backoff.h + ..//cds/compiler/bitop.h + ..//cds/compiler/clang/defs.h + ..//cds/compiler/cxx11_atomic.h + ..//cds/compiler/defs.h + ..//cds/compiler/feature_tsan.h + ..//cds/compiler/gcc/amd64/backoff.h + ..//cds/compiler/gcc/amd64/bitop.h + ..//cds/compiler/gcc/amd64/cxx11_atomic.h + ..//cds/compiler/gcc/arm7/backoff.h + ..//cds/compiler/gcc/arm8/backoff.h + ..//cds/compiler/gcc/compiler_barriers.h + ..//cds/compiler/gcc/compiler_macro.h + ..//cds/compiler/gcc/defs.h + ..//cds/compiler/gcc/ia64/backoff.h + 
..//cds/compiler/gcc/ia64/bitop.h + ..//cds/compiler/gcc/ia64/cxx11_atomic.h + ..//cds/compiler/gcc/ppc64/backoff.h + ..//cds/compiler/gcc/ppc64/bitop.h + ..//cds/compiler/gcc/sparc/backoff.h + ..//cds/compiler/gcc/sparc/bitop.h + ..//cds/compiler/gcc/sparc/cxx11_atomic.h + ..//cds/compiler/gcc/x86/backoff.h + ..//cds/compiler/gcc/x86/bitop.h + ..//cds/compiler/gcc/x86/cxx11_atomic.h + ..//cds/compiler/gcc/x86/cxx11_atomic32.h + ..//cds/compiler/icl/compiler_barriers.h + ..//cds/compiler/icl/defs.h + ..//cds/compiler/vc/amd64/backoff.h + ..//cds/compiler/vc/amd64/bitop.h + ..//cds/compiler/vc/amd64/cxx11_atomic.h + ..//cds/compiler/vc/compiler_barriers.h + ..//cds/compiler/vc/defs.h + ..//cds/compiler/vc/x86/backoff.h + ..//cds/compiler/vc/x86/bitop.h + ..//cds/compiler/vc/x86/cxx11_atomic.h + ..//cds/container/details/base.h + ..//cds/container/vyukov_mpmc_cycle_queue.h + ..//cds/details/aligned_type.h + ..//cds/details/allocator.h + ..//cds/details/bitop_generic.h + ..//cds/details/bounded_container.h + ..//cds/details/defs.h + ..//cds/details/is_aligned.h + ..//cds/details/lib.h + ..//cds/details/marked_ptr.h + ..//cds/details/static_functor.h + ..//cds/details/throw_exception.h + ..//cds/gc/details/hp_common.h + ..//cds/gc/details/retired_ptr.h + ..//cds/gc/hp.h + ..//cds/init.h + ..//cds/intrusive/details/base.h + ..//cds/intrusive/details/node_traits.h + ..//cds/intrusive/options.h + ..//cds/opt/buffer.h + ..//cds/opt/options.h + ..//cds/opt/value_cleaner.h + ..//cds/os/aix/alloc_aligned.h + ..//cds/os/aix/topology.h + ..//cds/os/alloc_aligned.h + ..//cds/os/details/fake_topology.h + ..//cds/os/free_bsd/alloc_aligned.h + ..//cds/os/free_bsd/topology.h + ..//cds/os/hpux/alloc_aligned.h + ..//cds/os/hpux/topology.h + ..//cds/os/libc/alloc_aligned.h + ..//cds/os/linux/alloc_aligned.h + ..//cds/os/linux/topology.h + ..//cds/os/osx/topology.h + ..//cds/os/posix/alloc_aligned.h + ..//cds/os/posix/fake_topology.h + ..//cds/os/posix/thread.h + 
..//cds/os/sunos/alloc_aligned.h + ..//cds/os/sunos/topology.h + ..//cds/os/thread.h + ..//cds/os/topology.h + ..//cds/os/win/alloc_aligned.h + ..//cds/os/win/thread.h + ..//cds/os/win/topology.h + ..//cds/threading/details/_common.h + ..//cds/threading/details/auto_detect.h + ..//cds/threading/details/cxx11.h + ..//cds/threading/details/cxx11_manager.h + ..//cds/threading/details/gcc.h + ..//cds/threading/details/gcc_manager.h + ..//cds/threading/details/msvc.h + ..//cds/threading/details/msvc_manager.h + ..//cds/threading/details/pthread.h + ..//cds/threading/details/pthread_manager.h + ..//cds/threading/details/wintls.h + ..//cds/threading/details/wintls_manager.h + ..//cds/threading/model.h + ..//cds/urcu/details/base.h + ..//cds/urcu/details/gp.h + ..//cds/urcu/details/gp_decl.h + ..//cds/urcu/details/gpb.h + ..//cds/urcu/details/sh_decl.h + ..//cds/urcu/general_buffered.h + ..//cds/user_setup/allocator.h + ..//cds/user_setup/cache_line.h + ..//cds/user_setup/threading.h + ..//cds/version.h + /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/urcu_gp.cpp +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o + ..//cds/algo/atomic.h + ..//cds/algo/backoff_strategy.h + ..//cds/algo/base.h + ..//cds/algo/bitop.h + ..//cds/algo/elimination_tls.h + ..//cds/algo/int_algo.h + ..//cds/compiler/backoff.h + ..//cds/compiler/bitop.h + ..//cds/compiler/clang/defs.h + ..//cds/compiler/cxx11_atomic.h + ..//cds/compiler/defs.h + ..//cds/compiler/feature_tsan.h + ..//cds/compiler/gcc/amd64/backoff.h + ..//cds/compiler/gcc/amd64/bitop.h + ..//cds/compiler/gcc/amd64/cxx11_atomic.h + ..//cds/compiler/gcc/arm7/backoff.h + ..//cds/compiler/gcc/arm8/backoff.h + ..//cds/compiler/gcc/compiler_barriers.h + ..//cds/compiler/gcc/compiler_macro.h + ..//cds/compiler/gcc/defs.h + ..//cds/compiler/gcc/ia64/backoff.h + ..//cds/compiler/gcc/ia64/bitop.h + ..//cds/compiler/gcc/ia64/cxx11_atomic.h + ..//cds/compiler/gcc/ppc64/backoff.h + 
..//cds/compiler/gcc/ppc64/bitop.h + ..//cds/compiler/gcc/sparc/backoff.h + ..//cds/compiler/gcc/sparc/bitop.h + ..//cds/compiler/gcc/sparc/cxx11_atomic.h + ..//cds/compiler/gcc/x86/backoff.h + ..//cds/compiler/gcc/x86/bitop.h + ..//cds/compiler/gcc/x86/cxx11_atomic.h + ..//cds/compiler/gcc/x86/cxx11_atomic32.h + ..//cds/compiler/icl/compiler_barriers.h + ..//cds/compiler/icl/defs.h + ..//cds/compiler/vc/amd64/backoff.h + ..//cds/compiler/vc/amd64/bitop.h + ..//cds/compiler/vc/amd64/cxx11_atomic.h + ..//cds/compiler/vc/compiler_barriers.h + ..//cds/compiler/vc/defs.h + ..//cds/compiler/vc/x86/backoff.h + ..//cds/compiler/vc/x86/bitop.h + ..//cds/compiler/vc/x86/cxx11_atomic.h + ..//cds/container/details/base.h + ..//cds/container/vyukov_mpmc_cycle_queue.h + ..//cds/details/aligned_type.h + ..//cds/details/allocator.h + ..//cds/details/bitop_generic.h + ..//cds/details/bounded_container.h + ..//cds/details/defs.h + ..//cds/details/is_aligned.h + ..//cds/details/lib.h + ..//cds/details/marked_ptr.h + ..//cds/details/static_functor.h + ..//cds/details/throw_exception.h + ..//cds/gc/details/hp_common.h + ..//cds/gc/details/retired_ptr.h + ..//cds/gc/hp.h + ..//cds/init.h + ..//cds/intrusive/details/base.h + ..//cds/intrusive/details/node_traits.h + ..//cds/intrusive/options.h + ..//cds/opt/buffer.h + ..//cds/opt/options.h + ..//cds/opt/value_cleaner.h + ..//cds/os/aix/alloc_aligned.h + ..//cds/os/aix/topology.h + ..//cds/os/alloc_aligned.h + ..//cds/os/details/fake_topology.h + ..//cds/os/free_bsd/alloc_aligned.h + ..//cds/os/free_bsd/topology.h + ..//cds/os/hpux/alloc_aligned.h + ..//cds/os/hpux/topology.h + ..//cds/os/libc/alloc_aligned.h + ..//cds/os/linux/alloc_aligned.h + ..//cds/os/linux/topology.h + ..//cds/os/osx/topology.h + ..//cds/os/posix/alloc_aligned.h + ..//cds/os/posix/fake_topology.h + ..//cds/os/posix/thread.h + ..//cds/os/sunos/alloc_aligned.h + ..//cds/os/sunos/topology.h + ..//cds/os/thread.h + ..//cds/os/topology.h + ..//cds/os/win/alloc_aligned.h 
+ ..//cds/os/win/thread.h + ..//cds/os/win/topology.h + ..//cds/threading/details/_common.h + ..//cds/threading/details/auto_detect.h + ..//cds/threading/details/cxx11.h + ..//cds/threading/details/cxx11_manager.h + ..//cds/threading/details/gcc.h + ..//cds/threading/details/gcc_manager.h + ..//cds/threading/details/msvc.h + ..//cds/threading/details/msvc_manager.h + ..//cds/threading/details/pthread.h + ..//cds/threading/details/pthread_manager.h + ..//cds/threading/details/wintls.h + ..//cds/threading/details/wintls_manager.h + ..//cds/threading/model.h + ..//cds/urcu/details/base.h + ..//cds/urcu/details/gp.h + ..//cds/urcu/details/gp_decl.h + ..//cds/urcu/details/gpb.h + ..//cds/urcu/details/sh.h + ..//cds/urcu/details/sh_decl.h + ..//cds/urcu/general_buffered.h + ..//cds/user_setup/allocator.h + ..//cds/user_setup/cache_line.h + ..//cds/user_setup/threading.h + ..//cds/version.h + /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/urcu_sh.cpp diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/depend.make b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/depend.make new file mode 100644 index 0000000..a081f47 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/depend.make @@ -0,0 +1,1122 @@ +# CMAKE generated file: DO NOT EDIT! 
+# Generated by "Unix Makefiles" Generator, CMake Version 3.5 + +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/algo/atomic.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/algo/backoff_strategy.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/algo/base.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/algo/bitop.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/algo/elimination_tls.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/algo/int_algo.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/backoff.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/bitop.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/clang/defs.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/defs.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/feature_tsan.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/gcc/amd64/backoff.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/gcc/amd64/bitop.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/gcc/amd64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/gcc/arm7/backoff.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/gcc/arm8/backoff.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/gcc/compiler_barriers.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/gcc/compiler_macro.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/gcc/defs.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/gcc/ia64/backoff.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/gcc/ia64/bitop.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/gcc/ia64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/gcc/ppc64/backoff.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/gcc/ppc64/bitop.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/gcc/sparc/backoff.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/gcc/sparc/bitop.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/gcc/sparc/cxx11_atomic.h 
+CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/gcc/x86/backoff.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/gcc/x86/bitop.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic32.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/icl/compiler_barriers.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/icl/defs.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/vc/amd64/backoff.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/vc/amd64/bitop.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/vc/amd64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/vc/compiler_barriers.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/vc/defs.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/vc/x86/backoff.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/vc/x86/bitop.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/compiler/vc/x86/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/container/details/base.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/container/vyukov_mpmc_cycle_queue.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/details/aligned_type.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/details/allocator.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/details/bitop_generic.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/details/bounded_container.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/details/defs.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/details/is_aligned.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/details/lib.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/details/marked_ptr.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/details/static_functor.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/details/throw_exception.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/gc/details/hp_common.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/gc/details/retired_ptr.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: 
../cds/gc/dhp.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/gc/hp.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/init.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/intrusive/details/base.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/intrusive/details/node_traits.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/intrusive/free_list.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/intrusive/free_list_selector.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/intrusive/free_list_tagged.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/intrusive/options.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/opt/buffer.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/opt/options.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/opt/value_cleaner.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/os/aix/alloc_aligned.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/os/aix/topology.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/os/alloc_aligned.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/os/details/fake_topology.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/os/free_bsd/alloc_aligned.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/os/free_bsd/topology.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/os/hpux/alloc_aligned.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/os/hpux/topology.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/os/libc/alloc_aligned.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/os/linux/alloc_aligned.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/os/linux/topology.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/os/osx/topology.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/os/posix/alloc_aligned.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/os/posix/fake_topology.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/os/posix/thread.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/os/sunos/alloc_aligned.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/os/sunos/topology.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/os/thread.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/os/topology.h 
+CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/os/win/alloc_aligned.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/os/win/thread.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/os/win/topology.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/threading/details/_common.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/threading/details/auto_detect.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/threading/details/cxx11.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/threading/details/cxx11_manager.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/threading/details/gcc.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/threading/details/gcc_manager.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/threading/details/msvc.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/threading/details/msvc_manager.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/threading/details/pthread.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/threading/details/pthread_manager.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/threading/details/wintls.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/threading/details/wintls_manager.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/threading/model.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/urcu/details/base.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/urcu/details/gp.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/urcu/details/gp_decl.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/urcu/details/gpb.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/urcu/details/sh_decl.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/urcu/general_buffered.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/user_setup/allocator.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/user_setup/cache_line.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/user_setup/threading.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../cds/version.h +CMakeFiles/cds-s.dir/src/dhp.cpp.o: ../src/dhp.cpp + +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/algo/atomic.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/algo/backoff_strategy.h 
+CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/algo/base.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/algo/bitop.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/algo/elimination_tls.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/algo/int_algo.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/backoff.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/bitop.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/clang/defs.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/defs.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/feature_tsan.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/amd64/backoff.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/amd64/bitop.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/amd64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/arm7/backoff.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/arm8/backoff.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/compiler_barriers.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/compiler_macro.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/defs.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/ia64/backoff.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/ia64/bitop.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/ia64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/ppc64/backoff.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/ppc64/bitop.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/sparc/backoff.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/sparc/bitop.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/sparc/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/x86/backoff.h 
+CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/x86/bitop.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic32.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/icl/compiler_barriers.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/icl/defs.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/vc/amd64/backoff.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/vc/amd64/bitop.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/vc/amd64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/vc/compiler_barriers.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/vc/defs.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/vc/x86/backoff.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/vc/x86/bitop.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/compiler/vc/x86/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/container/details/base.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/container/vyukov_mpmc_cycle_queue.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/details/aligned_type.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/details/allocator.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/details/bitop_generic.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/details/bounded_container.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/details/defs.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/details/is_aligned.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/details/lib.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/details/marked_ptr.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/details/static_functor.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/details/throw_exception.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/gc/details/hp_common.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/gc/details/retired_ptr.h 
+CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/gc/hp.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/init.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/intrusive/details/base.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/intrusive/details/node_traits.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/intrusive/options.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/opt/buffer.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/opt/options.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/opt/value_cleaner.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/os/aix/alloc_aligned.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/os/aix/topology.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/os/alloc_aligned.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/os/details/fake_topology.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/os/free_bsd/alloc_aligned.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/os/free_bsd/topology.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/os/hpux/alloc_aligned.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/os/hpux/topology.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/os/libc/alloc_aligned.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/os/linux/alloc_aligned.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/os/linux/topology.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/os/osx/topology.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/os/posix/alloc_aligned.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/os/posix/fake_topology.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/os/posix/thread.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/os/sunos/alloc_aligned.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/os/sunos/topology.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/os/thread.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/os/topology.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/os/win/alloc_aligned.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/os/win/thread.h 
+CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/os/win/topology.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/threading/details/_common.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/threading/details/auto_detect.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/threading/details/cxx11.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/threading/details/cxx11_manager.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/threading/details/gcc.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/threading/details/gcc_manager.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/threading/details/msvc.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/threading/details/msvc_manager.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/threading/details/pthread.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/threading/details/pthread_manager.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/threading/details/wintls.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/threading/details/wintls_manager.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/threading/model.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/urcu/details/base.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/urcu/details/gp.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/urcu/details/gp_decl.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/urcu/details/gpb.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/urcu/details/sh_decl.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/urcu/general_buffered.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/user_setup/allocator.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/user_setup/cache_line.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/user_setup/threading.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../cds/version.h +CMakeFiles/cds-s.dir/src/dllmain.cpp.o: ../src/dllmain.cpp + +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/algo/atomic.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/algo/backoff_strategy.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/algo/base.h 
+CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/algo/bitop.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/algo/elimination_tls.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/algo/int_algo.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/backoff.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/bitop.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/clang/defs.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/defs.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/feature_tsan.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/gcc/amd64/backoff.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/gcc/amd64/bitop.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/gcc/amd64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/gcc/arm7/backoff.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/gcc/arm8/backoff.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/gcc/compiler_barriers.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/gcc/compiler_macro.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/gcc/defs.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/gcc/ia64/backoff.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/gcc/ia64/bitop.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/gcc/ia64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/gcc/ppc64/backoff.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/gcc/ppc64/bitop.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/gcc/sparc/backoff.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/gcc/sparc/bitop.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/gcc/sparc/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/gcc/x86/backoff.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/gcc/x86/bitop.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: 
../cds/compiler/gcc/x86/cxx11_atomic32.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/icl/compiler_barriers.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/icl/defs.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/vc/amd64/backoff.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/vc/amd64/bitop.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/vc/amd64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/vc/compiler_barriers.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/vc/defs.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/vc/x86/backoff.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/vc/x86/bitop.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/compiler/vc/x86/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/container/details/base.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/container/vyukov_mpmc_cycle_queue.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/details/aligned_type.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/details/allocator.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/details/bitop_generic.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/details/bounded_container.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/details/defs.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/details/is_aligned.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/details/lib.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/details/marked_ptr.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/details/static_functor.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/details/throw_exception.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/gc/details/hp_common.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/gc/details/retired_ptr.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/gc/hp.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/init.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/intrusive/details/base.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/intrusive/details/node_traits.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/intrusive/options.h 
+CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/opt/buffer.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/opt/options.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/opt/value_cleaner.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/os/aix/alloc_aligned.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/os/aix/topology.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/os/alloc_aligned.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/os/details/fake_topology.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/os/free_bsd/alloc_aligned.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/os/free_bsd/topology.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/os/hpux/alloc_aligned.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/os/hpux/topology.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/os/libc/alloc_aligned.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/os/linux/alloc_aligned.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/os/linux/topology.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/os/osx/topology.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/os/posix/alloc_aligned.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/os/posix/fake_topology.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/os/posix/thread.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/os/sunos/alloc_aligned.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/os/sunos/topology.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/os/thread.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/os/topology.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/os/win/alloc_aligned.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/os/win/thread.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/os/win/topology.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/threading/details/_common.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/threading/details/auto_detect.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/threading/details/cxx11.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/threading/details/cxx11_manager.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/threading/details/gcc.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: 
../cds/threading/details/gcc_manager.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/threading/details/msvc.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/threading/details/msvc_manager.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/threading/details/pthread.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/threading/details/pthread_manager.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/threading/details/wintls.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/threading/details/wintls_manager.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/threading/model.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/urcu/details/base.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/urcu/details/gp.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/urcu/details/gp_decl.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/urcu/details/gpb.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/urcu/details/sh_decl.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/urcu/general_buffered.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/user_setup/allocator.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/user_setup/cache_line.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/user_setup/threading.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../cds/version.h +CMakeFiles/cds-s.dir/src/hp.cpp.o: ../src/hp.cpp + +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/algo/atomic.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/algo/backoff_strategy.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/algo/base.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/algo/bitop.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/algo/elimination_tls.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/algo/int_algo.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/backoff.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/bitop.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/clang/defs.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/defs.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/feature_tsan.h 
+CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/gcc/amd64/backoff.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/gcc/amd64/bitop.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/gcc/amd64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/gcc/arm7/backoff.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/gcc/arm8/backoff.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/gcc/compiler_barriers.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/gcc/compiler_macro.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/gcc/defs.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/gcc/ia64/backoff.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/gcc/ia64/bitop.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/gcc/ia64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/gcc/ppc64/backoff.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/gcc/ppc64/bitop.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/gcc/sparc/backoff.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/gcc/sparc/bitop.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/gcc/sparc/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/gcc/x86/backoff.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/gcc/x86/bitop.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic32.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/icl/compiler_barriers.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/icl/defs.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/vc/amd64/backoff.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/vc/amd64/bitop.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/vc/amd64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/vc/compiler_barriers.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/vc/defs.h 
+CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/vc/x86/backoff.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/vc/x86/bitop.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/compiler/vc/x86/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/container/details/base.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/container/vyukov_mpmc_cycle_queue.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/details/aligned_type.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/details/allocator.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/details/bitop_generic.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/details/bounded_container.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/details/defs.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/details/is_aligned.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/details/lib.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/details/marked_ptr.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/details/static_functor.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/details/throw_exception.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/gc/details/hp_common.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/gc/details/retired_ptr.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/gc/hp.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/init.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/intrusive/details/base.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/intrusive/details/node_traits.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/intrusive/options.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/opt/buffer.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/opt/options.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/opt/value_cleaner.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/os/aix/alloc_aligned.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/os/aix/topology.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/os/alloc_aligned.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/os/details/fake_topology.h +CMakeFiles/cds-s.dir/src/init.cpp.o: 
../cds/os/free_bsd/alloc_aligned.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/os/free_bsd/topology.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/os/hpux/alloc_aligned.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/os/hpux/topology.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/os/libc/alloc_aligned.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/os/linux/alloc_aligned.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/os/linux/topology.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/os/osx/topology.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/os/posix/alloc_aligned.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/os/posix/fake_topology.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/os/posix/thread.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/os/sunos/alloc_aligned.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/os/sunos/topology.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/os/thread.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/os/topology.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/os/win/alloc_aligned.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/os/win/thread.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/os/win/topology.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/threading/details/_common.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/threading/details/auto_detect.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/threading/details/cxx11.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/threading/details/cxx11_manager.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/threading/details/gcc.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/threading/details/gcc_manager.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/threading/details/msvc.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/threading/details/msvc_manager.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/threading/details/pthread.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/threading/details/pthread_manager.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/threading/details/wintls.h 
+CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/threading/details/wintls_manager.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/threading/model.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/urcu/details/base.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/urcu/details/gp.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/urcu/details/gp_decl.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/urcu/details/gpb.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/urcu/details/sh_decl.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/urcu/general_buffered.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/user_setup/allocator.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/user_setup/cache_line.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/user_setup/threading.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../cds/version.h +CMakeFiles/cds-s.dir/src/init.cpp.o: ../src/init.cpp + +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/algo/atomic.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/algo/backoff_strategy.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/algo/base.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/algo/bitop.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/algo/elimination_tls.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/algo/int_algo.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/backoff.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/bitop.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/clang/defs.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/defs.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/feature_tsan.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/amd64/backoff.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/amd64/bitop.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/amd64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: 
../cds/compiler/gcc/arm7/backoff.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/arm8/backoff.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/compiler_barriers.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/compiler_macro.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/defs.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/ia64/backoff.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/ia64/bitop.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/ia64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/ppc64/backoff.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/ppc64/bitop.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/sparc/backoff.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/sparc/bitop.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/sparc/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/x86/backoff.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/x86/bitop.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic32.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/icl/compiler_barriers.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/icl/defs.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/vc/amd64/backoff.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/vc/amd64/bitop.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/vc/amd64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/vc/compiler_barriers.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/vc/defs.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/vc/x86/backoff.h 
+CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/vc/x86/bitop.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/compiler/vc/x86/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/container/details/base.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/container/vyukov_mpmc_cycle_queue.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/details/aligned_type.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/details/allocator.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/details/bitop_generic.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/details/bounded_container.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/details/defs.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/details/is_aligned.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/details/lib.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/details/marked_ptr.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/details/static_functor.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/details/throw_exception.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/gc/details/hp_common.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/gc/details/retired_ptr.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/gc/dhp.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/gc/hp.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/init.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/intrusive/details/base.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/intrusive/details/node_traits.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/intrusive/free_list.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/intrusive/free_list_selector.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/intrusive/free_list_tagged.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/intrusive/options.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/opt/buffer.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/opt/options.h 
+CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/opt/value_cleaner.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/os/aix/alloc_aligned.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/os/aix/topology.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/os/alloc_aligned.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/os/details/fake_topology.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/os/free_bsd/alloc_aligned.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/os/free_bsd/topology.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/os/hpux/alloc_aligned.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/os/hpux/topology.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/os/libc/alloc_aligned.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/os/linux/alloc_aligned.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/os/linux/topology.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/os/osx/topology.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/os/posix/alloc_aligned.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/os/posix/fake_topology.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/os/posix/thread.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/os/sunos/alloc_aligned.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/os/sunos/topology.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/os/thread.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/os/topology.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/os/win/alloc_aligned.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/os/win/thread.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/os/win/topology.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/threading/details/_common.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/threading/details/auto_detect.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/threading/details/cxx11.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: 
../cds/threading/details/cxx11_manager.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/threading/details/gcc.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/threading/details/gcc_manager.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/threading/details/msvc.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/threading/details/msvc_manager.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/threading/details/pthread.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/threading/details/pthread_manager.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/threading/details/wintls.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/threading/details/wintls_manager.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/threading/model.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/urcu/details/base.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/urcu/details/gp.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/urcu/details/gp_decl.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/urcu/details/gpb.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/urcu/details/sh_decl.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/urcu/general_buffered.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/user_setup/allocator.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/user_setup/cache_line.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/user_setup/threading.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../cds/version.h +CMakeFiles/cds-s.dir/src/thread_data.cpp.o: ../src/thread_data.cpp + +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/algo/atomic.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/algo/backoff_strategy.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/algo/base.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/algo/bitop.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/algo/elimination_tls.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/algo/int_algo.h 
+CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/backoff.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/bitop.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/clang/defs.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/defs.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/feature_tsan.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/amd64/backoff.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/amd64/bitop.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/amd64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/arm7/backoff.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/arm8/backoff.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/compiler_barriers.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/compiler_macro.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/defs.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/ia64/backoff.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/ia64/bitop.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/ia64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/ppc64/backoff.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/ppc64/bitop.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/sparc/backoff.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/sparc/bitop.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/sparc/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/x86/backoff.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/x86/bitop.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: 
../cds/compiler/gcc/x86/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic32.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/icl/compiler_barriers.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/icl/defs.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/vc/amd64/backoff.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/vc/amd64/bitop.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/vc/amd64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/vc/compiler_barriers.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/vc/defs.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/vc/x86/backoff.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/vc/x86/bitop.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/compiler/vc/x86/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/container/details/base.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/container/vyukov_mpmc_cycle_queue.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/details/aligned_type.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/details/allocator.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/details/bitop_generic.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/details/bounded_container.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/details/defs.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/details/is_aligned.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/details/lib.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/details/marked_ptr.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/details/static_functor.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/details/throw_exception.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/gc/details/hp_common.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: 
../cds/gc/details/retired_ptr.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/gc/hp.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/init.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/intrusive/details/base.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/intrusive/details/node_traits.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/intrusive/options.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/opt/buffer.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/opt/options.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/opt/value_cleaner.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/os/aix/alloc_aligned.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/os/aix/topology.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/os/alloc_aligned.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/os/details/fake_topology.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/os/free_bsd/alloc_aligned.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/os/free_bsd/topology.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/os/hpux/alloc_aligned.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/os/hpux/topology.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/os/libc/alloc_aligned.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/os/linux/alloc_aligned.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/os/linux/topology.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/os/osx/topology.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/os/posix/alloc_aligned.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/os/posix/fake_topology.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/os/posix/thread.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/os/sunos/alloc_aligned.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/os/sunos/topology.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/os/thread.h 
+CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/os/topology.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/os/win/alloc_aligned.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/os/win/thread.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/os/win/topology.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/threading/details/_common.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/threading/details/auto_detect.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/threading/details/cxx11.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/threading/details/cxx11_manager.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/threading/details/gcc.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/threading/details/gcc_manager.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/threading/details/msvc.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/threading/details/msvc_manager.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/threading/details/pthread.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/threading/details/pthread_manager.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/threading/details/wintls.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/threading/details/wintls_manager.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/threading/model.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/urcu/details/base.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/urcu/details/gp.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/urcu/details/gp_decl.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/urcu/details/gpb.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/urcu/details/sh_decl.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/urcu/general_buffered.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/user_setup/allocator.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/user_setup/cache_line.h 
+CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/user_setup/threading.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../cds/version.h +CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o: ../src/topology_hpux.cpp + +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/algo/atomic.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/algo/backoff_strategy.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/algo/base.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/algo/bitop.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/algo/elimination_tls.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/algo/int_algo.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/backoff.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/bitop.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/clang/defs.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/defs.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/feature_tsan.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/amd64/backoff.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/amd64/bitop.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/amd64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/arm7/backoff.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/arm8/backoff.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/compiler_barriers.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/compiler_macro.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/defs.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/ia64/backoff.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/ia64/bitop.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: 
../cds/compiler/gcc/ia64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/ppc64/backoff.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/ppc64/bitop.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/sparc/backoff.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/sparc/bitop.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/sparc/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/x86/backoff.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/x86/bitop.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic32.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/icl/compiler_barriers.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/icl/defs.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/vc/amd64/backoff.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/vc/amd64/bitop.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/vc/amd64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/vc/compiler_barriers.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/vc/defs.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/vc/x86/backoff.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/vc/x86/bitop.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/compiler/vc/x86/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/container/details/base.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/container/vyukov_mpmc_cycle_queue.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/details/aligned_type.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/details/allocator.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: 
../cds/details/bitop_generic.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/details/bounded_container.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/details/defs.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/details/is_aligned.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/details/lib.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/details/marked_ptr.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/details/static_functor.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/details/throw_exception.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/gc/details/hp_common.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/gc/details/retired_ptr.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/gc/hp.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/init.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/intrusive/details/base.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/intrusive/details/node_traits.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/intrusive/options.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/opt/buffer.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/opt/options.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/opt/value_cleaner.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/os/aix/alloc_aligned.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/os/aix/topology.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/os/alloc_aligned.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/os/details/fake_topology.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/os/free_bsd/alloc_aligned.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/os/free_bsd/topology.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/os/hpux/alloc_aligned.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/os/hpux/topology.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: 
../cds/os/libc/alloc_aligned.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/os/linux/alloc_aligned.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/os/linux/topology.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/os/osx/topology.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/os/posix/alloc_aligned.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/os/posix/fake_topology.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/os/posix/thread.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/os/sunos/alloc_aligned.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/os/sunos/topology.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/os/thread.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/os/topology.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/os/win/alloc_aligned.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/os/win/thread.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/os/win/topology.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/threading/details/_common.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/threading/details/auto_detect.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/threading/details/cxx11.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/threading/details/cxx11_manager.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/threading/details/gcc.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/threading/details/gcc_manager.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/threading/details/msvc.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/threading/details/msvc_manager.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/threading/details/pthread.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/threading/details/pthread_manager.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/threading/details/wintls.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: 
../cds/threading/details/wintls_manager.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/threading/model.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/urcu/details/base.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/urcu/details/gp.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/urcu/details/gp_decl.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/urcu/details/gpb.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/urcu/details/sh_decl.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/urcu/general_buffered.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/user_setup/allocator.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/user_setup/cache_line.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/user_setup/threading.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../cds/version.h +CMakeFiles/cds-s.dir/src/topology_linux.cpp.o: ../src/topology_linux.cpp + +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/algo/atomic.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/algo/backoff_strategy.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/algo/base.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/algo/bitop.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/algo/elimination_tls.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/algo/int_algo.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/backoff.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/bitop.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/clang/defs.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/defs.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/feature_tsan.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/amd64/backoff.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/amd64/bitop.h 
+CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/amd64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/arm7/backoff.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/arm8/backoff.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/compiler_barriers.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/compiler_macro.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/defs.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/ia64/backoff.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/ia64/bitop.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/ia64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/ppc64/backoff.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/ppc64/bitop.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/sparc/backoff.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/sparc/bitop.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/sparc/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/x86/backoff.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/x86/bitop.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic32.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/icl/compiler_barriers.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/icl/defs.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/vc/amd64/backoff.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/vc/amd64/bitop.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/vc/amd64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/vc/compiler_barriers.h 
+CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/vc/defs.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/vc/x86/backoff.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/vc/x86/bitop.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/compiler/vc/x86/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/container/details/base.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/container/vyukov_mpmc_cycle_queue.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/details/aligned_type.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/details/allocator.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/details/bitop_generic.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/details/bounded_container.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/details/defs.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/details/is_aligned.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/details/lib.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/details/marked_ptr.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/details/static_functor.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/details/throw_exception.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/gc/details/hp_common.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/gc/details/retired_ptr.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/gc/hp.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/init.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/intrusive/details/base.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/intrusive/details/node_traits.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/intrusive/options.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/opt/buffer.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/opt/options.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/opt/value_cleaner.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: 
../cds/os/aix/alloc_aligned.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/os/aix/topology.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/os/alloc_aligned.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/os/details/fake_topology.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/os/free_bsd/alloc_aligned.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/os/free_bsd/topology.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/os/hpux/alloc_aligned.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/os/hpux/topology.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/os/libc/alloc_aligned.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/os/linux/alloc_aligned.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/os/linux/topology.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/os/osx/topology.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/os/posix/alloc_aligned.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/os/posix/fake_topology.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/os/posix/thread.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/os/sunos/alloc_aligned.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/os/sunos/topology.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/os/thread.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/os/topology.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/os/win/alloc_aligned.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/os/win/thread.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/os/win/topology.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/threading/details/_common.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/threading/details/auto_detect.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/threading/details/cxx11.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/threading/details/cxx11_manager.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/threading/details/gcc.h 
+CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/threading/details/gcc_manager.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/threading/details/msvc.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/threading/details/msvc_manager.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/threading/details/pthread.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/threading/details/pthread_manager.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/threading/details/wintls.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/threading/details/wintls_manager.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/threading/model.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/urcu/details/base.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/urcu/details/gp.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/urcu/details/gp_decl.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/urcu/details/gpb.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/urcu/details/sh_decl.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/urcu/general_buffered.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/user_setup/allocator.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/user_setup/cache_line.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/user_setup/threading.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../cds/version.h +CMakeFiles/cds-s.dir/src/topology_osx.cpp.o: ../src/topology_osx.cpp + +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/algo/atomic.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/algo/backoff_strategy.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/algo/base.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/algo/bitop.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/algo/elimination_tls.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/algo/int_algo.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/backoff.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/bitop.h 
+CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/clang/defs.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/defs.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/feature_tsan.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/amd64/backoff.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/amd64/bitop.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/amd64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/arm7/backoff.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/arm8/backoff.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/compiler_barriers.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/compiler_macro.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/defs.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/ia64/backoff.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/ia64/bitop.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/ia64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/ppc64/backoff.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/ppc64/bitop.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/sparc/backoff.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/sparc/bitop.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/sparc/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/x86/backoff.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/x86/bitop.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic32.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/icl/compiler_barriers.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/icl/defs.h 
+CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/vc/amd64/backoff.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/vc/amd64/bitop.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/vc/amd64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/vc/compiler_barriers.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/vc/defs.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/vc/x86/backoff.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/vc/x86/bitop.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/compiler/vc/x86/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/container/details/base.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/container/vyukov_mpmc_cycle_queue.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/details/aligned_type.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/details/allocator.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/details/bitop_generic.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/details/bounded_container.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/details/defs.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/details/is_aligned.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/details/lib.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/details/marked_ptr.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/details/static_functor.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/details/throw_exception.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/gc/details/hp_common.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/gc/details/retired_ptr.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/gc/hp.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/init.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/intrusive/details/base.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/intrusive/details/node_traits.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/intrusive/options.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/opt/buffer.h 
+CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/opt/options.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/opt/value_cleaner.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/os/aix/alloc_aligned.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/os/aix/topology.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/os/alloc_aligned.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/os/details/fake_topology.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/os/free_bsd/alloc_aligned.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/os/free_bsd/topology.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/os/hpux/alloc_aligned.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/os/hpux/topology.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/os/libc/alloc_aligned.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/os/linux/alloc_aligned.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/os/linux/topology.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/os/osx/topology.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/os/posix/alloc_aligned.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/os/posix/fake_topology.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/os/posix/thread.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/os/sunos/alloc_aligned.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/os/sunos/topology.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/os/thread.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/os/topology.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/os/win/alloc_aligned.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/os/win/thread.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/os/win/topology.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/threading/details/_common.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/threading/details/auto_detect.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/threading/details/cxx11.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/threading/details/cxx11_manager.h 
+CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/threading/details/gcc.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/threading/details/gcc_manager.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/threading/details/msvc.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/threading/details/msvc_manager.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/threading/details/pthread.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/threading/details/pthread_manager.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/threading/details/wintls.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/threading/details/wintls_manager.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/threading/model.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/urcu/details/base.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/urcu/details/gp.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/urcu/details/gp_decl.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/urcu/details/gpb.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/urcu/details/sh_decl.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/urcu/general_buffered.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/user_setup/allocator.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/user_setup/cache_line.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/user_setup/threading.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../cds/version.h +CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o: ../src/urcu_gp.cpp + +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/algo/atomic.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/algo/backoff_strategy.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/algo/base.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/algo/bitop.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/algo/elimination_tls.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/algo/int_algo.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/backoff.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/bitop.h 
+CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/clang/defs.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/defs.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/feature_tsan.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/amd64/backoff.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/amd64/bitop.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/amd64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/arm7/backoff.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/arm8/backoff.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/compiler_barriers.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/compiler_macro.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/defs.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/ia64/backoff.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/ia64/bitop.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/ia64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/ppc64/backoff.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/ppc64/bitop.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/sparc/backoff.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/sparc/bitop.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/sparc/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/x86/backoff.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/x86/bitop.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic32.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/icl/compiler_barriers.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/icl/defs.h 
+CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/vc/amd64/backoff.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/vc/amd64/bitop.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/vc/amd64/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/vc/compiler_barriers.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/vc/defs.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/vc/x86/backoff.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/vc/x86/bitop.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/compiler/vc/x86/cxx11_atomic.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/container/details/base.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/container/vyukov_mpmc_cycle_queue.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/details/aligned_type.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/details/allocator.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/details/bitop_generic.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/details/bounded_container.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/details/defs.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/details/is_aligned.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/details/lib.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/details/marked_ptr.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/details/static_functor.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/details/throw_exception.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/gc/details/hp_common.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/gc/details/retired_ptr.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/gc/hp.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/init.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/intrusive/details/base.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/intrusive/details/node_traits.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/intrusive/options.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/opt/buffer.h 
+CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/opt/options.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/opt/value_cleaner.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/os/aix/alloc_aligned.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/os/aix/topology.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/os/alloc_aligned.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/os/details/fake_topology.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/os/free_bsd/alloc_aligned.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/os/free_bsd/topology.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/os/hpux/alloc_aligned.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/os/hpux/topology.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/os/libc/alloc_aligned.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/os/linux/alloc_aligned.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/os/linux/topology.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/os/osx/topology.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/os/posix/alloc_aligned.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/os/posix/fake_topology.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/os/posix/thread.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/os/sunos/alloc_aligned.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/os/sunos/topology.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/os/thread.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/os/topology.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/os/win/alloc_aligned.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/os/win/thread.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/os/win/topology.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/threading/details/_common.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/threading/details/auto_detect.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/threading/details/cxx11.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/threading/details/cxx11_manager.h 
+CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/threading/details/gcc.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/threading/details/gcc_manager.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/threading/details/msvc.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/threading/details/msvc_manager.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/threading/details/pthread.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/threading/details/pthread_manager.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/threading/details/wintls.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/threading/details/wintls_manager.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/threading/model.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/urcu/details/base.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/urcu/details/gp.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/urcu/details/gp_decl.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/urcu/details/gpb.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/urcu/details/sh.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/urcu/details/sh_decl.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/urcu/general_buffered.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/user_setup/allocator.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/user_setup/cache_line.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/user_setup/threading.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../cds/version.h +CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o: ../src/urcu_sh.cpp + diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/flags.make b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/flags.make new file mode 100644 index 0000000..dd4316a --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/flags.make @@ -0,0 +1,10 @@ +# CMAKE generated file: DO NOT EDIT! 
+# Generated by "Unix Makefiles" Generator, CMake Version 3.5 + +# compile CXX with /usr/bin/c++ +CXX_FLAGS = -O3 -DNDEBUG -std=c++11 -mcx16 -Wall -Wextra -pedantic -Wno-unused-local-typedefs + +CXX_DEFINES = + +CXX_INCLUDES = -I/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release -I/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2 + diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/link.txt b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/link.txt new file mode 100644 index 0000000..30df9e3 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/link.txt @@ -0,0 +1,2 @@ +/usr/bin/ar qc bin/libcds-s.a CMakeFiles/cds-s.dir/src/init.cpp.o CMakeFiles/cds-s.dir/src/hp.cpp.o CMakeFiles/cds-s.dir/src/dhp.cpp.o CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o CMakeFiles/cds-s.dir/src/thread_data.cpp.o CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o CMakeFiles/cds-s.dir/src/topology_linux.cpp.o CMakeFiles/cds-s.dir/src/topology_osx.cpp.o CMakeFiles/cds-s.dir/src/dllmain.cpp.o +/usr/bin/ranlib bin/libcds-s.a diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/progress.make b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/progress.make new file mode 100644 index 0000000..4a1edec --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/progress.make @@ -0,0 +1,12 @@ +CMAKE_PROGRESS_1 = 12 +CMAKE_PROGRESS_2 = 13 +CMAKE_PROGRESS_3 = 14 +CMAKE_PROGRESS_4 = 15 +CMAKE_PROGRESS_5 = 16 +CMAKE_PROGRESS_6 = 17 +CMAKE_PROGRESS_7 = 18 +CMAKE_PROGRESS_8 = 19 +CMAKE_PROGRESS_9 = 20 +CMAKE_PROGRESS_10 = 21 +CMAKE_PROGRESS_11 = 22 + diff --git 
a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/dhp.cpp.o b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/dhp.cpp.o new file mode 100644 index 0000000000000000000000000000000000000000..f43580444dc792eaad311ec3713919406116f239 GIT binary patch literal 26688 zcmd^n3v^t?ndWV^WXliKA{e8DR|5^m7?f^XHfq~|WLs8U)3SvnJDy-@%aV+h$dX37 z?FS3Nrd!50O%rFB$uP;x!Z6GM@|eY&S;panAo(TZu(1u{=@7gL4{KzIfW%RFdB4Bz zU)|N$-L_|v?4Gk*=d`NsSAYFg|D*nT+-u)e8(C1`@fa$3jO&felR^!n|CUU-S{AE~ ztBflp?L<7Eg`W?B>c|B?>v6Kkn@W;_amny`c7CAhN!gKckz6~_;8o+;%Z}c zh4117frf|=juks((KzB01HbW#@ZkD!V&EOGY}Ig->NNV)+!HlTOB;o~M-04sgYfJT zhdveI_k5|BKxf(?`Yzr?EzHzBPb3vE#K4h4;Tf$@MFJ-xNn>H^f&-D{IrZU#^}cz} z)%)h|6u!B;%<#}6yYP*A`=@sd9^dJSO&0NgFEH(c=D^V#BA%V%&_|K*eyi0CzyGx+ zGj-Dmt};Cb%|oM$l9L7_;X~_YMZ$X*rV0la*^>_3d5xKB0NEjPUH{Jm+v?@O#YapJ`A z0ZkwFNX;_SJ}#z=)-0`Q64%`W;g>f0?%M(@iN5De`xDcC(U-V@9#hd$VgD{Nx#wHv9jCv=m}p!>bI|7*dN21 zH9szfaMmP|ngs11^lkY&Qu^TTQH&IQouHX#M3VC`{@Iubre}8~wFZNl9I^Z13Bq&K z8ZYcprZB^!)-+*%Bw`GDa|ETq~&{GqHyg2LX9BE@2Y$~gCPPP!&M$axj#)tn2?8#rIi`AW{$ zaNf=N1}ERViE<<{xOuBnqT;qp@qtY7p-l0yO!0|KaYv@upDFIk6c1*KuVjixGR32r z;z*`A%EjTy($C0mU@%`|>t~pcsfJRK+DZ$UKtR8N;?ZCMHNadb72!j|UNRcPoIfN6 z=N3zx^`5k(cU4M1j-3nd^547*bi*(Gs1$TVE5Kcj=l?hVzxH(gZ>@#;GroV2*XL(S zp||uDu)2i3Q`#Wg`K_=>eVz6k*gWulf$gR2ZuSaB^E%jlBU$$nNsjgBp z^&pv8gkQJHXr3kSG)^qVdV_T({CaG}X_maJ7)@jQu~D%u5UDShnzL5~%B{1p44r5M z@vc%c3?TNKNK)#go@VmZkbpKYK{MX(7bz`X(>GX5 z3lDZaO@T6Dj}@tfSWU)Yd zUt%pbOuToRfRd`Po??);+#+>H!2c77;_M5Z;D^cJPeWueKkc#Y!J`C6ONp*bcRcWG{Be+o+xR z5$O>@Z!9+d!p3GN{4-&9m-+7bI#3F-SBt*m)|H`WGwW%5z@zp+S!?RgVsENN6S<$* zA}{A$Mn62uaEQE`@WD-6DMJ#2n<&6gLh%)+WbEB^gi1H>k}^8$t2_(Ej|C&i3#l$^ z%UvGoukSnTOVq(HBgs0%`(6Ip(6hCn=W(#G-^OBKt*lS3MAY9Gv9W0%#sSCVZ8HVS z3AB;3`AlKrFwIdPB1i09lEOMK9r+`48Y~P9za-Z&EOPbsk=R2b`6W!Lx>7AVO_J*{ zZ&WfNU5;Olm&C((5z%**y2iWQJ>EajI3H8%9XkhRHqKq@G_(}6I!Lp67tPSp95b|( zlrPQV#8lj_q-SU?)n!f5-$+U(|I;y)gQMAy8I9~?`0dR8%(?sd7~7Pf+gF_NB&Xsi 
zigUUzu^Xcnw4gfnLd+&PaIrBG@ooQSa!Oxc3V-+Z9l>9|pAyMh*t;I~#=TLh$uoje zfpwLz_wl|}I0hGX^=Rxv+-a1GDLZjVBjQI!h`dz9?+n7{`Sf7dZZT1Q!fwSI@7ww&`I=REUktLb^LZ!2X; zVz7?}PYK2J%y(2Av8CEY5?3uLDWlx)V0Q)^C7eb!HfIG`9Jf#PCA#R$MhXGJ@LR&! z17Z=gDn)98pX4PGtMCir=a<7DtgVJ(2`?YoqE2NF>6!ZenXOI;u)DIIRjpn6d4e|7 zCtW18iujz%rMT9`B%F3}G1WPrWc(p{ofH%Mk8`CIt%zu{?eHG?%&X=*@_|>}!T_~# zgtp&4qve*Cf^Ze00Lzr2cKfMREzWY{!1&Oi= zPxUFxHC*1-C#&jb&k6YM-i>ja$p&2NR*&Lv*K_vJIFM)j)82=T23F37QEY7`#j%K8 zh242^B)liKgU)}m&E!lI7p-P$!Kes-u)bvIpTU&%mEB&1U+Ot##`i+=!ASVS*d8gg znZ)F#GaL%#R47LQQ+|-j7%flzKHRw1mirQ~Nt4jV6<${+Z+lSiLpns`u~dnR@JQ<4 z=zJQ%26wp`{-rPRuc(gLqoH@{ikWm~7Y0^uqYKvLqyY8;aGS4HQm8)!K`Q%2`1)S| z(EG%b?g8kw`p_Rxu@4o#yWawiMFR$py;YBU^u(J0%y5#4V5|iAfQYXNx5m~ z{wNUEc&EzAu^Yv40Oi|PCW zS?z;tzbX47DO`Eub;rVUY{JHIlH#XmskzENdagEj!PfRMAC^h>CKL>?^?3(=G_#Mw3AH4-boV& z_9NS3qM!dvF(@MT+YuXE#3?UFJkF6Mk}QO|=LCkTAZo-OA#a2Q;E=w)>41jX@h9#7 zxNskuNIJpDtFW2dex6lCUJ2u>DLZrv`!^-l^hKGiFi5;dY{3aN49KdMQFnUAQ{k{|HC8;QK zJP^~&*Jn#VDZ;pA^qQJpc;T>Ec<0jWt4iB3j>T6Ra|tzr*L3IDYjzVerza?-`;3++ zV#VuiruonexQy%mp6;_u`!jyYV*d`yFOIm{3kV!4WZ!1;A`=s)?{Msbp?5GKwZBV# zM@~CBcgOAiw@b`9~&5{_ieuu`^r726sdWL zTJWvpyr6XX&j?KR`bBuJFENq)IkicuIwf$=wJ%mSbO01Quka=y5+3j+ekdEo%0)6P z?CV8xBd*2vB4m7R6jy5CA1jZfz9A!7_-Ax!fEz&^@s^+h`ynEXl@I^-%=#@~W;rfF zE}JtKSBs=+>8L{p#P zLE8Be=SVY2DD<0*7Sh9Gm)ewhc#-x$@s2>X2glud`_5vzW>cs2R0?c6pq%3|-|%z);c+;f4&Sfa2lJvqb}+a= za9+*1bZ^v4??ztExS#Vf&ZP^e`6UL&5R7S?pd9aykap3ZGfVu=NPf#hE#!Ts^E%3L z2m6hiW#ziI7OSgg)~slKv~6v7M{`SCbX9Z9SEJ3b4b-l#qia=jhkw3k@Hf^k@prBI z3h4dqz5dQFWS#9+dvizo?QN|@t0!rnDt^I{<*xE}5r@TokYm{_#gT^#G zH(%Uw-YaFoC*30^X|Zpjb;MZ#h{n25?g8kS}4 z38gOiko)O5hT8>Num23!dWZ%TvT}!n6DK^SBOW;O4+@I@t-v@~fFdyUE4zPO(P!h0 zUydt!V1n`Z_@Zx3Fn&9}=-U&hzVICQ6Y{eU7NGE?x9HwNO9sp^6^ zJn!I;`dLxY&&L?4F(BAKrs#t)#=BHRy>AYt>pysk9`+bJh?|y#Hb?)D7W2%hMblea zd#ByjGQD+8cW4HjJ*qi-tv2xgY_V?MyjlM8rK@6{R?I)6qOu}*&CHm53Vkg!vm#ho z5eiOmD*V%ep_##%!7%!w@Bb)yZeL<}Hk5kKpE$Od_9YZNE|4{b@j1>3lR*dh6@)dH z(1z8RPh}+fzK{~R{Bl_`jL%bEQ&Ji)Xe=p4@oM}m!jQ(GA7epD>AeLtC1qQ@H6{Ld 
zVZovCC1s$WTT(oK0;;R>vW9|7;(Vh&R%Q^heqbeH6T^ zx{$t6#WdrI28sM2!WeyDN{RHhlCe7oxQYCr;LGE8l@!-P!PI6G*GH%xsR6$YEaTr} zN-G_GgY0&nx3N?vGYqddAvs9_>wC40rUsINc(`aqaQq< zW1g>}tn7rbyf9u`#xN>@k1)*@7?bK(_e~hoyUH`Z#1Er5E?P5z+Lf~mkK}1r2XhYU z%@$6Oexr1=Giyu`tlLpF$fQ2DGXHNP(yDQ>J{IKA$HJ0>9`F69&5rwS zeB#u!vC(%ieV4W+089l?|kEHN&&3y>dopW4(B!@zA`cBx62u z`i`0Kl|1KUY?l-Wu;~?t0c1OwzLb6C2v=Q-x#5A{l;%|4z_{X|aC-I{=O{QU_oi3F zEEx3iJKby$PhR=e{7EqIvXj6&<#@&$ey&lzMvq=+mE;;H`xnp`Xt?fkUgP{MTJ_*H zE>y*=+-qEv^{gJe>Cq^}$-Ty<*#JLz)2m%}wb!^j8^F_HjNkbe1}9ZP-pft~fq3bv zJ4f7~g~MqgGrS4-C7Pbg z8K+p8i+?H)pY0OQw2=EM@Om*1pI_v`KazYh{!IH8N*6U{|1IURm#s}Q}@Ave}Bevn z1`eS#Zz)ZNex;r-ah_@KLTMJ`+b(sUm7Z5IeuVLaQ*CTwy!$c-Q0v0?6o1Bf_{Iys zFVW)s#mwh8@Lc-281~DB&&Y!tIGw6#Uowyo{z*;w1K?v#zD1y!Twj*hMt)oLvoHCmzR(P(q)S7N9?TvPK^;&Q>t8Hzx*J`$EjkHtt##TkmXtvKz z?_JvyZtnzTXG>cY#MvJ0zJ!@Sqm~@FwORUcXEPiV?$;c(*7S6(#~o&Cw7I9Jd1JJ* zZ9SZ&^EPYEX*o`l9GjM9*}&be92m`j9$ekDuWgIA_4IW0%y-(JvgQ1c zl&aC17L2k)jtow34L7#9g_+*dX0`XUvDB5Taa~26qPK0>5bbX3>Fw&oMXj|lx-Lj9 zPa6NcX|de;$zD6F_WS&wes4Qa|lwo4rFt@DvYIJpTdxsHS z+PS{Hvo*S;tv9x|%`MopHQkPnQc%dH=4^r-I?f`PA%#2H5Z9*lN6{`{Upc69ba*mV zo?gl2r&}GpG#rlCix(_ttVKYpnH#BfPu@z9o-{Ft$FyK?o5c}2qpaG71r6)g;_te~ z={cK)Te>jadSWeBEroU6hQt*VKyaw4rKb%| z$l2R4cUzmS=97#h39*)8%v~F^C>s`GkqAxa#yxEx8@PUb^|Qx2Smykj#IrwKB46l=>gIJ6*V9t8%pp~>4-)j0;uTk zXNHQu!v8_TXKC~o@IGJBtMMG;4JhF{{r%V33;3jYsu+fUn|+UdP|egMY|44OiU(qG*h2^z=_575_2pOf=l9@l$7p35-*} zGd242HT+r)zcde@D;cNpRN$x5)f!%i%*>pqQMm+S8tcgyvU8oe&pb`96%+L4FP z(~P_2+Nt5XT>CZtx?Dfk=ykbX)#!D(-eKG=*D;M=m+KP^*X8o^eS@1%G2?Ez&eCvQ zu5%f8`{5TfT#r}fjJx~2Mx(z*^XF+AzEH!LYPcSkCiBFk@m!_RPt|aWZ)&~_=fQi< z1`!hR(c{VAFi!f>?e%SqUboj{jJxIifkv;}>uC+w?X@QlpZ$!x?R7xIb$h+6@z?$E z7aFeH>ursXF7L2Lugm)f#@+IMrqS#2mM~M&vo7ys#@&3*XWT8XU&D2I%QgPGyfgFg zpRLjBeps#H3o%yZKOGvrSi_t66^im7wT>^M7iCC<>wdUG!)s8d_}{JJx?S$k@T)ZX zZ!%8$;h~{q7~j(9b$dO+xLePUYxKIkex%{Lz54U;*~7ToUi&p%x7YI;f8Acc&cpwx zMz7mzM8ju8ex=VZoGaBK-%RQIrFo3I?NYDdi!?sV^6) 
zsdeGXjw`;9O|S5O(eNdjJ`?;(kofEP!y2x~`R{7DKF;5oTvo@^;kw+8;~8oe%82jgzJ{!*jY<=U*_x?FZ1KKC*1mg{~E*X8<-#$T7~Ar064XFKEW z@jt22>wf+d4WA7gt9bPq*Q4Vaz3x{h^3b1kDHtG;|1{yJ_?K(AKK^SMcl+TC zjb69cT*lq<)@k&*y_RaYZm$)2_%t)_w%2M6*X`A*@z?EjM;`v0HG17Hwub9=8PsrH zpF*Kjq!*%|L7$<$w-qVjC zCH0<^##4@;@{>7KiA4Am{1i?YB@cd7rxITarEx!3csY0biJ~VSy8ihZOn#}Q->dL7 zkL**YKbQFvpCEo}oO~U1lAez-z1z><*ZAmuu0BX}kH3%`3yG|E!T*Gk2;AU&;9A zIal~{eh#?s27dmc3m5!MvQScds^t^DZ{S?vN=J&H!pSEn-Rxv(KIQzp+J*c1`7d2~ z89%Fx)a1sfj&SO$lpc_jpA-INi8y~$J>e@fT&GtuQMFEAq0y@`5Fe#Os`k?#B*Jfz zCFhTy%cLLrsYJ1YlEVG+3IFthbHde}B1nyyaPn!DHgG}lY2fF_T=+^F7}A?gmhM-5 zulPl-r+G1*X(&_vO#CNlc)doiVgglLjH~ge@0#w^=qGD@{?$dVzH55Lg{$wH{+Ejn z%?(Or)FG1U_X!ScbKE!^oW8%*>D6~itu9=Br<8Ev>N}+eT)6rk=^q&F=8eKX{qbK~SStF+tW;^ln^hHLo%%@Y2@F*QioYTSs$+ z)waPhDrAw~`5G0gdV39tR>V3x+P~V?vC;Kdk^N%yq}PM;MYO|_n4cNBq*NWAwh4Wt z@gL~HA@3sjNDZNi5L#=GRi}|K_qQGS2at#Uww4zDq9Y1RbhNp}Ji}(X~!$+gmx>Q>=Ek_XDG)O`Q)=lxY*^FLB)A$MpV0J)e&P z@vK$==lwT0g$MXPoaCoIls;7KnhK2i=WvaZla>dZT~Iq@a_@ zGY_>@?N$FF#@zjH;{G>qdzJZBfpa<&oiR=WZ7CKWcIQMaw+0>x5Gi?606i* z;S1~---(*QdcF!?;jCEuhIqB=PVM?@OgX`EX#E}5sy!(V+qe#x4beCKjgBp5r+(Uj z<(oQ#&e3U~U9AN=acUB^w(C{g_0^R+|5vNJqahBZ$}Vcbf2X^@uN$4|JR|M?|Bo0k An*aa+ literal 0 HcmV?d00001 diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/hp.cpp.o b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/hp.cpp.o new file mode 100644 index 0000000000000000000000000000000000000000..c64c76ae5f7335015b67d24eda64a251961cec53 GIT binary patch literal 21432 zcmc(G3v?URo%hJH6OjZ<4S{+?(kMjPI#688d0B*lo%nGFMu~}?uq~mAV=LILUzJ8q zLK6}qJ9Kssb-TN#+ueTOat`b%r`^8J_Tyu_ZLwpBDJ>9Fpgc;+7HF&}Ne$s)NE3g* z|DAiS@mLD!?tW+QIpewWpa1LrU-vQc{#vcBz~xe8b163{*(V8Al-(@buj1V*Tm<_ITnk0tFYZgleHrfd#`-7qxoLvz^e(AUlrzKLB~{AK zGVhW;j>eKBv)LmS#W61%`lFkLMs{7ohK{>ARl^vk&ElulPgXZ=X=KLZZ0OXD%=I`M zJ;y?)y{Q*~s2XRyOAZi)n%dyfQUQex9V=w6iEv5_OlqcbQ)PZX%eQFUnSM$L7Yjee+wp43~_&|7yksi|8g 
z#VgfyR2`kzXf7DhLZiLcX`v%Gr3yzj8VjD;cdeRg0NSWJlm>?OGlw{OSfaD8r9$PU{y`{Q|edZnveoLeGzC-X7>wi`?{-heud;1pAV=Cff#=F{zH&t_Qz@sKl zx!BM-H#0X*Fk_5iu$DR1)Pj@DEPx}t$%nv>g`V&p{2uQ-<4yjQtZOa^sK&eiGqz5s z=Jn{m$ezZ+b8GjtR3!l9FTgK8!e zwl1M)##lVcjKiwA&l65{`q+vS5K;*L-nGQaD;U+sOb-Ir#SDfn^Wa{RI1WC{02gDQ zC-FmOLR2eTX$ekrvE*Z=mU>X`0Ca(8YjJJysHxk>8Mj_jz>`@7AtGN*H%n%q;|4!7 zHa2-2)!91Fx7)RS+s@?{h>%pb*#oX_Q864SC7Z% zLuZ$2ys8epS*W^B@fW->jA@}?GNTf9hMGN|y^l<#KQhnC2ugnz`n4X`La#H#yhd@e z|I1P?jn=4D_(;(~MA=R3?LP&xz|8)sD3JadhNA>GuKczu5roHzSgNLIoSCQ*Tfnr+ zLp-yZLx;QWWvLCY$PsnuG%RvN9X*e{3LEb-;}2};^+GX54LKuRG8fiZtfX&&3kJx) z%#~q=8$EIjJY9#xI?RSnApMWB(TQ;A9d9ZCWD0QONajJZ$$XUroUxdnOTT;`2{!ab z;nWZ|raG1@>C*snsRJ@zs&A$8G&vRamCUU7Fz*lQ5g&S>l=TmK`@U2N6QveCPUAv@Ac=beB|>dugtYdt6bc*^@niBK1^hff zSZLVWw}4YL1$7u( ztvII{RB8F3y+0dd$?+T6-?J5mnd{lH)CZ_)cb}t4#{1Q;3S)auoemkaZLOxF)P)~ zwo=XL2>3L!rz9MD*W1@gT#eVXYiJb4sjzWU9h!90{9@_|6=NdYKX}&~%^2jVJ__FR zj<9v1-cO|eH>pnZ5YOw>ycbv~YCPoFYJ0#Gule{uz5TVkzJCHRWf5lW_@*Jayb|&11z-w^XXCVpQ0L7RImBOW+!)2E}E4S#gx5z61+bO{PnMsbX6A7^5O>U}{4O^Jz>?KITi` z&QlgkoktHq5_g4`>VYx-Bs@~4tt<5&B;To+_K^1zTIj?sUz(PSGGCd~AJ3CNO4Z~M zPdIcw@qe_?8E@Yml>O$afQpanl z<=*~Uvi7$D4?auQr+8J;-#klr<7rxBV5&`jnY;l9X1zCz2ul}3_$9>#XMkqhMt(Qnga(|l0Pf@EX>F?ib936 z+IVV9qiQ^d`inX8(MrNko|L%{v)zVu$)hxspSRLGZ|ymhy?-Ny=!0grr^aaWV}kRs zk*abF{)6`sifh!+JF0iXbG7}akcA~b5hSB{C}kt`1kH;04K@@jg@K&|+4-CGzxN19 zt}*KU%v&>#diIE=L=9@^E)N^2w*(ru`cU>?z(RID3ylg%>z}CYALOflD2hPXgpywm z$+Jo1^z{hc%+}0s2}@mHf+fCYRH5X*K;H4*^Dqch<85{5_l2ln z`n52xj?#BH_zRg=$srdTLG^>kk@aMG#KK_=?&-uEH>Wh;xEgvQel;kj@aZKSx_M&P z8Mb1GrM~uQL^(_TemtD2@%c34Np+;k;$!T_5PpyF-Ca6T)x%l4K4c>YxP!RB1A_!b zLxA{?93ZPvrGu4*$Ed$^z$$au_#k_og}t!qv@Lf%!9kAC%*$va*Z-_uhXwFRVSt#7 zGw+YSGl))N>5+q$2vi8F>sYz2T3$+TwnAzNq%vLwzfKy0OX4S3K3I%V-i?Qd3B!Ul z_17E$xC6{^L6HE-xGXUTr6=>xJkN+x=J%X-T71(|ea4csl_js|(zo-xAX4v;m%0jqvk>#Or(eT6kz z@?_;SQ1;|(l_*~p7B1Md;)zf^#I+lsDFu> z$w3bJF5OV7smA#eL*rrZkM>~|^gKb$4VbC1CrI5~qNTzmS_oK&^-k4<$tzI!nHfVS z4r1lkgZ-h4t*iCN)~jY60{nf|eOLKIE*)uw)n3gklwmD^?4X5@aheukm^tPJCXZ?+ 
z!C`akEeMEB9>u>FI-K|hDZuV=RUat~xYUtas6;beUFk!vM702`KH>%AOuvB2&mAJ!m>-9h1(TT{SoHvfnU{pE(2DkdNFp2?gpS5haJd?M z*rCJN@0zvHN5|UWkIcOfCb1Y{4>fs|Z=}Q&Hp1;qv?9Nxz7}2PY3YB=57^`LEhVh6 z1U^oDJpDJI6aJ;jp7HkGO`Oc{a$h@rslUw3MrgjluDOg4hyJ{PVgNAcan-#yFk=LK zE1a>SxBrxMgSUS_uR2o&Rpy6elX=wAmk)&Y#*288Obcb&B%?U@GV@w8Oe^w&Bhdq9 zKyTjx>dYLB77lrmdgs8@ zMhVX3qr|}U-qS zU8+^Skn=c{F}IG>$rRbk7~eUdnf2pZYX5lHIH!e%c2)6noX}_-F?#^K;8Z%-At|wU zw9rKFa%S8*&df~{`~=Oo73XG%J>uJ7@lB5SGT&jwOITZDA2&7+X{Q+z{MgJI^I34k zHc-D)GtQ@%QC5aNh;QX5dWdi+j=e0M?UB|vg~05l8rWr#;^{gIeF`}~N+)S_fT*Hg zqeO2VXGR?k@#Y263pp-qa+2>Ba9~L6Gf&e&!q7w^wk9}idmN`1VN`!Cq{6P_nL@I! zYI?&sBv6f)m~lcg=x|L<{?Ru!dTMeKiYH!CO*&yZf>}8 z!$pCb4$0^M#mbxHxh4CDvsqI8`Rd3pd4DSBDmAm$sy!5I%-Du=gjblc3M2e{iHdDu zGZuf?C}ifNSc&9Iz7|6~h-u`MV)EG8O5K0w4yPrtEaTVbJIZ@CQk>4Ns7Sfa}xHwe6!9M_~8}rW7@#9gl(RN6BUm-^ru4A`?JTUZQEyI zL!;h3X$8PHz%rcrIUVKm^(crD3S&Dmh?LM-{dU!yt(v#uoUt3u{p!Yecgq{;}U<+DIQ0P`rSElE6}p&o^49r9yX7eRPgFj@;g(~Cz^xc*`W{k4Y4``PwUaqU$6=%kiw0eA40k+CM#EQ3XF^~Hj zs@@xMC7!OH-0I1^0qM|=pZlzM66cjZEp_Lll~u`NSH`fmMckib&!(|ceM&Aop#4QP zoLjKaZ12|xhxy8+;ZdrNR`}C83Nz0Lf8vt(M1>_Y8^N)bKpU7YMr^tGw-|cEm-E*C zOmKbeHvY0V+M;*IuDdQ0jzl|qI+|OeksZw~UyL*-b}N&UuB9tcmruB6oxenRtzg#6 zmngp|a-;wJB}Mp~xdf9KkmxF021ML1)vi*MJw|h!AJSqJTHm zM=5KZ$;>7{;|!JYPf@GrQ&dqnwYLc0ru3q56z9q3`v!GBrD(=mZ0J7<@%B>0(poyk z)z;yBB~`N+tjbjr8)-V8{?U6V6|TgU*4h^zsd$vrcN;pm)k5R?tvvU_p+E42D^EP? 
zmG18q>p`TVUo~dan^M)lSu8r(F^C1QxBoosf`Y@cwTkb<)zns;l^)en`h0#&4b^B2 zTy18StHxUEu(}>~1A|B>kSa8O>vWl$l#DrgR?cgv?kOF|qjK2X9&{jt#WQM%7i=z9 z(qYa9>lPFx&4mv^1uC5%8|yu=>hxo4HS}9=YBBIt`W`Wy`Cpc;7U07aeIJu@(scUC z?P8r@YXSeJ)ZXPVGYpb`)tPV1w!Zo=cZa8i9)H-lvZXa%aYsu*Ol{Tdj=5LR8AK0H;OVx+zD1L`txltxQ(~Jq`%F@KEnGe4p9{P#!QVC zaUHy8eOFehi+#z0#$pd(pThMO@opAmD|N-bdkdVdJp%1A^h56-+cwFxFqFXU+llPu)etTP*HVpd2*IJp49i(>oxc zwwC^Y?;(^PGKX^prNx=eRjNQk zD@kgjLU^r{Dg-^?j42gI&j{oE;=yY6hzxJPF5lI@m$X#?8;2|$~R z;#f!>$oVSCxzAl&>`xW0FD|{8#(!v5b#Y~Kc1>}QJ5w;HxDuiv5GI;3NBvy)_gr`b zWXGFo7@GX(=SNV}V_=nlc}X@J`GX1A0|eMSWi!}R*lcrgdBK-3+~Hz>^${5DSMbME z#U7H;Cup}3EgJFj2s_?`z8qr?Ka>~rPIm*vQjMVf1n!(R>*M-jzxz^*n8K;L*i${1 z{;dniGK_3Ff<*ciGU+LV z^3w?AXHdzugGGbj_Ym>R`WQ)Rr%@f-2B%?(_!@FR*Gi;J%AM0^pW$+^#bk}6nu^meK(H~ z@nE~T`7fDlfGK?Xb<@n|o|*--?|RTBeHsLa&r9f+z@P-*E^?mFKY;&?!?WY!a|__} z5eJ0t6FDe+W0nv3UOQ!?GlUrGq2fjQH{F8a$r8D50xc?~GXEwKDkJjvaH$0iE zxnYT1&5e=eYHq6{&v@mQMa}WXJvGB+U&NsQRkl2pYdxyFRDYfA+j-#M%L9L!^QT`H zT5X=VzXkYGn_r}5FGFq1CHGT+UuDZ{$!8OX%T=scTS7)G51&1G;9t)J|32VLZE;m1 z^go&h|F?PIrvRUiypS%SwHvjinAmcScTFDn4SC?(^1#0cILY;i(k=a)%!B_;z^_o2 zTgu_DtTW#eeEfosT>HH);N=4TfS{~^e?O*V=L^YaK1@LR+%HNTpC>_6C*a2fiChbI z=7ImMfLDn?xJStSP9FH6fFBk3v^JzRCg9_jTF)2`Z_A-D_m?Q?d>x248sNG7^;v;G zDDY`5NNsN({BH>S?ZS|JO$huE0Y4^$@bw+wE=+D)h9J zTwNg@paoa!ZL#iMk?8K0Xpi3B-KB19jO056!I(ESMH+5yX}9=PRF<`Lcg6LdSa(m^ z+H%{-(Xy(1nB#-e(AEZQ3B(POnXA5OP*OLu1vx3$_(w`23o zjVmLKA?_PkzOjZzWD~z8lrum+LUsq3{-@-V=WC_svqo66N?KOfaC$ndibtje^#zev z#;0Y?1##ELBR$=59qFYAbClK&8WA6hs92C9QOM}$i(Cs+nUeNiG?x^}g9wMI5a$O@MsttP;FrCpw94gA=QC&eB9o`m40#wiVR zeq{m1){Q}w1uW>AceM8gsYJxvBfQq|EW@&DXE()FlR6^;$W%AafUerrhUu_ZSufVX zw1Y3l^(TCwqIN=T-3d;=^O5jNu|J|l@O8K(e4p4CI`Plg;CB4gVxdm>BucPVPUe9( zW4}X9;!C-6vChv0zX9uW3BLgyiQkh4J}TgJv!AxTV8f^XK#+Xiu)#@x2|sCrSJ~i~ zi~S*;Y;C|L+cE){z70-im9jl% z<3s$V+!C?>bn*$<;C4RW7jQW)$>&`MT#omI*vAsSJr3Wt!R>ZlDE6y_zsaU&+y>Wd z@E_RV_Bec5z@2_^i~TL}x7*=T8+@aU{}lr6 z*#A52cB_Cp?Y}(_{=eJsH{0aCE8tE$e`v$E+qp<2zthfh1f2A-+u3V_+wHJ851*@S 
z_$zICUTuR{+2EhC!R>Ll$p*jKhQHMYx8rZK!R>j}VuKSt{kIRbS>im!>BogOxSjvq z0#0_Ooc80Qc8@qOBR-$SCF66O2ztV&`9-#W$^$Ay$(Zlp6cXMC=-z+CQi=u7_OYidq>sXysMv=U!TvorAR6J?m=6TFImXu^$CaCz=0?IZcnoI#CzM@_;@tw)7yLQTT` z{0X07#a+VXIk^DyFCL5JgY8a!zYUkH!_uAv|C|j^z9;xL8{Cd>_pcq_9uHD4;=}(H zOYriGJHc-keY*KYpT;F)okF+6Taer9~1cBa=_(xfPWTn(x3E^Uou{F;LGm-|KY%2K}w@} zM1s+4y5#qPMGm-3IOc%M?*O+5xYQ>|%+ct3D>cbqeh2ue11`SdiZpG8_h2_cqy!MLU|y^yqF~DdSx_5K_u^#N!HwmL*i)80VY!$i zJ&X9yq?yuXN@{*3avr_W0iHDs8j(%q{MZ~Z#~^c3(9zz-^S7|O$)aTVU|CQpyQ8H= z90Nw+h>j@O*$^scj`x4)0ASiE|L1kUsYy&v^a$hA&poYk;VJWd^nrynB4KShl(_Ku zh7}XsB&>u7=7mC*7)VfSODp#UII#o%4xDU`G9nCKm zA2fdIN_)t47r{xsn9k6%+KWQ2PXfeU(o67PqLWMh59x3Ut=zhC|I?9{q?ht({w0`G zzWl`fSAt&N{IVnNv^MAVqsh(Q?317kE|y1AkT`l5qNn-V-kkRM7rf;3b|m!Y2>F!` zxKqt<<9#ms9TWDGaX@~ey@b<#{|;C#{nzIizf=C(dFUGiz05zL@#Y-=DL`}S-$RFr zXmZm%kJATP^gdzmiS@z9s0L^2Y_e+!slFslVj&T|qy~p+JBN zOO7V#b-$1=@dcYHdinfh9{CSdSr~GhB%glMBl$`A zE1)O)7vOGhKNRwb#@?Lcp995n(H|A`QjX*y<;&+H(C6~sV3mcz1S09b6qlrzd_#HU zj|usuf?nPvy?kDkN4}@p!q_elN&Yfil3vPh0X@OoGzeSs3ByTz`O5j+F7%gmPU1;= znxE{AY)wtxV}jm-X8+bX9VcE%I*ECYpm(@mVS@e%v7eLpPW=ypK9~R6g#C8oJvFjl zDK1Gb?MF3TprDs+$7-zp-#cD9^`~d2?nLPeSoBAQz)#_k)}K!Kw5D)~;DpDB0PTm4 J3EV0F{{fEz8Mjz-({@5*4ufZC;|sLOk)a^vFThe zw%TmosovOV_ky%nT~zC;HgEJYOMj&ss#aHZ?Pl(%)->JF3~dQwFJip%@-b8T)5_fB z*!T|gB=TNC-zAg+(@;N$@+cvub&~!5Q|7Oft`K7t_?3+cWGWk#(P|9cXF*edKaL1V zVOH2_L=TF}kS*h94gzRZ*w`Ul1>{0uUbJS95U*kcty=>2Okut>!%v7)-z0w_PWw&x zEG+a)VSOmi3gEMa`4Yd6IN7(cug{2^Xp{dZ;w{99&%D4(RVRv=^>lr~ag&D2Q`_^C zHQh+8;68Ypw(mDXIFHLRM_&}tR>zBz)N+8f{QMB5-T{u~+lLu#+u05~otoxy-yT>o zRPegoN@LqglZMd4UZEz@4c?E!fCJg~r667JMG=p!HgxNbx;<8;AHFygxs-0j+;+WS zvk~QL;pJq3`l|v1@Jf!LZVZuh*S^U)L=!NO>ICzP^xH(UUCOJv+6J6H!a7 zyyj5>wN1zQmjVa*QhIUU#5e(uID2&dt_ctZ;dHaB8Jgq5N2894z{FEUo2kj5o7!!V zX`IU~(qhH9kgxTr(-^fL0U zBTnm$FpE$j9^n*E_AO8ZCdYq3g50khuLG}a{uGaChC*7%Q6vxML+M0fNx21(Z5%nRIfXHGesY{>~JC55mN~5Z&p{2CjjlKR*oQ>sO zmlPyGN?gg9pdO0EiJrL;F6oVmR?WRP&IpxKkf^vIwZXhMJ5x{AyD05Q_RRj~z5l#< 
zZ}#J}^u&>#K!ETTAcslkic{!!bjArfOaR(SlJ6z6$&=YBb>R;=ueN$$8h}Z)Io=VI5t~S@~O~2ug6LvY#8bo@m@uz*5&SzR#In%s# zY{`(-<}YgFwzT*&3a$3`Q}Kz!%mW=#wQs2!cFMK8Z(iPWiW(Ms~`l)aC>4W6mw?U65qs!)Yj8LS3QYB>t7A+()razD{=U(<$B(8`Z$<{P z({N^~$@r&qC0V{s)qqQs)FI1`bfXe=I$M@K($3`f8OOvl2Z z<}Ul4Kz507(t|)q95^kW%VWxiU-02qeE2mV9)g5=t-HsE7kv1N58u+` zt-r&EzX+V>hvkKy3y3}CV_)>)UjW}J)SXvoh4N0_IVX0hX6Sh>uiLtkt5%8j!Q^l* zZ;d*&DKWCre*79&d}1`ld)mlw)I>|b2Qia z59j|+fY16WjMQtn`oX+m>*b0y6f0IW3qr0K_MEBlkTR1~Q7txFG)$wWR~WW6TQ8>5 zaZ9V7NM+Nc%JN_tl>*S*(?^0(;eF8xztPKiUCI9VP<)R6@_YKhe zXqIRi6frlW@v|+`>pcAzIP)ipb#d0#r=hoNPG91qno-de+nBeBf{$gQ zOj{PAK&hLR@{CcrNR-^%?5tsei8*H*%F)T}AiZ~qLcNWWR;cN-23!5fy9l^q>D_Pi z1TbtXd_T@**E@ADaD3||*gWrKEE=>^ELnKZ;X3@iL_g%+?WcV6Wp ze&omDIT)apJFYjF|2#Kjy?g(sfqT`jgf}TPzAvmF`f`6>KfHIkau4hq=3j!b*!aFV h^NJnkzGv}Q*~U=rDfqe9N9MJDgniQ9btT5V{(mr@L?{3N literal 0 HcmV?d00001 diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o new file mode 100644 index 0000000000000000000000000000000000000000..39b27b58dc6ac35f37ac6aad45e41e6a13352ee1 GIT binary patch literal 952 zcmbW0y-ve05XUcV`5F-m5=%N2Bx*U(MuGuFs!A8c))^%sq7o#Ih=bAvUZ=0e6TmsK z71vG&ILW@dpa18x?Br|yaNlzrQ0BlLw3(s+l?HCwnkF|iH-+3~@dtGwm{T+O72Orp+vz)!L zm-W*sFDnw~IRGivDGwv`{Jrrj74f9vK%PS$0*Q(ONX*hS$&h7?CuH%s92Y!{U+q4B zBxk4>cZDyoWx-a~Cff7WaEP;D<_!h)sy&(YBa|HC_;39s)`~qT4&4Ho_&UT`^;dqX zFF7?6sD#ehJfPj*O5U_g;VX^^TGQFWjLk>fZdYGOyW0|^5R zWq_?CVq)cUln=ni=)i(lk;*&!F121v2QISj?)UC@|NQcPee=njrU8ovkKoi}6yURd zYS$982A5$08V5fM$LN1LkMG98_rg)#>f(IiP&SD8^=!=JrQ-HdA;HUp z&yhY$;jHK2*GXQCp1TPEUH@>UUMr2FE>I$@Izlsz|&z z6AiE@v6W1p-0?@sV-RP$yEqsI{ovK4+3)$Iv2ce&Xzp#@bK6mAd#AFAV3lLaJ--*P zzs7)_*p9oQ=d?>^*9qHioX~5!f$xsO(DU6%8kC>Kx*s+DXJO!aQ51yrK;D}5Gir8Z zQJIc}H;w%-$mh>VDwpUSakLpXGWc|6iB%Px<3L1{LF}}oj>A-U*$TbBBVunHgJ?Uk z1ELj0kN{%j_j~(Ze*(e{1_RHRA$|~hVqax8Nv6Dvw&dOdosxfed)-DTlmfvK61I>1%KsF 
z>jz%eBq<62Qm!)*#i-(~)o1OAv=%@fBM||K%MwV!vndA}N$B1&G|Hh(22 zSRn5TUtq)7M$`qy^EL1kXUXgvV$G^M+4X0bvXA4x^_N&H_oO&XV=4$uh`#Qxb!IR# z_1z2%U)O1Lh>m@Bxi(RW3!SLcu9tD&S68e2FQxL1_Ham*cH9d7JN^BA)#ym&X|enN E0uco>`Tzg` literal 0 HcmV?d00001 diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o new file mode 100644 index 0000000000000000000000000000000000000000..668398ec14ee97fd4833644311f82c8610af68a5 GIT binary patch literal 1264 zcmbtUO-sW-5S_Mu9t6D!Ug}8&T{g9i)Pn>{EQ0i)^y0ygCTm)3(~xYWUi2sFPt#wZ zvv!wkxAD}4FmK+>n;C}Pyt}v8)k+1_slXK!o?-z$s|9xcpaa{m0nqJqF6+nrK_2tG z-Xcvx&+Poqv)(M5&?d3y$vjYRP|Kz^Z5O4Mkm$Vz-#<$MBxpf|W;{w>`O1Dseplux9`ZEEy(l3;l7RQnI}gIFDY%P}aep+<8dj8e z*(i?2j89{46ldIz19sE9r(T0bEM}=c_V_Sme#k+zOII#v zwbg|4!MQOYknDBB{Q+8@&h1bk&So4)=yM;)Aj^OR)5(Oz$i-96NVnHN!(9Z=a-*x6 z1C)z9hBtTNI166km~Y__LxMAJmQzR;kf}eysJIjV>Gv1Z^IV-eMMu=6I*HhkzHRWT zzp^ciE3Bu!=#GZX+&zq$vy1ddK0#O03TWzuU-nvOQuigT*wH3_>iwv_Y$I3Je<&N7 IKMu41FF{$3ZvX%Q literal 0 HcmV?d00001 diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o new file mode 100644 index 0000000000000000000000000000000000000000..f04058033f319ca73fb68509be2bfcd551638f5e GIT binary patch literal 1784 zcmbVMF>ljQ5WbK?LusoPF;pr;CIk|FvP&>&>s)@jU{5(ogH{7ctj5wwK75`fg8Q!nfL2jBIah-L2m|XlJp= z>T9gQj1?=(J@b`mF{8mu6#PD!i~v+iCnmfwbb(mKN@Y@oi)AoYnLYYE}+xa(uZ z_2I}|f9%F?mnT+h5cxb=>pMMu)C3uIx?H3~?uDJuQ#H(9wZuAogqIN1uXqfJ6Mt+BB5s4v_$uv8=?9Vjc|k;?=>6u^8} zxWL+J3OT^CI0}0_Is)bm`h6Z_N<0vpZSNke;22&HSkC5w(@ESuA8+@+MjeUtP3SMp zMvO$guaP(T#+ETaUxBR>q^`+N#*jXp8SSIq*^To){${tQEpLgqeC@yejt<7 zeu|^}CI-spSA4}&`pg|8XyYQjniI`j6yH*S)T)1_cMw;0G4(^yDcE}MbII5H`Y1Vi zU;6w{F|X|YHspb-d!qXzuHq|v5i$Mj5rRDls&gQWMDL$`y?4s{K|d%Pk}!+*WnB9H Pgq7`IlSTDEdVT-DmubLM 
literal 0 HcmV?d00001 diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/CXX.includecache b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/CXX.includecache new file mode 100644 index 0000000..e5872a2 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/CXX.includecache @@ -0,0 +1,888 @@ +#IncludeRegexLine: ^[ ]*#[ ]*(include|import)[ ]*[<"]([^">]+)([">]) + +#IncludeRegexScan: ^.*$ + +#IncludeRegexComplain: ^$ + +#IncludeRegexTransform: + +..//cds/algo/atomic.h +cds/details/defs.h +- +cds/user_setup/cache_line.h +- +boost/version.hpp +- +boost/atomic.hpp +- +cds/compiler/cxx11_atomic.h +- +atomic +- + +..//cds/algo/backoff_strategy.h +utility +- +thread +- +chrono +- +cds/compiler/backoff.h +- +cds/algo/backoff_strategy.h +- +cds/algo/backoff_strategy.h +- + +..//cds/algo/base.h +cds/details/defs.h +- + +..//cds/algo/bitop.h +cds/details/defs.h +- +cds/compiler/bitop.h +- + +..//cds/algo/elimination_tls.h +cds/algo/base.h +- + +..//cds/algo/int_algo.h +cds/algo/bitop.h +- + +..//cds/compiler/backoff.h +cds/details/defs.h +- +cds/compiler/vc/x86/backoff.h +- +cds/compiler/vc/amd64/backoff.h +- +cds/compiler/gcc/x86/backoff.h +- +cds/compiler/gcc/amd64/backoff.h +- +cds/compiler/gcc/ia64/backoff.h +- +cds/compiler/gcc/sparc/backoff.h +- +cds/compiler/gcc/ppc64/backoff.h +- +cds/compiler/gcc/arm7/backoff.h +- +cds/compiler/gcc/arm8/backoff.h +- + +..//cds/compiler/bitop.h +cds/compiler/vc/x86/bitop.h +- +cds/compiler/vc/amd64/bitop.h +- +cds/compiler/gcc/x86/bitop.h +- +cds/compiler/gcc/amd64/bitop.h +- +cds/compiler/gcc/sparc/bitop.h +- +cds/compiler/gcc/ia64/bitop.h +- +cds/compiler/gcc/ppc64/bitop.h +- +cds/details/bitop_generic.h +- + +..//cds/compiler/clang/defs.h +cds/compiler/gcc/compiler_macro.h +- +cds/compiler/gcc/compiler_barriers.h +- + +..//cds/compiler/cxx11_atomic.h +type_traits +- +cds/details/defs.h +- 
+cds/details/aligned_type.h +- +cds/compiler/vc/x86/cxx11_atomic.h +- +cds/compiler/vc/amd64/cxx11_atomic.h +- +cds/compiler/gcc/x86/cxx11_atomic.h +- +cds/compiler/gcc/amd64/cxx11_atomic.h +- +cds/compiler/gcc/ia64/cxx11_atomic.h +- +cds/compiler/gcc/sparc/cxx11_atomic.h +- +cds/compiler/gcc/ppc64/cxx11_atomic.h +- + +..//cds/compiler/defs.h +cds/compiler/vc/defs.h +- +cds/compiler/gcc/defs.h +- +cds/compiler/icl/defs.h +- +cds/compiler/clang/defs.h +- +cds/compiler/feature_tsan.h +- + +..//cds/compiler/feature_tsan.h + +..//cds/compiler/gcc/amd64/backoff.h + +..//cds/compiler/gcc/amd64/bitop.h + +..//cds/compiler/gcc/amd64/cxx11_atomic.h +cstdint +- +cds/compiler/gcc/x86/cxx11_atomic32.h +- + +..//cds/compiler/gcc/arm7/backoff.h + +..//cds/compiler/gcc/arm8/backoff.h + +..//cds/compiler/gcc/compiler_barriers.h + +..//cds/compiler/gcc/compiler_macro.h + +..//cds/compiler/gcc/defs.h +cds/compiler/gcc/compiler_macro.h +- +cds/compiler/gcc/compiler_barriers.h +- + +..//cds/compiler/gcc/ia64/backoff.h + +..//cds/compiler/gcc/ia64/bitop.h + +..//cds/compiler/gcc/ia64/cxx11_atomic.h +cstdint +- + +..//cds/compiler/gcc/ppc64/backoff.h + +..//cds/compiler/gcc/ppc64/bitop.h + +..//cds/compiler/gcc/sparc/backoff.h + +..//cds/compiler/gcc/sparc/bitop.h + +..//cds/compiler/gcc/sparc/cxx11_atomic.h +cstdint +- + +..//cds/compiler/gcc/x86/backoff.h + +..//cds/compiler/gcc/x86/bitop.h + +..//cds/compiler/gcc/x86/cxx11_atomic.h +cstdint +- +cds/compiler/gcc/x86/cxx11_atomic32.h +- + +..//cds/compiler/gcc/x86/cxx11_atomic32.h +cstdint +- +cds/details/is_aligned.h +- + +..//cds/compiler/icl/compiler_barriers.h +intrin.h +- +atomic +- + +..//cds/compiler/icl/defs.h +cds/compiler/icl/compiler_barriers.h +- + +..//cds/compiler/vc/amd64/backoff.h +intrin.h +- + +..//cds/compiler/vc/amd64/bitop.h +intrin.h +- +intrin.h +- + +..//cds/compiler/vc/amd64/cxx11_atomic.h +intrin.h +- +emmintrin.h +- +cds/details/is_aligned.h +- + +..//cds/compiler/vc/compiler_barriers.h +intrin.h +- +atomic 
+- + +..//cds/compiler/vc/defs.h +stdlib.h +- +crtdbg.h +- +cds/compiler/vc/compiler_barriers.h +- + +..//cds/compiler/vc/x86/backoff.h +intrin.h +- + +..//cds/compiler/vc/x86/bitop.h +intrin.h +- + +..//cds/compiler/vc/x86/cxx11_atomic.h +intrin.h +- +emmintrin.h +- +cds/details/is_aligned.h +- + +..//cds/container/details/base.h +cds/intrusive/details/base.h +- + +..//cds/container/vyukov_mpmc_cycle_queue.h +cds/container/details/base.h +- +cds/opt/buffer.h +- +cds/opt/value_cleaner.h +- +cds/algo/atomic.h +- +cds/details/bounded_container.h +- + +..//cds/details/aligned_type.h +cds/details/defs.h +- + +..//cds/details/allocator.h +type_traits +- +memory +- +cds/details/defs.h +- +cds/user_setup/allocator.h +- + +..//cds/details/bitop_generic.h +cstdlib +- + +..//cds/details/bounded_container.h + +..//cds/details/defs.h +stddef.h +- +stdlib.h +- +assert.h +- +cstdint +- +exception +- +stdexcept +- +string +- +memory +- +cds/version.h +- +cds/init.h +- +cds/gc/hp.h +- +cds/gc/hp.h +- +cds/gc/hp.h +- +cds/compiler/defs.h +- + +..//cds/details/is_aligned.h +cds/details/defs.h +- + +..//cds/details/lib.h +cds/details/defs.h +- + +..//cds/details/marked_ptr.h +cds/algo/atomic.h +- + +..//cds/details/static_functor.h + +..//cds/details/throw_exception.h +cds/details/defs.h +- +stdio.h +- + +..//cds/gc/details/hp_common.h +cds/algo/atomic.h +- +cds/gc/details/retired_ptr.h +- + +..//cds/gc/details/retired_ptr.h +cds/details/defs.h +- +cds/details/static_functor.h +- + +..//cds/gc/dhp.h +exception +- +cds/gc/details/hp_common.h +- +cds/details/lib.h +- +cds/threading/model.h +- +cds/intrusive/free_list_selector.h +- +cds/details/throw_exception.h +- +cds/details/static_functor.h +- +cds/details/marked_ptr.h +- +cds/user_setup/cache_line.h +- + +..//cds/gc/hp.h +exception +- +cds/gc/details/hp_common.h +- +cds/details/lib.h +- +cds/threading/model.h +- +cds/details/throw_exception.h +- +cds/details/static_functor.h +- +cds/details/marked_ptr.h +- 
+cds/user_setup/cache_line.h +- + +..//cds/init.h +cds/details/defs.h +- +cds/os/topology.h +- +cds/threading/model.h +- +cds/details/lib.h +- + +..//cds/intrusive/details/base.h +cds/intrusive/details/node_traits.h +- +cds/details/allocator.h +- +cds/algo/backoff_strategy.h +- + +..//cds/intrusive/details/node_traits.h +cds/intrusive/options.h +- + +..//cds/intrusive/free_list.h +cds/algo/atomic.h +- +cds/intrusive/free_list.h +- + +..//cds/intrusive/free_list_selector.h +cds/details/defs.h +- +cds/intrusive/free_list_tagged.h +- +cds/intrusive/free_list.h +- + +..//cds/intrusive/free_list_tagged.h +cds/algo/atomic.h +- +cds/intrusive/free_list_tagged.h +- + +..//cds/intrusive/options.h +cds/opt/options.h +- +cds/details/allocator.h +- + +..//cds/opt/buffer.h +memory.h +- +cds/details/defs.h +- +cds/user_setup/allocator.h +- +cds/details/allocator.h +- +cds/algo/int_algo.h +- + +..//cds/opt/options.h +cstdlib +- +cds/details/aligned_type.h +- +cds/user_setup/allocator.h +- +cds/user_setup/cache_line.h +- +cds/algo/atomic.h +- +cds/opt/options.h +- +type_traits +- +cds/opt/options.h +- + +..//cds/opt/value_cleaner.h +cds/details/defs.h +- + +..//cds/os/aix/alloc_aligned.h +cds/os/posix/alloc_aligned.h +- + +..//cds/os/aix/topology.h +cds/os/details/fake_topology.h +- +unistd.h +- +sys/processor.h +- + +..//cds/os/alloc_aligned.h +cds/details/defs.h +- +cds/os/win/alloc_aligned.h +- +cds/os/linux/alloc_aligned.h +- +cds/os/sunos/alloc_aligned.h +- +cds/os/hpux/alloc_aligned.h +- +cds/os/aix/alloc_aligned.h +- +cds/os/free_bsd/alloc_aligned.h +- +cds/os/posix/alloc_aligned.h +- +memory +- +cds/details/is_aligned.h +- +cds/algo/int_algo.h +- +cds/details/throw_exception.h +- + +..//cds/os/details/fake_topology.h +cds/details/defs.h +- +cds/threading/model.h +- + +..//cds/os/free_bsd/alloc_aligned.h +cds/os/posix/alloc_aligned.h +- + +..//cds/os/free_bsd/topology.h +cds/os/details/fake_topology.h +- +sys/types.h +- +sys/sysctl.h +- + +..//cds/os/hpux/alloc_aligned.h 
+cds/os/libc/alloc_aligned.h +- + +..//cds/os/hpux/topology.h +sys/mpctl.h +- + +..//cds/os/libc/alloc_aligned.h +stdlib.h +- + +..//cds/os/linux/alloc_aligned.h +cds/os/libc/alloc_aligned.h +- +cds/os/posix/alloc_aligned.h +- + +..//cds/os/linux/topology.h +cds/details/defs.h +- +cds/threading/model.h +- +sys/syscall.h +- +sched.h +- + +..//cds/os/osx/topology.h +cds/os/details/fake_topology.h +- + +..//cds/os/posix/alloc_aligned.h +stdlib.h +- + +..//cds/os/posix/fake_topology.h +cds/os/details/fake_topology.h +- +unistd.h +- + +..//cds/os/posix/thread.h +pthread.h +- +signal.h +- + +..//cds/os/sunos/alloc_aligned.h +cds/os/libc/alloc_aligned.h +- + +..//cds/os/sunos/topology.h +sys/processor.h +- +unistd.h +- + +..//cds/os/thread.h +thread +- +cds/details/defs.h +- +cds/os/win/thread.h +- +cds/os/posix/thread.h +- + +..//cds/os/topology.h +cds/details/defs.h +- +cds/os/win/topology.h +- +cds/os/linux/topology.h +- +cds/os/sunos/topology.h +- +cds/os/hpux/topology.h +- +cds/os/aix/topology.h +- +cds/os/free_bsd/topology.h +- +cds/os/osx/topology.h +- +cds/os/posix/fake_topology.h +- + +..//cds/os/win/alloc_aligned.h +malloc.h +- + +..//cds/os/win/thread.h +windows.h +- + +..//cds/os/win/topology.h +cds/details/defs.h +- +windows.h +- + +..//cds/threading/details/_common.h +cds/urcu/details/gp_decl.h +- +cds/urcu/details/sh_decl.h +- +cds/algo/elimination_tls.h +- + +..//cds/threading/details/auto_detect.h +cds/threading/details/msvc.h +- +cds/threading/details/wintls.h +- +cds/threading/details/pthread.h +- +cds/threading/details/gcc.h +- +cds/threading/details/cxx11.h +- + +..//cds/threading/details/cxx11.h +cds/threading/details/cxx11_manager.h +- + +..//cds/threading/details/cxx11_manager.h +cds/threading/details/_common.h +- + +..//cds/threading/details/gcc.h +cds/threading/details/gcc_manager.h +- + +..//cds/threading/details/gcc_manager.h +cds/threading/details/_common.h +- + +..//cds/threading/details/msvc.h +cds/threading/details/msvc_manager.h +- + 
+..//cds/threading/details/msvc_manager.h +cds/threading/details/_common.h +- + +..//cds/threading/details/pthread.h +cds/threading/details/pthread_manager.h +- + +..//cds/threading/details/pthread_manager.h +system_error +- +stdio.h +- +pthread.h +- +cds/threading/details/_common.h +- +cds/details/throw_exception.h +- + +..//cds/threading/details/wintls.h +stdio.h +- +cds/threading/details/wintls_manager.h +- + +..//cds/threading/details/wintls_manager.h +system_error +- +stdio.h +- +cds/threading/details/_common.h +- +cds/details/throw_exception.h +- + +..//cds/threading/model.h +cds/threading/details/_common.h +- +cds/user_setup/threading.h +- +cds/threading/details/auto_detect.h +- + +..//cds/urcu/details/base.h +cds/algo/atomic.h +- +cds/gc/details/retired_ptr.h +- +cds/details/allocator.h +- +cds/os/thread.h +- +cds/details/marked_ptr.h +- +cds/urcu/general_buffered.h +- +cds/urcu/general_buffered.h +- + +..//cds/urcu/details/gp.h +cds/urcu/details/gp_decl.h +- +cds/threading/model.h +- + +..//cds/urcu/details/gp_decl.h +cds/urcu/details/base.h +- +cds/details/static_functor.h +- +cds/details/lib.h +- +cds/user_setup/cache_line.h +- + +..//cds/urcu/details/gpb.h +mutex +- +limits +- +cds/urcu/details/gp.h +- +cds/algo/backoff_strategy.h +- +cds/container/vyukov_mpmc_cycle_queue.h +- + +..//cds/urcu/details/sh.h +memory.h +- +cds/urcu/details/sh_decl.h +- +cds/threading/model.h +- + +..//cds/urcu/details/sh_decl.h +cds/urcu/details/base.h +- +cds/details/static_functor.h +- +cds/details/lib.h +- +cds/user_setup/cache_line.h +- +signal.h +- + +..//cds/urcu/general_buffered.h +cds/urcu/details/gpb.h +- + +..//cds/user_setup/allocator.h +memory +- +cds/os/alloc_aligned.h +- + +..//cds/user_setup/cache_line.h + +..//cds/user_setup/threading.h + +..//cds/version.h + +/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/dhp.cpp +algorithm +- +vector +- +cds/gc/dhp.h +- +cds/os/thread.h +- + 
+/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/dllmain.cpp +cds/details/defs.h +- +cds/os/thread.h +- +vld.h +- +cds/os/topology.h +- +cds/algo/bitop.h +- +cds/os/win/topology.h +- + +/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/hp.cpp +algorithm +- +vector +- +cds/gc/hp.h +- +cds/os/thread.h +- + +/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/init.cpp +cds/init.h +- +cds/algo/atomic.h +- +cds/algo/backoff_strategy.h +- +cds/threading/details/msvc_manager.h +- +cds/threading/details/wintls_manager.h +- +cds/threading/details/gcc_manager.h +- +cds/threading/details/pthread_manager.h +- +cds/threading/details/cxx11_manager.h +- + +/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/thread_data.cpp +cds/threading/details/_common.h +- +cds/gc/hp.h +- +cds/gc/dhp.h +- + +/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_hpux.cpp +cds/os/topology.h +- +cds/algo/atomic.h +- +limits +- + +/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_linux.cpp +cds/os/topology.h +- +thread +- +unistd.h +- +fstream +- + +/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_osx.cpp +cds/os/topology.h +- +sys/types.h +- +sys/sysctl.h +- + +/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/urcu_gp.cpp +cds/urcu/details/gp.h +- + +/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/urcu_sh.cpp +cds/urcu/details/sh.h +- + diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/DependInfo.cmake 
b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/DependInfo.cmake new file mode 100644 index 0000000..31accaf --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/DependInfo.cmake @@ -0,0 +1,37 @@ +# The set of languages for which implicit dependencies are needed: +set(CMAKE_DEPENDS_LANGUAGES + "CXX" + ) +# The set of files for implicit dependencies of each language: +set(CMAKE_DEPENDS_CHECK_CXX + "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/dhp.cpp" "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/dhp.cpp.o" + "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/dllmain.cpp" "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/dllmain.cpp.o" + "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/hp.cpp" "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/hp.cpp.o" + "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/init.cpp" "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/init.cpp.o" + "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/thread_data.cpp" "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/thread_data.cpp.o" + "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_hpux.cpp" 
"/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/topology_hpux.cpp.o" + "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_linux.cpp" "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/topology_linux.cpp.o" + "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_osx.cpp" "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/topology_osx.cpp.o" + "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/urcu_gp.cpp" "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/urcu_gp.cpp.o" + "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/urcu_sh.cpp" "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/urcu_sh.cpp.o" + ) +set(CMAKE_CXX_COMPILER_ID "GNU") + +# The include file search paths: +set(CMAKE_CXX_TARGET_INCLUDE_PATH + "." + "../" + ) + +# Pairs of files generated by the same build rule. +set(CMAKE_MULTIPLE_OUTPUT_PAIRS + "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/bin/libcds.so" "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/bin/libcds.so.2.3.2" + ) + + +# Targets to which this target links. +set(CMAKE_TARGET_LINKED_INFO_FILES + ) + +# Fortran module output directory. 
+set(CMAKE_Fortran_TARGET_MODULE_DIR "") diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/build.make b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/build.make new file mode 100644 index 0000000..b1ebf22 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/build.make @@ -0,0 +1,360 @@ +# CMAKE generated file: DO NOT EDIT! +# Generated by "Unix Makefiles" Generator, CMake Version 3.5 + +# Delete rule output on recipe failure. +.DELETE_ON_ERROR: + + +#============================================================================= +# Special targets provided by cmake. + +# Disable implicit rules so canonical targets will work. +.SUFFIXES: + + +# Remove some rules from gmake that .SUFFIXES does not remove. +SUFFIXES = + +.SUFFIXES: .hpux_make_needs_suffix_list + + +# Suppress display of executed commands. +$(VERBOSE).SILENT: + + +# A target that is always out of date. +cmake_force: + +.PHONY : cmake_force + +#============================================================================= +# Set environment variables for the build. + +# The shell in which to execute make rules. +SHELL = /bin/sh + +# The CMake executable. +CMAKE_COMMAND = /usr/bin/cmake + +# The command to remove a file. +RM = /usr/bin/cmake -E remove -f + +# Escaping for special characters. +EQUALS = = + +# The top-level source directory on which CMake was run. +CMAKE_SOURCE_DIR = /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2 + +# The top-level build directory on which CMake was run. +CMAKE_BINARY_DIR = /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release + +# Include any dependencies generated for this target. +include CMakeFiles/cds.dir/depend.make + +# Include the progress variables for this target. 
+include CMakeFiles/cds.dir/progress.make + +# Include the compile flags for this target's objects. +include CMakeFiles/cds.dir/flags.make + +CMakeFiles/cds.dir/src/init.cpp.o: CMakeFiles/cds.dir/flags.make +CMakeFiles/cds.dir/src/init.cpp.o: ../src/init.cpp + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --progress-dir=/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles --progress-num=$(CMAKE_PROGRESS_1) "Building CXX object CMakeFiles/cds.dir/src/init.cpp.o" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o CMakeFiles/cds.dir/src/init.cpp.o -c /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/init.cpp + +CMakeFiles/cds.dir/src/init.cpp.i: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Preprocessing CXX source to CMakeFiles/cds.dir/src/init.cpp.i" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -E /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/init.cpp > CMakeFiles/cds.dir/src/init.cpp.i + +CMakeFiles/cds.dir/src/init.cpp.s: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/cds.dir/src/init.cpp.s" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/init.cpp -o CMakeFiles/cds.dir/src/init.cpp.s + +CMakeFiles/cds.dir/src/init.cpp.o.requires: + +.PHONY : CMakeFiles/cds.dir/src/init.cpp.o.requires + +CMakeFiles/cds.dir/src/init.cpp.o.provides: CMakeFiles/cds.dir/src/init.cpp.o.requires + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/init.cpp.o.provides.build +.PHONY : CMakeFiles/cds.dir/src/init.cpp.o.provides + +CMakeFiles/cds.dir/src/init.cpp.o.provides.build: CMakeFiles/cds.dir/src/init.cpp.o + + 
+CMakeFiles/cds.dir/src/hp.cpp.o: CMakeFiles/cds.dir/flags.make +CMakeFiles/cds.dir/src/hp.cpp.o: ../src/hp.cpp + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --progress-dir=/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles --progress-num=$(CMAKE_PROGRESS_2) "Building CXX object CMakeFiles/cds.dir/src/hp.cpp.o" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o CMakeFiles/cds.dir/src/hp.cpp.o -c /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/hp.cpp + +CMakeFiles/cds.dir/src/hp.cpp.i: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Preprocessing CXX source to CMakeFiles/cds.dir/src/hp.cpp.i" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -E /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/hp.cpp > CMakeFiles/cds.dir/src/hp.cpp.i + +CMakeFiles/cds.dir/src/hp.cpp.s: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/cds.dir/src/hp.cpp.s" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/hp.cpp -o CMakeFiles/cds.dir/src/hp.cpp.s + +CMakeFiles/cds.dir/src/hp.cpp.o.requires: + +.PHONY : CMakeFiles/cds.dir/src/hp.cpp.o.requires + +CMakeFiles/cds.dir/src/hp.cpp.o.provides: CMakeFiles/cds.dir/src/hp.cpp.o.requires + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/hp.cpp.o.provides.build +.PHONY : CMakeFiles/cds.dir/src/hp.cpp.o.provides + +CMakeFiles/cds.dir/src/hp.cpp.o.provides.build: CMakeFiles/cds.dir/src/hp.cpp.o + + +CMakeFiles/cds.dir/src/dhp.cpp.o: CMakeFiles/cds.dir/flags.make +CMakeFiles/cds.dir/src/dhp.cpp.o: ../src/dhp.cpp + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green 
--progress-dir=/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles --progress-num=$(CMAKE_PROGRESS_3) "Building CXX object CMakeFiles/cds.dir/src/dhp.cpp.o" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o CMakeFiles/cds.dir/src/dhp.cpp.o -c /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/dhp.cpp + +CMakeFiles/cds.dir/src/dhp.cpp.i: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Preprocessing CXX source to CMakeFiles/cds.dir/src/dhp.cpp.i" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -E /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/dhp.cpp > CMakeFiles/cds.dir/src/dhp.cpp.i + +CMakeFiles/cds.dir/src/dhp.cpp.s: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/cds.dir/src/dhp.cpp.s" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/dhp.cpp -o CMakeFiles/cds.dir/src/dhp.cpp.s + +CMakeFiles/cds.dir/src/dhp.cpp.o.requires: + +.PHONY : CMakeFiles/cds.dir/src/dhp.cpp.o.requires + +CMakeFiles/cds.dir/src/dhp.cpp.o.provides: CMakeFiles/cds.dir/src/dhp.cpp.o.requires + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/dhp.cpp.o.provides.build +.PHONY : CMakeFiles/cds.dir/src/dhp.cpp.o.provides + +CMakeFiles/cds.dir/src/dhp.cpp.o.provides.build: CMakeFiles/cds.dir/src/dhp.cpp.o + + +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: CMakeFiles/cds.dir/flags.make +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../src/urcu_gp.cpp + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --progress-dir=/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles --progress-num=$(CMAKE_PROGRESS_4) "Building 
CXX object CMakeFiles/cds.dir/src/urcu_gp.cpp.o" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o CMakeFiles/cds.dir/src/urcu_gp.cpp.o -c /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/urcu_gp.cpp + +CMakeFiles/cds.dir/src/urcu_gp.cpp.i: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Preprocessing CXX source to CMakeFiles/cds.dir/src/urcu_gp.cpp.i" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -E /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/urcu_gp.cpp > CMakeFiles/cds.dir/src/urcu_gp.cpp.i + +CMakeFiles/cds.dir/src/urcu_gp.cpp.s: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/cds.dir/src/urcu_gp.cpp.s" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/urcu_gp.cpp -o CMakeFiles/cds.dir/src/urcu_gp.cpp.s + +CMakeFiles/cds.dir/src/urcu_gp.cpp.o.requires: + +.PHONY : CMakeFiles/cds.dir/src/urcu_gp.cpp.o.requires + +CMakeFiles/cds.dir/src/urcu_gp.cpp.o.provides: CMakeFiles/cds.dir/src/urcu_gp.cpp.o.requires + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/urcu_gp.cpp.o.provides.build +.PHONY : CMakeFiles/cds.dir/src/urcu_gp.cpp.o.provides + +CMakeFiles/cds.dir/src/urcu_gp.cpp.o.provides.build: CMakeFiles/cds.dir/src/urcu_gp.cpp.o + + +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: CMakeFiles/cds.dir/flags.make +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../src/urcu_sh.cpp + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --progress-dir=/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles --progress-num=$(CMAKE_PROGRESS_5) "Building CXX object CMakeFiles/cds.dir/src/urcu_sh.cpp.o" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) 
$(CXX_FLAGS) -o CMakeFiles/cds.dir/src/urcu_sh.cpp.o -c /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/urcu_sh.cpp + +CMakeFiles/cds.dir/src/urcu_sh.cpp.i: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Preprocessing CXX source to CMakeFiles/cds.dir/src/urcu_sh.cpp.i" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -E /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/urcu_sh.cpp > CMakeFiles/cds.dir/src/urcu_sh.cpp.i + +CMakeFiles/cds.dir/src/urcu_sh.cpp.s: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/cds.dir/src/urcu_sh.cpp.s" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/urcu_sh.cpp -o CMakeFiles/cds.dir/src/urcu_sh.cpp.s + +CMakeFiles/cds.dir/src/urcu_sh.cpp.o.requires: + +.PHONY : CMakeFiles/cds.dir/src/urcu_sh.cpp.o.requires + +CMakeFiles/cds.dir/src/urcu_sh.cpp.o.provides: CMakeFiles/cds.dir/src/urcu_sh.cpp.o.requires + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/urcu_sh.cpp.o.provides.build +.PHONY : CMakeFiles/cds.dir/src/urcu_sh.cpp.o.provides + +CMakeFiles/cds.dir/src/urcu_sh.cpp.o.provides.build: CMakeFiles/cds.dir/src/urcu_sh.cpp.o + + +CMakeFiles/cds.dir/src/thread_data.cpp.o: CMakeFiles/cds.dir/flags.make +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../src/thread_data.cpp + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --progress-dir=/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles --progress-num=$(CMAKE_PROGRESS_6) "Building CXX object CMakeFiles/cds.dir/src/thread_data.cpp.o" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o CMakeFiles/cds.dir/src/thread_data.cpp.o -c 
/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/thread_data.cpp + +CMakeFiles/cds.dir/src/thread_data.cpp.i: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Preprocessing CXX source to CMakeFiles/cds.dir/src/thread_data.cpp.i" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -E /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/thread_data.cpp > CMakeFiles/cds.dir/src/thread_data.cpp.i + +CMakeFiles/cds.dir/src/thread_data.cpp.s: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/cds.dir/src/thread_data.cpp.s" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/thread_data.cpp -o CMakeFiles/cds.dir/src/thread_data.cpp.s + +CMakeFiles/cds.dir/src/thread_data.cpp.o.requires: + +.PHONY : CMakeFiles/cds.dir/src/thread_data.cpp.o.requires + +CMakeFiles/cds.dir/src/thread_data.cpp.o.provides: CMakeFiles/cds.dir/src/thread_data.cpp.o.requires + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/thread_data.cpp.o.provides.build +.PHONY : CMakeFiles/cds.dir/src/thread_data.cpp.o.provides + +CMakeFiles/cds.dir/src/thread_data.cpp.o.provides.build: CMakeFiles/cds.dir/src/thread_data.cpp.o + + +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: CMakeFiles/cds.dir/flags.make +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../src/topology_hpux.cpp + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --progress-dir=/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles --progress-num=$(CMAKE_PROGRESS_7) "Building CXX object CMakeFiles/cds.dir/src/topology_hpux.cpp.o" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o CMakeFiles/cds.dir/src/topology_hpux.cpp.o -c 
/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_hpux.cpp + +CMakeFiles/cds.dir/src/topology_hpux.cpp.i: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Preprocessing CXX source to CMakeFiles/cds.dir/src/topology_hpux.cpp.i" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -E /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_hpux.cpp > CMakeFiles/cds.dir/src/topology_hpux.cpp.i + +CMakeFiles/cds.dir/src/topology_hpux.cpp.s: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/cds.dir/src/topology_hpux.cpp.s" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_hpux.cpp -o CMakeFiles/cds.dir/src/topology_hpux.cpp.s + +CMakeFiles/cds.dir/src/topology_hpux.cpp.o.requires: + +.PHONY : CMakeFiles/cds.dir/src/topology_hpux.cpp.o.requires + +CMakeFiles/cds.dir/src/topology_hpux.cpp.o.provides: CMakeFiles/cds.dir/src/topology_hpux.cpp.o.requires + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/topology_hpux.cpp.o.provides.build +.PHONY : CMakeFiles/cds.dir/src/topology_hpux.cpp.o.provides + +CMakeFiles/cds.dir/src/topology_hpux.cpp.o.provides.build: CMakeFiles/cds.dir/src/topology_hpux.cpp.o + + +CMakeFiles/cds.dir/src/topology_linux.cpp.o: CMakeFiles/cds.dir/flags.make +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../src/topology_linux.cpp + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --progress-dir=/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles --progress-num=$(CMAKE_PROGRESS_8) "Building CXX object CMakeFiles/cds.dir/src/topology_linux.cpp.o" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o 
CMakeFiles/cds.dir/src/topology_linux.cpp.o -c /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_linux.cpp + +CMakeFiles/cds.dir/src/topology_linux.cpp.i: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Preprocessing CXX source to CMakeFiles/cds.dir/src/topology_linux.cpp.i" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -E /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_linux.cpp > CMakeFiles/cds.dir/src/topology_linux.cpp.i + +CMakeFiles/cds.dir/src/topology_linux.cpp.s: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/cds.dir/src/topology_linux.cpp.s" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_linux.cpp -o CMakeFiles/cds.dir/src/topology_linux.cpp.s + +CMakeFiles/cds.dir/src/topology_linux.cpp.o.requires: + +.PHONY : CMakeFiles/cds.dir/src/topology_linux.cpp.o.requires + +CMakeFiles/cds.dir/src/topology_linux.cpp.o.provides: CMakeFiles/cds.dir/src/topology_linux.cpp.o.requires + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/topology_linux.cpp.o.provides.build +.PHONY : CMakeFiles/cds.dir/src/topology_linux.cpp.o.provides + +CMakeFiles/cds.dir/src/topology_linux.cpp.o.provides.build: CMakeFiles/cds.dir/src/topology_linux.cpp.o + + +CMakeFiles/cds.dir/src/topology_osx.cpp.o: CMakeFiles/cds.dir/flags.make +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../src/topology_osx.cpp + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --progress-dir=/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles --progress-num=$(CMAKE_PROGRESS_9) "Building CXX object CMakeFiles/cds.dir/src/topology_osx.cpp.o" + /usr/bin/c++ 
$(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -o CMakeFiles/cds.dir/src/topology_osx.cpp.o -c /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_osx.cpp + +CMakeFiles/cds.dir/src/topology_osx.cpp.i: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Preprocessing CXX source to CMakeFiles/cds.dir/src/topology_osx.cpp.i" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -E /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_osx.cpp > CMakeFiles/cds.dir/src/topology_osx.cpp.i + +CMakeFiles/cds.dir/src/topology_osx.cpp.s: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/cds.dir/src/topology_osx.cpp.s" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_osx.cpp -o CMakeFiles/cds.dir/src/topology_osx.cpp.s + +CMakeFiles/cds.dir/src/topology_osx.cpp.o.requires: + +.PHONY : CMakeFiles/cds.dir/src/topology_osx.cpp.o.requires + +CMakeFiles/cds.dir/src/topology_osx.cpp.o.provides: CMakeFiles/cds.dir/src/topology_osx.cpp.o.requires + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/topology_osx.cpp.o.provides.build +.PHONY : CMakeFiles/cds.dir/src/topology_osx.cpp.o.provides + +CMakeFiles/cds.dir/src/topology_osx.cpp.o.provides.build: CMakeFiles/cds.dir/src/topology_osx.cpp.o + + +CMakeFiles/cds.dir/src/dllmain.cpp.o: CMakeFiles/cds.dir/flags.make +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../src/dllmain.cpp + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --progress-dir=/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles --progress-num=$(CMAKE_PROGRESS_10) "Building CXX object CMakeFiles/cds.dir/src/dllmain.cpp.o" + /usr/bin/c++ $(CXX_DEFINES) 
$(CXX_INCLUDES) $(CXX_FLAGS) -o CMakeFiles/cds.dir/src/dllmain.cpp.o -c /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/dllmain.cpp + +CMakeFiles/cds.dir/src/dllmain.cpp.i: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Preprocessing CXX source to CMakeFiles/cds.dir/src/dllmain.cpp.i" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -E /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/dllmain.cpp > CMakeFiles/cds.dir/src/dllmain.cpp.i + +CMakeFiles/cds.dir/src/dllmain.cpp.s: cmake_force + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green "Compiling CXX source to assembly CMakeFiles/cds.dir/src/dllmain.cpp.s" + /usr/bin/c++ $(CXX_DEFINES) $(CXX_INCLUDES) $(CXX_FLAGS) -S /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/dllmain.cpp -o CMakeFiles/cds.dir/src/dllmain.cpp.s + +CMakeFiles/cds.dir/src/dllmain.cpp.o.requires: + +.PHONY : CMakeFiles/cds.dir/src/dllmain.cpp.o.requires + +CMakeFiles/cds.dir/src/dllmain.cpp.o.provides: CMakeFiles/cds.dir/src/dllmain.cpp.o.requires + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/dllmain.cpp.o.provides.build +.PHONY : CMakeFiles/cds.dir/src/dllmain.cpp.o.provides + +CMakeFiles/cds.dir/src/dllmain.cpp.o.provides.build: CMakeFiles/cds.dir/src/dllmain.cpp.o + + +# Object files for target cds +cds_OBJECTS = \ +"CMakeFiles/cds.dir/src/init.cpp.o" \ +"CMakeFiles/cds.dir/src/hp.cpp.o" \ +"CMakeFiles/cds.dir/src/dhp.cpp.o" \ +"CMakeFiles/cds.dir/src/urcu_gp.cpp.o" \ +"CMakeFiles/cds.dir/src/urcu_sh.cpp.o" \ +"CMakeFiles/cds.dir/src/thread_data.cpp.o" \ +"CMakeFiles/cds.dir/src/topology_hpux.cpp.o" \ +"CMakeFiles/cds.dir/src/topology_linux.cpp.o" \ +"CMakeFiles/cds.dir/src/topology_osx.cpp.o" \ +"CMakeFiles/cds.dir/src/dllmain.cpp.o" + +# External object files for target cds +cds_EXTERNAL_OBJECTS = + 
+bin/libcds.so.2.3.2: CMakeFiles/cds.dir/src/init.cpp.o +bin/libcds.so.2.3.2: CMakeFiles/cds.dir/src/hp.cpp.o +bin/libcds.so.2.3.2: CMakeFiles/cds.dir/src/dhp.cpp.o +bin/libcds.so.2.3.2: CMakeFiles/cds.dir/src/urcu_gp.cpp.o +bin/libcds.so.2.3.2: CMakeFiles/cds.dir/src/urcu_sh.cpp.o +bin/libcds.so.2.3.2: CMakeFiles/cds.dir/src/thread_data.cpp.o +bin/libcds.so.2.3.2: CMakeFiles/cds.dir/src/topology_hpux.cpp.o +bin/libcds.so.2.3.2: CMakeFiles/cds.dir/src/topology_linux.cpp.o +bin/libcds.so.2.3.2: CMakeFiles/cds.dir/src/topology_osx.cpp.o +bin/libcds.so.2.3.2: CMakeFiles/cds.dir/src/dllmain.cpp.o +bin/libcds.so.2.3.2: CMakeFiles/cds.dir/build.make +bin/libcds.so.2.3.2: CMakeFiles/cds.dir/link.txt + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --green --bold --progress-dir=/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles --progress-num=$(CMAKE_PROGRESS_11) "Linking CXX shared library bin/libcds.so" + $(CMAKE_COMMAND) -E cmake_link_script CMakeFiles/cds.dir/link.txt --verbose=$(VERBOSE) + $(CMAKE_COMMAND) -E cmake_symlink_library bin/libcds.so.2.3.2 bin/libcds.so.2.3.2 bin/libcds.so + +bin/libcds.so: bin/libcds.so.2.3.2 + @$(CMAKE_COMMAND) -E touch_nocreate bin/libcds.so + +# Rule to build all files generated by this target. 
+CMakeFiles/cds.dir/build: bin/libcds.so + +.PHONY : CMakeFiles/cds.dir/build + +CMakeFiles/cds.dir/requires: CMakeFiles/cds.dir/src/init.cpp.o.requires +CMakeFiles/cds.dir/requires: CMakeFiles/cds.dir/src/hp.cpp.o.requires +CMakeFiles/cds.dir/requires: CMakeFiles/cds.dir/src/dhp.cpp.o.requires +CMakeFiles/cds.dir/requires: CMakeFiles/cds.dir/src/urcu_gp.cpp.o.requires +CMakeFiles/cds.dir/requires: CMakeFiles/cds.dir/src/urcu_sh.cpp.o.requires +CMakeFiles/cds.dir/requires: CMakeFiles/cds.dir/src/thread_data.cpp.o.requires +CMakeFiles/cds.dir/requires: CMakeFiles/cds.dir/src/topology_hpux.cpp.o.requires +CMakeFiles/cds.dir/requires: CMakeFiles/cds.dir/src/topology_linux.cpp.o.requires +CMakeFiles/cds.dir/requires: CMakeFiles/cds.dir/src/topology_osx.cpp.o.requires +CMakeFiles/cds.dir/requires: CMakeFiles/cds.dir/src/dllmain.cpp.o.requires + +.PHONY : CMakeFiles/cds.dir/requires + +CMakeFiles/cds.dir/clean: + $(CMAKE_COMMAND) -P CMakeFiles/cds.dir/cmake_clean.cmake +.PHONY : CMakeFiles/cds.dir/clean + +CMakeFiles/cds.dir/depend: + cd /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release && $(CMAKE_COMMAND) -E cmake_depends "Unix Makefiles" /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2 /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2 /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/DependInfo.cmake --color=$(COLOR) +.PHONY : CMakeFiles/cds.dir/depend + diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/cmake_clean.cmake 
b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/cmake_clean.cmake new file mode 100644 index 0000000..89a54cb --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/cmake_clean.cmake @@ -0,0 +1,20 @@ +file(REMOVE_RECURSE + "CMakeFiles/cds.dir/src/init.cpp.o" + "CMakeFiles/cds.dir/src/hp.cpp.o" + "CMakeFiles/cds.dir/src/dhp.cpp.o" + "CMakeFiles/cds.dir/src/urcu_gp.cpp.o" + "CMakeFiles/cds.dir/src/urcu_sh.cpp.o" + "CMakeFiles/cds.dir/src/thread_data.cpp.o" + "CMakeFiles/cds.dir/src/topology_hpux.cpp.o" + "CMakeFiles/cds.dir/src/topology_linux.cpp.o" + "CMakeFiles/cds.dir/src/topology_osx.cpp.o" + "CMakeFiles/cds.dir/src/dllmain.cpp.o" + "bin/libcds.pdb" + "bin/libcds.so" + "bin/libcds.so.2.3.2" +) + +# Per-language clean rules from dependency scanning. +foreach(lang CXX) + include(CMakeFiles/cds.dir/cmake_clean_${lang}.cmake OPTIONAL) +endforeach() diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/depend.internal b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/depend.internal new file mode 100644 index 0000000..5e7a78c --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/depend.internal @@ -0,0 +1,1122 @@ +# CMAKE generated file: DO NOT EDIT! 
+# Generated by "Unix Makefiles" Generator, CMake Version 3.5 + +CMakeFiles/cds.dir/src/dhp.cpp.o + ..//cds/algo/atomic.h + ..//cds/algo/backoff_strategy.h + ..//cds/algo/base.h + ..//cds/algo/bitop.h + ..//cds/algo/elimination_tls.h + ..//cds/algo/int_algo.h + ..//cds/compiler/backoff.h + ..//cds/compiler/bitop.h + ..//cds/compiler/clang/defs.h + ..//cds/compiler/cxx11_atomic.h + ..//cds/compiler/defs.h + ..//cds/compiler/feature_tsan.h + ..//cds/compiler/gcc/amd64/backoff.h + ..//cds/compiler/gcc/amd64/bitop.h + ..//cds/compiler/gcc/amd64/cxx11_atomic.h + ..//cds/compiler/gcc/arm7/backoff.h + ..//cds/compiler/gcc/arm8/backoff.h + ..//cds/compiler/gcc/compiler_barriers.h + ..//cds/compiler/gcc/compiler_macro.h + ..//cds/compiler/gcc/defs.h + ..//cds/compiler/gcc/ia64/backoff.h + ..//cds/compiler/gcc/ia64/bitop.h + ..//cds/compiler/gcc/ia64/cxx11_atomic.h + ..//cds/compiler/gcc/ppc64/backoff.h + ..//cds/compiler/gcc/ppc64/bitop.h + ..//cds/compiler/gcc/sparc/backoff.h + ..//cds/compiler/gcc/sparc/bitop.h + ..//cds/compiler/gcc/sparc/cxx11_atomic.h + ..//cds/compiler/gcc/x86/backoff.h + ..//cds/compiler/gcc/x86/bitop.h + ..//cds/compiler/gcc/x86/cxx11_atomic.h + ..//cds/compiler/gcc/x86/cxx11_atomic32.h + ..//cds/compiler/icl/compiler_barriers.h + ..//cds/compiler/icl/defs.h + ..//cds/compiler/vc/amd64/backoff.h + ..//cds/compiler/vc/amd64/bitop.h + ..//cds/compiler/vc/amd64/cxx11_atomic.h + ..//cds/compiler/vc/compiler_barriers.h + ..//cds/compiler/vc/defs.h + ..//cds/compiler/vc/x86/backoff.h + ..//cds/compiler/vc/x86/bitop.h + ..//cds/compiler/vc/x86/cxx11_atomic.h + ..//cds/container/details/base.h + ..//cds/container/vyukov_mpmc_cycle_queue.h + ..//cds/details/aligned_type.h + ..//cds/details/allocator.h + ..//cds/details/bitop_generic.h + ..//cds/details/bounded_container.h + ..//cds/details/defs.h + ..//cds/details/is_aligned.h + ..//cds/details/lib.h + ..//cds/details/marked_ptr.h + ..//cds/details/static_functor.h + ..//cds/details/throw_exception.h + 
..//cds/gc/details/hp_common.h + ..//cds/gc/details/retired_ptr.h + ..//cds/gc/dhp.h + ..//cds/gc/hp.h + ..//cds/init.h + ..//cds/intrusive/details/base.h + ..//cds/intrusive/details/node_traits.h + ..//cds/intrusive/free_list.h + ..//cds/intrusive/free_list_selector.h + ..//cds/intrusive/free_list_tagged.h + ..//cds/intrusive/options.h + ..//cds/opt/buffer.h + ..//cds/opt/options.h + ..//cds/opt/value_cleaner.h + ..//cds/os/aix/alloc_aligned.h + ..//cds/os/aix/topology.h + ..//cds/os/alloc_aligned.h + ..//cds/os/details/fake_topology.h + ..//cds/os/free_bsd/alloc_aligned.h + ..//cds/os/free_bsd/topology.h + ..//cds/os/hpux/alloc_aligned.h + ..//cds/os/hpux/topology.h + ..//cds/os/libc/alloc_aligned.h + ..//cds/os/linux/alloc_aligned.h + ..//cds/os/linux/topology.h + ..//cds/os/osx/topology.h + ..//cds/os/posix/alloc_aligned.h + ..//cds/os/posix/fake_topology.h + ..//cds/os/posix/thread.h + ..//cds/os/sunos/alloc_aligned.h + ..//cds/os/sunos/topology.h + ..//cds/os/thread.h + ..//cds/os/topology.h + ..//cds/os/win/alloc_aligned.h + ..//cds/os/win/thread.h + ..//cds/os/win/topology.h + ..//cds/threading/details/_common.h + ..//cds/threading/details/auto_detect.h + ..//cds/threading/details/cxx11.h + ..//cds/threading/details/cxx11_manager.h + ..//cds/threading/details/gcc.h + ..//cds/threading/details/gcc_manager.h + ..//cds/threading/details/msvc.h + ..//cds/threading/details/msvc_manager.h + ..//cds/threading/details/pthread.h + ..//cds/threading/details/pthread_manager.h + ..//cds/threading/details/wintls.h + ..//cds/threading/details/wintls_manager.h + ..//cds/threading/model.h + ..//cds/urcu/details/base.h + ..//cds/urcu/details/gp.h + ..//cds/urcu/details/gp_decl.h + ..//cds/urcu/details/gpb.h + ..//cds/urcu/details/sh_decl.h + ..//cds/urcu/general_buffered.h + ..//cds/user_setup/allocator.h + ..//cds/user_setup/cache_line.h + ..//cds/user_setup/threading.h + ..//cds/version.h + 
/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/dhp.cpp +CMakeFiles/cds.dir/src/dllmain.cpp.o + ..//cds/algo/atomic.h + ..//cds/algo/backoff_strategy.h + ..//cds/algo/base.h + ..//cds/algo/bitop.h + ..//cds/algo/elimination_tls.h + ..//cds/algo/int_algo.h + ..//cds/compiler/backoff.h + ..//cds/compiler/bitop.h + ..//cds/compiler/clang/defs.h + ..//cds/compiler/cxx11_atomic.h + ..//cds/compiler/defs.h + ..//cds/compiler/feature_tsan.h + ..//cds/compiler/gcc/amd64/backoff.h + ..//cds/compiler/gcc/amd64/bitop.h + ..//cds/compiler/gcc/amd64/cxx11_atomic.h + ..//cds/compiler/gcc/arm7/backoff.h + ..//cds/compiler/gcc/arm8/backoff.h + ..//cds/compiler/gcc/compiler_barriers.h + ..//cds/compiler/gcc/compiler_macro.h + ..//cds/compiler/gcc/defs.h + ..//cds/compiler/gcc/ia64/backoff.h + ..//cds/compiler/gcc/ia64/bitop.h + ..//cds/compiler/gcc/ia64/cxx11_atomic.h + ..//cds/compiler/gcc/ppc64/backoff.h + ..//cds/compiler/gcc/ppc64/bitop.h + ..//cds/compiler/gcc/sparc/backoff.h + ..//cds/compiler/gcc/sparc/bitop.h + ..//cds/compiler/gcc/sparc/cxx11_atomic.h + ..//cds/compiler/gcc/x86/backoff.h + ..//cds/compiler/gcc/x86/bitop.h + ..//cds/compiler/gcc/x86/cxx11_atomic.h + ..//cds/compiler/gcc/x86/cxx11_atomic32.h + ..//cds/compiler/icl/compiler_barriers.h + ..//cds/compiler/icl/defs.h + ..//cds/compiler/vc/amd64/backoff.h + ..//cds/compiler/vc/amd64/bitop.h + ..//cds/compiler/vc/amd64/cxx11_atomic.h + ..//cds/compiler/vc/compiler_barriers.h + ..//cds/compiler/vc/defs.h + ..//cds/compiler/vc/x86/backoff.h + ..//cds/compiler/vc/x86/bitop.h + ..//cds/compiler/vc/x86/cxx11_atomic.h + ..//cds/container/details/base.h + ..//cds/container/vyukov_mpmc_cycle_queue.h + ..//cds/details/aligned_type.h + ..//cds/details/allocator.h + ..//cds/details/bitop_generic.h + ..//cds/details/bounded_container.h + ..//cds/details/defs.h + ..//cds/details/is_aligned.h + ..//cds/details/lib.h + ..//cds/details/marked_ptr.h + 
..//cds/details/static_functor.h + ..//cds/details/throw_exception.h + ..//cds/gc/details/hp_common.h + ..//cds/gc/details/retired_ptr.h + ..//cds/gc/hp.h + ..//cds/init.h + ..//cds/intrusive/details/base.h + ..//cds/intrusive/details/node_traits.h + ..//cds/intrusive/options.h + ..//cds/opt/buffer.h + ..//cds/opt/options.h + ..//cds/opt/value_cleaner.h + ..//cds/os/aix/alloc_aligned.h + ..//cds/os/aix/topology.h + ..//cds/os/alloc_aligned.h + ..//cds/os/details/fake_topology.h + ..//cds/os/free_bsd/alloc_aligned.h + ..//cds/os/free_bsd/topology.h + ..//cds/os/hpux/alloc_aligned.h + ..//cds/os/hpux/topology.h + ..//cds/os/libc/alloc_aligned.h + ..//cds/os/linux/alloc_aligned.h + ..//cds/os/linux/topology.h + ..//cds/os/osx/topology.h + ..//cds/os/posix/alloc_aligned.h + ..//cds/os/posix/fake_topology.h + ..//cds/os/posix/thread.h + ..//cds/os/sunos/alloc_aligned.h + ..//cds/os/sunos/topology.h + ..//cds/os/thread.h + ..//cds/os/topology.h + ..//cds/os/win/alloc_aligned.h + ..//cds/os/win/thread.h + ..//cds/os/win/topology.h + ..//cds/threading/details/_common.h + ..//cds/threading/details/auto_detect.h + ..//cds/threading/details/cxx11.h + ..//cds/threading/details/cxx11_manager.h + ..//cds/threading/details/gcc.h + ..//cds/threading/details/gcc_manager.h + ..//cds/threading/details/msvc.h + ..//cds/threading/details/msvc_manager.h + ..//cds/threading/details/pthread.h + ..//cds/threading/details/pthread_manager.h + ..//cds/threading/details/wintls.h + ..//cds/threading/details/wintls_manager.h + ..//cds/threading/model.h + ..//cds/urcu/details/base.h + ..//cds/urcu/details/gp.h + ..//cds/urcu/details/gp_decl.h + ..//cds/urcu/details/gpb.h + ..//cds/urcu/details/sh_decl.h + ..//cds/urcu/general_buffered.h + ..//cds/user_setup/allocator.h + ..//cds/user_setup/cache_line.h + ..//cds/user_setup/threading.h + ..//cds/version.h + /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/dllmain.cpp 
+CMakeFiles/cds.dir/src/hp.cpp.o + ..//cds/algo/atomic.h + ..//cds/algo/backoff_strategy.h + ..//cds/algo/base.h + ..//cds/algo/bitop.h + ..//cds/algo/elimination_tls.h + ..//cds/algo/int_algo.h + ..//cds/compiler/backoff.h + ..//cds/compiler/bitop.h + ..//cds/compiler/clang/defs.h + ..//cds/compiler/cxx11_atomic.h + ..//cds/compiler/defs.h + ..//cds/compiler/feature_tsan.h + ..//cds/compiler/gcc/amd64/backoff.h + ..//cds/compiler/gcc/amd64/bitop.h + ..//cds/compiler/gcc/amd64/cxx11_atomic.h + ..//cds/compiler/gcc/arm7/backoff.h + ..//cds/compiler/gcc/arm8/backoff.h + ..//cds/compiler/gcc/compiler_barriers.h + ..//cds/compiler/gcc/compiler_macro.h + ..//cds/compiler/gcc/defs.h + ..//cds/compiler/gcc/ia64/backoff.h + ..//cds/compiler/gcc/ia64/bitop.h + ..//cds/compiler/gcc/ia64/cxx11_atomic.h + ..//cds/compiler/gcc/ppc64/backoff.h + ..//cds/compiler/gcc/ppc64/bitop.h + ..//cds/compiler/gcc/sparc/backoff.h + ..//cds/compiler/gcc/sparc/bitop.h + ..//cds/compiler/gcc/sparc/cxx11_atomic.h + ..//cds/compiler/gcc/x86/backoff.h + ..//cds/compiler/gcc/x86/bitop.h + ..//cds/compiler/gcc/x86/cxx11_atomic.h + ..//cds/compiler/gcc/x86/cxx11_atomic32.h + ..//cds/compiler/icl/compiler_barriers.h + ..//cds/compiler/icl/defs.h + ..//cds/compiler/vc/amd64/backoff.h + ..//cds/compiler/vc/amd64/bitop.h + ..//cds/compiler/vc/amd64/cxx11_atomic.h + ..//cds/compiler/vc/compiler_barriers.h + ..//cds/compiler/vc/defs.h + ..//cds/compiler/vc/x86/backoff.h + ..//cds/compiler/vc/x86/bitop.h + ..//cds/compiler/vc/x86/cxx11_atomic.h + ..//cds/container/details/base.h + ..//cds/container/vyukov_mpmc_cycle_queue.h + ..//cds/details/aligned_type.h + ..//cds/details/allocator.h + ..//cds/details/bitop_generic.h + ..//cds/details/bounded_container.h + ..//cds/details/defs.h + ..//cds/details/is_aligned.h + ..//cds/details/lib.h + ..//cds/details/marked_ptr.h + ..//cds/details/static_functor.h + ..//cds/details/throw_exception.h + ..//cds/gc/details/hp_common.h + ..//cds/gc/details/retired_ptr.h + 
..//cds/gc/hp.h + ..//cds/init.h + ..//cds/intrusive/details/base.h + ..//cds/intrusive/details/node_traits.h + ..//cds/intrusive/options.h + ..//cds/opt/buffer.h + ..//cds/opt/options.h + ..//cds/opt/value_cleaner.h + ..//cds/os/aix/alloc_aligned.h + ..//cds/os/aix/topology.h + ..//cds/os/alloc_aligned.h + ..//cds/os/details/fake_topology.h + ..//cds/os/free_bsd/alloc_aligned.h + ..//cds/os/free_bsd/topology.h + ..//cds/os/hpux/alloc_aligned.h + ..//cds/os/hpux/topology.h + ..//cds/os/libc/alloc_aligned.h + ..//cds/os/linux/alloc_aligned.h + ..//cds/os/linux/topology.h + ..//cds/os/osx/topology.h + ..//cds/os/posix/alloc_aligned.h + ..//cds/os/posix/fake_topology.h + ..//cds/os/posix/thread.h + ..//cds/os/sunos/alloc_aligned.h + ..//cds/os/sunos/topology.h + ..//cds/os/thread.h + ..//cds/os/topology.h + ..//cds/os/win/alloc_aligned.h + ..//cds/os/win/thread.h + ..//cds/os/win/topology.h + ..//cds/threading/details/_common.h + ..//cds/threading/details/auto_detect.h + ..//cds/threading/details/cxx11.h + ..//cds/threading/details/cxx11_manager.h + ..//cds/threading/details/gcc.h + ..//cds/threading/details/gcc_manager.h + ..//cds/threading/details/msvc.h + ..//cds/threading/details/msvc_manager.h + ..//cds/threading/details/pthread.h + ..//cds/threading/details/pthread_manager.h + ..//cds/threading/details/wintls.h + ..//cds/threading/details/wintls_manager.h + ..//cds/threading/model.h + ..//cds/urcu/details/base.h + ..//cds/urcu/details/gp.h + ..//cds/urcu/details/gp_decl.h + ..//cds/urcu/details/gpb.h + ..//cds/urcu/details/sh_decl.h + ..//cds/urcu/general_buffered.h + ..//cds/user_setup/allocator.h + ..//cds/user_setup/cache_line.h + ..//cds/user_setup/threading.h + ..//cds/version.h + /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/hp.cpp +CMakeFiles/cds.dir/src/init.cpp.o + ..//cds/algo/atomic.h + ..//cds/algo/backoff_strategy.h + ..//cds/algo/base.h + ..//cds/algo/bitop.h + ..//cds/algo/elimination_tls.h + 
..//cds/algo/int_algo.h + ..//cds/compiler/backoff.h + ..//cds/compiler/bitop.h + ..//cds/compiler/clang/defs.h + ..//cds/compiler/cxx11_atomic.h + ..//cds/compiler/defs.h + ..//cds/compiler/feature_tsan.h + ..//cds/compiler/gcc/amd64/backoff.h + ..//cds/compiler/gcc/amd64/bitop.h + ..//cds/compiler/gcc/amd64/cxx11_atomic.h + ..//cds/compiler/gcc/arm7/backoff.h + ..//cds/compiler/gcc/arm8/backoff.h + ..//cds/compiler/gcc/compiler_barriers.h + ..//cds/compiler/gcc/compiler_macro.h + ..//cds/compiler/gcc/defs.h + ..//cds/compiler/gcc/ia64/backoff.h + ..//cds/compiler/gcc/ia64/bitop.h + ..//cds/compiler/gcc/ia64/cxx11_atomic.h + ..//cds/compiler/gcc/ppc64/backoff.h + ..//cds/compiler/gcc/ppc64/bitop.h + ..//cds/compiler/gcc/sparc/backoff.h + ..//cds/compiler/gcc/sparc/bitop.h + ..//cds/compiler/gcc/sparc/cxx11_atomic.h + ..//cds/compiler/gcc/x86/backoff.h + ..//cds/compiler/gcc/x86/bitop.h + ..//cds/compiler/gcc/x86/cxx11_atomic.h + ..//cds/compiler/gcc/x86/cxx11_atomic32.h + ..//cds/compiler/icl/compiler_barriers.h + ..//cds/compiler/icl/defs.h + ..//cds/compiler/vc/amd64/backoff.h + ..//cds/compiler/vc/amd64/bitop.h + ..//cds/compiler/vc/amd64/cxx11_atomic.h + ..//cds/compiler/vc/compiler_barriers.h + ..//cds/compiler/vc/defs.h + ..//cds/compiler/vc/x86/backoff.h + ..//cds/compiler/vc/x86/bitop.h + ..//cds/compiler/vc/x86/cxx11_atomic.h + ..//cds/container/details/base.h + ..//cds/container/vyukov_mpmc_cycle_queue.h + ..//cds/details/aligned_type.h + ..//cds/details/allocator.h + ..//cds/details/bitop_generic.h + ..//cds/details/bounded_container.h + ..//cds/details/defs.h + ..//cds/details/is_aligned.h + ..//cds/details/lib.h + ..//cds/details/marked_ptr.h + ..//cds/details/static_functor.h + ..//cds/details/throw_exception.h + ..//cds/gc/details/hp_common.h + ..//cds/gc/details/retired_ptr.h + ..//cds/gc/hp.h + ..//cds/init.h + ..//cds/intrusive/details/base.h + ..//cds/intrusive/details/node_traits.h + ..//cds/intrusive/options.h + ..//cds/opt/buffer.h + 
..//cds/opt/options.h + ..//cds/opt/value_cleaner.h + ..//cds/os/aix/alloc_aligned.h + ..//cds/os/aix/topology.h + ..//cds/os/alloc_aligned.h + ..//cds/os/details/fake_topology.h + ..//cds/os/free_bsd/alloc_aligned.h + ..//cds/os/free_bsd/topology.h + ..//cds/os/hpux/alloc_aligned.h + ..//cds/os/hpux/topology.h + ..//cds/os/libc/alloc_aligned.h + ..//cds/os/linux/alloc_aligned.h + ..//cds/os/linux/topology.h + ..//cds/os/osx/topology.h + ..//cds/os/posix/alloc_aligned.h + ..//cds/os/posix/fake_topology.h + ..//cds/os/posix/thread.h + ..//cds/os/sunos/alloc_aligned.h + ..//cds/os/sunos/topology.h + ..//cds/os/thread.h + ..//cds/os/topology.h + ..//cds/os/win/alloc_aligned.h + ..//cds/os/win/thread.h + ..//cds/os/win/topology.h + ..//cds/threading/details/_common.h + ..//cds/threading/details/auto_detect.h + ..//cds/threading/details/cxx11.h + ..//cds/threading/details/cxx11_manager.h + ..//cds/threading/details/gcc.h + ..//cds/threading/details/gcc_manager.h + ..//cds/threading/details/msvc.h + ..//cds/threading/details/msvc_manager.h + ..//cds/threading/details/pthread.h + ..//cds/threading/details/pthread_manager.h + ..//cds/threading/details/wintls.h + ..//cds/threading/details/wintls_manager.h + ..//cds/threading/model.h + ..//cds/urcu/details/base.h + ..//cds/urcu/details/gp.h + ..//cds/urcu/details/gp_decl.h + ..//cds/urcu/details/gpb.h + ..//cds/urcu/details/sh_decl.h + ..//cds/urcu/general_buffered.h + ..//cds/user_setup/allocator.h + ..//cds/user_setup/cache_line.h + ..//cds/user_setup/threading.h + ..//cds/version.h + /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/init.cpp +CMakeFiles/cds.dir/src/thread_data.cpp.o + ..//cds/algo/atomic.h + ..//cds/algo/backoff_strategy.h + ..//cds/algo/base.h + ..//cds/algo/bitop.h + ..//cds/algo/elimination_tls.h + ..//cds/algo/int_algo.h + ..//cds/compiler/backoff.h + ..//cds/compiler/bitop.h + ..//cds/compiler/clang/defs.h + ..//cds/compiler/cxx11_atomic.h + 
..//cds/compiler/defs.h + ..//cds/compiler/feature_tsan.h + ..//cds/compiler/gcc/amd64/backoff.h + ..//cds/compiler/gcc/amd64/bitop.h + ..//cds/compiler/gcc/amd64/cxx11_atomic.h + ..//cds/compiler/gcc/arm7/backoff.h + ..//cds/compiler/gcc/arm8/backoff.h + ..//cds/compiler/gcc/compiler_barriers.h + ..//cds/compiler/gcc/compiler_macro.h + ..//cds/compiler/gcc/defs.h + ..//cds/compiler/gcc/ia64/backoff.h + ..//cds/compiler/gcc/ia64/bitop.h + ..//cds/compiler/gcc/ia64/cxx11_atomic.h + ..//cds/compiler/gcc/ppc64/backoff.h + ..//cds/compiler/gcc/ppc64/bitop.h + ..//cds/compiler/gcc/sparc/backoff.h + ..//cds/compiler/gcc/sparc/bitop.h + ..//cds/compiler/gcc/sparc/cxx11_atomic.h + ..//cds/compiler/gcc/x86/backoff.h + ..//cds/compiler/gcc/x86/bitop.h + ..//cds/compiler/gcc/x86/cxx11_atomic.h + ..//cds/compiler/gcc/x86/cxx11_atomic32.h + ..//cds/compiler/icl/compiler_barriers.h + ..//cds/compiler/icl/defs.h + ..//cds/compiler/vc/amd64/backoff.h + ..//cds/compiler/vc/amd64/bitop.h + ..//cds/compiler/vc/amd64/cxx11_atomic.h + ..//cds/compiler/vc/compiler_barriers.h + ..//cds/compiler/vc/defs.h + ..//cds/compiler/vc/x86/backoff.h + ..//cds/compiler/vc/x86/bitop.h + ..//cds/compiler/vc/x86/cxx11_atomic.h + ..//cds/container/details/base.h + ..//cds/container/vyukov_mpmc_cycle_queue.h + ..//cds/details/aligned_type.h + ..//cds/details/allocator.h + ..//cds/details/bitop_generic.h + ..//cds/details/bounded_container.h + ..//cds/details/defs.h + ..//cds/details/is_aligned.h + ..//cds/details/lib.h + ..//cds/details/marked_ptr.h + ..//cds/details/static_functor.h + ..//cds/details/throw_exception.h + ..//cds/gc/details/hp_common.h + ..//cds/gc/details/retired_ptr.h + ..//cds/gc/dhp.h + ..//cds/gc/hp.h + ..//cds/init.h + ..//cds/intrusive/details/base.h + ..//cds/intrusive/details/node_traits.h + ..//cds/intrusive/free_list.h + ..//cds/intrusive/free_list_selector.h + ..//cds/intrusive/free_list_tagged.h + ..//cds/intrusive/options.h + ..//cds/opt/buffer.h + ..//cds/opt/options.h + 
..//cds/opt/value_cleaner.h + ..//cds/os/aix/alloc_aligned.h + ..//cds/os/aix/topology.h + ..//cds/os/alloc_aligned.h + ..//cds/os/details/fake_topology.h + ..//cds/os/free_bsd/alloc_aligned.h + ..//cds/os/free_bsd/topology.h + ..//cds/os/hpux/alloc_aligned.h + ..//cds/os/hpux/topology.h + ..//cds/os/libc/alloc_aligned.h + ..//cds/os/linux/alloc_aligned.h + ..//cds/os/linux/topology.h + ..//cds/os/osx/topology.h + ..//cds/os/posix/alloc_aligned.h + ..//cds/os/posix/fake_topology.h + ..//cds/os/posix/thread.h + ..//cds/os/sunos/alloc_aligned.h + ..//cds/os/sunos/topology.h + ..//cds/os/thread.h + ..//cds/os/topology.h + ..//cds/os/win/alloc_aligned.h + ..//cds/os/win/thread.h + ..//cds/os/win/topology.h + ..//cds/threading/details/_common.h + ..//cds/threading/details/auto_detect.h + ..//cds/threading/details/cxx11.h + ..//cds/threading/details/cxx11_manager.h + ..//cds/threading/details/gcc.h + ..//cds/threading/details/gcc_manager.h + ..//cds/threading/details/msvc.h + ..//cds/threading/details/msvc_manager.h + ..//cds/threading/details/pthread.h + ..//cds/threading/details/pthread_manager.h + ..//cds/threading/details/wintls.h + ..//cds/threading/details/wintls_manager.h + ..//cds/threading/model.h + ..//cds/urcu/details/base.h + ..//cds/urcu/details/gp.h + ..//cds/urcu/details/gp_decl.h + ..//cds/urcu/details/gpb.h + ..//cds/urcu/details/sh_decl.h + ..//cds/urcu/general_buffered.h + ..//cds/user_setup/allocator.h + ..//cds/user_setup/cache_line.h + ..//cds/user_setup/threading.h + ..//cds/version.h + /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/thread_data.cpp +CMakeFiles/cds.dir/src/topology_hpux.cpp.o + ..//cds/algo/atomic.h + ..//cds/algo/backoff_strategy.h + ..//cds/algo/base.h + ..//cds/algo/bitop.h + ..//cds/algo/elimination_tls.h + ..//cds/algo/int_algo.h + ..//cds/compiler/backoff.h + ..//cds/compiler/bitop.h + ..//cds/compiler/clang/defs.h + ..//cds/compiler/cxx11_atomic.h + ..//cds/compiler/defs.h 
+ ..//cds/compiler/feature_tsan.h + ..//cds/compiler/gcc/amd64/backoff.h + ..//cds/compiler/gcc/amd64/bitop.h + ..//cds/compiler/gcc/amd64/cxx11_atomic.h + ..//cds/compiler/gcc/arm7/backoff.h + ..//cds/compiler/gcc/arm8/backoff.h + ..//cds/compiler/gcc/compiler_barriers.h + ..//cds/compiler/gcc/compiler_macro.h + ..//cds/compiler/gcc/defs.h + ..//cds/compiler/gcc/ia64/backoff.h + ..//cds/compiler/gcc/ia64/bitop.h + ..//cds/compiler/gcc/ia64/cxx11_atomic.h + ..//cds/compiler/gcc/ppc64/backoff.h + ..//cds/compiler/gcc/ppc64/bitop.h + ..//cds/compiler/gcc/sparc/backoff.h + ..//cds/compiler/gcc/sparc/bitop.h + ..//cds/compiler/gcc/sparc/cxx11_atomic.h + ..//cds/compiler/gcc/x86/backoff.h + ..//cds/compiler/gcc/x86/bitop.h + ..//cds/compiler/gcc/x86/cxx11_atomic.h + ..//cds/compiler/gcc/x86/cxx11_atomic32.h + ..//cds/compiler/icl/compiler_barriers.h + ..//cds/compiler/icl/defs.h + ..//cds/compiler/vc/amd64/backoff.h + ..//cds/compiler/vc/amd64/bitop.h + ..//cds/compiler/vc/amd64/cxx11_atomic.h + ..//cds/compiler/vc/compiler_barriers.h + ..//cds/compiler/vc/defs.h + ..//cds/compiler/vc/x86/backoff.h + ..//cds/compiler/vc/x86/bitop.h + ..//cds/compiler/vc/x86/cxx11_atomic.h + ..//cds/container/details/base.h + ..//cds/container/vyukov_mpmc_cycle_queue.h + ..//cds/details/aligned_type.h + ..//cds/details/allocator.h + ..//cds/details/bitop_generic.h + ..//cds/details/bounded_container.h + ..//cds/details/defs.h + ..//cds/details/is_aligned.h + ..//cds/details/lib.h + ..//cds/details/marked_ptr.h + ..//cds/details/static_functor.h + ..//cds/details/throw_exception.h + ..//cds/gc/details/hp_common.h + ..//cds/gc/details/retired_ptr.h + ..//cds/gc/hp.h + ..//cds/init.h + ..//cds/intrusive/details/base.h + ..//cds/intrusive/details/node_traits.h + ..//cds/intrusive/options.h + ..//cds/opt/buffer.h + ..//cds/opt/options.h + ..//cds/opt/value_cleaner.h + ..//cds/os/aix/alloc_aligned.h + ..//cds/os/aix/topology.h + ..//cds/os/alloc_aligned.h + ..//cds/os/details/fake_topology.h + 
..//cds/os/free_bsd/alloc_aligned.h + ..//cds/os/free_bsd/topology.h + ..//cds/os/hpux/alloc_aligned.h + ..//cds/os/hpux/topology.h + ..//cds/os/libc/alloc_aligned.h + ..//cds/os/linux/alloc_aligned.h + ..//cds/os/linux/topology.h + ..//cds/os/osx/topology.h + ..//cds/os/posix/alloc_aligned.h + ..//cds/os/posix/fake_topology.h + ..//cds/os/posix/thread.h + ..//cds/os/sunos/alloc_aligned.h + ..//cds/os/sunos/topology.h + ..//cds/os/thread.h + ..//cds/os/topology.h + ..//cds/os/win/alloc_aligned.h + ..//cds/os/win/thread.h + ..//cds/os/win/topology.h + ..//cds/threading/details/_common.h + ..//cds/threading/details/auto_detect.h + ..//cds/threading/details/cxx11.h + ..//cds/threading/details/cxx11_manager.h + ..//cds/threading/details/gcc.h + ..//cds/threading/details/gcc_manager.h + ..//cds/threading/details/msvc.h + ..//cds/threading/details/msvc_manager.h + ..//cds/threading/details/pthread.h + ..//cds/threading/details/pthread_manager.h + ..//cds/threading/details/wintls.h + ..//cds/threading/details/wintls_manager.h + ..//cds/threading/model.h + ..//cds/urcu/details/base.h + ..//cds/urcu/details/gp.h + ..//cds/urcu/details/gp_decl.h + ..//cds/urcu/details/gpb.h + ..//cds/urcu/details/sh_decl.h + ..//cds/urcu/general_buffered.h + ..//cds/user_setup/allocator.h + ..//cds/user_setup/cache_line.h + ..//cds/user_setup/threading.h + ..//cds/version.h + /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_hpux.cpp +CMakeFiles/cds.dir/src/topology_linux.cpp.o + ..//cds/algo/atomic.h + ..//cds/algo/backoff_strategy.h + ..//cds/algo/base.h + ..//cds/algo/bitop.h + ..//cds/algo/elimination_tls.h + ..//cds/algo/int_algo.h + ..//cds/compiler/backoff.h + ..//cds/compiler/bitop.h + ..//cds/compiler/clang/defs.h + ..//cds/compiler/cxx11_atomic.h + ..//cds/compiler/defs.h + ..//cds/compiler/feature_tsan.h + ..//cds/compiler/gcc/amd64/backoff.h + ..//cds/compiler/gcc/amd64/bitop.h + ..//cds/compiler/gcc/amd64/cxx11_atomic.h 
+ ..//cds/compiler/gcc/arm7/backoff.h + ..//cds/compiler/gcc/arm8/backoff.h + ..//cds/compiler/gcc/compiler_barriers.h + ..//cds/compiler/gcc/compiler_macro.h + ..//cds/compiler/gcc/defs.h + ..//cds/compiler/gcc/ia64/backoff.h + ..//cds/compiler/gcc/ia64/bitop.h + ..//cds/compiler/gcc/ia64/cxx11_atomic.h + ..//cds/compiler/gcc/ppc64/backoff.h + ..//cds/compiler/gcc/ppc64/bitop.h + ..//cds/compiler/gcc/sparc/backoff.h + ..//cds/compiler/gcc/sparc/bitop.h + ..//cds/compiler/gcc/sparc/cxx11_atomic.h + ..//cds/compiler/gcc/x86/backoff.h + ..//cds/compiler/gcc/x86/bitop.h + ..//cds/compiler/gcc/x86/cxx11_atomic.h + ..//cds/compiler/gcc/x86/cxx11_atomic32.h + ..//cds/compiler/icl/compiler_barriers.h + ..//cds/compiler/icl/defs.h + ..//cds/compiler/vc/amd64/backoff.h + ..//cds/compiler/vc/amd64/bitop.h + ..//cds/compiler/vc/amd64/cxx11_atomic.h + ..//cds/compiler/vc/compiler_barriers.h + ..//cds/compiler/vc/defs.h + ..//cds/compiler/vc/x86/backoff.h + ..//cds/compiler/vc/x86/bitop.h + ..//cds/compiler/vc/x86/cxx11_atomic.h + ..//cds/container/details/base.h + ..//cds/container/vyukov_mpmc_cycle_queue.h + ..//cds/details/aligned_type.h + ..//cds/details/allocator.h + ..//cds/details/bitop_generic.h + ..//cds/details/bounded_container.h + ..//cds/details/defs.h + ..//cds/details/is_aligned.h + ..//cds/details/lib.h + ..//cds/details/marked_ptr.h + ..//cds/details/static_functor.h + ..//cds/details/throw_exception.h + ..//cds/gc/details/hp_common.h + ..//cds/gc/details/retired_ptr.h + ..//cds/gc/hp.h + ..//cds/init.h + ..//cds/intrusive/details/base.h + ..//cds/intrusive/details/node_traits.h + ..//cds/intrusive/options.h + ..//cds/opt/buffer.h + ..//cds/opt/options.h + ..//cds/opt/value_cleaner.h + ..//cds/os/aix/alloc_aligned.h + ..//cds/os/aix/topology.h + ..//cds/os/alloc_aligned.h + ..//cds/os/details/fake_topology.h + ..//cds/os/free_bsd/alloc_aligned.h + ..//cds/os/free_bsd/topology.h + ..//cds/os/hpux/alloc_aligned.h + ..//cds/os/hpux/topology.h + 
..//cds/os/libc/alloc_aligned.h + ..//cds/os/linux/alloc_aligned.h + ..//cds/os/linux/topology.h + ..//cds/os/osx/topology.h + ..//cds/os/posix/alloc_aligned.h + ..//cds/os/posix/fake_topology.h + ..//cds/os/posix/thread.h + ..//cds/os/sunos/alloc_aligned.h + ..//cds/os/sunos/topology.h + ..//cds/os/thread.h + ..//cds/os/topology.h + ..//cds/os/win/alloc_aligned.h + ..//cds/os/win/thread.h + ..//cds/os/win/topology.h + ..//cds/threading/details/_common.h + ..//cds/threading/details/auto_detect.h + ..//cds/threading/details/cxx11.h + ..//cds/threading/details/cxx11_manager.h + ..//cds/threading/details/gcc.h + ..//cds/threading/details/gcc_manager.h + ..//cds/threading/details/msvc.h + ..//cds/threading/details/msvc_manager.h + ..//cds/threading/details/pthread.h + ..//cds/threading/details/pthread_manager.h + ..//cds/threading/details/wintls.h + ..//cds/threading/details/wintls_manager.h + ..//cds/threading/model.h + ..//cds/urcu/details/base.h + ..//cds/urcu/details/gp.h + ..//cds/urcu/details/gp_decl.h + ..//cds/urcu/details/gpb.h + ..//cds/urcu/details/sh_decl.h + ..//cds/urcu/general_buffered.h + ..//cds/user_setup/allocator.h + ..//cds/user_setup/cache_line.h + ..//cds/user_setup/threading.h + ..//cds/version.h + /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_linux.cpp +CMakeFiles/cds.dir/src/topology_osx.cpp.o + ..//cds/algo/atomic.h + ..//cds/algo/backoff_strategy.h + ..//cds/algo/base.h + ..//cds/algo/bitop.h + ..//cds/algo/elimination_tls.h + ..//cds/algo/int_algo.h + ..//cds/compiler/backoff.h + ..//cds/compiler/bitop.h + ..//cds/compiler/clang/defs.h + ..//cds/compiler/cxx11_atomic.h + ..//cds/compiler/defs.h + ..//cds/compiler/feature_tsan.h + ..//cds/compiler/gcc/amd64/backoff.h + ..//cds/compiler/gcc/amd64/bitop.h + ..//cds/compiler/gcc/amd64/cxx11_atomic.h + ..//cds/compiler/gcc/arm7/backoff.h + ..//cds/compiler/gcc/arm8/backoff.h + ..//cds/compiler/gcc/compiler_barriers.h + 
..//cds/compiler/gcc/compiler_macro.h + ..//cds/compiler/gcc/defs.h + ..//cds/compiler/gcc/ia64/backoff.h + ..//cds/compiler/gcc/ia64/bitop.h + ..//cds/compiler/gcc/ia64/cxx11_atomic.h + ..//cds/compiler/gcc/ppc64/backoff.h + ..//cds/compiler/gcc/ppc64/bitop.h + ..//cds/compiler/gcc/sparc/backoff.h + ..//cds/compiler/gcc/sparc/bitop.h + ..//cds/compiler/gcc/sparc/cxx11_atomic.h + ..//cds/compiler/gcc/x86/backoff.h + ..//cds/compiler/gcc/x86/bitop.h + ..//cds/compiler/gcc/x86/cxx11_atomic.h + ..//cds/compiler/gcc/x86/cxx11_atomic32.h + ..//cds/compiler/icl/compiler_barriers.h + ..//cds/compiler/icl/defs.h + ..//cds/compiler/vc/amd64/backoff.h + ..//cds/compiler/vc/amd64/bitop.h + ..//cds/compiler/vc/amd64/cxx11_atomic.h + ..//cds/compiler/vc/compiler_barriers.h + ..//cds/compiler/vc/defs.h + ..//cds/compiler/vc/x86/backoff.h + ..//cds/compiler/vc/x86/bitop.h + ..//cds/compiler/vc/x86/cxx11_atomic.h + ..//cds/container/details/base.h + ..//cds/container/vyukov_mpmc_cycle_queue.h + ..//cds/details/aligned_type.h + ..//cds/details/allocator.h + ..//cds/details/bitop_generic.h + ..//cds/details/bounded_container.h + ..//cds/details/defs.h + ..//cds/details/is_aligned.h + ..//cds/details/lib.h + ..//cds/details/marked_ptr.h + ..//cds/details/static_functor.h + ..//cds/details/throw_exception.h + ..//cds/gc/details/hp_common.h + ..//cds/gc/details/retired_ptr.h + ..//cds/gc/hp.h + ..//cds/init.h + ..//cds/intrusive/details/base.h + ..//cds/intrusive/details/node_traits.h + ..//cds/intrusive/options.h + ..//cds/opt/buffer.h + ..//cds/opt/options.h + ..//cds/opt/value_cleaner.h + ..//cds/os/aix/alloc_aligned.h + ..//cds/os/aix/topology.h + ..//cds/os/alloc_aligned.h + ..//cds/os/details/fake_topology.h + ..//cds/os/free_bsd/alloc_aligned.h + ..//cds/os/free_bsd/topology.h + ..//cds/os/hpux/alloc_aligned.h + ..//cds/os/hpux/topology.h + ..//cds/os/libc/alloc_aligned.h + ..//cds/os/linux/alloc_aligned.h + ..//cds/os/linux/topology.h + ..//cds/os/osx/topology.h + 
..//cds/os/posix/alloc_aligned.h + ..//cds/os/posix/fake_topology.h + ..//cds/os/posix/thread.h + ..//cds/os/sunos/alloc_aligned.h + ..//cds/os/sunos/topology.h + ..//cds/os/thread.h + ..//cds/os/topology.h + ..//cds/os/win/alloc_aligned.h + ..//cds/os/win/thread.h + ..//cds/os/win/topology.h + ..//cds/threading/details/_common.h + ..//cds/threading/details/auto_detect.h + ..//cds/threading/details/cxx11.h + ..//cds/threading/details/cxx11_manager.h + ..//cds/threading/details/gcc.h + ..//cds/threading/details/gcc_manager.h + ..//cds/threading/details/msvc.h + ..//cds/threading/details/msvc_manager.h + ..//cds/threading/details/pthread.h + ..//cds/threading/details/pthread_manager.h + ..//cds/threading/details/wintls.h + ..//cds/threading/details/wintls_manager.h + ..//cds/threading/model.h + ..//cds/urcu/details/base.h + ..//cds/urcu/details/gp.h + ..//cds/urcu/details/gp_decl.h + ..//cds/urcu/details/gpb.h + ..//cds/urcu/details/sh_decl.h + ..//cds/urcu/general_buffered.h + ..//cds/user_setup/allocator.h + ..//cds/user_setup/cache_line.h + ..//cds/user_setup/threading.h + ..//cds/version.h + /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/topology_osx.cpp +CMakeFiles/cds.dir/src/urcu_gp.cpp.o + ..//cds/algo/atomic.h + ..//cds/algo/backoff_strategy.h + ..//cds/algo/base.h + ..//cds/algo/bitop.h + ..//cds/algo/elimination_tls.h + ..//cds/algo/int_algo.h + ..//cds/compiler/backoff.h + ..//cds/compiler/bitop.h + ..//cds/compiler/clang/defs.h + ..//cds/compiler/cxx11_atomic.h + ..//cds/compiler/defs.h + ..//cds/compiler/feature_tsan.h + ..//cds/compiler/gcc/amd64/backoff.h + ..//cds/compiler/gcc/amd64/bitop.h + ..//cds/compiler/gcc/amd64/cxx11_atomic.h + ..//cds/compiler/gcc/arm7/backoff.h + ..//cds/compiler/gcc/arm8/backoff.h + ..//cds/compiler/gcc/compiler_barriers.h + ..//cds/compiler/gcc/compiler_macro.h + ..//cds/compiler/gcc/defs.h + ..//cds/compiler/gcc/ia64/backoff.h + ..//cds/compiler/gcc/ia64/bitop.h + 
..//cds/compiler/gcc/ia64/cxx11_atomic.h + ..//cds/compiler/gcc/ppc64/backoff.h + ..//cds/compiler/gcc/ppc64/bitop.h + ..//cds/compiler/gcc/sparc/backoff.h + ..//cds/compiler/gcc/sparc/bitop.h + ..//cds/compiler/gcc/sparc/cxx11_atomic.h + ..//cds/compiler/gcc/x86/backoff.h + ..//cds/compiler/gcc/x86/bitop.h + ..//cds/compiler/gcc/x86/cxx11_atomic.h + ..//cds/compiler/gcc/x86/cxx11_atomic32.h + ..//cds/compiler/icl/compiler_barriers.h + ..//cds/compiler/icl/defs.h + ..//cds/compiler/vc/amd64/backoff.h + ..//cds/compiler/vc/amd64/bitop.h + ..//cds/compiler/vc/amd64/cxx11_atomic.h + ..//cds/compiler/vc/compiler_barriers.h + ..//cds/compiler/vc/defs.h + ..//cds/compiler/vc/x86/backoff.h + ..//cds/compiler/vc/x86/bitop.h + ..//cds/compiler/vc/x86/cxx11_atomic.h + ..//cds/container/details/base.h + ..//cds/container/vyukov_mpmc_cycle_queue.h + ..//cds/details/aligned_type.h + ..//cds/details/allocator.h + ..//cds/details/bitop_generic.h + ..//cds/details/bounded_container.h + ..//cds/details/defs.h + ..//cds/details/is_aligned.h + ..//cds/details/lib.h + ..//cds/details/marked_ptr.h + ..//cds/details/static_functor.h + ..//cds/details/throw_exception.h + ..//cds/gc/details/hp_common.h + ..//cds/gc/details/retired_ptr.h + ..//cds/gc/hp.h + ..//cds/init.h + ..//cds/intrusive/details/base.h + ..//cds/intrusive/details/node_traits.h + ..//cds/intrusive/options.h + ..//cds/opt/buffer.h + ..//cds/opt/options.h + ..//cds/opt/value_cleaner.h + ..//cds/os/aix/alloc_aligned.h + ..//cds/os/aix/topology.h + ..//cds/os/alloc_aligned.h + ..//cds/os/details/fake_topology.h + ..//cds/os/free_bsd/alloc_aligned.h + ..//cds/os/free_bsd/topology.h + ..//cds/os/hpux/alloc_aligned.h + ..//cds/os/hpux/topology.h + ..//cds/os/libc/alloc_aligned.h + ..//cds/os/linux/alloc_aligned.h + ..//cds/os/linux/topology.h + ..//cds/os/osx/topology.h + ..//cds/os/posix/alloc_aligned.h + ..//cds/os/posix/fake_topology.h + ..//cds/os/posix/thread.h + ..//cds/os/sunos/alloc_aligned.h + 
..//cds/os/sunos/topology.h + ..//cds/os/thread.h + ..//cds/os/topology.h + ..//cds/os/win/alloc_aligned.h + ..//cds/os/win/thread.h + ..//cds/os/win/topology.h + ..//cds/threading/details/_common.h + ..//cds/threading/details/auto_detect.h + ..//cds/threading/details/cxx11.h + ..//cds/threading/details/cxx11_manager.h + ..//cds/threading/details/gcc.h + ..//cds/threading/details/gcc_manager.h + ..//cds/threading/details/msvc.h + ..//cds/threading/details/msvc_manager.h + ..//cds/threading/details/pthread.h + ..//cds/threading/details/pthread_manager.h + ..//cds/threading/details/wintls.h + ..//cds/threading/details/wintls_manager.h + ..//cds/threading/model.h + ..//cds/urcu/details/base.h + ..//cds/urcu/details/gp.h + ..//cds/urcu/details/gp_decl.h + ..//cds/urcu/details/gpb.h + ..//cds/urcu/details/sh_decl.h + ..//cds/urcu/general_buffered.h + ..//cds/user_setup/allocator.h + ..//cds/user_setup/cache_line.h + ..//cds/user_setup/threading.h + ..//cds/version.h + /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/urcu_gp.cpp +CMakeFiles/cds.dir/src/urcu_sh.cpp.o + ..//cds/algo/atomic.h + ..//cds/algo/backoff_strategy.h + ..//cds/algo/base.h + ..//cds/algo/bitop.h + ..//cds/algo/elimination_tls.h + ..//cds/algo/int_algo.h + ..//cds/compiler/backoff.h + ..//cds/compiler/bitop.h + ..//cds/compiler/clang/defs.h + ..//cds/compiler/cxx11_atomic.h + ..//cds/compiler/defs.h + ..//cds/compiler/feature_tsan.h + ..//cds/compiler/gcc/amd64/backoff.h + ..//cds/compiler/gcc/amd64/bitop.h + ..//cds/compiler/gcc/amd64/cxx11_atomic.h + ..//cds/compiler/gcc/arm7/backoff.h + ..//cds/compiler/gcc/arm8/backoff.h + ..//cds/compiler/gcc/compiler_barriers.h + ..//cds/compiler/gcc/compiler_macro.h + ..//cds/compiler/gcc/defs.h + ..//cds/compiler/gcc/ia64/backoff.h + ..//cds/compiler/gcc/ia64/bitop.h + ..//cds/compiler/gcc/ia64/cxx11_atomic.h + ..//cds/compiler/gcc/ppc64/backoff.h + ..//cds/compiler/gcc/ppc64/bitop.h + 
..//cds/compiler/gcc/sparc/backoff.h + ..//cds/compiler/gcc/sparc/bitop.h + ..//cds/compiler/gcc/sparc/cxx11_atomic.h + ..//cds/compiler/gcc/x86/backoff.h + ..//cds/compiler/gcc/x86/bitop.h + ..//cds/compiler/gcc/x86/cxx11_atomic.h + ..//cds/compiler/gcc/x86/cxx11_atomic32.h + ..//cds/compiler/icl/compiler_barriers.h + ..//cds/compiler/icl/defs.h + ..//cds/compiler/vc/amd64/backoff.h + ..//cds/compiler/vc/amd64/bitop.h + ..//cds/compiler/vc/amd64/cxx11_atomic.h + ..//cds/compiler/vc/compiler_barriers.h + ..//cds/compiler/vc/defs.h + ..//cds/compiler/vc/x86/backoff.h + ..//cds/compiler/vc/x86/bitop.h + ..//cds/compiler/vc/x86/cxx11_atomic.h + ..//cds/container/details/base.h + ..//cds/container/vyukov_mpmc_cycle_queue.h + ..//cds/details/aligned_type.h + ..//cds/details/allocator.h + ..//cds/details/bitop_generic.h + ..//cds/details/bounded_container.h + ..//cds/details/defs.h + ..//cds/details/is_aligned.h + ..//cds/details/lib.h + ..//cds/details/marked_ptr.h + ..//cds/details/static_functor.h + ..//cds/details/throw_exception.h + ..//cds/gc/details/hp_common.h + ..//cds/gc/details/retired_ptr.h + ..//cds/gc/hp.h + ..//cds/init.h + ..//cds/intrusive/details/base.h + ..//cds/intrusive/details/node_traits.h + ..//cds/intrusive/options.h + ..//cds/opt/buffer.h + ..//cds/opt/options.h + ..//cds/opt/value_cleaner.h + ..//cds/os/aix/alloc_aligned.h + ..//cds/os/aix/topology.h + ..//cds/os/alloc_aligned.h + ..//cds/os/details/fake_topology.h + ..//cds/os/free_bsd/alloc_aligned.h + ..//cds/os/free_bsd/topology.h + ..//cds/os/hpux/alloc_aligned.h + ..//cds/os/hpux/topology.h + ..//cds/os/libc/alloc_aligned.h + ..//cds/os/linux/alloc_aligned.h + ..//cds/os/linux/topology.h + ..//cds/os/osx/topology.h + ..//cds/os/posix/alloc_aligned.h + ..//cds/os/posix/fake_topology.h + ..//cds/os/posix/thread.h + ..//cds/os/sunos/alloc_aligned.h + ..//cds/os/sunos/topology.h + ..//cds/os/thread.h + ..//cds/os/topology.h + ..//cds/os/win/alloc_aligned.h + ..//cds/os/win/thread.h + 
..//cds/os/win/topology.h + ..//cds/threading/details/_common.h + ..//cds/threading/details/auto_detect.h + ..//cds/threading/details/cxx11.h + ..//cds/threading/details/cxx11_manager.h + ..//cds/threading/details/gcc.h + ..//cds/threading/details/gcc_manager.h + ..//cds/threading/details/msvc.h + ..//cds/threading/details/msvc_manager.h + ..//cds/threading/details/pthread.h + ..//cds/threading/details/pthread_manager.h + ..//cds/threading/details/wintls.h + ..//cds/threading/details/wintls_manager.h + ..//cds/threading/model.h + ..//cds/urcu/details/base.h + ..//cds/urcu/details/gp.h + ..//cds/urcu/details/gp_decl.h + ..//cds/urcu/details/gpb.h + ..//cds/urcu/details/sh.h + ..//cds/urcu/details/sh_decl.h + ..//cds/urcu/general_buffered.h + ..//cds/user_setup/allocator.h + ..//cds/user_setup/cache_line.h + ..//cds/user_setup/threading.h + ..//cds/version.h + /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/src/urcu_sh.cpp diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/depend.make b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/depend.make new file mode 100644 index 0000000..f7481b8 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/depend.make @@ -0,0 +1,1122 @@ +# CMAKE generated file: DO NOT EDIT! 
+# Generated by "Unix Makefiles" Generator, CMake Version 3.5 + +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/algo/atomic.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/algo/backoff_strategy.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/algo/base.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/algo/bitop.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/algo/elimination_tls.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/algo/int_algo.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/backoff.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/bitop.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/clang/defs.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/cxx11_atomic.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/defs.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/feature_tsan.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/gcc/amd64/backoff.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/gcc/amd64/bitop.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/gcc/amd64/cxx11_atomic.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/gcc/arm7/backoff.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/gcc/arm8/backoff.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/gcc/compiler_barriers.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/gcc/compiler_macro.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/gcc/defs.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/gcc/ia64/backoff.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/gcc/ia64/bitop.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/gcc/ia64/cxx11_atomic.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/gcc/ppc64/backoff.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/gcc/ppc64/bitop.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/gcc/sparc/backoff.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/gcc/sparc/bitop.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/gcc/sparc/cxx11_atomic.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/gcc/x86/backoff.h 
+CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/gcc/x86/bitop.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic32.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/icl/compiler_barriers.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/icl/defs.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/vc/amd64/backoff.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/vc/amd64/bitop.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/vc/amd64/cxx11_atomic.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/vc/compiler_barriers.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/vc/defs.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/vc/x86/backoff.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/vc/x86/bitop.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/compiler/vc/x86/cxx11_atomic.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/container/details/base.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/container/vyukov_mpmc_cycle_queue.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/details/aligned_type.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/details/allocator.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/details/bitop_generic.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/details/bounded_container.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/details/defs.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/details/is_aligned.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/details/lib.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/details/marked_ptr.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/details/static_functor.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/details/throw_exception.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/gc/details/hp_common.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/gc/details/retired_ptr.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/gc/dhp.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/gc/hp.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/init.h 
+CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/intrusive/details/base.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/intrusive/details/node_traits.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/intrusive/free_list.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/intrusive/free_list_selector.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/intrusive/free_list_tagged.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/intrusive/options.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/opt/buffer.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/opt/options.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/opt/value_cleaner.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/os/aix/alloc_aligned.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/os/aix/topology.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/os/alloc_aligned.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/os/details/fake_topology.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/os/free_bsd/alloc_aligned.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/os/free_bsd/topology.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/os/hpux/alloc_aligned.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/os/hpux/topology.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/os/libc/alloc_aligned.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/os/linux/alloc_aligned.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/os/linux/topology.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/os/osx/topology.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/os/posix/alloc_aligned.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/os/posix/fake_topology.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/os/posix/thread.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/os/sunos/alloc_aligned.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/os/sunos/topology.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/os/thread.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/os/topology.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/os/win/alloc_aligned.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/os/win/thread.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/os/win/topology.h 
+CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/threading/details/_common.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/threading/details/auto_detect.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/threading/details/cxx11.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/threading/details/cxx11_manager.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/threading/details/gcc.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/threading/details/gcc_manager.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/threading/details/msvc.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/threading/details/msvc_manager.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/threading/details/pthread.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/threading/details/pthread_manager.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/threading/details/wintls.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/threading/details/wintls_manager.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/threading/model.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/urcu/details/base.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/urcu/details/gp.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/urcu/details/gp_decl.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/urcu/details/gpb.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/urcu/details/sh_decl.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/urcu/general_buffered.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/user_setup/allocator.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/user_setup/cache_line.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/user_setup/threading.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../cds/version.h +CMakeFiles/cds.dir/src/dhp.cpp.o: ../src/dhp.cpp + +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/algo/atomic.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/algo/backoff_strategy.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/algo/base.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/algo/bitop.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/algo/elimination_tls.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/algo/int_algo.h 
+CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/backoff.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/bitop.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/clang/defs.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/cxx11_atomic.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/defs.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/feature_tsan.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/amd64/backoff.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/amd64/bitop.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/amd64/cxx11_atomic.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/arm7/backoff.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/arm8/backoff.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/compiler_barriers.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/compiler_macro.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/defs.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/ia64/backoff.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/ia64/bitop.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/ia64/cxx11_atomic.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/ppc64/backoff.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/ppc64/bitop.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/sparc/backoff.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/sparc/bitop.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/sparc/cxx11_atomic.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/x86/backoff.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/x86/bitop.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic32.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/icl/compiler_barriers.h 
+CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/icl/defs.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/vc/amd64/backoff.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/vc/amd64/bitop.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/vc/amd64/cxx11_atomic.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/vc/compiler_barriers.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/vc/defs.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/vc/x86/backoff.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/vc/x86/bitop.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/compiler/vc/x86/cxx11_atomic.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/container/details/base.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/container/vyukov_mpmc_cycle_queue.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/details/aligned_type.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/details/allocator.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/details/bitop_generic.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/details/bounded_container.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/details/defs.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/details/is_aligned.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/details/lib.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/details/marked_ptr.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/details/static_functor.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/details/throw_exception.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/gc/details/hp_common.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/gc/details/retired_ptr.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/gc/hp.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/init.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/intrusive/details/base.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/intrusive/details/node_traits.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/intrusive/options.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: 
../cds/opt/buffer.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/opt/options.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/opt/value_cleaner.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/os/aix/alloc_aligned.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/os/aix/topology.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/os/alloc_aligned.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/os/details/fake_topology.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/os/free_bsd/alloc_aligned.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/os/free_bsd/topology.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/os/hpux/alloc_aligned.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/os/hpux/topology.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/os/libc/alloc_aligned.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/os/linux/alloc_aligned.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/os/linux/topology.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/os/osx/topology.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/os/posix/alloc_aligned.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/os/posix/fake_topology.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/os/posix/thread.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/os/sunos/alloc_aligned.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/os/sunos/topology.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/os/thread.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/os/topology.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/os/win/alloc_aligned.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/os/win/thread.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/os/win/topology.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/threading/details/_common.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/threading/details/auto_detect.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/threading/details/cxx11.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/threading/details/cxx11_manager.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/threading/details/gcc.h 
+CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/threading/details/gcc_manager.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/threading/details/msvc.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/threading/details/msvc_manager.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/threading/details/pthread.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/threading/details/pthread_manager.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/threading/details/wintls.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/threading/details/wintls_manager.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/threading/model.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/urcu/details/base.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/urcu/details/gp.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/urcu/details/gp_decl.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/urcu/details/gpb.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/urcu/details/sh_decl.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/urcu/general_buffered.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/user_setup/allocator.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/user_setup/cache_line.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/user_setup/threading.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../cds/version.h +CMakeFiles/cds.dir/src/dllmain.cpp.o: ../src/dllmain.cpp + +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/algo/atomic.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/algo/backoff_strategy.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/algo/base.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/algo/bitop.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/algo/elimination_tls.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/algo/int_algo.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/backoff.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/bitop.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/clang/defs.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/cxx11_atomic.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/defs.h 
+CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/feature_tsan.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/gcc/amd64/backoff.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/gcc/amd64/bitop.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/gcc/amd64/cxx11_atomic.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/gcc/arm7/backoff.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/gcc/arm8/backoff.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/gcc/compiler_barriers.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/gcc/compiler_macro.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/gcc/defs.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/gcc/ia64/backoff.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/gcc/ia64/bitop.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/gcc/ia64/cxx11_atomic.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/gcc/ppc64/backoff.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/gcc/ppc64/bitop.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/gcc/sparc/backoff.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/gcc/sparc/bitop.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/gcc/sparc/cxx11_atomic.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/gcc/x86/backoff.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/gcc/x86/bitop.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic32.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/icl/compiler_barriers.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/icl/defs.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/vc/amd64/backoff.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/vc/amd64/bitop.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/vc/amd64/cxx11_atomic.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/vc/compiler_barriers.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/vc/defs.h +CMakeFiles/cds.dir/src/hp.cpp.o: 
../cds/compiler/vc/x86/backoff.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/vc/x86/bitop.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/compiler/vc/x86/cxx11_atomic.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/container/details/base.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/container/vyukov_mpmc_cycle_queue.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/details/aligned_type.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/details/allocator.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/details/bitop_generic.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/details/bounded_container.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/details/defs.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/details/is_aligned.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/details/lib.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/details/marked_ptr.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/details/static_functor.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/details/throw_exception.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/gc/details/hp_common.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/gc/details/retired_ptr.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/gc/hp.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/init.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/intrusive/details/base.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/intrusive/details/node_traits.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/intrusive/options.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/opt/buffer.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/opt/options.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/opt/value_cleaner.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/os/aix/alloc_aligned.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/os/aix/topology.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/os/alloc_aligned.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/os/details/fake_topology.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/os/free_bsd/alloc_aligned.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/os/free_bsd/topology.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/os/hpux/alloc_aligned.h 
+CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/os/hpux/topology.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/os/libc/alloc_aligned.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/os/linux/alloc_aligned.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/os/linux/topology.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/os/osx/topology.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/os/posix/alloc_aligned.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/os/posix/fake_topology.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/os/posix/thread.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/os/sunos/alloc_aligned.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/os/sunos/topology.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/os/thread.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/os/topology.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/os/win/alloc_aligned.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/os/win/thread.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/os/win/topology.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/threading/details/_common.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/threading/details/auto_detect.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/threading/details/cxx11.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/threading/details/cxx11_manager.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/threading/details/gcc.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/threading/details/gcc_manager.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/threading/details/msvc.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/threading/details/msvc_manager.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/threading/details/pthread.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/threading/details/pthread_manager.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/threading/details/wintls.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/threading/details/wintls_manager.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/threading/model.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/urcu/details/base.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/urcu/details/gp.h +CMakeFiles/cds.dir/src/hp.cpp.o: 
../cds/urcu/details/gp_decl.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/urcu/details/gpb.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/urcu/details/sh_decl.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/urcu/general_buffered.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/user_setup/allocator.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/user_setup/cache_line.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/user_setup/threading.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../cds/version.h +CMakeFiles/cds.dir/src/hp.cpp.o: ../src/hp.cpp + +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/algo/atomic.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/algo/backoff_strategy.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/algo/base.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/algo/bitop.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/algo/elimination_tls.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/algo/int_algo.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/backoff.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/bitop.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/clang/defs.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/cxx11_atomic.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/defs.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/feature_tsan.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/gcc/amd64/backoff.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/gcc/amd64/bitop.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/gcc/amd64/cxx11_atomic.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/gcc/arm7/backoff.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/gcc/arm8/backoff.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/gcc/compiler_barriers.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/gcc/compiler_macro.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/gcc/defs.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/gcc/ia64/backoff.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/gcc/ia64/bitop.h 
+CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/gcc/ia64/cxx11_atomic.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/gcc/ppc64/backoff.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/gcc/ppc64/bitop.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/gcc/sparc/backoff.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/gcc/sparc/bitop.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/gcc/sparc/cxx11_atomic.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/gcc/x86/backoff.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/gcc/x86/bitop.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic32.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/icl/compiler_barriers.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/icl/defs.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/vc/amd64/backoff.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/vc/amd64/bitop.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/vc/amd64/cxx11_atomic.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/vc/compiler_barriers.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/vc/defs.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/vc/x86/backoff.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/vc/x86/bitop.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/compiler/vc/x86/cxx11_atomic.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/container/details/base.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/container/vyukov_mpmc_cycle_queue.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/details/aligned_type.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/details/allocator.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/details/bitop_generic.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/details/bounded_container.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/details/defs.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/details/is_aligned.h 
+CMakeFiles/cds.dir/src/init.cpp.o: ../cds/details/lib.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/details/marked_ptr.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/details/static_functor.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/details/throw_exception.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/gc/details/hp_common.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/gc/details/retired_ptr.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/gc/hp.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/init.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/intrusive/details/base.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/intrusive/details/node_traits.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/intrusive/options.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/opt/buffer.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/opt/options.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/opt/value_cleaner.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/os/aix/alloc_aligned.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/os/aix/topology.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/os/alloc_aligned.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/os/details/fake_topology.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/os/free_bsd/alloc_aligned.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/os/free_bsd/topology.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/os/hpux/alloc_aligned.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/os/hpux/topology.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/os/libc/alloc_aligned.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/os/linux/alloc_aligned.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/os/linux/topology.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/os/osx/topology.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/os/posix/alloc_aligned.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/os/posix/fake_topology.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/os/posix/thread.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/os/sunos/alloc_aligned.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/os/sunos/topology.h 
+CMakeFiles/cds.dir/src/init.cpp.o: ../cds/os/thread.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/os/topology.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/os/win/alloc_aligned.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/os/win/thread.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/os/win/topology.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/threading/details/_common.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/threading/details/auto_detect.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/threading/details/cxx11.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/threading/details/cxx11_manager.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/threading/details/gcc.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/threading/details/gcc_manager.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/threading/details/msvc.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/threading/details/msvc_manager.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/threading/details/pthread.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/threading/details/pthread_manager.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/threading/details/wintls.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/threading/details/wintls_manager.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/threading/model.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/urcu/details/base.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/urcu/details/gp.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/urcu/details/gp_decl.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/urcu/details/gpb.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/urcu/details/sh_decl.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/urcu/general_buffered.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/user_setup/allocator.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/user_setup/cache_line.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/user_setup/threading.h +CMakeFiles/cds.dir/src/init.cpp.o: ../cds/version.h +CMakeFiles/cds.dir/src/init.cpp.o: ../src/init.cpp + +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/algo/atomic.h 
+CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/algo/backoff_strategy.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/algo/base.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/algo/bitop.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/algo/elimination_tls.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/algo/int_algo.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/backoff.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/bitop.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/clang/defs.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/cxx11_atomic.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/defs.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/feature_tsan.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/amd64/backoff.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/amd64/bitop.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/amd64/cxx11_atomic.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/arm7/backoff.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/arm8/backoff.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/compiler_barriers.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/compiler_macro.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/defs.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/ia64/backoff.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/ia64/bitop.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/ia64/cxx11_atomic.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/ppc64/backoff.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/ppc64/bitop.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/sparc/backoff.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/sparc/bitop.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: 
../cds/compiler/gcc/sparc/cxx11_atomic.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/x86/backoff.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/x86/bitop.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic32.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/icl/compiler_barriers.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/icl/defs.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/vc/amd64/backoff.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/vc/amd64/bitop.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/vc/amd64/cxx11_atomic.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/vc/compiler_barriers.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/vc/defs.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/vc/x86/backoff.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/vc/x86/bitop.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/compiler/vc/x86/cxx11_atomic.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/container/details/base.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/container/vyukov_mpmc_cycle_queue.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/details/aligned_type.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/details/allocator.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/details/bitop_generic.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/details/bounded_container.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/details/defs.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/details/is_aligned.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/details/lib.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/details/marked_ptr.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/details/static_functor.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: 
../cds/details/throw_exception.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/gc/details/hp_common.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/gc/details/retired_ptr.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/gc/dhp.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/gc/hp.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/init.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/intrusive/details/base.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/intrusive/details/node_traits.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/intrusive/free_list.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/intrusive/free_list_selector.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/intrusive/free_list_tagged.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/intrusive/options.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/opt/buffer.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/opt/options.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/opt/value_cleaner.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/os/aix/alloc_aligned.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/os/aix/topology.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/os/alloc_aligned.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/os/details/fake_topology.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/os/free_bsd/alloc_aligned.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/os/free_bsd/topology.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/os/hpux/alloc_aligned.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/os/hpux/topology.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/os/libc/alloc_aligned.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/os/linux/alloc_aligned.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/os/linux/topology.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/os/osx/topology.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/os/posix/alloc_aligned.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: 
../cds/os/posix/fake_topology.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/os/posix/thread.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/os/sunos/alloc_aligned.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/os/sunos/topology.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/os/thread.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/os/topology.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/os/win/alloc_aligned.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/os/win/thread.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/os/win/topology.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/threading/details/_common.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/threading/details/auto_detect.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/threading/details/cxx11.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/threading/details/cxx11_manager.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/threading/details/gcc.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/threading/details/gcc_manager.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/threading/details/msvc.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/threading/details/msvc_manager.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/threading/details/pthread.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/threading/details/pthread_manager.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/threading/details/wintls.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/threading/details/wintls_manager.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/threading/model.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/urcu/details/base.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/urcu/details/gp.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/urcu/details/gp_decl.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/urcu/details/gpb.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/urcu/details/sh_decl.h 
+CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/urcu/general_buffered.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/user_setup/allocator.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/user_setup/cache_line.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/user_setup/threading.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../cds/version.h +CMakeFiles/cds.dir/src/thread_data.cpp.o: ../src/thread_data.cpp + +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/algo/atomic.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/algo/backoff_strategy.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/algo/base.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/algo/bitop.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/algo/elimination_tls.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/algo/int_algo.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/backoff.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/bitop.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/clang/defs.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/cxx11_atomic.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/defs.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/feature_tsan.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/amd64/backoff.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/amd64/bitop.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/amd64/cxx11_atomic.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/arm7/backoff.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/arm8/backoff.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/compiler_barriers.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/compiler_macro.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/defs.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/ia64/backoff.h 
+CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/ia64/bitop.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/ia64/cxx11_atomic.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/ppc64/backoff.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/ppc64/bitop.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/sparc/backoff.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/sparc/bitop.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/sparc/cxx11_atomic.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/x86/backoff.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/x86/bitop.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic32.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/icl/compiler_barriers.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/icl/defs.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/vc/amd64/backoff.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/vc/amd64/bitop.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/vc/amd64/cxx11_atomic.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/vc/compiler_barriers.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/vc/defs.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/vc/x86/backoff.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/vc/x86/bitop.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/compiler/vc/x86/cxx11_atomic.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/container/details/base.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/container/vyukov_mpmc_cycle_queue.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/details/aligned_type.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/details/allocator.h 
+CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/details/bitop_generic.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/details/bounded_container.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/details/defs.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/details/is_aligned.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/details/lib.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/details/marked_ptr.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/details/static_functor.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/details/throw_exception.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/gc/details/hp_common.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/gc/details/retired_ptr.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/gc/hp.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/init.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/intrusive/details/base.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/intrusive/details/node_traits.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/intrusive/options.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/opt/buffer.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/opt/options.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/opt/value_cleaner.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/os/aix/alloc_aligned.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/os/aix/topology.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/os/alloc_aligned.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/os/details/fake_topology.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/os/free_bsd/alloc_aligned.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/os/free_bsd/topology.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/os/hpux/alloc_aligned.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/os/hpux/topology.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/os/libc/alloc_aligned.h 
+CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/os/linux/alloc_aligned.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/os/linux/topology.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/os/osx/topology.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/os/posix/alloc_aligned.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/os/posix/fake_topology.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/os/posix/thread.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/os/sunos/alloc_aligned.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/os/sunos/topology.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/os/thread.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/os/topology.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/os/win/alloc_aligned.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/os/win/thread.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/os/win/topology.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/threading/details/_common.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/threading/details/auto_detect.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/threading/details/cxx11.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/threading/details/cxx11_manager.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/threading/details/gcc.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/threading/details/gcc_manager.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/threading/details/msvc.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/threading/details/msvc_manager.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/threading/details/pthread.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/threading/details/pthread_manager.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/threading/details/wintls.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/threading/details/wintls_manager.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/threading/model.h 
+CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/urcu/details/base.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/urcu/details/gp.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/urcu/details/gp_decl.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/urcu/details/gpb.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/urcu/details/sh_decl.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/urcu/general_buffered.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/user_setup/allocator.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/user_setup/cache_line.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/user_setup/threading.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../cds/version.h +CMakeFiles/cds.dir/src/topology_hpux.cpp.o: ../src/topology_hpux.cpp + +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/algo/atomic.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/algo/backoff_strategy.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/algo/base.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/algo/bitop.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/algo/elimination_tls.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/algo/int_algo.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/backoff.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/bitop.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/clang/defs.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/cxx11_atomic.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/defs.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/feature_tsan.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/amd64/backoff.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/amd64/bitop.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/amd64/cxx11_atomic.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/arm7/backoff.h 
+CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/arm8/backoff.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/compiler_barriers.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/compiler_macro.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/defs.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/ia64/backoff.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/ia64/bitop.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/ia64/cxx11_atomic.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/ppc64/backoff.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/ppc64/bitop.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/sparc/backoff.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/sparc/bitop.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/sparc/cxx11_atomic.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/x86/backoff.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/x86/bitop.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic32.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/icl/compiler_barriers.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/icl/defs.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/vc/amd64/backoff.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/vc/amd64/bitop.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/vc/amd64/cxx11_atomic.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/vc/compiler_barriers.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/vc/defs.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/vc/x86/backoff.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: 
../cds/compiler/vc/x86/bitop.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/compiler/vc/x86/cxx11_atomic.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/container/details/base.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/container/vyukov_mpmc_cycle_queue.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/details/aligned_type.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/details/allocator.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/details/bitop_generic.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/details/bounded_container.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/details/defs.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/details/is_aligned.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/details/lib.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/details/marked_ptr.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/details/static_functor.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/details/throw_exception.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/gc/details/hp_common.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/gc/details/retired_ptr.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/gc/hp.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/init.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/intrusive/details/base.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/intrusive/details/node_traits.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/intrusive/options.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/opt/buffer.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/opt/options.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/opt/value_cleaner.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/os/aix/alloc_aligned.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/os/aix/topology.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/os/alloc_aligned.h 
+CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/os/details/fake_topology.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/os/free_bsd/alloc_aligned.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/os/free_bsd/topology.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/os/hpux/alloc_aligned.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/os/hpux/topology.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/os/libc/alloc_aligned.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/os/linux/alloc_aligned.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/os/linux/topology.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/os/osx/topology.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/os/posix/alloc_aligned.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/os/posix/fake_topology.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/os/posix/thread.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/os/sunos/alloc_aligned.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/os/sunos/topology.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/os/thread.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/os/topology.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/os/win/alloc_aligned.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/os/win/thread.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/os/win/topology.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/threading/details/_common.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/threading/details/auto_detect.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/threading/details/cxx11.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/threading/details/cxx11_manager.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/threading/details/gcc.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/threading/details/gcc_manager.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/threading/details/msvc.h 
+CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/threading/details/msvc_manager.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/threading/details/pthread.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/threading/details/pthread_manager.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/threading/details/wintls.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/threading/details/wintls_manager.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/threading/model.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/urcu/details/base.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/urcu/details/gp.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/urcu/details/gp_decl.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/urcu/details/gpb.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/urcu/details/sh_decl.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/urcu/general_buffered.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/user_setup/allocator.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/user_setup/cache_line.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/user_setup/threading.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../cds/version.h +CMakeFiles/cds.dir/src/topology_linux.cpp.o: ../src/topology_linux.cpp + +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/algo/atomic.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/algo/backoff_strategy.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/algo/base.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/algo/bitop.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/algo/elimination_tls.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/algo/int_algo.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/backoff.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/bitop.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/clang/defs.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/cxx11_atomic.h 
+CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/defs.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/feature_tsan.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/amd64/backoff.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/amd64/bitop.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/amd64/cxx11_atomic.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/arm7/backoff.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/arm8/backoff.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/compiler_barriers.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/compiler_macro.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/defs.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/ia64/backoff.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/ia64/bitop.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/ia64/cxx11_atomic.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/ppc64/backoff.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/ppc64/bitop.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/sparc/backoff.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/sparc/bitop.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/sparc/cxx11_atomic.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/x86/backoff.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/x86/bitop.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic32.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/icl/compiler_barriers.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/icl/defs.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/vc/amd64/backoff.h 
+CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/vc/amd64/bitop.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/vc/amd64/cxx11_atomic.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/vc/compiler_barriers.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/vc/defs.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/vc/x86/backoff.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/vc/x86/bitop.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/compiler/vc/x86/cxx11_atomic.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/container/details/base.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/container/vyukov_mpmc_cycle_queue.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/details/aligned_type.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/details/allocator.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/details/bitop_generic.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/details/bounded_container.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/details/defs.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/details/is_aligned.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/details/lib.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/details/marked_ptr.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/details/static_functor.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/details/throw_exception.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/gc/details/hp_common.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/gc/details/retired_ptr.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/gc/hp.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/init.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/intrusive/details/base.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/intrusive/details/node_traits.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/intrusive/options.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/opt/buffer.h 
+CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/opt/options.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/opt/value_cleaner.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/os/aix/alloc_aligned.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/os/aix/topology.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/os/alloc_aligned.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/os/details/fake_topology.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/os/free_bsd/alloc_aligned.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/os/free_bsd/topology.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/os/hpux/alloc_aligned.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/os/hpux/topology.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/os/libc/alloc_aligned.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/os/linux/alloc_aligned.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/os/linux/topology.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/os/osx/topology.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/os/posix/alloc_aligned.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/os/posix/fake_topology.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/os/posix/thread.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/os/sunos/alloc_aligned.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/os/sunos/topology.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/os/thread.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/os/topology.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/os/win/alloc_aligned.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/os/win/thread.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/os/win/topology.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/threading/details/_common.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/threading/details/auto_detect.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/threading/details/cxx11.h 
+CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/threading/details/cxx11_manager.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/threading/details/gcc.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/threading/details/gcc_manager.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/threading/details/msvc.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/threading/details/msvc_manager.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/threading/details/pthread.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/threading/details/pthread_manager.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/threading/details/wintls.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/threading/details/wintls_manager.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/threading/model.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/urcu/details/base.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/urcu/details/gp.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/urcu/details/gp_decl.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/urcu/details/gpb.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/urcu/details/sh_decl.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/urcu/general_buffered.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/user_setup/allocator.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/user_setup/cache_line.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/user_setup/threading.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../cds/version.h +CMakeFiles/cds.dir/src/topology_osx.cpp.o: ../src/topology_osx.cpp + +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/algo/atomic.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/algo/backoff_strategy.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/algo/base.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/algo/bitop.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/algo/elimination_tls.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/algo/int_algo.h 
+CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/backoff.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/bitop.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/clang/defs.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/cxx11_atomic.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/defs.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/feature_tsan.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/amd64/backoff.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/amd64/bitop.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/amd64/cxx11_atomic.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/arm7/backoff.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/arm8/backoff.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/compiler_barriers.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/compiler_macro.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/defs.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/ia64/backoff.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/ia64/bitop.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/ia64/cxx11_atomic.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/ppc64/backoff.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/ppc64/bitop.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/sparc/backoff.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/sparc/bitop.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/sparc/cxx11_atomic.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/x86/backoff.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/x86/bitop.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic32.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/icl/compiler_barriers.h 
+CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/icl/defs.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/vc/amd64/backoff.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/vc/amd64/bitop.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/vc/amd64/cxx11_atomic.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/vc/compiler_barriers.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/vc/defs.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/vc/x86/backoff.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/vc/x86/bitop.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/compiler/vc/x86/cxx11_atomic.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/container/details/base.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/container/vyukov_mpmc_cycle_queue.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/details/aligned_type.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/details/allocator.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/details/bitop_generic.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/details/bounded_container.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/details/defs.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/details/is_aligned.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/details/lib.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/details/marked_ptr.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/details/static_functor.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/details/throw_exception.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/gc/details/hp_common.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/gc/details/retired_ptr.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/gc/hp.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/init.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/intrusive/details/base.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/intrusive/details/node_traits.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/intrusive/options.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: 
../cds/opt/buffer.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/opt/options.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/opt/value_cleaner.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/os/aix/alloc_aligned.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/os/aix/topology.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/os/alloc_aligned.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/os/details/fake_topology.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/os/free_bsd/alloc_aligned.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/os/free_bsd/topology.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/os/hpux/alloc_aligned.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/os/hpux/topology.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/os/libc/alloc_aligned.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/os/linux/alloc_aligned.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/os/linux/topology.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/os/osx/topology.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/os/posix/alloc_aligned.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/os/posix/fake_topology.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/os/posix/thread.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/os/sunos/alloc_aligned.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/os/sunos/topology.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/os/thread.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/os/topology.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/os/win/alloc_aligned.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/os/win/thread.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/os/win/topology.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/threading/details/_common.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/threading/details/auto_detect.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/threading/details/cxx11.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/threading/details/cxx11_manager.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/threading/details/gcc.h 
+CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/threading/details/gcc_manager.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/threading/details/msvc.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/threading/details/msvc_manager.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/threading/details/pthread.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/threading/details/pthread_manager.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/threading/details/wintls.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/threading/details/wintls_manager.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/threading/model.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/urcu/details/base.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/urcu/details/gp.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/urcu/details/gp_decl.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/urcu/details/gpb.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/urcu/details/sh_decl.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/urcu/general_buffered.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/user_setup/allocator.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/user_setup/cache_line.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/user_setup/threading.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../cds/version.h +CMakeFiles/cds.dir/src/urcu_gp.cpp.o: ../src/urcu_gp.cpp + +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/algo/atomic.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/algo/backoff_strategy.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/algo/base.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/algo/bitop.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/algo/elimination_tls.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/algo/int_algo.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/backoff.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/bitop.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/clang/defs.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/cxx11_atomic.h 
+CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/defs.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/feature_tsan.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/amd64/backoff.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/amd64/bitop.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/amd64/cxx11_atomic.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/arm7/backoff.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/arm8/backoff.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/compiler_barriers.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/compiler_macro.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/defs.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/ia64/backoff.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/ia64/bitop.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/ia64/cxx11_atomic.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/ppc64/backoff.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/ppc64/bitop.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/sparc/backoff.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/sparc/bitop.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/sparc/cxx11_atomic.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/x86/backoff.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/x86/bitop.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/gcc/x86/cxx11_atomic32.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/icl/compiler_barriers.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/icl/defs.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/vc/amd64/backoff.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/vc/amd64/bitop.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: 
../cds/compiler/vc/amd64/cxx11_atomic.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/vc/compiler_barriers.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/vc/defs.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/vc/x86/backoff.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/vc/x86/bitop.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/compiler/vc/x86/cxx11_atomic.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/container/details/base.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/container/vyukov_mpmc_cycle_queue.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/details/aligned_type.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/details/allocator.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/details/bitop_generic.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/details/bounded_container.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/details/defs.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/details/is_aligned.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/details/lib.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/details/marked_ptr.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/details/static_functor.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/details/throw_exception.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/gc/details/hp_common.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/gc/details/retired_ptr.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/gc/hp.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/init.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/intrusive/details/base.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/intrusive/details/node_traits.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/intrusive/options.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/opt/buffer.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/opt/options.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/opt/value_cleaner.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/os/aix/alloc_aligned.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: 
../cds/os/aix/topology.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/os/alloc_aligned.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/os/details/fake_topology.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/os/free_bsd/alloc_aligned.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/os/free_bsd/topology.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/os/hpux/alloc_aligned.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/os/hpux/topology.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/os/libc/alloc_aligned.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/os/linux/alloc_aligned.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/os/linux/topology.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/os/osx/topology.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/os/posix/alloc_aligned.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/os/posix/fake_topology.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/os/posix/thread.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/os/sunos/alloc_aligned.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/os/sunos/topology.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/os/thread.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/os/topology.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/os/win/alloc_aligned.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/os/win/thread.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/os/win/topology.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/threading/details/_common.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/threading/details/auto_detect.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/threading/details/cxx11.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/threading/details/cxx11_manager.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/threading/details/gcc.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/threading/details/gcc_manager.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/threading/details/msvc.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/threading/details/msvc_manager.h 
+CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/threading/details/pthread.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/threading/details/pthread_manager.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/threading/details/wintls.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/threading/details/wintls_manager.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/threading/model.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/urcu/details/base.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/urcu/details/gp.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/urcu/details/gp_decl.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/urcu/details/gpb.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/urcu/details/sh.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/urcu/details/sh_decl.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/urcu/general_buffered.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/user_setup/allocator.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/user_setup/cache_line.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/user_setup/threading.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../cds/version.h +CMakeFiles/cds.dir/src/urcu_sh.cpp.o: ../src/urcu_sh.cpp + diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/flags.make b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/flags.make new file mode 100644 index 0000000..1fbf198 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/flags.make @@ -0,0 +1,10 @@ +# CMAKE generated file: DO NOT EDIT! 
+# Generated by "Unix Makefiles" Generator, CMake Version 3.5 + +# compile CXX with /usr/bin/c++ +CXX_FLAGS = -O3 -DNDEBUG -fPIC -std=c++11 -mcx16 -Wall -Wextra -pedantic -Wno-unused-local-typedefs + +CXX_DEFINES = -Dcds_EXPORTS + +CXX_INCLUDES = -I/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release -I/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2 + diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/link.txt b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/link.txt new file mode 100644 index 0000000..7b53fc0 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/link.txt @@ -0,0 +1 @@ +/usr/bin/c++ -fPIC -O3 -DNDEBUG -shared -Wl,-soname,libcds.so.2.3.2 -o bin/libcds.so.2.3.2 CMakeFiles/cds.dir/src/init.cpp.o CMakeFiles/cds.dir/src/hp.cpp.o CMakeFiles/cds.dir/src/dhp.cpp.o CMakeFiles/cds.dir/src/urcu_gp.cpp.o CMakeFiles/cds.dir/src/urcu_sh.cpp.o CMakeFiles/cds.dir/src/thread_data.cpp.o CMakeFiles/cds.dir/src/topology_hpux.cpp.o CMakeFiles/cds.dir/src/topology_linux.cpp.o CMakeFiles/cds.dir/src/topology_osx.cpp.o CMakeFiles/cds.dir/src/dllmain.cpp.o -lpthread diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/progress.make b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/progress.make new file mode 100644 index 0000000..eeadf29 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/progress.make @@ -0,0 +1,12 @@ +CMAKE_PROGRESS_1 = 1 +CMAKE_PROGRESS_2 = 2 +CMAKE_PROGRESS_3 = 3 +CMAKE_PROGRESS_4 = 4 +CMAKE_PROGRESS_5 = 5 +CMAKE_PROGRESS_6 = 6 +CMAKE_PROGRESS_7 = 7 +CMAKE_PROGRESS_8 = 8 +CMAKE_PROGRESS_9 = 9 +CMAKE_PROGRESS_10 = 10 +CMAKE_PROGRESS_11 = 11 + diff --git 
a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/dhp.cpp.o b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/dhp.cpp.o new file mode 100644 index 0000000000000000000000000000000000000000..195f212ef98893486c7ee7116e95cfce84960ed9 GIT binary patch literal 25424 zcmd^ndw5*MmFI1>BsaEEH?adAFwll3$O(vUTej60?3OKAbfxO6?r_GOdf5SmVJ|`rX5c+L~=7mEBMkq1S78FDqT!IS2UF+$q>7uRUo@%{db zV_I8<|Kb5o#!@5G*j|TY)X#=L^s?y4mb2LK`(Dn~aFp|A>C-DtRyVC}WY$w`_=Dxl z^AtP$DU1HrpMDvHx^>)t@ebn9(y^AuD5=^edNSGPx4%r`c08;9ny!7E;l^;*x`?j=(A?49{t@n zn)LKnPl_ttb4WitxyGJ7Vnh%3Uu{I6UY#x;S!2z9{?04)bOXo^>%(J^@NB&`OhWA0 zuMlMb{6r-fw!gqE-_2jI-T>2R*QbrMJgd{Fw2KT&W7eVZd!Z6b7k{K%zw-}lI(hQs z`16YWtcmIyb?XG1KUuxDx`|zT4?4fL(SQF=j2as}qFaB`t(W`*bLll54>0Qw#{746 zdwa;Ir#|qo;ZMEHUNgz8qYRyO%&DhmA7geA2Ew1(192>R(0}(Myzsm~^*CAAo*mMy znIUG~IH}v$p!`b)?J%2`6*u#W3?nKA$E$O+VgkXKl% zd~En*@6kpgPkrM5=8H76)nBi^g?oPWjWkT~U%Sr7zA}wBo0>f7AK>;K9Aefd7|rU( zedwGyo26&N`iJ~G|CWqCa$pjgqOFrO6E(wL3H_I1Cg`36MtU=pI@hp<;0erg)SS+& zfS}N$ljZ_ueax(}U2O0fW-luF}a zVped5N-)5+lg|_NoL08bX(<_EsRQLu-5CbNAx18N4!+`aaBMp?=Uv&AtAAtUnaB|+ z1;vAI#Un$cfj>vn%7U8ub!%9+j`#<*@tHYfVE!1?yW}sP3;+_A9vt!y>?I0o7!9X} z18juMtg(?8`Gn>lSWUGf_(NGW2buLOiWI8}nj`X{$jR1(heci`@|eh($QwkyUgR4^ zzFFiwBH!xdckQ4YiH+QKuT!Gpu59taY;kwCxF=hDG+W%8Ee>Ui2eZXP+2Sv=#nEi> zXtp?!El!H!__^HA$Zs%U{(*Zx!~9G)1X%iBTHqK0{Bnv0UX6@r+kn8+rl%>B#dlGCW#Qs5ymmD8w+>%Gf{myHYLw? 
zhVG{{YUAJML$<4Jm$JU5j``X|sB3FuoPgQzj{`m)&zZ%eAhQ;cd+guEQlEMIuV&T+ zLSiYiw)ogc9m$4c)y)xkP~_!2x9Y-dWEvG|{okt~CU?dcT<6#Yr-({(+xB60`dd zVRsQT`)B+?nwW;ggjtv@(dU@eGsl0=w}4`IVP3%oPnefSp2?0!YS<&^P|lbDv`%1AK7s)Wk_sf$04UgMLGSS-Z4tx&HH&ro&Jf{HFd1Yu&<=r zoSp8Xrx(hZwc$EptbGl;4a?p^!@?pkjs=vB ztn!iA3Ee;9qrf27C}x%FwpX_sX-)Mo>o}({&(kfhZUM9n+HDkv#$V=fsDAJr!+I~d zo7wBIvegCH@LSlY?!#i=H=D*?M*Y+fjJ|KnL`Ln8&^Xk_?rkXGQu}83k6Yirke++s zzP#VUS+Kfd9ha+XGp0k8voYAu(j6fLRX!bP-$uq_b_r!ql3N?;jRC`I4F#MU?x?!; zl%V4q8)kEE!}_q^N;n$~r}}dd4JEVs@OH+JWyimIUGKKDd%@i9L zBUBZ6hHp6x`*JajF|_`Hci>(1*ky-xyD1c`$4+A@A9p?ain&-%cL((J_bBAC=$qyo zT6gR_wUcY%JJ@$c-%L(8Y?!EEO+HIgZ7xgS6wsG$2$h@XV4HT59BqF-C1c2F^^|tjOz7Te>pe#9%LmDc%&C14m}*)!6(( z0ygPzWU4o3=@yPx0MH-47YmHPR6mGa+jGf1r0g6WJ`vDIYC?PtQ-M!vDgx%@fXMEGrJt=@2(C#4_fLF`aBY-Ph;1V=~Y8+ubeH}zo9pe>dum)+kY3Lpo9H( z_0t2PQ4~Fn8xFp1`gH4!vAMK`us6_#^sj@$L;qU%HH=Y}nIQhbpMe{bggKL>`j+ZA zCd!6TFf&np?eHrT1*Z`VNJmIWwtj_(j^Hlnawp3Bx^+cJXz+PRbwpdg&g#RF@ngG0 zyv3q|kn4`MoXD^*#r&qQ0AGH`TrSpSIDijEf#cU|6ZQU|{3t|WJ?WRW*2Y)!IA|eU z+dtqV0runwvm8^w6o$2jco@QN`a7J0VcdOuo-wP4OE&yJxionP`6e9HfQ!?8K)T5@ zf!&??Wo8`|`w!+g9TVy2H5<4IDE6z=me1m^c3bYqO!9Mq_Z!MRB%X&g3d0` zutt+NDS=S*6Z-R$%h+vn%5=t;HV}hDIMt-LI-^Tx(JJ~ArawVahR+a8V@x{CoAGZu zLja%22G_zqBZKRi`3y`6tJ2syeDHYQT4w)|=j=0OPn4?C%u8ISt_qkV=xkUr`!$@e zr4cArnSQ5ZmUn!g=%QFUaH4+OK^%I%dN`9p`nvVr8O;gVKfxxu$4`0`-=z;6Iw3|8+ifa6kv87G4~Uk-Zq4)`YT!bnm4IV zuA19J4Wvx=V6CT%-=Ni}f&Q!~_uXjNopi0?AK)u}8gbG{n;=MU4>9iFx;@CQAB7UXf ze`1KGCYG}YX&QTu9DV5+Cf{A3-UC|y{hbg&UD<)RdYBc#{DXRXN&V8wkpJ$Np|@_Y z!zEVTq#k{__nfhFK%TXybr2h2tbz@bxQ5^q#|#Ub$Pu3r{WQ6k*7K#horH|FkWo9y zqQBpA_Sio_s)$QD{0uD|(^F5u@?j%7o_xybv|Sux_SPV6EHF4*%c)R~0(=lRRL~fH zkBvB%k5#m?GpnxLKkzP%jP((V5)<5W4nNnT5JW#U|G-&Pk1IeU{bSmB7>Hi$_2`@a zfyc-W)<=;K>gT_&51;h%xN9%1w?++I8Vw%##v+P@htZwNAr@WMH*f3&IAnao&E_9b za}YKDyFWk$f+_|+`EGsmY5%}60D82_f6v=gV`B!{tI(Y)#y85?+~VpFmvHDR;Pkhg zEzv>$z!4&cBf=>D!ESO150*kk_m;ea{(-hvIXVG7p+p$!QO`u-JE5rKCus%9k3;!8gNmqKfm9!3J_lIo$ zK_iVTKV0{Y9R?rYmObSei*YYDu}=Z_G~)i@pD6yX@WD3zfq7JIt>TfMx;_@*^rZao 
zQQW*@(E4UE?gliPOSe|}2ND=R-VItt&w{hdsNw`EWw)`IM*6M;6w8zi2L_X-5{8oQ zIx+QUJ?R4ljv;LRD~89puc$|m8>>J^u}>twg#F=~{=xZx#^U%R)=R^h94jV!&_;)b zC4i0BPtG$-D1-oz^LMNP-L9v_{|Rc0``aAw)nkg#gzK%56NM(*%<)^~dR%Cpah3c7 zFOpaxqwjIt5$AU89KNe}I_8`~sAJ=}csM$~m+p9>af1?v<*ve6aI8sI;&>vKUjN`d z++X++My%XdWmaxR_lWkoDrRA1tvcj&&Y-61VmJlXm5=Gf4FmLK_Iia6MLnW@I=w?n z=gn64dQcQ6dO`LfXlUVZ*!Uedt!KLRnOxHTP;gtO+ZX9*WbpOm+_CpDGScq$-%=z7 z?jPU<#Ite#W2sSqM*1Pj28NQaGCM_Ukdb~^EJZZz!Q?RQg*u%^!-$@^=vJ=O zamj%otKKM_uk?R}Hz~HeTP6(qG6MJ)5(=0<_91am#fN0(n>?#XSf=KH?rk93S2HAozH40!PS^S|2H5(1n0AL6jmE)F3qQ z5BzV^b!jr}AFz2eT!RhDGyZ`TV0oOy5jqHkBPSv!ai>#ne+~0;0$)E2>jeWJ5Y#Jbb-@E%PHK{+NWFR$YU>bH{_=X^ZZ=pXNSgLlIw$>>Wq2n2VbwZUQK>mAK^PQ z>Ib=l>Rb;|NKPFH%Xo~tz8yrDF+Bf#X(usLPMCMlu^oNL41h9zc2|!5Wv~CO zgL3Y>?Iq-aEBFUHgcslV+0MRo@O5VW%Dj}>f6tc%d#4Ee=DTvfV$A?a@=f#upR@Nn zU;6Q(lV4tANyVp19DM!?%g6an4{?d*ENvs$VbeV>{7BY}zaiFvkyQ;;2|s5dkBOZ7 zDRAzm$jb#D6!{#H2So03@>0ssl=yy-=Ii=?0x8MEDP0Fx)A&yioC@(*)bnd}hsM#^ z`t{A6xIfWicK2R=b-X^F=zlEnpc&;~o(!ejXp8!49K3r4C%WNLD-=VQ=b@uZG z;zr(kY zwee|pNMdO4VNSW?m0P`&Cq03WJh0;4qLO_@+Dkpe56;bbBg=>-j z=_z^KqdiB`WMU(km;blL%#4wmIQg1|EvYWoVqrb_AmNYw{qpx z!Sc16l3ivpxTvDCB7EiIB!7#1BeJ+6Tv-tb&vz<<3&N4b;l<%76rkTVQS#hzgXY;9 z@LVu+nr|1@YI57TyhhW$AacUOqWmSwo6Bh9snrNQMcJGcoL{>JD8(kS`a++6A4N&%sUN|Q$^R6g}oOQO)m>n zm-(t^P#Z&dDE({Om`k#ej?*96M$xOX3GO?*eN=N>)szL+VU)=y)(c+x4IL%IIs|5L zMAOKAI|QZ^=Ca+Ij4NC>BO&bvvLK$n6a9Y;Wo|#NU(pNGsq?^i{{_t|v>|Prrt!az zd`;N6rfj3P$Kx_`^^69{(1dOmBQQz+2Z;ZEZ*5sHUA&^K{JxT!vhdE*>awcTw3@OV z9`90&Ko$HE;|KrD_+mzMWqXz69urZ}?D7F^!0k*!6=EF|1b*hvG>>nVCOsr0K#_jXpg`5{4OZL-cpQ3fD zfiN=Vp!JDmg!`sm7T8HKDdnvz3--hF_G4B(&Eprvwn{;JHOkJsxE*d+GlP7*LDb3k zM_8-CDE?6*Y^%WT;7H*={{!Pzto`m}J_@hKcNJ5ryxLh)Fao^V^a41|nA}7|TBOLA zYrtZG2L(A_2SnMc%@n2F%*%gGn~Tw#m;b^xA5Ip^O}FEf^l1<@BJ&K~VjO^%VwnZTt4iPO7RlPjLAb#iZJRdwThyPpf< z&C7pPJOu_$-jTrON^9rPXYW9DM!#Bcbe5+k+?80-zTDO0{FQ~ ze314pfs5y}|7_r-|EM68`^xJCJ|S?qx7<_!-zM;wSR(np6Z$+T@Ld9@eI=z~f%k}| zllGF7P6E%Tryu<%J%_|nNP9<0Un+oKEAUa#A=(#GN&wF%zgNmR$DyNrAf^8(@ZDm` 
zqkSHweFfw^E9iT~lEL?Ip#MbR4Pt4a{TQXiurKxNkXQmWuElT4IkGQ^*BRHZ zs5av9HStI!+L~x`vkKfvrjJIsv8d(yLcXju6Hh09^%|vfAuF>>{hWm6ht|DEbsH0_Nl*S-yn}+@~GfkLG6N%=z zozz#~a2i(LZ>zm@I9OFo3Up!}YPffkBFUNsSO}wNp-qYP@!qv<~XS~mBHfyzv zQ}!h{#q~JumMU6$dbA2-J>&Ub+fgZdvsSb zTojJj95*-jc5lJ8ZEL)_x3_s)yeqKhzP!knhHgZHY@To8_n9yxt*T5Gtj#od{Oy$Q3uHz9gm z*@lZdNUB*^(VJ+iz%<>uHQtlx?d$Hs4Yj!~-XC@eSlHLu8*T5xh;_9j;`uwdazQO` zncG=RS@eHvOQOeY@9xTL81TR~X{4I;Q}!H9X2%a@Xo8#fmd&@v+nU=uwD{VtE$v;c z@f#9-$u4I@Wk zUmY|KNAkkamTovtZ?eVATOFxt=}k1733BEH=4NZN**v8~B%W3+$m!meL)owf%RywJ zVD3${boaKBk2ac7EDN3OT@dYF*hpF)SK#o}@!)MQal&G*jy>YFFVWGK#~zV|%_dxR zv(Pthrz-nen!9Szi3pZuRl0^z>Fn-z`!gvM-JIy?i5Fy?IxvyNe4SUt7c!fBOd|_h z6Q}D#WQm}GjZW3qDn5SX2b=O)D@ z&|jzEZx@jN8y7C+)QR)Ymm!1p>=M6C;BGzd6gcU*Mxp;<0s1y^jv{(>-0u{)yWJz= zYErf<_4%WMhm>|b;@m~@mnyhV;3R*6f?uG}(>DVtf4+iWtKeZp4&BE~`Xvg!Ou<(w za+WJNQ}CFAHwm2jtM+SyLa+9#Md0p!^(*vhzXlXs?U${{QTz2D6kP4s-z##|em$(< z3zdF7s^B*$_-$w71<5_`OUv@(&kLN!_p5lM{ePq2bqao5!D|%!5^>&k+xg1^cl%F7 z;BGtDDDdf~)%PQ{<@nk0^Ld8J8CoT=kz<6}so<*p(~2C`{@+n>)&9E`Ija31 zRdChLdlkG|>F*l?cdt9|DD>*ObM6iu|7o+&%7ZDD>+5DiimQ z?(scW;H0NIz84DIJ-%UuULD_M3a*ZCts+MqUqivw@m;6LQOEaI1y{%SZxmdOOW!>o z9YUght;ZwBWwZc3>jI*7=&O)R`fC+Drr-|=+-?6Qb3ugUZg+)(--LQ8f5t_eocm!& z!Dk8FJuc@9+&wN=D)j2OELCuITvjM@)N#=je3_!pT1AdJF3kdW`&GL_uliMwz}@<% z6na(vf3M)G{tqg0RQV zaeF!i9VEB^oF{M^_nYxZKUt*UF$KRy;BNcZDfFuS*9hEg|BVX0YX2?;SM8rvQm${w`6kKetIK66zLiFTklt{ixFZCjvenTSL zy#QrOZu?9)@Ray7DR7db`p-#$yZz@29;brnRsRVpxavQb3!LPr{!^jgs{ce4Im^K# z`?XBLV+y`T!BzX*sNmHKy|gp=1NC7u9!la^!|g-(Mgoz%RpOVC5{0%?96uHD{O`;^_Nj&VpJXTOq;^hKw6S>5L;{D&b@HygLW@IKe zMsi1$4%d?P7_^p2Bp zU5H-(uIegLPgqpYP*$VhvlYBv!Lz^O(6sFePO|tUKsKo0=PGg@S8$>qqRD|Y>ca6U zo2M}aCpm-ZPs8*@2=zQ&D<0o>ri{bO^^JY!h8iM`pfW8G8LTYm(fMn1U0J{Qbfo6rAKse~pO&lJe#67w&W6^7jjWqu?ZG3m!@@DmclKziW6?!HNF6 zM2tlLWPy@wSN`6i%7tq|PNp@v@R-0iyYSrt->%>?ws1)mecL+C=1p1!KIu65W?AY@ zbTn6(iLItqK_7#(icNie8V4$pT^;SWCpxydUMq6Hc}%(a=O26Uy;I0NGjh@()8J`Y z=1xVw3VY--Q((Wci- z;v+scH~lLS=W~0efB3d?y6-d&FE%D{GM(;=eTB)Li|d+ckG|k`hz@Y@ 
zR3FPLTDm(s6J2mt{HvHmMctaUSMm?kfFwD2hD zO!-N_Q}N65niTD}e;+FH*}tmZ;Rp*xvJ-vEaohjrE;8-r0{n*rzxzXaYY(Ecy4cPONtseOu-l3(J- z!A~l>?Y~2`FRrZ|VfgFr|D_l=;`h47Z@1u=?MMN#effS7`109rLg>!~A?be+9?37| z(!V3j*ZyA7ez|N&)jRo4TYH%nv3PNf>h)43v_U{EhVP5Kll14@Ql3rH1-@h&O zmjoi0{BC=@`RU(P=d<5~LjSOkNBWaXOMa>U9@#!}HBFqM{tp#s|Db4JwnOdH{>k0` zv!Z>er_^Cmw7*yaBB!yYB=Z*pzk_D~R#0PklH`+BM+HB!oD>%P_mtHW>5S ze^m5;LQHJxKgDu)|7q{(p8F*Gkk~-}(WRJMe|mS35WBXQE+~<oiR=WZ7CKWcIQMaw+0>x5Gi?606i* z;S1~---(*QdcF!?;jCEuhIqB=PVM?@OgX`EX#E}5sy!(V+qe#x4beCKjgBp5r+(Uj z<(oQ#&e3U~U9AN=acUB^w(C{g_0^R+|5vNJqahBZ$}Vcbf2X^@uN$4|JR|M?|Bo0k An*aa+ literal 0 HcmV?d00001 diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/hp.cpp.o b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/hp.cpp.o new file mode 100644 index 0000000000000000000000000000000000000000..7089a6aa73b971cfed87426870ad24976a48156d GIT binary patch literal 21536 zcmc(H4Rl+@mH(6FBq9lxfWW#yK_RAfO7L2a6Y>ZJ+p+9<;VHP-32YlyQEUYp{8j16 z2{a@rvIDOVLE3ZnbZPf=|7W}XZ+HLg>DQLhp7zv^Ax>!vF<*u9ku0AUO29x#O(F6A z?z|c4NtQ_3?f>i?q?z}7ckbM|_s*S}D~H`dy`{|MQe<%{S1Y+Ei7LvG)?B-RHyf0I zQp0H%f-llO)O>Z#E4~NWO@0>Wk-<3FzCEqC1%tPAP)HS+mKb!oMu0CRPY3kd% zI<(YhX2mPbbx50>*Bdc(d7LeuY24V@!9IN#Ccm-Wd+%OYij6$3nICHAQ{LeV=`j`dG4nlr`Ky|> zv&y3-kGa^`2{*Gg%rJ9`VY0S5wbYWM%qoK+yvc8a9a9f_@BA)rJmyXQoV05#snX2F zRm|Kpqghv>{S1S2ux!X;Gg_*9Ml;{k)c0fcO#Kr}HO>qL@I8(BF=!$=^XYx{Z|MQ8aZY( zGxN>#C+W5NSZpa5b~BzdYcb?1hl~qJL}qQ8Nq2C(95~6`Tm?CRUQK<+X51{*eUh2u zaHvc8b57(g&LP z2VFgGY-HxEeBEf)YPNQzZcfLe%-pY8yF8)PkdH0@Jq9a>op)VquP-Rq$1D#V*u_kS zCi9@KOhCSs%>05Ur2ZlCBW7W|F1FGhJrQNehx}P7Z9bYcU$Zu0kp5}LmDo|`$(*-s zpJoLWI1XGm=tsK}b+vB3NfyFHl;P3D3z$`vmOMZt7#~VTqcXFXzu(t*gRKG0d~ahr zoD+Jeo}&k>am^f~wWGPlw8{5%^-*tXh&_M& z+S;A<)A6%eGK1;Az{XxJ*IlpD3yK4}>xi!Ynwe{um4H?TJv)DzUF}$C_5z^Q&eSK2 z4Z50PaB-d7WcSW&yu##|8C3Yh(0~Uyb?F`XknEX!s9Nf?G0g4GlH-s_u;apSz5q^l z1d=zGPcsWP-w37+kPAGsqC<#X_psDD=;JlboY2PJf=VW|$yq6nZl2Jw`o~@_my^v| 
z;v8t@!t{E0wK*=kIaHRq@L_TwW*(kBmp4*NDrsGKLe>?`+KhVSxOey%oItFHy`w}<{rv#7`r zKQ!`+@hnq+pE$@agTs?km>pGq9vqT0v)hOozy`wuhMSVFOiXttawD%XIEIEA*`$x8XKXW}km3kjh?TaU9lkwhnpHQ|3 zss^u&as4kyCtUQ96;<@Le(#;@F#d+`CbRvo%1{S6YG_94E|Q;VNws_MP-ANb@C!JQJXGWA8{OosTU z1{H78;xoEKncYbK~ksHmxK+IuJ24xt?eX_?X0H@5rI zbX+WHkEh5U)rjZVI!-2jtgG*Ohi|6UZ`D_6a5?a4u9{)$JKMeKePAu+&bKR5pN<`c zl<>-0U41z*mR?3qXud;!MWGkF@5rce4)zglLALyNy6cZTmT=)ALV5?E-y0v*!`_5M z4}barXjM0l(6|^BLwiSpr0stL9(bIzPyVW;@Be`4rqguB(9Nmz7swhgV9t8u@UV0R z87K8=>S9ZVvw~w0nul$aPMin z6DH@+%vChZ_&zpr{6P}3*=+SQZ}T+b#e-dVMjNqdJM?*j58?D_91zcD>LgCH2ZJM{ z%)GABfP2g%CTya$me*!$HJeyK&GFp&vuQHF-)Hapa)0J06fDb+^Pb7Iig7N<=+KCu zH-D>{Pvf-0%oD#{17Z=8MngL)4R{L`M1)c0qkdm0S z8waV^%USB54}mTA@JjyZlKTU#v?KgY0?T6$C;AzlQKlE@R;ZGtuByZlS~sz&s!wCT z_j&Jn0B<$(9c}ETazqH@a;VopXxSh5mGs->n2Svyx{%(QNb|7GejigGO}uh#O7~4` z>cRM@&?8GvFDIFK{mk}v+43=#`tskxi&*ld=}@ZK=hMwcw220rkGTU=cnQw2qk5uY zkh6CEiB0Sg*B=bHXOvJB#6J)s@__)LVYiHXL*~T%8OyX^u9OoO&`ll+A3n$yg1H)A$iHnOs8|pmG zd_ME7oDs(z^6&=^EGjv3@VwII1{YW9DS*?`kXbKT%#D5P~>qOtZN`JKUh1~8^sKJ zSz2r8a_kj?yEFUkR*J8DMmLI=ZA8!mNp<7cw4#c`T#bjbk90-Byp?_V_iOw7byBzq z&Tz@2Yw`?JEexYs7s0gN;YHXC=+t+fy}p_Cbz94LHBEX|UobM>2a*TT)>Gf43TOUj znUx&n$p51Xp`0S1KQT5P^8VlsoRXd*)LMs4a~;;@wUv4*RH>_=+Hdq}7EDry=*+Ab zJaH6L9K`+6#nuM>u_n!G!2>=GE z9A?1PgFsvYY5nTz{=`>F0CtzF@j!W%OPdHnB-#mEs=w_@w2C3XZ1zxk0ambF-?V;m zSEch|QRlX>kIysxGI@;_M-O$$J)?;QFS%_ACEKL+!k&*u(6 z1pf(~k`?81kV$Nm*4oVTV!tSH_notztXh)$0;!+8HM2~Nm$%r?$SyYf?f-Ng3ICsH zsin!ViI!?n`$b(TYM-b*c0HdeOkJFBQ+%=Kp3P~bYS^c}bmqGE1itMR`ZTRj+=t0d zzvj*|^0;@n4ewcMfG!~OGyT2Jks4&ffDj%{z?`2%$_evb4fo=>1r zVn|5L3|)!?2b}aem>;Kh^9JN?5UGi8o6R>l;miC7GoQuT4)=(u#r#M$!_zkQoIikw zxP}*mSyGTJNE_v4#*MhR;Tg5xelxh0YsIRw~PGjYl%!P6}^d6qanNtql=SO}+Zq=1lBqglU>YzIt$ zq0pO>GkEsc9+^CG@$PYZHQ5;<(BIUdqd1PM?;1DTR|2G?a4XYO zL$EPjtl=Nkne(&focXXF{H4W@kPerjT?3}GZv==@FVejHED6R$1&z|d$ZQe!Hr%=I z*R7l2nb_zkB4+vWK#RcW9sV+4mb`=V411{w4!&UES9+>#FngXtR-w$LYBJe{Y5?|*3u-Ke4p72=;hiq+vCi9gpN5c 
zGbm#oQ?E8Yui==gt0B+!`$F~xO>&Q1#~0g)%yO1|{iLS8rJ2_liQYq{+~c2Qso<_fNRihfKcq>Ia1yHEp`MpH!$EFT#raj(S0m)4MPaTeS?;= z3rXw`kjL<*_R%Uc-^(l%{v0yjLna14jnq?0bY3WRRk@Zj%BQjZW}g}!HM|r7ab;s; z|5c8ci$ms=W?i9?H>)pRn<_8IIZd$&zFETbAecna8r;tL)aOZKtcE+|I|{Okh?pir;M|6 zX;dLMvuZUH$HWH2@K&U-uUec=U}e~O!&D3I@SADOhc*0wWQ&?R?;~u%!{zL`Rwzq> z6B}a4+2FM#t$|l|y=&G2-G$q2nZ3lCkWLL!1}B$J)zuYMc0M(P-$cxZkx%7XI|sj* z&<{6Iw4S=koBj!97i|6L6y2aCW+BG|)svOdkI!9f@E0~AJ?^j29Oo{W_qAbVXIH%L z=FYnA!N4jwY}nzj!92>p@kh<^cy7=*X`FUt{pzt1&bGWXbn&px{$w7l!c-utX8zoc3{n-{Ryfg z6+WW7QDF2tC>1*V_41aY_^DpFm5R^0F+U3;9jTg+02kv6L=88dgrVIl@gn|KE z7rDRdDk@4N6i>cVi_;Ai?ibui#VKdQ zXNt-vj%b~O+*{>|YO%Lb6MBtpXHrfO~9vT2U{kRofHZ831ql4|uP-;E;yduW? zfnz*pzvhaD+nwFk!xrRYKNn-|q5e5t7PEOPY~E1ZvQ3Ms(cQx*swlUi?N}4*EBs}j z&8mzoQi!%*(CS31W!De%JA|2yL%Rp~x~4V1MbKVJw3v0W?AC>}jA`D_3t#wmw56U4 z=3RD!bK=k!+3Y_BPqGyi!VU_|!x39Q&kJlBjZ;iN!R(q?U$MtkcCx6q1sBmgtHrkA zBVHZZ^_pLfAS$FaT+U`781OZvBPl1Jesk3lnAz+#8scOYN3;_Cpi-BfhF z1{d`emAP>dt<~qn7#q+&Rk;khDp%g-A+M0utw^orzj0@GhC-a8^xWcB7S9ENxcOht z%zfonD&~OX>SYdjhJUA*h|4SVQkfoQqR{lG8cDKyo6x(Rr zHUaVd3+=NYDB)X3U=+Sjp?-klxp_75w~Dd|en9j-v8JUVz9ezs%X~i(ecZ}=ucUDRVh^AXvnh%I4x3a7RerXBZ>18<* zH;R;#NL27X;#lrxYu0Xu4Q& zqerpgwxjo)S8hcFpn^B<>~5R?!maQh+w#B&=5>i9KFG7^eZVR17nX08z`s)h|6vLI z3E-DF;)9F`$AK?(_(`=8pb}FpCQkr(F+Lp}rzmX~zo@sD;Ipd){?!urkAN?A___3$ zr%KTOR|)(b;Ac}b5eFnXV^aA9o|l%%+2j0R!%7qIVtQC#0`D$?UtR*=4xHrii7iIn zQSJkNo^q*>A}7x`OYr%j;8QF3$aCwIz#9ZE&#qp$CXMSq*B*lJ+i0#6cp?2Q0{_0? 
z!_T{*zaRKniU9E}_4a_EuN7M)onNWEDDXD=15*@;$IAuiNb+nE_+EhrO5m*mpBD7v zg1#SkF?)SM(2t6(iq57~zEOhyr-FWuP$WN-f<7&9xs>?%68N$+Z29-lAIqZfvn1;E z0zV{her^PQv%q~~!~b{cgz_bUj|yDwNB=2s{)f1rgBZL#2mBKL*SPjGKO3UifxxoV zv0um?We9jN{rqzY{KqA5CHz@uYM?IMs$bXCsE5NF!hwL=72Os|^cvx9v1l~h7wsE} z-4<*c`lvp2wX#p7w|AgZcIrc)b|bJd9FBC|l877O?r3CC+n|QSH}@yPojY~}t_X+w z2V#AZ-f#~xY!M{;w6?aPc4M`j`P16#!X*V&`B=OU$bwXQiN3uyyeb?X7>oy2g*9*u z_eSG!&OaC&>Iiq#hTB`i9f5F1UAU$v9*fkhRBbs|hmjvKVgvDkm=W$B82JAy>Z#;x zZy`}3r!C~FmC^uuV3pAw8`vI>?&ypT8a)I3+J^RUsVooXd|gMl?b^;Bn@^ou(>c%| zHwI$^gEdzK95atcW5g^>)7RRh+OOz}8j+sfcpwmq8a=USS9s8f1syib*LCMW-yqku z*4DE1`fJ-)hTBzc8)&}0nP%i1zP6}?oMP-=F#U_hC(8$=<#S3{y-I3U=rDgd)W^el zPW_R@E8}^q<|ApZh=&IU;s%zN5$y}djffGn)iKu(Po6*{%o=JN*#^5`>FL*!KIak*;tg7K_{#?vHNA>h8bU=suNDbzG>9O&=vWsec2- zs~FOjJ$=z|G!`3(HQQZ3s$+8?Z^}irQp-uYdTB;{E+Tqi6uFFx4!^FB^hDa|&^}5A z2%l3C+VCg*P}13bYj_)$Z@zj~#`|J{y3XE6Jl@k8j(0}-gKZny1Bm@la1`3@iWrfg z(tJZrEV``*(R#;@@L)6+ALz$#9mZ|pp;`soLf^np6nxu_y4tX?jBN{BZwNU`cw_(e zp8l@z_0f2ukBA*JAR$&7z41cT)dLIDY^c6DYGCi^ip}j>R~yGWdv~%IDD4EnwwAV` zK70=$NPtPOsEt9-G=5}n5Bk@p~0 zNBZ%@S1~cybi!Oll(e5Un{)BO6S=LirN^+wsp+VK|XzyTY7oKCKz-oS~ zaSC{vdI<1H201asJXv$Uv5*SZa8;9ew-%c#?WP5D&F>$mrJbNBZuE2_LXxM}%dH{1 zcvxFqpkV6|9mx1|Ph6+kKK>dTDhBRo&q`SI_a;zm+7Ljnz{xMNwPz?mfg|1%=7ESW zMV!C34Ui9ZXmdP(w0Z3uw0?Ux_RS&t_-y}%f;O(j%4&S@7Yn>(M5=)BItRXp02Hbk z@saciajz_-r~47dXke-hr@+Ta)Qx7`@UTDX=9P~~*-Yf7zJAT8#$7#p!I`}y4_+tm|w4+-j zG)W%Pk<6nl5cw^_w%{ZE;W>emJarEI&hzkqLiA4jCGnF2;Z8k>@XYDyH$ch%DzvE( z{VIGUe!oMWCI>FU2B)Wg0V3(;FAc=!Y6rd)Z7PNOq%}o_aLP4FJ~S69gkOV?#EB;r z;@^Oe#Km?uhrd`ZoZe}dQuM+sbLgdgXk5~j9QR@}APV75Jxl)5zKl~T*pJ<3$;+5iARxUK;93@6I;al+$;U$~LBRqnS zEb=|!of1Ut5pBZBjFYUBPCxQ@0lfg1dHyX0 zxXjxd1-Q)HKUsjw{QL_JoaDJw%%NN?JbFzZnYXVjz-8WksRJiImkT}}4xISNy#3t; zxXjOg*?|+EfZ#Lgz=@B{&o35skaEiWy#4!PPMwo)HLhtaHr})iMG?{Z&MGDMoH&^8vJimZtm<9Nx3j=&>ICi2f>=T^Zj4C zap%qQe_sZqDPl$fsbX$i`b$$gOPf{SsWSw2S8fb2f!bm0Q>8tN_BlnKx5nU$hYOk= z4L@8(dWG~H{HXjVa^`(~(S9sO{8??Zrgg){%Q64XTSfn9H+~0*Amv$eb8G-Q`74#j 
z5kJKhBadT)c#`K(IuFsAQG}~(U8=t>`pdrnp#IDMjQ(3hf9Lpwo$mO(qQ5++(D;!* zqrXw4Kb@~mcl@12`qP>7bo&n%>A$W>e?KrfIylc%cvSY$7s_h|fBNLJ;q3PXXpu|} zS;THMTw&Kc1Y^=4=~C)bo+Cd2jK&w+Fj{sQk^~YEwK(Qv(G_SB&%*wDME@JJly<{k z@J#0bK7ZOM9YywcK)hKvem~HU>c6mnL81QsP4+LK`;w5qsQ@n!Q~4LXFQ)$o`9J&5 z*^%^5shC3jKLD(l{J$xo|HAQ)mFRy+^q2QFknvJD|0$rw$)096{qW`q$?-%3Cs!)E0{>9|qA>?PGBgs#(tWbXXzYvl9@;)ud6`#0p=AFgx zl4z;OdPMXuw95+7{|1{p`z`xP;H#p4;rMrp{_=h$>81RV&;J$u=NAa@b9rHw9Yrns zNq8mt5mwlLRP>jBeWa^e^3j^iStwDZj*jBFC2lh0>vjf=;*_LuV0eYu$ZMo&}zWQp+y8|;DQI5d8Bk@5dSj4$Q1d*RFLtPle_$&nomR^+J1^AR#zcn(gMZ1uqNbr{_7c1Dn3Y?!QC)en8Mjz-({@5*4ufZC;|sLOk)a^vFThe zw%TmosovOV_ky%nT~zC;HgEJYOMj&ss#aHZ?Pl(%)->JF3~dQwFJip%@-b8T)5_fB z*!T|gB=TNC-zAg+(@;N$@+cvub&~!5Q|7Oft`K7t_?3+cWGWk#(P|9cXF*edKaL1V zVOH2_L=TF}kS*h94gzRZ*w`Ul1>{0uUbJS95U*kcty=>2Okut>!%v7)-z0w_PWw&x zEG+a)VSOmi3gEMa`4Yd6IN7(cug{2^Xp{dZ;w{99&%D4(RVRv=^>lr~ag&D2Q`_^C zHQh+8;68Ypw(mDXIFHLRM_&}tR>zBz)N+8f{QMB5-T{u~+lLu#+u05~otoxy-yT>o zRPegoN@LqglZMd4UZEz@4c?E!fCJg~r667JMG=p!HgxNbx;<8;AHFygxs-0j+;+WS zvk~QL;pJq3`l|v1@Jf!LZVZuh*S^U)L=!NO>ICzP^xH(UUCOJv+6J6H!a7 zyyj5>wN1zQmjVa*QhIUU#5e(uID2&dt_ctZ;dHaB8Jgq5N2894z{FEUo2kj5o7!!V zX`IU~(qhH9kgxTr(-^fL0U zBTnm$FpE$j9^n*E_AO8ZCdYq3g50khuLG}a{uGaChC*+g@V!wJrIXU zr8(LkmM$o*@wl>bIG{8xD9vZ%(Z)0=E3HlFt~75dOOF$4(=|F`U>A~Y#Sb=qww|D@ ziB?XSXkI?Jq6HJN^juAdHa-Tp7~M09>*V;7OiT9Dy6&y@ z1!{@WJV?@B{!b6r@rdmr!0Ysc^?H(beAZZ(R{f#kD#X*Lv>@FDWzbH3v|-{# zmOdAb9haY%kIN@6Db1kLnhHb4>Ysdi1lFfZD9z8-;F*xi$q)pZ4C8aQSrwF(r@|Ah z3?wHZtpGXcIw|1_m5oPPO5$8+i?0DSM5{(KBSeEcv@-xxshW} zIlCa~kVFB?qO1jJ=Xg(gl~y|3nNQR5@wW?KBqRt9f-Tg#3x&?Z5rH#ufU>6hl(iIF zCf$eA$9}tn9T@Vcji=KGV)5Ku-LUGh$HXBqv2VCeN78H3u$UMUrNqPbL~I}-4JU>Z zBjDJ}+ygI9k-#Y-uzUNK@G6KV0l$aM3AndZL^#`%iuA1nQ<2!2kQ|A>u}O|3mNt(@ zlEKx$)<^;%IT8a3P*ahxybVL0Wg1^%uSSHWo}PCvMQASA%=!-Z6C%H~*Q53**xBv6 z=^&xWUx(o!2ufZSB=m~wnhKIFT^IsE;vH|0Z0_Rk1Z0;ZPMSNwhN00Cx;(Nzc-aR( z8(CswJZYX!BSS(=!yR*CxD&-28EdqyC-H zIDf{6|B(a7ZzbpNfcpdu^__Ux2fyfpZ)cw_)WI)5*Wb%ql6LHQo2?fNuQ7NC)E2oK=UtCzw4WJl{mM)G@=JwIImWb3_FR8_v 
zwxF}sZ@-IyD;^2=mp=&@+BYm8_Ne))3d>-zoaH55^BN z+_emep(q&V+oSIBqm1MBk8jXl0)MalbAP@r@13WH6AW|5HOu@@xdhz#JPPwjO}cpJ@sb4N`@;I6FZbv5!*|M^dw|F_ p=3l~r!`$)X*d064zh?2j$tF?m4ruQ65%XGqmi_f@yTsk={{turTp$1d literal 0 HcmV?d00001 diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/topology_hpux.cpp.o b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/topology_hpux.cpp.o new file mode 100644 index 0000000000000000000000000000000000000000..39b27b58dc6ac35f37ac6aad45e41e6a13352ee1 GIT binary patch literal 952 zcmbW0y-ve05XUcV`5F-m5=%N2Bx*U(MuGuFs!A8c))^%sq7o#Ih=bAvUZ=0e6TmsK z71vG&ILW@dpa18x?Br|yaNlzrQ0BlLw3(s+l?HCwnkF|iH-+3~@dtGwm{T+O72Orp+vz)!L zm-W*sFDnw~IRGivDGwv`{Jrrj74f9vK%PS$0*Q(ONX*hS$&h7?CuH%s92Y!{U+q4B zBxk4>cZDyoWx-a~Cff7WaEP;D<_!h)sy&(YBa|HC_;39s)`~qT4&4Ho_&UT`^;dqX zFljQ5WX}(1C){?5J*MBVn7AaC#R$#Frc&{X%JCUHLU~;SdQb8sEHlf4m1of zlm)SLW7z7zDdMUfTy ze6AmQnRl6-(6b_=-yUvg3wkD}=ky058{o3kiMQ*J*hwd5rzTT-SRmoo<=$1&kEhe? 
z)2R^8N_;}?+B~c1LMW&J*(+2)ar{> z0`nY`Z9DCt^cswnYHfL`YShZ*dTGPhSX!=@45HP8T+s4tv$>$ROt1OI^lZa&9joto zwqp&5IxctSzHc~do@?2@?|MZS?^tPkbao3%!E~c9LVcdc_4jZnQ`98N9b9BHuAxmr z{f}mVcAh{K#Z$|0SL?O zc5Mek95=AV%IfC4ADGrQh>&f!j4jXX+I-r7YYmz752!b*fEdZmp&Gr_J2i_tG{eFu zVrcx4L((NqGp2L+W?z%|Es3jtR738oigHBdE;^J`jMn{2UFMNr3^W=j1@S zFZzDOWj=qu8N}4tQ;KI2Y>^WYwSMx|+9~`%a@Hh){>8Jn)bk08J3lXzev^4vsptO# DU&W=u literal 0 HcmV?d00001 diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/topology_osx.cpp.o b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/topology_osx.cpp.o new file mode 100644 index 0000000000000000000000000000000000000000..5c1d889b0ca22177c36e61a61c256fa78a5a9a5d GIT binary patch literal 952 zcmbW0y-ve05XUcV`5IwDVhIxax8Nv6Dvw&dOdosxfed)-DTlmfvK61I>1%KsF z>jz%eBq<62Qm!)*#i-(~)o1OAv=%@fBM||K%MwV!vndA}N$B1&G|Hh(22 zSRn5TUtq)7M$`qy^EL1kXUXgvV$G^M+4X0bvXA4x^_N&H_oO&XV=4$uh`#Qxb!IR# z_1z2%U)O1Lh>m@Bxi(RW3!SLcu9tD&S68e2FQxL1_Ham*cH9d7JN^BA)#ym&X|enN E0uco>`Tzg` literal 0 HcmV?d00001 diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/urcu_gp.cpp.o b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/urcu_gp.cpp.o new file mode 100644 index 0000000000000000000000000000000000000000..668398ec14ee97fd4833644311f82c8610af68a5 GIT binary patch literal 1264 zcmbtUO-sW-5S_Mu9t6D!Ug}8&T{g9i)Pn>{EQ0i)^y0ygCTm)3(~xYWUi2sFPt#wZ zvv!wkxAD}4FmK+>n;C}Pyt}v8)k+1_slXK!o?-z$s|9xcpaa{m0nqJqF6+nrK_2tG z-Xcvx&+Poqv)(M5&?d3y$vjYRP|Kz^Z5O4Mkm$Vz-#<$MBxpf|W;{w>`O1Dseplux9`ZEEy(l3;l7RQnI}gIFDY%P}aep+<8dj8e z*(i?2j89{46ldIz19sE9r(T0bEM}=c_V_Sme#k+zOII#v zwbg|4!MQOYknDBB{Q+8@&h1bk&So4)=yM;)Aj^OR)5(Oz$i-96NVnHN!(9Z=a-*x6 z1C)z9hBtTNI166km~Y__LxMAJmQzR;kf}eysJIjV>Gv1Z^IV-eMMu=6I*HhkzHRWT zzp^ciE3Bu!=#GZX+&zq$vy1ddK0#O03TWzuU-nvOQuigT*wH3_>iwv_Y$I3Je<&N7 IKMu41FF{$3ZvX%Q literal 0 HcmV?d00001 diff --git 
a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/urcu_sh.cpp.o b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cds.dir/src/urcu_sh.cpp.o new file mode 100644 index 0000000000000000000000000000000000000000..ed462a3f0bc5acdf0a46fa5a54fb1ae2d536b50e GIT binary patch literal 1840 zcmbVL&2G~`5S}!J2574mfmEp+azZVlmK{=;azJj2+f)i|)t>_#SUL74vEn$9y+$nu z5JE^uz3>!>D{p|r&ol4{95}!osq8qrty!lW7->B--^{l&v%7v_*Vji11&~sJ`*7w- z6yRs^%(ha~f(f_`wO2n%wc`tj9N+zps`mOLo2VUIGiZ!{wMKc)=h7z(p=_+J-7C)> zG?I`dxuP4UZtBaNb}c0X6rSxu;c%)jJ3dx=%Ns@f zhKS9helR)ZO_l_bS2)`!{5r`;0E$B=M?5ophFIh~4U;0I=aCB)VJwFsP=w+A7U5zp ze}W@dGkvBrbF9PHR4yCf-UUS?UkWrH4-Fv}k13dVxoy#V+a4BO64N zv?z=F)b*RbEBE-T?~0M_oA_bOoY1BJ8o-Q6e|PP`#^aVZdW+jv`1*Y^kVy=3!51>l zm&D85Mi~(+b?~FbM_uMLxMtBnx1|-)z|j~zWT1Htb3kjvF{FT&gn{3o!4YU~uiK>| zPYHXBY8zVz3w#i_16sHg|AOmERXhvYzCqzQF^-=9hfQlN#?iD;9T< zFXxmo$u}h+wCrE%O^!=DoBA&6WNbC}iQub!y%!v{FLnM$Ja5?j^~Hh8d&2ueuH;L6 unq%tOa|qrDu#HYe)c(;|d&l%|g0mwC_|IM9ORbOCu=;ai)9Xq>#f3ivEy&&g literal 0 HcmV?d00001 diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cmake.check_cache b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cmake.check_cache new file mode 100644 index 0000000..3dccd73 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/cmake.check_cache @@ -0,0 +1 @@ +# This file is generated by cmake for dependency checking of the CMakeCache.txt file diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/feature_tests.bin b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/feature_tests.bin new file mode 100755 index 0000000000000000000000000000000000000000..6c440e3f3fecefb9cee9c3151cbf31e26cbe7575 GIT binary patch literal 12696 zcmeGiZEPIHb?(k~9Fm;v5Wq<&*|wo|sC=A*V+^Rxp1*RYPDmY_q%CGy-|n3k?)J>y zUa(UNsRF1LghsSdqke=|6;dkkgH~#xLV`p=Bp@M$QV@cwN+F;?pacP_E74rvo1J&J 
zw>Ne`RsU2TYxm82^WK}cZ{Ez!c;4OpLj&PZh*1f%Z!pBRHmIEBy%~FT9Y+PNn=NK4 zTf;784M3@fN98RDy&Az;p_*W$;EMpPgQq*}5xT=FuMoV{BSQ~fAqx>=f0d$&st4D$ zcql=}6iz@nw6*y%jsm=eR|qP?jy3^F4pEeA6LM`rj^J@&7(x0z{5E_>guaom3K-%l z0@lah6^Oi8w!Y;WSf6d;C_t502zGj8Mv&yb1#)QTXD!Wwaai;>mtW3^?;|Ksc4gKY z@9MlVYqV!AyExT8)!o(J)fp?~W9#{MqkiPwv}Kg{M1E8NLz`E?gE4^Zf9UJob?m^F zpFi@zbm(6XJhbtphbLdaHvArxTLjO(=G~3#iz^m4*3~WhVI5PkZ81F9Pf!2L@7=n% z_mwB#eCW=l*Zy?Lqu+e<&}%Qed0_qc>+ipM`;JR*z58c({%P-r(WNWiIB@!<527G4 zXK9BPMbf$iXv-z^F`y|D`Y-07KQ;$_5a`WpC5tP!K&?`?LKW#sfPO!xuVUQ+`Y_NH z34a#oE$FRA6>8oXAp9OY%UP67E>aO?CVw+_BS56nI&g)c?*rJ(R@K&W2ILpZ|8qlYoTl1GnXe4Kh5}hN$odD*0xc;#tXhk|k_*2+En*6~(xPrsyBA_5Sb0&HC)W&4!+2o7o+>6TEmXNll zHEisN*Z=OXuLJv8v1JqlPuXDD*t7K)11aE7qU>G02sbeTV|3<74ma z0vW8&eE-wrO!3V9{hucHH~n$90u!z2sYB3D*;ceXu|08PVy2WBO^gifz5Kz86)=8p zJHl;)GiL{9ULIQa79X3#|Bjs6ey&tX-uu3*bi7XAJ2dm|(9GFB&`?_O*W{ifq2x8E zi|;1)Jk$8yvBXV@vBb>@?a01LpC26oPvTeu2UKfHs46OIn%M=tJ9cOT{fUv$;r?sm zsi`SV&txF3b=QO$mez||SHoFh=~-)+=@caVSxYZyu9=(6f{>gcW9xG@W(f~V%o)= zmd-nO=#Bv)kjf&bd=y{|vsMc9%{x{wp0MDfS&jvj^Xi1KsW@5UJNSjtv$Z3O-qtL` zv|TH0nUdJbm^KC^%rfIV4(FU!O3UiGaYHt-EPKLqV9uU*L@XP2ccy0RIkPaSr)0Bt z&Q81rZmO2jCm}|q;`tOz7Mu&`3rzS4&uIK5pE-8koJyIKZmnx=j5-K{l$JAdnDRIm+>{9HMsof511`hnZ(eId9 zeF|9kG?K4CzQq6{)qtr6u`jXC%1pQ#Sf4euaYs+xX1bD4b$066A}%n(N(oB=;xV5V zj;-xS_#O8|j$~xOK{%>7XRCF0;4cmRW>hLT?Q^h3$W{+`!ZCFNA_M}e))3dxEr{jJ zc~;A!ZKd*tInR&*2aI>FG6nhSQrltChIohZPvMFNLek#>*k3yH^tDrWEZnq~9Z>Bg z{&UQE42*3(<>cpMAa@onfS>u$>d19?hOCFD?QE&E8{iIrc;49Z&r<0iz$t*o0GN-*bW_wHUr>6Xg2|}o@n!r!adQ} z2P6GaWp7O{K}(Rd=-))Q5L5BPhcjoi*hLHs!A@$ZvS zAl>UM z6FA-5tQ0uiudWt2p4qUh@&Idvf3IWVXFGt@{>iyg9_jnTDgu5^MR>4IMZoU{5zdBH z1Psr#SYGh}!}~fctsY<(2}mG96}UdYLT5hWr*kXbn_#)b1MGyj$0GVMfs@@Nk9U(; z2#5dV^Y1w*_~Fuk4YNd(!{=H{D|gd+KUXgCve`>g~N&X&W6c+HO(V5bKP^ z+q;S!>$t0M_0TnevMR-j~Icd>JF(>R zD~mw%%u4wi%JEdqzlh1kAp+4!<5$hUm{A;5(=RB;OEtarJ`~#{Y$;PDevhzaaN(`` zRq%SmcR%ZIj4;XlDKJEQ_rKNrE#*9eYWi{(Z&v*zUXQQ~%lQ-j#t8fuh30cL{UV0f zXu%R;7eii!jDAV^KGQD~!Pz9{Yc;+0ejM8)l6z{8PODKx3(mtj_F<_)@oKovg<$mv 
z+K=m782rCGFfOPW>4yUJn4q)3y4uI}SMJkkJ=n(Swf@{C^pw|Wz_Hv1Pe?K@j{sfv zU5|7AW`@_4SY8ne+FX8k26#enW3RNR$X+gghMLSj$RO+YZ(Q=inmGA@>siU#0^|EB zr`N_wJ$F=X9JX?LZ9IPs=pjjd#nqf%8&{iwF3V3C1G>V$q5Ab}POpu_X`svUI<^CS zxn!R1=Jaaah&?Ry1oJ>11-eqp&YlFiEZ^gKpv&?--US_)*AdJoc|`CB^FUq$x-1Xm zZNVSR133+JnVqFM=zrmQBG^x z?~1F6)ztC23;u0ia0@ZG%3#`ehPu-p6|EW=PHgVi`nU9f;oetJ9q-c1-C~-FQOIi( zx@}U|XVQGWyqQ4qjjH++TK=1~8ec|@m(}yR(rQ?&Fr9Ptae%Jl!3nBaa4YXjvY4HB z%~+ymu-(-&0?XLN*m%)`JT}WCbypzC-o;y%9$(Vz{lCbKS! z@l2r@RAZSupqLfFVmvu1=Hz(>Qp}tXQ8ZzI62d%DF9bXZs^6LdHt0srSt+m~@1lC3 zU^-I_vXNK}0y_u6Dp?LbHwk~oU<#J^#k4=8d`;S~lVarcmoEc?KPj}YrhOdcLn?k| zsMiJX4#55LwBK`u!nFbZpnM;+!&Oe6`rjW`xx{`!xQSzxa|P|+1(|Ju3Z5ZIem1NE zhUX0q*kj^@G2aTSBoDtNykP!Z1Rj#7bIU;zdtCJW z0xkDL{I>xco;e7oed|FXpm=KdIm&OpAT!)&ps6HJ``cqep1za%4}SkWLcT{Rq;t~= zi9G5G%0CD+vzv66#FzfUlKo#mB0KJ5R|9!qr6Wt z^sLZBG741${2f5QJmnu#UUFxEKiL1%0eLz{#Knhx9FPzC^Eqh8L|vRiSm^%^!C%0{>$ek#giAnL*Pwv1ZRQf@1ORaiV!3_Ndd_behw-~ zqwmM`Oe}GQgK~c!=JiE&gc0Z*`;YsHh$|k!%bl$>A}= 404 +"1" +#else +"0" +#endif +"c_function_prototypes\n" +"C_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 404 && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L +"1" +#else +"0" +#endif +"c_restrict\n" +"C_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 406 && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201000L +"1" +#else +"0" +#endif +"c_static_assert\n" +"C_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 404 && defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L +"1" +#else +"0" +#endif +"c_variadic_macros\n" + +}; + +int main(int argc, char** argv) { (void)argv; return features[argc]; } diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/feature_tests.cxx b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/feature_tests.cxx new file mode 100644 index 0000000..b93418c --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/feature_tests.cxx @@ -0,0 +1,405 @@ + + const 
char features[] = {"\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 500 && __cplusplus >= 201402L +"1" +#else +"0" +#endif +"cxx_aggregate_default_initializers\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 407 && __cplusplus >= 201103L +"1" +#else +"0" +#endif +"cxx_alias_templates\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 408 && __cplusplus >= 201103L +"1" +#else +"0" +#endif +"cxx_alignas\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 408 && __cplusplus >= 201103L +"1" +#else +"0" +#endif +"cxx_alignof\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 408 && __cplusplus >= 201103L +"1" +#else +"0" +#endif +"cxx_attributes\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 409 && __cplusplus > 201103L +"1" +#else +"0" +#endif +"cxx_attribute_deprecated\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 404 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_auto_type\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 409 && __cplusplus > 201103L +"1" +#else +"0" +#endif +"cxx_binary_literals\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 406 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_constexpr\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 409 && __cplusplus > 201103L +"1" +#else +"0" +#endif +"cxx_contextual_conversions\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 404 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_decltype\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 409 && __cplusplus > 201103L +"1" +#else +"0" +#endif +"cxx_decltype_auto\n" +"CXX_FEATURE:" +#if ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) >= 40801) && __cplusplus >= 
201103L +"1" +#else +"0" +#endif +"cxx_decltype_incomplete_return_types\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 404 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_default_function_template_args\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 404 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_defaulted_functions\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 406 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_defaulted_move_initializers\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 407 && __cplusplus >= 201103L +"1" +#else +"0" +#endif +"cxx_delegating_constructors\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 404 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_deleted_functions\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 409 && __cplusplus > 201103L +"1" +#else +"0" +#endif +"cxx_digit_separators\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 406 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_enum_forward_declarations\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 405 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_explicit_conversions\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 407 && __cplusplus >= 201103L +"1" +#else +"0" +#endif +"cxx_extended_friend_declarations\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 404 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else 
+"0" +#endif +"cxx_extern_templates\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 407 && __cplusplus >= 201103L +"1" +#else +"0" +#endif +"cxx_final\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 404 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_func_identifier\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 404 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_generalized_initializers\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 409 && __cplusplus > 201103L +"1" +#else +"0" +#endif +"cxx_generic_lambdas\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 408 && __cplusplus >= 201103L +"1" +#else +"0" +#endif +"cxx_inheriting_constructors\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 404 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_inline_namespaces\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 405 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_lambdas\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 409 && __cplusplus > 201103L +"1" +#else +"0" +#endif +"cxx_lambda_init_captures\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 405 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_local_type_template_args\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 404 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_long_long_type\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 406 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) 
&& __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_noexcept\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 407 && __cplusplus >= 201103L +"1" +#else +"0" +#endif +"cxx_nonstatic_member_init\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 406 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_nullptr\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 407 && __cplusplus >= 201103L +"1" +#else +"0" +#endif +"cxx_override\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 406 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_range_for\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 405 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_raw_string_literals\n" +"CXX_FEATURE:" +#if ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) >= 40801) && __cplusplus >= 201103L +"1" +#else +"0" +#endif +"cxx_reference_qualified_functions\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 500 && __cplusplus >= 201402L +"1" +#else +"0" +#endif +"cxx_relaxed_constexpr\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 409 && __cplusplus > 201103L +"1" +#else +"0" +#endif +"cxx_return_type_deduction\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 404 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_right_angle_brackets\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 404 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_rvalue_references\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 404 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) 
&& __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_sizeof_member\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 404 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_static_assert\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 404 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_strong_enums\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 404 && __cplusplus +"1" +#else +"0" +#endif +"cxx_template_template_parameters\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 408 && __cplusplus >= 201103L +"1" +#else +"0" +#endif +"cxx_thread_local\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 404 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_trailing_return_types\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 404 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_unicode_literals\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 404 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_uniform_initialization\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 406 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_unrestricted_unions\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 407 && __cplusplus >= 201103L +"1" +#else +"0" +#endif +"cxx_user_literals\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 500 && __cplusplus >= 201402L +"1" +#else +"0" +#endif +"cxx_variable_templates\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 404 && (__cplusplus >= 201103L 
|| (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_variadic_macros\n" +"CXX_FEATURE:" +#if (__GNUC__ * 100 + __GNUC_MINOR__) >= 404 && (__cplusplus >= 201103L || (defined(__GXX_EXPERIMENTAL_CXX0X__) && __GXX_EXPERIMENTAL_CXX0X__)) +"1" +#else +"0" +#endif +"cxx_variadic_templates\n" + +}; + +int main(int argc, char** argv) { (void)argv; return features[argc]; } diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/progress.marks b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/progress.marks new file mode 100644 index 0000000..2bd5a0a --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/progress.marks @@ -0,0 +1 @@ +22 diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CPackConfig.cmake b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CPackConfig.cmake new file mode 100644 index 0000000..2a16232 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CPackConfig.cmake @@ -0,0 +1,108 @@ +# This file will be configured to contain variables for CPack. These variables +# should be set in the CMake list file of the project before CPack module is +# included. The list of available CPACK_xxx variables and their associated +# documentation may be obtained using +# cpack --help-variable-list +# +# Some variables are common to all generators (e.g. CPACK_PACKAGE_NAME) +# and some are specific to a generator +# (e.g. CPACK_NSIS_EXTRA_INSTALL_COMMANDS). The generator specific variables +# usually begin with CPACK__xxxx. 
+ + +SET(CPACK_ARCHIVE_COMPONENT_INSTALL "ON") +SET(CPACK_BINARY_7Z "") +SET(CPACK_BINARY_BUNDLE "") +SET(CPACK_BINARY_CYGWIN "") +SET(CPACK_BINARY_DEB "OFF") +SET(CPACK_BINARY_DRAGNDROP "") +SET(CPACK_BINARY_IFW "OFF") +SET(CPACK_BINARY_NSIS "OFF") +SET(CPACK_BINARY_OSXX11 "") +SET(CPACK_BINARY_PACKAGEMAKER "") +SET(CPACK_BINARY_RPM "OFF") +SET(CPACK_BINARY_STGZ "ON") +SET(CPACK_BINARY_TBZ2 "OFF") +SET(CPACK_BINARY_TGZ "ON") +SET(CPACK_BINARY_TXZ "OFF") +SET(CPACK_BINARY_TZ "ON") +SET(CPACK_BINARY_WIX "") +SET(CPACK_BINARY_ZIP "") +SET(CPACK_CMAKE_GENERATOR "Unix Makefiles") +SET(CPACK_COMPONENTS_ALL "devel;lib") +SET(CPACK_COMPONENT_GROUP_DEVELOPMENT_DESCRIPTION "All of the tools you'll ever need to develop lock-free oriented software with libcds") +SET(CPACK_COMPONENT_GROUP_RUNTIME_DESCRIPTION "Only libcds library for runtime") +SET(CPACK_COMPONENT_UNSPECIFIED_HIDDEN "TRUE") +SET(CPACK_COMPONENT_UNSPECIFIED_REQUIRED "TRUE") +SET(CPACK_COMPONENT_devel_DEPENDS "lib") +SET(CPACK_COMPONENT_devel_DISPLAY_NAME "C++ Headers") +SET(CPACK_COMPONENT_devel_GROUP "Development") +SET(CPACK_COMPONENT_lib_DISPLAY_NAME "Libraries") +SET(CPACK_COMPONENT_lib_GROUP "Runtime") +SET(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/post_install_script.sh;;/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/post_uninstall_script.sh;") +SET(CPACK_DEBIAN_PACKAGE_DEPENDS "boost (>= 1.50)") +SET(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://github.com/khizmax/libcds") +SET(CPACK_DEB_COMPONENT_INSTALL "ON") +SET(CPACK_GENERATOR "STGZ;TGZ;TZ") +SET(CPACK_INSTALL_CMAKE_PROJECTS "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release;cds;ALL;/") +SET(CPACK_INSTALL_PREFIX "/usr/local") +SET(CPACK_MODULE_PATH 
"/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake") +SET(CPACK_NSIS_CONTACT "Max Khizhinsky ") +SET(CPACK_NSIS_DISPLAY_NAME "cds") +SET(CPACK_NSIS_DISPLAY_NAME_SET "TRUE") +SET(CPACK_NSIS_ENABLE_UNINSTALL_BEFORE_INSTALL "ON") +SET(CPACK_NSIS_INSTALLER_ICON_CODE "") +SET(CPACK_NSIS_INSTALLER_MUI_ICON_CODE "") +SET(CPACK_NSIS_INSTALL_ROOT "$PROGRAMFILES") +SET(CPACK_NSIS_MODIFY_PATH "ON") +SET(CPACK_NSIS_PACKAGE_NAME "cds") +SET(CPACK_OUTPUT_CONFIG_FILE "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CPackConfig.cmake") +SET(CPACK_PACKAGE_CONTACT "Max Khizhinsky ") +SET(CPACK_PACKAGE_DEFAULT_LOCATION "/") +SET(CPACK_PACKAGE_DESCRIPTION_FILE "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/description.txt") +SET(CPACK_PACKAGE_DESCRIPTION_SUMMARY "Library of concurrent data structures") +SET(CPACK_PACKAGE_FILE_NAME "cds-2.3.2-1") +SET(CPACK_PACKAGE_INSTALL_DIRECTORY "cds") +SET(CPACK_PACKAGE_INSTALL_REGISTRY_KEY "cds") +SET(CPACK_PACKAGE_NAME "cds") +SET(CPACK_PACKAGE_RELEASE "1") +SET(CPACK_PACKAGE_RELOCATABLE "true") +SET(CPACK_PACKAGE_VENDOR "Humanity") +SET(CPACK_PACKAGE_VERSION "2.3.2") +SET(CPACK_PACKAGE_VERSION_MAJOR "0") +SET(CPACK_PACKAGE_VERSION_MINOR "1") +SET(CPACK_PACKAGE_VERSION_PATCH "1") +SET(CPACK_RESOURCE_FILE_LICENSE "/usr/share/cmake-3.5/Templates/CPack.GenericLicense.txt") +SET(CPACK_RESOURCE_FILE_README "/usr/share/cmake-3.5/Templates/CPack.GenericDescription.txt") +SET(CPACK_RESOURCE_FILE_WELCOME "/usr/share/cmake-3.5/Templates/CPack.GenericWelcome.txt") +SET(CPACK_RPM_COMPONENT_INSTALL "ON") +SET(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION "/usr/local") +SET(CPACK_RPM_PACKAGE_GROUP "System Environment/Base") +SET(CPACK_RPM_PACKAGE_LICENSE "GPL") +SET(CPACK_RPM_PACKAGE_RELEASE "1") +SET(CPACK_RPM_PACKAGE_REQUIRES "boost >= 1.50") +SET(CPACK_RPM_PACKAGE_URL 
"https://github.com/khizmax/libcds") +SET(CPACK_RPM_POST_INSTALL_SCRIPT_FILE "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/post_install_script.sh") +SET(CPACK_RPM_POST_UNINSTALL_SCRIPT_FILE "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/post_uninstall_script.sh") +SET(CPACK_RPM_devel_PACKAGE_REQUIRES "boost >= 1.50, cds-lib = 2.3.2") +SET(CPACK_SET_DESTDIR "OFF") +SET(CPACK_SOURCE_7Z "") +SET(CPACK_SOURCE_CYGWIN "") +SET(CPACK_SOURCE_GENERATOR "TBZ2;TGZ;TXZ;TZ") +SET(CPACK_SOURCE_OUTPUT_CONFIG_FILE "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CPackSourceConfig.cmake") +SET(CPACK_SOURCE_TBZ2 "ON") +SET(CPACK_SOURCE_TGZ "ON") +SET(CPACK_SOURCE_TXZ "ON") +SET(CPACK_SOURCE_TZ "ON") +SET(CPACK_SOURCE_ZIP "OFF") +SET(CPACK_SYSTEM_NAME "Linux") +SET(CPACK_TOPLEVEL_TAG "Linux") +SET(CPACK_WIX_SIZEOF_VOID_P "8") + +if(NOT CPACK_PROPERTIES_FILE) + set(CPACK_PROPERTIES_FILE "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CPackProperties.cmake") +endif() + +if(EXISTS ${CPACK_PROPERTIES_FILE}) + include(${CPACK_PROPERTIES_FILE}) +endif() diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CPackSourceConfig.cmake b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CPackSourceConfig.cmake new file mode 100644 index 0000000..f3cc8ff --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CPackSourceConfig.cmake @@ -0,0 +1,115 @@ +# This file will be configured to contain variables for CPack. These variables +# should be set in the CMake list file of the project before CPack module is +# included. 
The list of available CPACK_xxx variables and their associated +# documentation may be obtained using +# cpack --help-variable-list +# +# Some variables are common to all generators (e.g. CPACK_PACKAGE_NAME) +# and some are specific to a generator +# (e.g. CPACK_NSIS_EXTRA_INSTALL_COMMANDS). The generator specific variables +# usually begin with CPACK__xxxx. + + +SET(CPACK_ARCHIVE_COMPONENT_INSTALL "ON") +SET(CPACK_BINARY_7Z "") +SET(CPACK_BINARY_BUNDLE "") +SET(CPACK_BINARY_CYGWIN "") +SET(CPACK_BINARY_DEB "OFF") +SET(CPACK_BINARY_DRAGNDROP "") +SET(CPACK_BINARY_IFW "OFF") +SET(CPACK_BINARY_NSIS "OFF") +SET(CPACK_BINARY_OSXX11 "") +SET(CPACK_BINARY_PACKAGEMAKER "") +SET(CPACK_BINARY_RPM "OFF") +SET(CPACK_BINARY_STGZ "ON") +SET(CPACK_BINARY_TBZ2 "OFF") +SET(CPACK_BINARY_TGZ "ON") +SET(CPACK_BINARY_TXZ "OFF") +SET(CPACK_BINARY_TZ "ON") +SET(CPACK_BINARY_WIX "") +SET(CPACK_BINARY_ZIP "") +SET(CPACK_CMAKE_GENERATOR "Unix Makefiles") +SET(CPACK_COMPONENTS_ALL "devel;lib") +SET(CPACK_COMPONENT_GROUP_DEVELOPMENT_DESCRIPTION "All of the tools you'll ever need to develop lock-free oriented software with libcds") +SET(CPACK_COMPONENT_GROUP_RUNTIME_DESCRIPTION "Only libcds library for runtime") +SET(CPACK_COMPONENT_UNSPECIFIED_HIDDEN "TRUE") +SET(CPACK_COMPONENT_UNSPECIFIED_REQUIRED "TRUE") +SET(CPACK_COMPONENT_devel_DEPENDS "lib") +SET(CPACK_COMPONENT_devel_DISPLAY_NAME "C++ Headers") +SET(CPACK_COMPONENT_devel_GROUP "Development") +SET(CPACK_COMPONENT_lib_DISPLAY_NAME "Libraries") +SET(CPACK_COMPONENT_lib_GROUP "Runtime") +SET(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/post_install_script.sh;;/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/post_uninstall_script.sh;") +SET(CPACK_DEBIAN_PACKAGE_DEPENDS "boost (>= 1.50)") +SET(CPACK_DEBIAN_PACKAGE_HOMEPAGE "https://github.com/khizmax/libcds") 
+SET(CPACK_DEB_COMPONENT_INSTALL "ON") +SET(CPACK_GENERATOR "TBZ2;TGZ;TXZ;TZ") +SET(CPACK_IGNORE_FILES "/CVS/;/\\.svn/;/\\.bzr/;/\\.hg/;/\\.git/;\\.swp\$;\\.#;/#") +SET(CPACK_INSTALLED_DIRECTORIES "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2;/") +SET(CPACK_INSTALL_CMAKE_PROJECTS "") +SET(CPACK_INSTALL_PREFIX "/usr/local") +SET(CPACK_MODULE_PATH "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake") +SET(CPACK_NSIS_CONTACT "Max Khizhinsky ") +SET(CPACK_NSIS_DISPLAY_NAME "cds") +SET(CPACK_NSIS_DISPLAY_NAME_SET "TRUE") +SET(CPACK_NSIS_ENABLE_UNINSTALL_BEFORE_INSTALL "ON") +SET(CPACK_NSIS_INSTALLER_ICON_CODE "") +SET(CPACK_NSIS_INSTALLER_MUI_ICON_CODE "") +SET(CPACK_NSIS_INSTALL_ROOT "$PROGRAMFILES") +SET(CPACK_NSIS_MODIFY_PATH "ON") +SET(CPACK_NSIS_PACKAGE_NAME "cds") +SET(CPACK_OUTPUT_CONFIG_FILE "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CPackConfig.cmake") +SET(CPACK_PACKAGE_CONTACT "Max Khizhinsky ") +SET(CPACK_PACKAGE_DEFAULT_LOCATION "/") +SET(CPACK_PACKAGE_DESCRIPTION_FILE "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/description.txt") +SET(CPACK_PACKAGE_DESCRIPTION_SUMMARY "Library of concurrent data structures") +SET(CPACK_PACKAGE_FILE_NAME "cds-2.3.2-Source") +SET(CPACK_PACKAGE_INSTALL_DIRECTORY "cds") +SET(CPACK_PACKAGE_INSTALL_REGISTRY_KEY "cds") +SET(CPACK_PACKAGE_NAME "cds") +SET(CPACK_PACKAGE_RELEASE "1") +SET(CPACK_PACKAGE_RELOCATABLE "true") +SET(CPACK_PACKAGE_VENDOR "Humanity") +SET(CPACK_PACKAGE_VERSION "2.3.2") +SET(CPACK_PACKAGE_VERSION_MAJOR "0") +SET(CPACK_PACKAGE_VERSION_MINOR "1") +SET(CPACK_PACKAGE_VERSION_PATCH "1") +SET(CPACK_RESOURCE_FILE_LICENSE "/usr/share/cmake-3.5/Templates/CPack.GenericLicense.txt") +SET(CPACK_RESOURCE_FILE_README "/usr/share/cmake-3.5/Templates/CPack.GenericDescription.txt") 
+SET(CPACK_RESOURCE_FILE_WELCOME "/usr/share/cmake-3.5/Templates/CPack.GenericWelcome.txt") +SET(CPACK_RPM_COMPONENT_INSTALL "ON") +SET(CPACK_RPM_EXCLUDE_FROM_AUTO_FILELIST_ADDITION "/usr/local") +SET(CPACK_RPM_PACKAGE_GROUP "System Environment/Base") +SET(CPACK_RPM_PACKAGE_LICENSE "GPL") +SET(CPACK_RPM_PACKAGE_RELEASE "1") +SET(CPACK_RPM_PACKAGE_REQUIRES "boost >= 1.50") +SET(CPACK_RPM_PACKAGE_URL "https://github.com/khizmax/libcds") +SET(CPACK_RPM_POST_INSTALL_SCRIPT_FILE "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/post_install_script.sh") +SET(CPACK_RPM_POST_UNINSTALL_SCRIPT_FILE "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/post_uninstall_script.sh") +SET(CPACK_RPM_devel_PACKAGE_REQUIRES "boost >= 1.50, cds-lib = 2.3.2") +SET(CPACK_SET_DESTDIR "OFF") +SET(CPACK_SOURCE_7Z "") +SET(CPACK_SOURCE_CYGWIN "") +SET(CPACK_SOURCE_GENERATOR "TBZ2;TGZ;TXZ;TZ") +SET(CPACK_SOURCE_IGNORE_FILES "/CVS/;/\\.svn/;/\\.bzr/;/\\.hg/;/\\.git/;\\.swp\$;\\.#;/#") +SET(CPACK_SOURCE_INSTALLED_DIRECTORIES "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2;/") +SET(CPACK_SOURCE_OUTPUT_CONFIG_FILE "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CPackSourceConfig.cmake") +SET(CPACK_SOURCE_PACKAGE_FILE_NAME "cds-2.3.2-Source") +SET(CPACK_SOURCE_TBZ2 "ON") +SET(CPACK_SOURCE_TGZ "ON") +SET(CPACK_SOURCE_TOPLEVEL_TAG "Linux-Source") +SET(CPACK_SOURCE_TXZ "ON") +SET(CPACK_SOURCE_TZ "ON") +SET(CPACK_SOURCE_ZIP "OFF") +SET(CPACK_STRIP_FILES "") +SET(CPACK_SYSTEM_NAME "Linux") +SET(CPACK_TOPLEVEL_TAG "Linux-Source") +SET(CPACK_WIX_SIZEOF_VOID_P "8") + +if(NOT CPACK_PROPERTIES_FILE) + set(CPACK_PROPERTIES_FILE "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CPackProperties.cmake") +endif() + +if(EXISTS 
${CPACK_PROPERTIES_FILE}) + include(${CPACK_PROPERTIES_FILE}) +endif() diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/Makefile b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/Makefile new file mode 100644 index 0000000..b940562 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/Makefile @@ -0,0 +1,564 @@ +# CMAKE generated file: DO NOT EDIT! +# Generated by "Unix Makefiles" Generator, CMake Version 3.5 + +# Default target executed when no arguments are given to make. +default_target: all + +.PHONY : default_target + +# Allow only one "make -f Makefile2" at a time, but pass parallelism. +.NOTPARALLEL: + + +#============================================================================= +# Special targets provided by cmake. + +# Disable implicit rules so canonical targets will work. +.SUFFIXES: + + +# Remove some rules from gmake that .SUFFIXES does not remove. +SUFFIXES = + +.SUFFIXES: .hpux_make_needs_suffix_list + + +# Suppress display of executed commands. +$(VERBOSE).SILENT: + + +# A target that is always out of date. +cmake_force: + +.PHONY : cmake_force + +#============================================================================= +# Set environment variables for the build. + +# The shell in which to execute make rules. +SHELL = /bin/sh + +# The CMake executable. +CMAKE_COMMAND = /usr/bin/cmake + +# The command to remove a file. +RM = /usr/bin/cmake -E remove -f + +# Escaping for special characters. +EQUALS = = + +# The top-level source directory on which CMake was run. +CMAKE_SOURCE_DIR = /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2 + +# The top-level build directory on which CMake was run. 
+CMAKE_BINARY_DIR = /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release + +#============================================================================= +# Targets provided globally by CMake. + +# Special rule for the target edit_cache +edit_cache: + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "No interactive CMake dialog available..." + /usr/bin/cmake -E echo No\ interactive\ CMake\ dialog\ available. +.PHONY : edit_cache + +# Special rule for the target edit_cache +edit_cache/fast: edit_cache + +.PHONY : edit_cache/fast + +# Special rule for the target package_source +package_source: + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "Run CPack packaging tool for source..." + /usr/bin/cpack --config ./CPackSourceConfig.cmake /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CPackSourceConfig.cmake +.PHONY : package_source + +# Special rule for the target package_source +package_source/fast: package_source + +.PHONY : package_source/fast + +# Special rule for the target rebuild_cache +rebuild_cache: + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "Running CMake to regenerate build system..." 
+ /usr/bin/cmake -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) +.PHONY : rebuild_cache + +# Special rule for the target rebuild_cache +rebuild_cache/fast: rebuild_cache + +.PHONY : rebuild_cache/fast + +# Special rule for the target list_install_components +list_install_components: + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "Available install components are: \"devel\" \"lib\"" +.PHONY : list_install_components + +# Special rule for the target list_install_components +list_install_components/fast: list_install_components + +.PHONY : list_install_components/fast + +# Special rule for the target install +install: preinstall + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "Install the project..." + /usr/bin/cmake -P cmake_install.cmake +.PHONY : install + +# Special rule for the target install +install/fast: preinstall/fast + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "Install the project..." + /usr/bin/cmake -P cmake_install.cmake +.PHONY : install/fast + +# Special rule for the target package +package: preinstall + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "Run CPack packaging tool..." + /usr/bin/cpack --config ./CPackConfig.cmake +.PHONY : package + +# Special rule for the target package +package/fast: package + +.PHONY : package/fast + +# Special rule for the target install/strip +install/strip: preinstall + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "Installing the project stripped..." + /usr/bin/cmake -DCMAKE_INSTALL_DO_STRIP=1 -P cmake_install.cmake +.PHONY : install/strip + +# Special rule for the target install/strip +install/strip/fast: install/strip + +.PHONY : install/strip/fast + +# Special rule for the target install/local +install/local: preinstall + @$(CMAKE_COMMAND) -E cmake_echo_color --switch=$(COLOR) --cyan "Installing only the local directory..." 
+ /usr/bin/cmake -DCMAKE_INSTALL_LOCAL_ONLY=1 -P cmake_install.cmake +.PHONY : install/local + +# Special rule for the target install/local +install/local/fast: install/local + +.PHONY : install/local/fast + +# The main all target +all: cmake_check_build_system + $(CMAKE_COMMAND) -E cmake_progress_start /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/progress.marks + $(MAKE) -f CMakeFiles/Makefile2 all + $(CMAKE_COMMAND) -E cmake_progress_start /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles 0 +.PHONY : all + +# The main clean target +clean: + $(MAKE) -f CMakeFiles/Makefile2 clean +.PHONY : clean + +# The main clean target +clean/fast: clean + +.PHONY : clean/fast + +# Prepare targets for installation. +preinstall: all + $(MAKE) -f CMakeFiles/Makefile2 preinstall +.PHONY : preinstall + +# Prepare targets for installation. +preinstall/fast: + $(MAKE) -f CMakeFiles/Makefile2 preinstall +.PHONY : preinstall/fast + +# clear depends +depend: + $(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 1 +.PHONY : depend + +#============================================================================= +# Target rules for targets named cds + +# Build rule for target. +cds: cmake_check_build_system + $(MAKE) -f CMakeFiles/Makefile2 cds +.PHONY : cds + +# fast build rule for target. +cds/fast: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/build +.PHONY : cds/fast + +#============================================================================= +# Target rules for targets named cds-s + +# Build rule for target. +cds-s: cmake_check_build_system + $(MAKE) -f CMakeFiles/Makefile2 cds-s +.PHONY : cds-s + +# fast build rule for target. 
+cds-s/fast: + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/build +.PHONY : cds-s/fast + +src/dhp.o: src/dhp.cpp.o + +.PHONY : src/dhp.o + +# target to build an object file +src/dhp.cpp.o: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/dhp.cpp.o + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/dhp.cpp.o +.PHONY : src/dhp.cpp.o + +src/dhp.i: src/dhp.cpp.i + +.PHONY : src/dhp.i + +# target to preprocess a source file +src/dhp.cpp.i: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/dhp.cpp.i + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/dhp.cpp.i +.PHONY : src/dhp.cpp.i + +src/dhp.s: src/dhp.cpp.s + +.PHONY : src/dhp.s + +# target to generate assembly for a file +src/dhp.cpp.s: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/dhp.cpp.s + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/dhp.cpp.s +.PHONY : src/dhp.cpp.s + +src/dllmain.o: src/dllmain.cpp.o + +.PHONY : src/dllmain.o + +# target to build an object file +src/dllmain.cpp.o: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/dllmain.cpp.o + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/dllmain.cpp.o +.PHONY : src/dllmain.cpp.o + +src/dllmain.i: src/dllmain.cpp.i + +.PHONY : src/dllmain.i + +# target to preprocess a source file +src/dllmain.cpp.i: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/dllmain.cpp.i + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/dllmain.cpp.i +.PHONY : src/dllmain.cpp.i + +src/dllmain.s: src/dllmain.cpp.s + +.PHONY : src/dllmain.s + +# target to generate assembly for a file +src/dllmain.cpp.s: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/dllmain.cpp.s + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/dllmain.cpp.s +.PHONY : src/dllmain.cpp.s + +src/hp.o: src/hp.cpp.o + +.PHONY : src/hp.o + +# target to build an object file 
+src/hp.cpp.o: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/hp.cpp.o + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/hp.cpp.o +.PHONY : src/hp.cpp.o + +src/hp.i: src/hp.cpp.i + +.PHONY : src/hp.i + +# target to preprocess a source file +src/hp.cpp.i: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/hp.cpp.i + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/hp.cpp.i +.PHONY : src/hp.cpp.i + +src/hp.s: src/hp.cpp.s + +.PHONY : src/hp.s + +# target to generate assembly for a file +src/hp.cpp.s: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/hp.cpp.s + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/hp.cpp.s +.PHONY : src/hp.cpp.s + +src/init.o: src/init.cpp.o + +.PHONY : src/init.o + +# target to build an object file +src/init.cpp.o: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/init.cpp.o + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/init.cpp.o +.PHONY : src/init.cpp.o + +src/init.i: src/init.cpp.i + +.PHONY : src/init.i + +# target to preprocess a source file +src/init.cpp.i: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/init.cpp.i + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/init.cpp.i +.PHONY : src/init.cpp.i + +src/init.s: src/init.cpp.s + +.PHONY : src/init.s + +# target to generate assembly for a file +src/init.cpp.s: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/init.cpp.s + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/init.cpp.s +.PHONY : src/init.cpp.s + +src/thread_data.o: src/thread_data.cpp.o + +.PHONY : src/thread_data.o + +# target to build an object file +src/thread_data.cpp.o: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/thread_data.cpp.o + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/thread_data.cpp.o +.PHONY : src/thread_data.cpp.o + +src/thread_data.i: 
src/thread_data.cpp.i + +.PHONY : src/thread_data.i + +# target to preprocess a source file +src/thread_data.cpp.i: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/thread_data.cpp.i + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/thread_data.cpp.i +.PHONY : src/thread_data.cpp.i + +src/thread_data.s: src/thread_data.cpp.s + +.PHONY : src/thread_data.s + +# target to generate assembly for a file +src/thread_data.cpp.s: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/thread_data.cpp.s + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/thread_data.cpp.s +.PHONY : src/thread_data.cpp.s + +src/topology_hpux.o: src/topology_hpux.cpp.o + +.PHONY : src/topology_hpux.o + +# target to build an object file +src/topology_hpux.cpp.o: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/topology_hpux.cpp.o + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/topology_hpux.cpp.o +.PHONY : src/topology_hpux.cpp.o + +src/topology_hpux.i: src/topology_hpux.cpp.i + +.PHONY : src/topology_hpux.i + +# target to preprocess a source file +src/topology_hpux.cpp.i: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/topology_hpux.cpp.i + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/topology_hpux.cpp.i +.PHONY : src/topology_hpux.cpp.i + +src/topology_hpux.s: src/topology_hpux.cpp.s + +.PHONY : src/topology_hpux.s + +# target to generate assembly for a file +src/topology_hpux.cpp.s: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/topology_hpux.cpp.s + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/topology_hpux.cpp.s +.PHONY : src/topology_hpux.cpp.s + +src/topology_linux.o: src/topology_linux.cpp.o + +.PHONY : src/topology_linux.o + +# target to build an object file +src/topology_linux.cpp.o: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/topology_linux.cpp.o + $(MAKE) -f 
CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/topology_linux.cpp.o +.PHONY : src/topology_linux.cpp.o + +src/topology_linux.i: src/topology_linux.cpp.i + +.PHONY : src/topology_linux.i + +# target to preprocess a source file +src/topology_linux.cpp.i: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/topology_linux.cpp.i + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/topology_linux.cpp.i +.PHONY : src/topology_linux.cpp.i + +src/topology_linux.s: src/topology_linux.cpp.s + +.PHONY : src/topology_linux.s + +# target to generate assembly for a file +src/topology_linux.cpp.s: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/topology_linux.cpp.s + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/topology_linux.cpp.s +.PHONY : src/topology_linux.cpp.s + +src/topology_osx.o: src/topology_osx.cpp.o + +.PHONY : src/topology_osx.o + +# target to build an object file +src/topology_osx.cpp.o: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/topology_osx.cpp.o + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/topology_osx.cpp.o +.PHONY : src/topology_osx.cpp.o + +src/topology_osx.i: src/topology_osx.cpp.i + +.PHONY : src/topology_osx.i + +# target to preprocess a source file +src/topology_osx.cpp.i: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/topology_osx.cpp.i + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/topology_osx.cpp.i +.PHONY : src/topology_osx.cpp.i + +src/topology_osx.s: src/topology_osx.cpp.s + +.PHONY : src/topology_osx.s + +# target to generate assembly for a file +src/topology_osx.cpp.s: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/topology_osx.cpp.s + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/topology_osx.cpp.s +.PHONY : src/topology_osx.cpp.s + +src/urcu_gp.o: src/urcu_gp.cpp.o + +.PHONY : src/urcu_gp.o + +# target to build an object file 
+src/urcu_gp.cpp.o: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/urcu_gp.cpp.o + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/urcu_gp.cpp.o +.PHONY : src/urcu_gp.cpp.o + +src/urcu_gp.i: src/urcu_gp.cpp.i + +.PHONY : src/urcu_gp.i + +# target to preprocess a source file +src/urcu_gp.cpp.i: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/urcu_gp.cpp.i + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/urcu_gp.cpp.i +.PHONY : src/urcu_gp.cpp.i + +src/urcu_gp.s: src/urcu_gp.cpp.s + +.PHONY : src/urcu_gp.s + +# target to generate assembly for a file +src/urcu_gp.cpp.s: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/urcu_gp.cpp.s + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/urcu_gp.cpp.s +.PHONY : src/urcu_gp.cpp.s + +src/urcu_sh.o: src/urcu_sh.cpp.o + +.PHONY : src/urcu_sh.o + +# target to build an object file +src/urcu_sh.cpp.o: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/urcu_sh.cpp.o + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/urcu_sh.cpp.o +.PHONY : src/urcu_sh.cpp.o + +src/urcu_sh.i: src/urcu_sh.cpp.i + +.PHONY : src/urcu_sh.i + +# target to preprocess a source file +src/urcu_sh.cpp.i: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/urcu_sh.cpp.i + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/urcu_sh.cpp.i +.PHONY : src/urcu_sh.cpp.i + +src/urcu_sh.s: src/urcu_sh.cpp.s + +.PHONY : src/urcu_sh.s + +# target to generate assembly for a file +src/urcu_sh.cpp.s: + $(MAKE) -f CMakeFiles/cds.dir/build.make CMakeFiles/cds.dir/src/urcu_sh.cpp.s + $(MAKE) -f CMakeFiles/cds-s.dir/build.make CMakeFiles/cds-s.dir/src/urcu_sh.cpp.s +.PHONY : src/urcu_sh.cpp.s + +# Help Target +help: + @echo "The following are some of the valid targets for this Makefile:" + @echo "... all (the default if no target is provided)" + @echo "... clean" + @echo "... depend" + @echo "... 
edit_cache" + @echo "... package_source" + @echo "... rebuild_cache" + @echo "... list_install_components" + @echo "... install" + @echo "... package" + @echo "... cds" + @echo "... cds-s" + @echo "... install/strip" + @echo "... install/local" + @echo "... src/dhp.o" + @echo "... src/dhp.i" + @echo "... src/dhp.s" + @echo "... src/dllmain.o" + @echo "... src/dllmain.i" + @echo "... src/dllmain.s" + @echo "... src/hp.o" + @echo "... src/hp.i" + @echo "... src/hp.s" + @echo "... src/init.o" + @echo "... src/init.i" + @echo "... src/init.s" + @echo "... src/thread_data.o" + @echo "... src/thread_data.i" + @echo "... src/thread_data.s" + @echo "... src/topology_hpux.o" + @echo "... src/topology_hpux.i" + @echo "... src/topology_hpux.s" + @echo "... src/topology_linux.o" + @echo "... src/topology_linux.i" + @echo "... src/topology_linux.s" + @echo "... src/topology_osx.o" + @echo "... src/topology_osx.i" + @echo "... src/topology_osx.s" + @echo "... src/urcu_gp.o" + @echo "... src/urcu_gp.i" + @echo "... src/urcu_gp.s" + @echo "... src/urcu_sh.o" + @echo "... src/urcu_sh.i" + @echo "... src/urcu_sh.s" +.PHONY : help + + + +#============================================================================= +# Special targets to cleanup operation of make. + +# Special rule to run CMake to check the build system integrity. +# No rule that depends on this can have commands that come from listfiles +# because they might be regenerated. 
+cmake_check_build_system: + $(CMAKE_COMMAND) -H$(CMAKE_SOURCE_DIR) -B$(CMAKE_BINARY_DIR) --check-build-system CMakeFiles/Makefile.cmake 0 +.PHONY : cmake_check_build_system + diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/arch.c b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/arch.c new file mode 100644 index 0000000..dd042e4 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/arch.c @@ -0,0 +1,46 @@ + +#if defined(__arm__) || defined(__TARGET_ARCH_ARM) + #if defined(__ARM_ARCH_7__) \ + || defined(__ARM_ARCH_7A__) \ + || defined(__ARM_ARCH_7R__) \ + || defined(__ARM_ARCH_7M__) \ + || (defined(__TARGET_ARCH_ARM) && __TARGET_ARCH_ARM-0 >= 7) + #error cmake_ARCH armv7 + #elif defined(__ARM_ARCH_6__) \ + || defined(__ARM_ARCH_6J__) \ + || defined(__ARM_ARCH_6T2__) \ + || defined(__ARM_ARCH_6Z__) \ + || defined(__ARM_ARCH_6K__) \ + || defined(__ARM_ARCH_6ZK__) \ + || defined(__ARM_ARCH_6M__) \ + || (defined(__TARGET_ARCH_ARM) && __TARGET_ARCH_ARM-0 >= 6) + #error cmake_ARCH armv6 + #elif defined(__ARM_ARCH_5TEJ__) \ + || (defined(__TARGET_ARCH_ARM) && __TARGET_ARCH_ARM-0 >= 5) + #error cmake_ARCH armv5 + #else + #error cmake_ARCH arm + #endif +#elif defined(__aarch64__) + #if defined(__ARM_ARCH) && __ARM_ARCH == 8 + #error cmake_ARCH armv8 + #else + #error cmake_ARCH arm64 + #endif +#elif defined(__i386) || defined(__i386__) || defined(_M_IX86) + #error cmake_ARCH i386 +#elif defined(__x86_64) || defined(__x86_64__) || defined(__amd64) || defined(_M_X64) + #error cmake_ARCH x86_64 +#elif defined(__ia64) || defined(__ia64__) || defined(_M_IA64) + #error cmake_ARCH ia64 +#elif defined(__ppc__) || defined(__ppc) || defined(__powerpc__) \ + || defined(_ARCH_COM) || defined(_ARCH_PWR) || defined(_ARCH_PPC) \ + || defined(_M_MPPC) || defined(_M_PPC) + #if defined(__ppc64__) || defined(__powerpc64__) || defined(__64BIT__) + #error cmake_ARCH ppc64 + #else + #error cmake_ARCH ppc + #endif +#endif + 
+#error cmake_ARCH unknown diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/bin/libcds-s.a b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/bin/libcds-s.a new file mode 100644 index 0000000000000000000000000000000000000000..f3302491a61d0179b4b0ebb25e9911bcb49f0dea GIT binary patch literal 67134 zcmeHw3w&Hvwf9UWX+sN{00klsnL>~hq-4^x%}~Tio3v+O(uO{u3OG%ZX_HHu#AK!q zE{{SI;5ZJ}*S+|-S3dRrM7=6}`YHOUq!b#&2Q4ZHi1Z3bLnsD8tRnRL|MyvYX3v?K zHU+ur{oMIY_CD*Zz4qE`uf6u;oU``K^V|B{J1>}aZXh?*8kbJm{2vI^*VfiMC~Ky9 zJf2HDp304r`<+jZ(i3II-@we2lei?N|1$TUS@x5$|r> z66;U)B)T@lV~PH@uEfB6rhAi#<_tlNJ}BP9`{5Lji(i;La59Y>?2jk9`r{q3 zzC?e0cYI)=dHF>v>S8PEW4cX%UDw$coZHiz03%&&k*=%a9Zhvg4_02Hl%)t#Mg&Ff zHwClGR1BroDJOCm<;)q_&>yU8hdvH;wZ{h9+j^Q?m#zrLf}PsBFFH}v*ziJ@2YwznmE`VJ-rF;f=ufsM znm26NK%PQmFP7NS7cWpqs0-L_J?-&Aw1RbAJ$>D6xH66LK7V@T85Iz zZ&cMD4ejV=33PqcL7XPYSdwfT1VbHhNT_#94kDVbE*6|4dHdt-z5Us~Z)BdYS|TTF zp)%&QB@%7zor33XarGLX=uR=SU(6u!8wBF$e<9<4N7v< zt+lzGSINH?nw01oNOZNMk5lvN3CK}8tFD{Esbnqw=2v6Xc+Ln~M4atawDvj%fICS$1zVjY@HuB|Faq{%T>nq7r6 zM_bso{{FTtYNX6*JR&KL%jxSYGxn}vL3!LlGFdC+>X(W_E<%C2c?9gRC_bE-4E0ehOsdJs#Q!BG~gxY5;kg-xoy z8$7g4ozrx1qnfpCB$_wN%JlR4WPf{7o7uYHdW=n&v3AE3y*-$fIg^CQ(iOF_y4v;e zo_If|&CWo9sa>LNeRK0X%uSasTJ>fKu1&667w25@%@GWSG=~o3iAm54)(v!KYpg07 z3=MRx$E-V+%T};nz@2S99o_N%=B`#EcJ-|5jU`$$lL41JP3pPwEK-bHoW`o4c4cM> zP|pc-)`}KE;^OI0NDx4*r?tPgeG!&D*-^VN#o&A(N3j}PH4yJ$rlq}^cCTBuVs5mn zC%L&H(c9PC-MfB^i^+ThMUG^!Jm~GuWgs70k8qxI-3xqUeP1ZqP^Y15rpZK)^Yq#6 zeSNcg&z+{(jdk`VH|GJnS*1K^??48|mXQ0Zm}0vP^Xi>VhPpWoncar#nxl(Lyk2ZN zIKk_=z>~ewP>*LmMmIyUZ7^xTBtxWFn4zS?AmkYCDm2%+Tgs|we_=uwX>WXfx6n@ zyxMuSA>^GQ`Mp;y_jor~dQUoXYWX(sM7k%-z2hV%tX9%TNn9*qwMsoN9w>C*S5QJ8 zj}L!^f)A7m1z%PgncLj{@dCeGAUfuy&of1!xsfk}?S>zb3y>?HXKEgNHXz3s_0lrC zSl|K4q2)G!>!XeO>KcAl;F@n8|5D((y*2!J2>NJ4KAO&Az>hcTtMNArTwgZ`Utbh> zSg!T`M+I&PT;qFmr_s|JJpe_6uzx)fbg4y{DCSxOjq{IvqyI7cM(l44*hW=asP(T} zL9=k(vjYa4d330k1Y!IICO~m9I zUB>8n4zaKkd6Y4iG$Y-gHRvQHD8sM@=vWNDw!cRL&T@1=trEO<2wclsz@9!yXgm$q 
z`StZXks!>S|7JmO^XtwZ1YV)~>-<6{6A{-8VO*&4wIaz)oqwxb>&RXHV%#82Ehmkq z^Uu=&iFNrpUIw^Mo2cJ6FO0Msw?p!qx=r*E{zC|>|Ji?T{$sdRsQqgTv_D5DonN=> z(SW(zj-{=U{5`xdf}4L{yLngWg1BE|j$_mDbC*w^3YEWKh?cvEyUXwFGiPFfz#->i z!TLGQBS?52s$>VuxhyT>DCp)*Z21ln_cR4xs_^s`~^;x0OO-ESUpY^F+t$S78 z%v{h)FFeq=a@7jUzR%je@7B=q97DEcUNTh{-voIqtVdrjps!}J1#l$#C^gExY`(D;h6{Xgtf((IdPU zrg1Y>616>+y=(ktuw9 z!hg$+Xen#>p@{wGi2ZZ_(8;_^$0{xR_t9C;L{e8*mq!Nod9CfQ`K;8^{g%DgLS-Fs zM$$8$w^Ai&2>;;iAZLZ{_uudpm3YuU_+3gjHKRIWA6aeLm+X(E&PVze7Pv#pwxq26 zk#y(&i2Xt&^vC{sEA%HT-MD{a5dSAoKMRr=+*@v?TFY^3(}Wco+jvUEZrvZXkFZh~ zJJbe8%E!M+%+T1Tr=s?Ami^nk$D-okGCFIIQ%C>W_3@lp%I0fpx+ z`!SG@+RyI2;Wc0l?@K-t8GNBUk}9o^+Al>?713Ew1z*8^AgUYo;&N;IpL}~)5Pk4f z|8+lO(>7kw_&%liY$Ry!;PW*dR;u)j8cG$6R0T(@&>s@9Xu20o^jgGzxh3>`qBW9A zdaMOWPt@K!5V!1mBB^W2ThbdUty#~ZkW#eowWm7$3PP>4Qsq!wuVq_ETIGnVlFx#U zWrK=+O?mPTD}|yutT|4>$sTL)-WrEJFt-)Fpk8-3TA3qs|4Dq(d zO8Kn8aqze(V5JsSA+E9PUs;e3?+%S5e}!sQM+WaB3fQ2G?L}2%=Rn2^MOlHY{x)8s zB!UW~OTPxW1oGs99v`u{vnNHo+atUG7!CczpYEhS+Y{r{gl`KPnC?SAIeDJ7{WoRN zboX4;;~Gfs>bVmG$AD*V#C{^O{h88;cb~eU48z`N=!cfw06BxrzVfT@%5;D9Jf}yn z`&*%(CR(DQr!44vlzMaBKkIT?Te}%@0Tc;nlqq8W@IDG{@ZLa13v_$!Ktl;N#r)+b zK?(PiS?Q*-2`dE~u?j@Pie{E`@Nn%1t@I*DWM^di3y{dp$nMwCuUhQiTlR0P?N60T z9i!+o(j?O%jZ-{h7o!Qbv3)J?xMlm0V$-0d7o%g{V{Ly9-G7(0dw)ylWq-OF$W(B9 z=lJI-lVc(*;3bFnYh#bSjt;i{>C#LUmZrPU@Ql3xuqt(%)|bXhX`T*b)V{$=Eh)GB zUt0qGNL1Hwu|M>F4EAWa`^c_kKY2i{8?E#VXcvUqlA3R&E=N54ihpPynq6s7Bz5Bm z(V0I0U}W&Ra*u!LII0C&peDF4n(piyKhYX~JmHJlr(1(N&^hk&Mi%sz`v+e@)``Z! zW)Io`hHp=4#J9LSnhLZGKb?5g3O$y*-#Qz5PaUvVRtJ=E7~FrrKlB>Lb(Cq1qLIr% z#6NsC26GGMv`W~vN6B@im4<->BLNIPWFVjv#0u^356w_HR#sQiUZM-5+a`RlQWngf zHFyu~;*R8A2c3{vJD9GtwvWR=UH*$oYY?{ElbFsz%cW3-kXl(?#VT6fdss^)JpT>H zp&_Xssc&+Wr4WZl&`MF}j)q?K4}Fqiw4eCSyH)(F=&aYGHZ85HwEue}*5Jguty`^G z_gLPC_NM;`t9Jcs9Ax|-`j7~_9HIuT@x=1)Q6`G@mQ>smT@di!a3N|7gMvY!d4DAI zv;L#UJ_$(Hp4j(WHbvFC-%4ZX0LJr>77Pd}CcjQ%HPKXOP1NqLu8gMos#-$7_YZ9# zRr|^4*{p@Vuf={ovi*RM<4Y<5Rk8QC43AtpFKUmduD%i_V@==sz%p%4y>a7R7OL;Pkt)P)Y%=? 
zl`Y<9qIPGMs zW2d5Nz#16W_GQ*CEB$^*IDBBN2AB~H3rAZt(l&-RbYV4&MF#J!9J@kwSxj~A-ine` zQ$*8!5XPUSMP^4A)c9{;+eI+!q2KS1hMwD0ImWF!-Cr$td;Fa3Q4<;5S>6(QJ^6p4 zp_lwa>)HEL_0~t!j{X6Ov zEiFdh;gQ6#80l3DTC*OBdjFtIiDDisjBQr+d+uO49Fr$G0%PBVRHODDmaB|+t$(Z@n$6R#3)VvKVP47Fqb%+^2yW?OFuG;FK_EZ@xx4quol7+uhQ(t&bV zfdQx*dlp2UzKS_T@@L~uz$S)0YIGxw9WoBs@8XqZ&sD%})}XrfB&J|Q#(bHEv(f(X zsudCYXRyB*Bkyh?{^0Yv??Y^tVP0}KtMdI0-t#xV2DA5V5=0t#HkUWqodFDRmDXrj ztqNg*{{U5-7YV%_@h|#W^YA|O!m95`mfg6hu||0X&eW<2;!tzc#w)NhYwXQh!>`=M zlAG)$0n6Vs0eg0r!$cEo=caONbcs`-eQ6~MeHatjW313_DQV&T&BG&V_76s(2+Lsd zm!RaSEID_+1oM2R+@O_i7d?y6K15R8)m1HN{If!PA??H|S^JV&a9{;`?v6?e=Hx{O z9piN*_33IBjG0^G&Y|l2k@Si&#fVWBv41)Kd9%*Ce{ki&X$maXb(rWqTWY00vkRH6 z)HPM=(yPZWj!b*hog}*NR)wUhjiG01G}TgNrO&Uz6hCT*Ve%hl>-cZ_CNf0q-$l0n zx)c^HaSp_rfa$v@_ygUq2Df{yQCL3|Il7RNhc4WM%Kaqy^hN1t zAXVVj5ke85Ac37*DK%O;XvU7cOt0JKn7Qoyj`DgH^1`gsnC_MbM-;x5I+hE$;fE57 zF##SettJsX3Apo1BS<7??!3V%0v3XH9W&SZGirF}5|mnnQY~c#KSB;cCHWmp9~{Ck z@AmB^VqwCX{(wRN?l#Nzf{|*LacpuLOwag#sD4I5<*r<{>^Gg5H z{5tu@f1=wT;{(?CYDo!coQgJ1o-p=NFRQk8w`JlnE42cgFETmT z`r+bV8BkOJ0(~~(ySh5B25Koh?r87uKHUud;p>#u$qXX8e`qq}cRT#5ig0awvg#x` zs?0?)jPph-bq)p7fqtQoc!3enKeUyJ{Hh$w`JSsD(hk;Pv_o!3uSdIJ?TkT+rNSVp z_8k8{`fKt2?1%*uha#PN-L}j^35}_0iHar82oD`d&P5SUe#fN9Xf;~K;u#!8;aSW!Ag= zob%rD8vZ*L^kE9#=}7huRc9W4$UoGI`&PP_3&64nEKeo2c(7-NDPHQ@iKusvmEz#) zALLvM+gbj>Cy^i`Hi)>xG^3guT@lB8q0eJeCUwaKccv&Wdt%+TXlltsG=1Gfi~U+O zw0%=p?c;=Y4?t(Pf)*N;`#LP8|K(_C|Hd;c`_c(3wQRrIL9;K#z8Q3nbXy&|gQJz> zU$X2+F}KD#Zts!kc2Rr3+BS3Q{3@to8JM^#YQH{qI(ud4j{}#e9X)6`7{^+cdwb*> zyAT-NA~sf8WPEI~gPuVi*v*|ZZXiZruVJDunXv4|*u*=sdhA4n3zC~5 za3f*+{!%PWu-SGWb}w3B{V|bh@jg3VO8G`o{uXQ!MC?Z``?;vi&9%tjZ!7oi-gn>t z7*9SPNpXj1Uu7iKg4xr)7OaA>1&3Wt*7{lQ#;Br@o10|ZlyUma;A^K|wukuy@^K>#gQtta0cv3Gn1pq& zTCA%He#@*sVA#O0gk@e7J1XUo?SC$f_`i2=&a!<97BrFT4>$_&uOQiy{yw+z)qE70 zp$dD~jzd#IuO_aDq^3qvmtvo>7mcX~U~DXmpXk_UwLeJN+sy%_p&QLLR{Clz&FvSI zlrdX)oRvUV8O5;tDiEN+N{gDSlAnvh9<^{D@(oBBb~)k7^t$o@4i(Q)%ZxoA5wP}M9E7ib949i;VXQ3)B!w0TCFKYjZTXtMka_@h_3N;J_ 
z#;$kDv+Uj0?mu#sD>oN3Y#Ola2RM6t$_l-dD6>KrB;FsvG&>qEQ?fo?tN$sE7c$Ci3hl!RKUK46`Jb*$jA;g zGikk>*3nUa&Z98$jH)YDk{YNmWwu7u<0^GyTW&G3kd4o6V@jU7+d&=wiBVj5m>F!iH(<{D|v832$N^n5I@Kq=NW5JWH4`mQHpx; zAgCn5{bPG!Ii#AqZ5;{y(w{yB_z^yj*fIWZj;v2k)n`>EvO>|aN#1IyA^ zp8+vLAY)Is`>L3;iy9;aB(b@edpF7QPdw-+Q5nsTIbKk(e+vn+osX z|F()s!d5GY$HQmL44lMutCaIx+0$52Iasoyq8zYy;&-#$n}SZ|SzJ;1!IH*`svCWc z6@kIhlHJoPs(`<+qP&UE;T!N%^F74>N&M!D%3DgTimJ44VMXABrAsPmZY*o8s2!Z* zOM5G7Kx1J=)k2kDz32BbvM|i&KhV9AWexh8DgqAe8KjAzUUfvibP4i*ki6Vj+E`ID zSk_e0T5|I=MHx9A%GPvG7!WZ)1h`iuy2?#tgt)dnXle;C;Gw&gK zuWF5|w-l!;lkL;*Dt3ZwH48wP4&zwL1j_jk%elqZToFi@F082eAnSkQl*WpN!KqCZ zeZKLMX%!798U;c`8F4I`?)!=tcYy4=(+EMc9Rq3$%HuJ0pk82pmW{ynumtuw0+wfF z2APV?mRHo4ydTxwQ4wg|38DQ6?eXJ^a+Wb9c`s*P1kx)PIo^b{u493As4dx;+YHpD zCdvDD#42yLjh9ped`F?iRHw#@^2X^bzf_4h`yNvbU{wP9I?{%YEI}<+7}-@@d_&6n z1ZfHWUQlD^EF;n{nANRppc0jY5b z=_&}6@HNus)%XMW_Y^!^FEzFRehl=0_?6NJ_3QH=7I;8(S&bJ`mJe?;yTb{{??t;% zP7FG361W4+!g=lUoS?xuu}_W0IUrA;qBGqS5U}opy!5Fz!Q_S!`PAEC1^~(1K!^3+ zMny$X{mMy#&dHL>MyI3xB?jEoX`koqSwth{%W!D`k@R_H<^tr(=Q+Iy{EQ;-GmF4$ z^5Dx<`?0g<2cO5(*6jV_-vkt+1ECfOhx4rfvlsd%pdb97X7p1%&eQJF{<^~#iom~8 z1pX66pKlgA;Yf*J1bn8^E?Tn3!nPGE_nm;BX7p=KXPJVFvVr#7gnoY!I$MgsKT-t# zRlsK&dQ~O#`Hf2z{~ z1j=H8?-4qBF1Vox{9^(Siz3`4<$kFMd_>^81fO$5hP?uxILf(3byPS546F7Mldk50 zxDy4uPc@J9rnb3ulyi{SsO;I9!usyQL>M+Lq|3Q_Yrz`Yof$&i2!ixny0bbXZ?1obo`W$e4P&8-1Z7h_B z4p^K#j)w%v#1EJAzw0+X+7Hf;#d>=CH{d%3$&lQX5sQ_*Y946=1=|+nN=i&p#-c7Oi=Jt4BqN}$DU#!NYS=^3P=<;CM z%2?|~?OhI?x`x@xOGsaTZ{O_swMNYc;{7BRWAzW^e8CV*uB;E{cwt2oYh)BZFH}%L z{yQ8$m|WCr$re&=eDUkJKF~ekpvdsiWA>VZ;^H$}mpcr8MvMNEuZM?{$eU;mDp*z_ z=AuQkEf`3yjYVSOjL8vAz9$K@ce-`2X>RtrW8}|#x_3~oKpkz$)bzB$c)+hm8$UFM zuCHzDh$$Brv7Y#*Sa-ZU6*ypD;};O;Il+IC&@VKC{zISB`YGN)dy*@_RcF~>)=}}S3GRG$#^Y_ zt>3&k1`k*Rz4-dQE3qZEvDO1~zM*$xTxn-rZA_ZPX@r;(80?EDwyI}SS5HT5d3+#= z58?6@M?2XN{aUto@rq`6V`*F%ZFaeX%JaN|%9H=Ybi5(COurkG@QU;g$47ZB7cA)4 zL;RV=2IAf8@>@Gt$j3(^O;@VpIE6Ee%s(6?{T(?-f}6G=%mTbHwXN;i7^I09=!z*1 zY0CE)rd8{dkBF6#JPn}cT$Bf`-_!~3Q5!uA)hwHP8O}43KjQ%v!*jB9C!Fv3X!ucB 
zA2ATV06z`CM%IOH{Fe;4i9c5+>cnSJLe}$q5qKNcI}93MmpdKv{6g>xFhAGu3y{$G zeMR8A1rXeGY6EW4`KrKmy)>QQ zyWqOM&&fKL__ap4zcb*boKKYXD)E;Y_yYz!YQVo{z)d}TOyKTz@yYs@^i4S&Wx$sj z^p6*~o6ac)zFF=W0;fF9-OjTN{EH3xa|O=ksl!jNRnIf<*+sORts?j>2L2oaf0Y3b z8}MNRzRZCCP#Bi z;QItlI_7wEu56sT=`;zP_-4CIk%9=Hji2U6+b+V-HQ-39z-=hr(*fIEDrk5>uEo45 z-1&fM`X|bL(%{~ehMy{M@?S4$M%9ugJS1sGQ%@}X7{m-Ke8R&7B1ki4;CkOr%SY4U zn8CodW6@Vtz z76rxo1}=OpjO_-#`P8zS1tMh1l}b0b-~oY0U2y$8;bs?nkKljW1=r63{zKs8pZw@2 z8NYDh>*oOf<-(surV+}egYhk z>*oM_U2y#zpjz5Z)6vfX&JnmSccD|9XQdlo#;t8GxPCtHc^6ziAGphaYn#j>N{w|2 zdD72;SGwT(`OjSj{AA%#MU(S|q+A*mbBHuMyFW0QEj`DZ(qZPw_+>KPFu_ez=F(uI z(9W3Jz;iq^(Yw>6uoa^A`gEuq+}fKweFwW&IUI!Rso?COXEt50J3ILBP`S1{4sr%U zgUbsQ_~9b?uizm$r<@^id4gIgZ| ziJL0FiG=htDZjx5ceD8g+%F`*Jt9A?2W%&Qk75(ho=U+2?+PfLDx`4f^~uZuLk z=3mqKvgDuQVxXFar9ja6^ewI*2y^GJmHhhs6pioJpE<~1Nd9|8{@PAZ|5+u?zlMKT zm#+(uxb+k#vD5{3m;bFI<$JozH0;|rNgetrEwk@9a3 zI~cm2ET8Z6XnGp{IPz2eC5X*%hm_AeW^mVk8W=B>f0yLfn;-~X#x}hTF@0IdvB)^VyetlhEr2O(m2V;#Ovi#HW)A@Dz?Z{7s#*YF5zF>IgO|Db9^!bWzC@De#!4Zv;P;n8%}QNd>Zp6$?s}@k0tr+ck_P( z@)v5qPLbbc+-IQtYVgzfwftz)1qG>W*wf^sZ*|>t^UrHH@61_Q?d0Dj1-=uPoPWB@ z=bXY-gvz*w84&Jp4dCwbak@c{)6YZt{G2zhp&`I9d7qynS2(=ze^@2M@c4PA(gBXI z+-KtB2=-0#$8m=HkK7vOHM;_1HYwu(ehmNc@%axQpa1ajY54fm9tHTR>e*A3b2H^V z@GGjbjKCujzDRBSE^SC+M#dxDCvX`Vy_Zjj;Ksml?*V^0_(FF99VK}0rJKD->UcPZ zqYDe~-UR#?!6lx;h=leh;A01$y^n3PhJOHuJM<|SRQ?19x0gpkyWmrBaI`#voT&wH zZ0m!^MmXujxARlPlZbtG)V{JjYPVPFZ+~~(4VL8Vaei-a-#bSCv-FtI9DGH6VBQ?u zhcBe*G@(?!h{ku&LC4n!zTJnzR(#TH{}BF`Mq6MgP}beI6_waIh@uP<> zwZv;A?v%Js;>}Kc{Z_^Z*68&&J1Hi&Ws|pNlecA)w`Y@gWRrJglOx&W{n_NMZ1NY` zz7>>@=_ld%V9NMi$~Va1>%NWW!95T>MobYWib8ma z5MCmLmk8k{Agp%SKoH-+gRAeW+HWU4I=Fznrhf~VYvp0ff(#r*ryJge;48|+=D1k& z5A8>{X@NT+_@96iT=-T=EKvO|5_%*tF9J{1k@RQS#Pk_g#eN2V%i<;#pd|beF&q3O zB5RT!RuU&$>GxJf7Qn9#otSFRjFAuk$=^m(YK;KDhyI~=q6YLtR2vx_3D8l58mU%C zQ_CqdHebZbfVF|k^^wY! 
zG{%Qam0EMF^rowFN5ttoEvfHT?*e;tZn+1s*W_mq4f|J-?!i3cnGDiJQ6zO9#r2j; zDl^xjg40{>SUKIlLK(`@ThMRd7CumTDx)g#zrfJ=oQs6r15T(o!JA9B*5*Ou`@MG4)bnnu|aWU7u3%%H~d%>$RCQLGS znvRsdk@$`PFDDSro&w)TRh)UDWBkw=0vV9w*Cp#}NW2SuhX^WT#!2V@bW(d+WhF5B z8V>MEfz-e8-;6&!{%hK2zpv zKK2v%2CDMk^Z`I=WZy}rv1bNq}IUd-ygLxX@}?fv53qZXe_65BB$3B5%|Irt55%3_6~)SI3Z*C z>7x#l#qo!=FT@smPx3Y^bt$?OJkn?VcFs`auuN2xi#Pc06#f;ztRDDuWB#3cb3P3%4E09b17OT1nxnW^rsP9Pf%VFl)4#_Z)X{(nTF# zlY$Nd@fRYfm}76qa-+%^|4r7%n{&Pky$09jtShxETPt-omZHkz=e^+C;(+S37i>1` zfx}BCqW(MogG%WiPUHX1;XU}TeWJo|ot0`qd&56mdL=!??M|EppK#hYmDAvs9p0aW z_pU}9x1pSr5AK;DIy~@SQwyCJ@5^=z{MFI53}>Tk5IwKj>mP1+tgX{7+5TvkSgEhD z^RN!eCApRQXEW1J2y3)C;2<>DNL(wi^!(;9uSbV(W`tmk4wHEXBr`qV z5pmR3VpndL)2fbYzJu8r+Gxnh1mC}CA-G^Z+dtIHH5&y2gz;xAXAX!#EKzTzHwRc> zRuSG4n``k4C`Y=Z&0z?Z@S3q3^{UK4r7Ob8sA|R$-a$tv7}y=T%BsgM(+B!n?ju^d zpOVr=1eID!j=Wm6`js7@PeKP=b z+uc;%iWGFAsd}|etWk+ntQHF;G}PtQ4I9y*=RZNPwqI9Qx2k@e*@B9sOMe-)JE~jx zZ#9R9HBp>^qfY){P8|9{ZG(3BhrXw-doXK%A3E|gaP;^DJiI&WeRvheu^QNi2`An` z#aeAG04e{_GR(uNT38e)pOmYiO={tQ1Rcabk-WDhwGPXbrxR`TO_{zN3xf22R&y(d z#24!Ir>1Y^(*HW<)rX|t!J|x`PJR|Aay?Is#PM*NYLwnU0qx-Cm``|!QpVe{88ZJRlm8~o?jlPBhnp#o~ zEep=C_TO+X_>SNNjz~I;6F45h868Ie-LO~pW1@kPQ=ifC2+xFQ!0a5yD%_X6i|fAy zk<>hWsss;%s2JlLmm;r%XF=?QjM?0JXWco<0?KLl*TTa0Lp_p)ZmqeJAros@;FYjr(!Kf5S6?(~&68 z3~9kxSwl|)5D7K-Z+e^wRgDH^w`p>5#1)P~f$K&3OZ|YeY`)(=q%0(qkLPL(4{(0% z?S@xGE*7m2s^=g25sRU9q=Q8*4+5&JGF5NR+`m91;)?>|u7CKCG_61c5CX8$tCiB% z&|U3S5qsy@?{Hs@n^<349GF}8Hqz-tpKyxib%N)B)X} zpFH*~7}E2*)TdOpqZ?wCkTVbV00n0rFp@J5INC(hUzS0KZEl=-a35}3X`F*b-AXO0 zRVx2FfvJIj724?^I+FUF-l{~Mw%`@`{$$nIgTRn}g(m@U`t2Y3y2_NSu~H$+zQ9Ut zfg|CaFd5hG$CeuCCu^eV4=GEAV?MBH0B^8Z;w{Gw%!i1WtQr5`+3{N)SeHFKC%Z^+ z=E7Jgm`5DI{;^(TzhQIos5>~6#u*C)!l^Utj%r38xd00Um<9LdRbye*lT|%=SQ0D~ zI{fey1wFr7Wyv{*VeBT5$FnLJKP3qNFn1WnA5xg^_EoOfUv}mlIL`vxc-ZAN9x!1c zfO8fOxn9ejz2P7FvKr#hKk*Pu7HrMI$0IcPuO!~*v_~}6BkRnl{dm-V)OiwQtS_;S zjHEbHf(njdv0qXci4(By#1V!Nk9&Q&4_>jcyNHpg98HX&2NN}CVPzyEST_W~hJWaX 
zz=ddVv|!C`Qem2ei$Xb0&JF8Nzh3Ql1j#mP$d9H;(Wnf=UaBJqswmKZC5l6(Rj^H3w_v|9DV z1ikwliX(F#$E-}AgPzB+gZ$w9O&na{AOF!|^WhXa{!z1i&@WmQE2E1oiNg{r^~Qar z-H0iFEX)=MB(9QJsQ}>R&b4~$8{O7fpWj8`4*r{5_>1_6M^WO-y~%1l3ep*j=Ex?P>+*FfQ4ZbzSc)%cM(R1I?}D?zs2m1^IR!wKDm#W;$Xj$pMTXU@@cJrh)mPl(%1(#^7CDJI)GrlBt7dT$O1>1VC1~gfP zO#)o-S5DB){#Q!M@N(*|5+ub%KjW&8!gK#L&##UsyLGzfi_^;fWxD6(X=R_B&U>ZD zLZ1*Hy{iO?Z~DsM!1t!oGJG-y;PE$<+=8+AK7J5(eOcL!WuDKJIi&sc`2SgN*%!R< zn(BZ~So8SQ9n(FJ(ktT7k@Jvz@yN1QjwGFvpY%S9Me6HiWj~wZNlyX715?U=H^sA$ zN!<6JkM8K{W?20S4VppqdTF-)QnJ^Z|d2@I)0> zMhEdb2y1ivy<^#|0^cuWpvtdSslxu=6LN2|{@yb_mexJUWKX$(MSuONeVRlr?bk^TEJ;gtf@sSORSmO6eK zU0Ta1&&Pe%H+K{Ke8U}XF|XMkPXIK3Bs3pGTDKE;`ROz|f(NV~zX`#42YAxunr9K9 zL49lCm^}c;|7d0$Nzgc*`D&yNpD&V+9{)$pY;X!+jeJ@*_aL3+Dc5&A<)OcW{xB@9 z<}1(dc~%}5R_yZnK6(&&a6+*kc9mf_>zwsfp7nj6Jwt&0=+7RXM_N+8z! z<@uRkQnAMOB~am^2gbn(ltZWF=XP(797$-Qf2y0%Go(F{%-Id_>%S2VvI9L} zr#e%DxW5qGA>x{SG5` zZ{Elc^_W5XQa>32B7BYXIqgfGI6ip0b%%QdU;8lFBM7IV&v_v?Ejb;x3S86Ca9$sd zFL(!Ed+5;AocM5jtY4$}ndS)y39W0qEaYSTtH3YGc<#v6IiLCxQwSN6Prbcw03><% zzSK|4Cjy8sGuq|e_NlLJb7A~e$oYaG7tX8VKG{VXTv>dOM4{jVr9$Cwe4`%#Y4&$_ zm;%y;;71mLp98q*OI^=}=mTPArq!MAT38lsWg+_MB6RLl@N5qKt>Mp$(D`K%_@5M= ztUhzz!f-0=WtO@F_`GYwz=RZ?^@U-MrtLg(Hh z@Sg&H4D8GV>dnA85yQ(x;K#wn6aNm}X9$bxohR@KG2NQZnj&zlc^o=w!RPU)xfJN# zq2VG(J%9QE;DzM&B;dzGvvkn%e_7~^2tZrpv%w$v*WWU!c^5KYDDYMe5D0p{)m8+) zS>U$`KIdW#9|F7(pPwm$|4o61g+b207@ib(K!|b9#c&GbMgCi52;iKGp&9T(B?orqt&qjgo5>@BiiQ%(F=s3RPr+E4}X+==;BE*jioz}N2u%(ysKRI3x z(*b)uy~M*;vvy72x-n+W3IgmQ*`_GE{H{c>iOQaUCzA==5%@YE{tQk z_lEu+#4~ory|E%Rr$3(P>W_;<^ZIpgVnavig*ruR5r4Hp%yqJ$~ zMGT&p9AEi7l%bHAa}k0$?&0$(%vJ8v^O5FfZo(tI|kqZT-W%p zoQ`KQ74kanB3wVv-qzC$Hi9_p#H@qi=iV*I&}99QXBe5M&Vew=PrBlBI^v18_D+#n zVbLnjYrsJlIT&7pYJp^XqIqqB1`gK3n^vN0Ako!6(7c=isV5+(HSjp8F6cU^LUK@N zR<1BbRbcxaC~ui}aA$(HQ08eRh|j_gul=ZSsjN!~XHu3Oo_kzyjsLj7xo*(@K9Th7 zG4O-9*7zS4f||aD|C<3n&%i%f=J^_5^JDrHH}OA;`7wjW&(JsECjP0Ir<(Yh{$~uh ziT{5DPX7c=PFD4Qhk= z!41E?2s|Pi1aABt2AurpavwZOkr@X1#0!->lay2EJLZPYc{#um5G>oAtWW 
zfSdKYs|cO%3EW+;dknZ)uLlhJX1#u9;G6Y&)WA3E^{l|%^?KgGH|zCh18&w!`<{2x zDVKc+cfGVvdcw_m9Vhs1Jv`BXoA#{NT$Fwsa7dYi$w$~>Oe6zi77r48=Uor5__WGUyH{0vJB6J=Q zIQf~4pAHWiaI?K0G3c9m_)7zBw%2bAI%a*x4Sch{e-ya8zONhjW_>G!DCKO{_jrN3 z>6|2RcYOl}+^lbnLEo(Jydw1VSpnq7)Wfhr=R(}m`qOQ|ml^Pt$KnD(>yIACm-0cG zz)d~8+<-UYzNUYJ0XN&_CIfz!f&XEFQ$E5PQXbF082Dy;eO};hIe*c>H{0vm2Hb3~ zks@^N6S%v*9x&i$dp%^(H{0u}BJ}qf_-1=e81MzCzn15T$0>0re@^CnX_3I$UiAik ziveG1z%MC6=L!SgY_GKfcel$w8TfMz`kMsqZkMYKe6w9XV8G3G`EU_B{~~aAyL`%k zo9*&BgTC1=cNU@lO#|O-m+u>Jvt8?s02R-Qjl zZgmF!y9~J5UY|1HW_#7j0|U}A%Uxi=R~Yn93g7~P_$Hk{1qjWDA0^L2-0gmhz}@X0 z5V*VD&oJ=KcAsm&&31oR5jqP6?r!%b2Hb4-Wd?n--QQ#2o8!*=4180cy9Ms9*ZU28 zvtHL5aI;=^5jwXB++DAa8gR2-pEBs1^}5Y~oBDI7z}@_R)4(_N{QCxc0rW!KrN;z* z0$}EN^ooIR>eYcF_(z>aZFBT(C1Op##(LT>7H}K7Nu?@J{E~5tAl;@ZMH{0b^1AdVq|B}<$ zD*1Mx+<=?oj^BWjzCJ5ynE^NXd7l9{`PnUS%9C@?0DcVmIcW*fHTY>gIiEKX2%m+Y zh7-o%#gF%NkZqw1JRmoG61(+8&opr!eg?I;B| zTjly(7u=F- z)`daS39GC8GnBN9G(C+^onW}ki86F**vKwaU} z3yBHWeTpE>GvU-}9X3ls(`l9K+gf4S(eZ!l=z>bl%J#IT)D073>X z4bJy3O}Ktfslx@=?&sKqJ`noY~7ryS#x4ZE5`%8TPk3rM#aV|Z7 zcH!&ymn>;GU9NtA>0*IvI=0Zc*@eGrrgP)NF8nVFzSbj6U%&74OBWsezEipJT#0$~ z`%dQbCc<}}?i5gK;A@+p?)(MLhRQ#+=%9}W)r07q1BNDnUhJGEK6=f20NGUZpr(VN z8wWArq^3ih%ap@%;deWVSFs4b`*0}ZI4Z^WB(upWNlIdeb#PKa;WP`m0w*U;5X^X*qk@$t^sy8gBeI56ukb9U05=p08o zT_fjzSg!QgQG;uCUFn9IxLE#`l3$O<%+LLW$>#5n{M!C9Kj#IL&EH)hKj#UP&7UZc zpSIeaKL8kqHoZPTQiZJ&KP+}fE;I8JeSH!Vq#4!#iAUb;ByN@caMqt?XnAPcbv9ru zU(7X9PFP*-#9ICuuJh~bcOX%y{MFp7L8x^iwdP@7onMz96qvjGty2DG$*-e;PDso( zK|%RReiynLOHEj_&QBw61YN!XQb`w@=Ndp&`K}u}|B)^NZh5kPh2(d00r?#TT(|tb zP^A11PO|<_7RkR&^6T{l&{S~q{}`Z!_`gf|ws)2!rmy+^5{q!xt4s2KS0hR+>|~&i zN`BfFGt{2xTwP@Xk#^VrXUJcu{;jK=jO9`w>rY*D*Z+CkuK?U_Kkkt7HNHbFn_pi) zTcrHempB+&&Mcp{R@2k){m4%i-SQui@@4zlA%_3%`tuvjLizVdeqD|xpv%|Srz3wM z`Sl6^mLQUUZoTXLn(pc%P5zlNEdpb2xIJrrOD z1dZ=pXUucJxs4E%I{EkDk^+`7-4<+5(izI+%8p zJh;n00PE0`o|8C?vxn|5Di~J0 zgt5m0i}eP56ByOWNwRLmzpZuG+{t?Q-w?VKN;oL=Z6W6iM;oo9+wOe=*Vj7!n!vS< z(r}N_Ncu(CdcyPx#&yB5s|PENp7wZzABoh}u8;S```h%Jws>ag5Dl&$9T 
z0(YI5b))2WM{e9=wsO7L2d)Lo;Lgu$!<1xDH%n?lV%B>qe!BizE=6sn6r`Vl2${UP zs%|*YnK`TNkhXGu!(rRXyNt#SD@MXNn{66AtUX`u_?p^FC_L^d*Lzh=ldlQ_`l2$~NUqfo$Xlit_Mnuu*A)WFec% z+dfY*8_ZVE55@f_xO=j54&E_F%d0wr1{ey_?!3_KwkCLQU+35Pbez-10EY2neqB!_RnYklHD*PC?XD9E{JWx(|Idxfxf)?^zHXDOZob_0 ze-H2qwO@k>RJRk`k9l=|4KFrkwn~022Vp0#u7!9f?k>sDAQk358OKL=`NhV}auLX( z#>{igK@uwn^U_H>|MmK!J~&TMCV#f6I=ocs6IQV^X7a2yIe(-w$2DL%eaJ3>JK9z+ z#!t(>6poM=KoHj8yHs||el1b9mq`tg%c$d$v1NE);sSLaUXm-}PP-)?hbP{r;6mI# z#2p!H)N#>|lMr|qMSrD#_3e% z@;SN}V8fP*xC(wK z`K`NGqi9FI_C2FWLz7H71T*t`<{hgM}w|^=sD#N*@rVJGF_Bm%XZW>4!hj~*(l<{Be$43F{{4Og;N@Pf*4Wm>& zmsJB=*2mt$iogd;8!KvVEK`2}nkvG+&2UOq3lRL08Sekc&XDeUe$Q6TJzCEAvo8Gh2S460>7&WoO{xR=p0uBzOD%TTSeeg zF}4+=e^e3p+W}Ydkuh261$bXn1fT2OLi9fXc%l6wxL^gGvj`_-G|l)xn~PFCThGq; zgS9KwXYNh-%Do<*8WK6*W5b~VzjP1j{i0ZAGYDU@%gx{FX2~M11>OKbr+Fs1KlaAgRI&H=Y=ZxUn0n11*4}w# zY+YMdw-!VZWzc>*^86+Q5Ap$jMkk7V<6pQetY~9Bt^K|2i+Yni+3)j@Z$}9`ov=(KKB50yUaD< zCjhSLpKrj~E*gKe0jK;me4_!UoHhJo2AuNL@Bq>b;CYXE2?FTOP&s9y-}c5p=x{ zckkg=$=~Ua$^3Wo&uh0#iLBod?US1%cQ|_w0ompqa}jr!e{L|8eV93ky@$};I>Dd( z-os|2&@d|pt@;1q*`_Da+t=IOyM9ZovoE<>%{~ui7V~yv->TSGQD{+E(2z`Dm-yjkV3V#W1n!(Z2(g5Bb)1LJuKgh zd!!Q}5<*6wGd8cgt4EBUc09Li#oQ=?4I0N-Zo}=SvlOhW?QHAs*woe^kG1#qv?u%f z<2~(Lj5T>4l^~ZxTn5Dl273E*7f^M1*xDPT!*=E=Dfxdh&)OP<9_jEGkn4jP{J*Dp z@M*d}61#P!*gSX_g@K@L_2JHg*E-qr{=01&#{}9*!|bR$9gcx5B)`t5<4@{@D|Xkv*gW_)$!EFpQ&wguHV@VU)%7~u zd2p>XNT*pC(r*5F?UpIC?Bzd%H~I`NAVl?kgF`L9{B!5&d95?OHAhG%?floz;QlcW zF7|$jyQe|o+TO={2mXep!RlRU>YaP-8`etVirqRp6ZZ*o+nlf~pT2bGKNC0D58r71 zztwqgomBL%JrCyol^NXf@96H{(AL#6>37xs(dQ4N&l|#p_ql;B8xn165hwbcxKpS8 r+g81fCj4XVn?LqZG8@f~Vx--DhI_?sdqwnf{zm#dpH-N_&Hw)cN|A zP=oCdRR|1p$!qCr+S2#Z*DbXF(v&XP1mZwjaM*WTRtYD?B@kSe`2RbzNLRAV()R!V zzW04xkZ0!HGiT16IdkUBnNjrBTIb3vlSz?Jw(>=VN`qhEkV^5wH7GJjDwPT)4}Y!7 zNenkP`D#k;_&yGjse)ScP#ZJw>AsOllv-2L^@8XCA+g8G{$}AXfvizhL~3OjnOX<| zk4*D;QOQ9{A4R-RPYVT}Oc^d>8_0$=0$OmFv{wYGxA&dWa@H3-MWmPaze=b2`$+BL8`$ zXNo)}OGN%Gq@=2LQ7%Ke6rT!Fc8K(Br03%EMSLppp=Sk8lp3UU_^^%&*lMIMd>Zjt 
zhtGO^F2?6e_|S7XK5l$AQUOmBKATvM|7k_ifsYp-A3lD3geoXOy^Lzr7!wFMOSO>E936#?mEje*>dp4xwpMk^|K?{ zA1waE>wi6Q?6M8-A3ylC&vnC!AN}cX``>PVq3n%E-^|xd{~23%#%R^r#~$;2aF;K7 z(&_Vh#ysz=|84Z|`4jh?)OYzeHokUSUUScd+QBzZf2s3y&%i}{Dld8CkH5I%j-Ffd z!b@-auSFBz{LymTHw*M%op*BiL))LaVZ{Y^eB%#;^P4_cJ@L!;Hr6lt{vVIsvZ4IT zFO*g6Gh1e^b#<)Ywg26Fzc$%>>7K$Bk++V1`r9}5KXa$)oBgYHe&ytwn=Q84>yIxS z8P6Vg()P{;tv!D(Zhc2vSao63$hOVa@7>+e|N-D7Td^JN)!R z$KU0c-7B+(Ys&1_3A5U?q77Hb=B?*?)lm8zFfTh zw1@6okUe|d9p4|TYyM^ZrNL#v^_%#vbejIQnKScWI1T<*@Smx_-f86a_oE1Zj^bO(h36D9tewl@ zRx(a_K$bl}L}nrIZ32F*fZxsN&rvMZ9I!>eAA%fCRgP13-)Lo!A^~@?ahaoxlhMSp z5_t5?f_#X+Wfd<=`iGbt<|w0n4*wialrLf7CH_ZhI6&I-uLJ*RWr4Eq7At!p`L7Xl zn8C#x3)-V6f|U5&A^0rdU?qf<=#PqpW4?gzn#19)D>>j%0l&;d?Vg~-M7tO!_FRO; zkl-6`wX(A0=b35b|Cpd}Yv=XlxMT@A`_AL=tvmte#LrwMCm;YKd0XHQh;fm0P6Gp{ zD*1{};GZn<#kf?`!iDEqo+v-T_+%QFj=7x9pr9ky=?&=kap0&&)R+B**#LB^{mc{X z4hlV#YwR%N^EhQ9O@Bg=Cym!=+IamH{E(bS)8zb9=n3)f5_+{r=$i^YX?(4;=;I;# zMNff{zg6eu5y+9A3xpgLK?lRfo?bzxQqX~#v1bAJA^L-29LYbRrv&z&=}8)&=L>zP zB!>deA)YAt=r7~Hg9EGr{%bj2t;}S=a=F33Z ze5nxl)*6ogV?l3-&94)bi8THXz#pgnrt9YqMZY$P@s;xVv*3TjrJV3{m_PK)6Lefc zo^2eg6bOA*Dmh?}fNz3gQM=N462q%3V)JoSg?+FHJM(~` ze-G%8Jf*y(opB0&Mk+btodW+f(chkiRQL%(o}*$uN=M@Zp>I7xPb54f;3JzjoomJT z-i;!S*EZ2Fs2_W*=m+tsin8SMYtjx>@fSTp&QZ}Xfu88!Z-sn%T>Opf*V{sG$J56B zG05Knentd8lHT>q&w+jJ8(W!>|D^LXFEPj|4UdAD*=Bd z9r~4?Es2(r`1Z1C3-^{lzE(`FU|4b7QM?h zwe`|qdmvqAWq|3_4N5n8g1Wo8**~>vi7kM4;LYpbq6;B<{k5)@uC5OJ?P@3`BHV`X z!JzI!j2#^v)JX^RR}XISc`~5cOWHbp?d~Se^ir8h+Wv`1JDR+mfuKLs6#Qi9ZNPSS zf|E>aRy4t8KzEGel`xCRUfUhb9<=G*l7fh_F466!qORZ5FS+ep}#V0{d)pe## z%h*VyG}EOBd!`3G?JZM>#9rzSLP=XiC9V#c^q*;2`hYrGJ?*|vF-rEOEq+g`z&@cI z?95n!PjoSbS6Zc#;;LYxrs3U~sjsw}rS&znzTJlL4q`2BYl0CZO)Vop%IK@E61&eE z2txORo(`JhvH}f0$xg?txgvyhu+`)CX)b9oGTPhPcFdL44$Roj5Ntxtgv?eOVs$Z4*X8)1S<^#+Gti>IqmZ?x$RwR)pnZ!BS!sX?}r$@WqYV0i7YAZWzB zv8~Ha)+ErTGYiC~B5ZYwmmz9XG=QXe{uF50=2i&DsPm=GpTzKI#4-vwBSM(9wbVJaA3E2VfIp|4Oed>CKv1j0^13+MVMx&iv-x54#GStF&Nu082y@;vw zKC$VR#QzD~ov2zmz2QyZe2jsYc$qD{eESLPcWKQ>8}P=29VMbuoY=&?FAMpbLeiv{ 
z*f(Jv#P*@x6U0_c8WL@FgH127ZSr(_{Mf_r#U6fk(7ma)b}2RrYpU1&D+D)&T3WFA z`&S9t9g;)Nc<>W2XfFx0CVR{}YIg+MHo@Q2Q`xeY32>{sv$@^luWfS?A}uz05T3Yn zP~dF_e4eJZmNxja+@Pe%*w|vKjHhWjUBrfMo2@aiM=PVfEwi+pn-gYc8dTcjCXI4t zB#1`ubosqa)!3aTeTQj~mjT&{P4n7-rwqP4Wr6x098 z|9Gc~_y1-wDMhgYZ(m0seUd{^-qCR-zeXgzq!ppJu=xG2o}b4ft0W@Sifk zPcpy@qE=R-ei`901O74tzGA>X#{l1F;KvBhGvK!v@OK&Tjc|(rf29H6X27p8z$*;! zHUqrU04JHsM>W84gpzn%0#14$&pPPkdI6{DV1#cl(3wk!;%PO&nYM@hLfWy!xo=OAUo`8a916&+=u(vJ)T-rW*vEBfudt36^V1Q$R zNIb0ucxM6%eKWva26(ptE{>Mi+a3cPM>mOQn*m;)fI{C4aB;-X-flO*vCt%*9R|3( z(?&0M8sKvh*ov~t0Kd=x-)(?zFu(^5aCvutUhgx&Nw?%PVt}ugK%`F@;71$aqXzh~ z26)T>Uub}j8{oAD_=EvoX@Dz2pGj}$8Q^&a_!0x$Vt^MK-~|Ra-Q|#v)c}`wUg%ws z0dAAXNNooA1qOJ90bXE$R~q2Q8{nz|UTT254DgE#@bw1x`3Cp~1H9b;Z#BSi3Y>U+ z2Dm(Xq?g?W_{j-u%+uZjd1@s0yBn>F+CLb~jU9s9r1svIx5t2l>}2_hCaW@eS`Gf1 zPpL$X%FIHy{I znWkRc%F54BnU=J;g_R$naz2$6R(_Dmw8X^6j{tDmy;QbPd6bp!rt;BL9%1EQQkh(% z_#i9)50z;O#dopt52#EkAK$^s-=;FTWbpx3zLm-JO_EVXbh6d^VL&rSb?Xmr!{jl?Pe*^Hg3$aczm^|&Jv~BM+fLZmSiEa=4AE*cLEwuW}irA_lWGC zmH`m)Bx};x(F5pt$?TG3_V{FWMlyTIZKYaNZPab09A*FFwo;C=ce|~WqwIgXt&~&t zw3tt+0VH~E9D-7#xs%%`afp% z%YVS@AEEj~N3zxMxH=R&Pc`jT_a6xsfY@R|Yz`;JWtvW}=iD~XQ9@^^z2}-y0W5`I z4bD*`=e~t;iOq*f$5f2UWqWfgfoM83m7l*}0;YSuA|TaPuj^m<-O2AO1knozi~q@Jw|# zXyO?)t3z*QtB&E#v(%y2v*Y9MO-{OEtKK*2lp0+zS=C5ddcQh!5aPOD-T$HL_?tQU z1Sn|XL*`R@h?W+uHaVk(iaIo!qnak_qt3!fXGB>QJ@H{@u$PSd^W{`Z`Y2ZPO;;YtGW5E}-q#hT@>`V10Oqh(zW+NthPYCoECn z>POXZ-j^<`x|HN~LDV_Rv?_|1kt}BzQ*2~x8(31KIqz!Wznl9uVuZ#XPRbiY$ly7E6VEk;-~-SvQr)46Y&F`7*|-N17|T&1 z98fr*hMxg>XZZEl^<@A04~AaVdjFQEMRE$A;X_&^-?{i@`(eBXB6|^DnWqlDmmO;$ z^xpT)Uww>*w(7E~%h}AUT1UeK{p~$qZB`>Wr;%k>!AgOBP<6Z;Y<5OBW02n0!f!hr zhl4cr*nHF?OV#D2&hU7^qlWL*BG=~CN4qTQ;%CrU4#xS~Q^dUOjak%49u(N5hE-(M zJfsC7@K>sazmiw)csukfHG=k<)l%Mi$OAvfn&6Vt(IPXn$T~FAIj%AxHWucM9oI6T zMQRl&4pg|-ihKdmA}6g8Z^nN6kIBit!O-*QRiW0qmr&3?gp5qV*w)yc6O)qzRTpsy zXyJF(Hb6NchoUKRz#P}YLo|0Z(~!3RA5OS=(Sue zx};2vT%%~quPz%89HaJ*q5IFPL$BsIO|Q}mvI9=jsMGN$HC&-aLXe6t@9N(sW;-St 
zp8z!5RmY>j)lSEl3Ke&fntb(Yxu#L))Nn19K!A~f1{q~(?*D+sv-iFtDbFgMjl0^r z2OJ4i&s($A{+VJk)ZM8V--$PC_~chC>Y>qDEBHhw%rnu4t5(T{=IyWJ&x*T5TRYY;2Fl zTMJJnlpeCL%Tvw2s6&T?g+;W0^nE!8B8#4QKO~0{esOZXez6sRc?C2t^6DeY)X1es z`wyG@4nhgUeAqfj=v3bdpw|1vMUX!x#r-C2 zxi`<;`!`e_uj;K;FeWg3qd8jk$~SDSEvHTweCkiay3%j@{6yxXwS-gkx7IkP02I|tMENU-ow-TI9jpj|xKa(0| z2$9CZ0_vh_x|e#%glE2fJ_ZtYw?1+gt+FJ<{y_|rlWIF1@0>Z+C&&MjSf_P}P4wuTCsc>SeEm0A=SQGUp;uMM3&FW6?4P4nG51DTkFHTB z*OLBNoQ`)w-=Q9Irp|0GwA7nkb%tBvv5`d}eHuzR z%v&EPBvK>H)yeqXv|5B8jh}*{0c&7f_%Z6@5jFZ{NVsw`UIa|daUgg!rY?F9Lxd^J zmNBjOK1=*EHf7=J+|vV|*-$v6J_zFjF(SpzD}1sw;&FZD_Dqkv;MvDQ8nI-otUg) zjo$^SI>V#XE*eGC=Kfle_J0Fze}tq@`l`hL11=_();ReX&TuUL6&ei;U~=^KK*Qqs zG&s?77ca%$=b;ND_O4CwbVL!oG<|e@mE2X&sJcD9vgdp0Q(ojAzDDB za1C}_Eo$t8ao|KqUGn%k5kA+=h?TEFvS&c;KfIfmtO?gyRddZa?8WY8l#z#R+6;MK zWPv$-0uJIas$)OA*?qPBgKGGKf*{l*_uz$KY`D%w-BiPMMe6WO%F<>=%)dzPUx73B z%m82a<@(G#6imyLZX*i4%a!1K;?W=xfgk>h7JdTW3bu{+RDks4{O>oQM*kp|j^Mmn zcx>UnJHs#WePrx1h|L*Zo<_>Be_;Dh9<2E)9I4lH)ads{i22Ah1*~Yo`fwg^!Q3~> z-h?ncdKl8(DB~HQ>5SADsL^u@;6gjYSX3QPV7<4PZ@3F@weVZo&`UY65W%w`-XKiN zz4iyB-u4ce)L~c`G&#J2sRwMbMRhzBdgc76(=x6(_60tV8VP)QxnB*hnb>?tT|A^l zZ}|eWNbP-TygpiEu{gsIYQvSBPPiLgcnQkUT{K+jV@;UeQ-^!R_6H623=$L%Y@&5< z&pwuAV&4;6@i(li*tefenv>XLCl*#LUuTHXrB!UB;7=n{rA68c3+kizqdH>H#o$SX z@n1gK0L!)qy%-Dqouk9Q(IU4O5?}CatM>L45+|nE%?HslEU39!__?tkB?objfgiRv zdmkfcNpb7)?1M2IdZ0#*)x!7Dp11#zpbDRNIH!<$Iqo#y`GY}ZLW}lZ&-)FtK*k(? zY8msALvFNaAr0KaJ+NDeLC^{9rn$G~0H~Ac+(Bg22v*ClF$5UOB!6nyMAj2K*gKC6 z9R<@f_HE%$)+%E!#pZ%V%+e(ObiO*x;SH<#9Iha-^j0Xrg=DJ?j&(5@W{$OCzLCwy z{UU0rkqX)zn_~|!#xfVj_;9@kDpCff2m|vi)Yl>>V`$BNv#}VE*LNJB-)iJLT*`Pg zPI7fT9~?8^)k{OZaeP9emGJKeuD5F-c+-%fk-$ZHT=Tl2>#GM z>*6<{Zo^F8RF_XfEFL|q#OsYcB=!mf_Z<@(A? 
zZM1wxF?z)3(K`%3_n}pwVVFnxV9qd)%xIYA(Qkk)6qq(ynl4KcSKEw>qHIvVE(}(!WXXq0&L$M#GarzTemi|Y~eJ;FLqh8v8 zl^i{R9N15h9137*um|90-MAWFi6GLP!uSG) z3z-~X-YNoCM1JfcL>?f{9Js~^_S~=SKT?l53rC~g^!iv1$ybY*>#<4I!cVK=H?X;e zMrgfnT4Gi3UBGzgSqlEJ7)YcZ>*K+Cc;N^EA*x2bejU*c3;~)#Z3@nyoEHndhJN;b zc*oVR+x0C46Pa%x;JABUuVIP7pPxWwHJg53lb&AD7;ea+py>U4`<{u zXeJgqvWN+~%+SIxn)_~{S=M_E#Tm9z78ESC_6M9%moMQ@A*zsNa$@ttf(q5(n62^` z)Q7Q2Jc4lLwP#a66EP;*^-yejTy<0gtnuquo0v=c)&2jVU9ot%ykc`e4L?Y3&dVx< zk*hks82qvZH`VE=&)a-+Jzt=S@BRzeWczSzvD*9k5zX^v1F0sc3swh2Mq_pz@x zM+Zpj!tab7CC0Zt{0<@$I4UTk5Mge8^qd?m8q67oK2AQ~HyAXNMZgwD9r|kyUe2ix z$F#^YjYik;!uiph9QZ9-g!W+wC;sv~86T=1+v-3RKry<6Fxa#-hyawkbqLbf2Q5(LGVv}%bCg;Ce zWmT|ugEd-CPLt_A3i)c0;}EDMch(dOVlR#GEW^+o6G80fu*Ag?zj^?hJBn}HS!}y^ zC{J^YAimu@oTs5?WH}C5vsXvk@3+uyV#efrKblg*XF9`I<~hSnY`@P=&#*!G#X>~h zu6ho3f$VSiK6WD}ZNH22wOadOw%w_jH`Ujoxsh!lj(4X(I05mz@0!El)Lx=h}ZbierLx~-!h+3KZR?*vG{Gp_Z?{eUf z3|EPbPnFmRVca;1*eF$rjZ;;HfLDrC6{$<4>qWXjq^%|ipxFPR-lW}ixCN0ZrDGCQ8kPKfN-aZJzHl10wrHd@Z2 zE*uPPBbQgjg@W_Q{}Hi97Gq>##1PJ$_a|bHLhkV43zC5_3dLH#1aD{m*b~3Cpuza1 zv>3Xcg}?N_K>yE8=>IeSvi`%Z`DpzQcBJV3i17RFu|RH<^XGyWybpTGMWHbe;TWO( zK?j0}^xc7kePR34-6FJxg-PR6#H_mk-_DG-_i%h<*2IiZmGF zjkNWrSjZh`^H|7pPmvcy9$(^o;No+@bGUpJ5}Xh89?9-H8(Vpp#2M^BV7QKGL$T@# zL~0dj5lh2$HhBsF{o767@cAR-W{4~)i+4s&Itk6QsK1%TTMhnD_``cDL1%LQBg>iS zV1{uP$!>|oP64wxYh_hL{De975fRG2ZkXoG}(%`V_jgr;cI|W#CQn>h0d@)kQ|SXh}TP@BefD)q+A5Ul{QhX5NRc)!#&$5 z!GlQr2+vVgPXE0d+NQ`8p#lSoZcg!&n4x{~(UIQyw)su9=6i$$*6rmR}9yf=0F$ zq2N7^EW(EvM(}f5I9m$?NI@VnK4VWXJA^Xd2GKY6rY6? 
z5Qj{#{Vo~@+t|(w(q`Al_LrbP>HPki_}mHgW&NI%_X19O7r`RA`%#5mkW7+(?QtXNJWl%iob)e|NYY0b-kY4C?*lEi z1dy=<$?>QCF8mi#E4VLkWB5jQj&NX{hzLEvT#l6E9&_I-(5`xf&afH9g@t-}8q3+Z zYmO&_OSP!iqD8+)CXeFx1#I7)IMN0vgxDQb!$d{Z5Jek;C#um)EZXu*3ybK$S)LMU z4gf+pq|JS=oFqrl4RhaNw64M6+2Gb$oj4GNGj}++-Wgd9WhOVv{w&y*s?3}i?B_7W z>B>TQh8kUks2eCyNbwMJa)LM}!bY8L1`rO$-!k@ieLuXmheA7v*#e5OSu~vCvN=o{ zHmNCNr5QsRKHZ_}RH!8A^l|sx#fA<&Oa#y&Os-)|eH30(qF1ieUU8S~U+B#)jk^GJ z;zmXX{x+SsjUsD`bvz2i!igJp?$5C;9>>zhRzy>VccV9?zI!#V?Xq1ZYCLllOB zZD%tC(-@Nu^JeU}SQd>tMwg81UGurl{G91R?!)%9(Y3_bcqT@7#=~rMzXv?%%q}ES zXFlMfgj8v4EwS5yiFdIYP)_(C8SX}sPc#5JEjSiT*;6NbcWkDIJ8a=denzi8*olLxeW}9-yIrBS>WbP6QrQh4l~B{s2Vk zo(EudGHZzUhd)%WJFp!RH@FOah1&qYv3L|-S{S=asI3bSXhKcldYRe4G8iXBv31zo z_rX5S3cAX0<>7gHH#xtI4g1xFE@Fmntd9psi{?B>tB;HRtS9x|;EZ&@K{;sddlwbe zDC{JHh(L&5U8pks)*}5(?a7JR0~{^V0o}d~s{Ja$*U#hLK(IFHk428f;z5SJGI%;E z;(B#C8Zr0jI1?nt7#5zz-q%CvkS7Fo`QZ7`eYi8mrmP$5lSZ1SMSogITUBaeh0yN= zeKnQ-9y+G@IQq@ZCWL-aANkn`dP3dqKZ(tgOAE1DK`yw)>s**i- z#pNM$-;*RB<2^Y9^l?Uy059$?rQef#97h-Op4?3xAVFQ}p(Y_#7;_vNEvrWq*nIsH z;9HAaglklFxc&A7?#CSs^rSpf?@-lmm5&{i=$Bu4d&5i#Y zRL$)B3aRzQJ*@j7dECPq3*FB<9f4CB>9&$%fx+o6qD&Do&_NvSfMe)A9B~->m9(ff_fIPeJwIp^iwhSu}C$poI9hxAkV`I>$P5UJa6u!OE@@mvmdNq z{5rq3SBYt~ygoeYM9>I#nak;LWIsewHi*-Xz^U=Ws3GkeZqok)Z}#Dh`TB!+L5C%D zZ?E35*W7mi0L@WhzTsthgF9+Ca7TA4ROTqt0Rt`?M%JOL07t*ZU7~&FzDEchiU^^Y z`@N(RCM-SddKc@Ux$hxH5SEqS(uF)&o&xx}tmoti{g{wQAVQh&*4+OtTemS=nF!SA zdVcss$97j^sbb%=lIM9iB*GnnSS|D?UQO4~a0@;>lJpPN#rvF5Tp_^M3*!4hmtEeu z-^9L~LF)tjt#@NHsrHX>*rrBS@GFU@Qjc&p#?M$*7P0Ss(e%WPq!@1cW6%P}s!R=N zG?#Z*nEN~!U)Bv;MrT3UC+9zLmK8$fMk?WsoBNlNCR}$P*)qc^AqJDClHqpWJ+uO2 z8^Uh)O~!l=;y@KfE?$g|!cP>JKB1cC{>1>rb^H>te_G2 zJWyLL3T9lr z{|!uEa{uG=By48XzQ9&)E8$WcF>+Cb8pgyYAJAZ^-u>ngDetSgFZR2u^DVP!iq z7XyP$(P36I_j90_vUBmpmaz8acf3{)Qw#H7Ura5RPf&tOw&6vwT84%z1AA!U@WoyQ zBKyt-t!G;JW9}C%+easjC*xWXeA&>6@zXjSWs>^X3pk;vdEB+ddA*)G;m<0r6Cz*M% zB_KrCn_z|5@{hM-?z4W<#bsP~MDP1&uyyU6!q@|-l5*!MP6~!tJq#cdgt>n$pW$L*X|=_#c0y^4gLZYZF$IM?u#&MxEc=11LnS7 
z8oFT~nZcPl4o^qzhwYOHj?_mk#=IQI>4!7?s1siR6A^Rb8;1-Oz-grA6l#jE@q~|- z+g>`)mq!u71*y!>D;#@Vt`AyoN2Lje{q^CeL$_(e%rm2YkUAtIBJ(B0?A5OL+6&fd2yH_dXICwy7gLLG>q48>0F{PW3*ZrI=Jp?CCv5s_zdms$b_+ ze`KKgci_ct#~<3R((=)a{m_kIf^Mypf&AEOEdw8Q|Fj3hTCgA5%@}7RX}B+U32Tw< zd0@X83YPoCsvR=im-`%X94#N2NnL|a`*Q4lf<$s(Ta>;J`hW;w1?RDWlX>TU12l+i z9}>9_n#cN;*a!U@h_ny74B*sw_s8E)WXnQit1$fheqx9kp6-6)2JnF;)p$Sg8-vsz z+)tbbEl9baxSUBQaX%6I?IL|cs*1Eyq)bl%XL^d1=_gXFcwZn=i%6j;L}vyi$@_oN z+>60NvK-01ZXsBrB^>u-qTn+*`BDx zl?9*wK&(VIBdj2U&&0JrWiV`XaSbIdr)+VTmyBl3bu+eiSmz%dI_i%JjR0iAQ=gSO1X)e*S#m zK|@ccgnKjrpv6vRxHw1?vo9YKCD-gOz@=Zn{@sYbSglH=B9>3ou*sxVwtzA@dn|B5qE?bnzpBTpNgEb!8!q+}L}>LnQY*1f(EGZfrg4 zL+<`lXgrJPE3fCqUI7fAEUvQct&e)3J2*(dS??&BeQ~88aVol1@3)xye+#hOKhf6= zbKgnQs)}vTOzS{qTK5yJ#P<)>FuoTR$&DK6shR1;Af61o4FQ}&54)q>P?DZKHt~lH zeGRUhLMwCz`A=&LNZ0ZbhR)m{ppNwNuMY651oZ94HE@$23Eh3qdbA(=HHkQU?=bR; z!p2k_*g5gF5rn64*8yK*W@rPH04p!*XDJ0Xc_ zc}|Z16u#e(^kaj+2Pv-elkgEKEJRwr3nIo4-+huaYfQ-l3^}(DP@(oF;v|y$%!vYW z41^Abgu%$Bk;3aUWrcYv-9AaYujb}AlKYmdPY4K9NCMTIP-OlrP+*@nyF@1{Y!L>tcSIT^V!(U-w|i?RZtFRvDpu=xsP5ehxULgy6aCi0p%`FQ_$-_ z`)s{l_jK@o$DMxBL3f9`8Gc>6ccZ)As=BYjKU-;adD$-~SR3lsSiKvsKwWEFz=~fE zu(sh}O*S5Ixvc~2 zm&wT35oTAysJML2%ZFv=XTc={VADNNtEo?;tqRJoL+jvyJsgtvCnVN&P`e zzkBCGng_mpNOuE&2ads?0^T6fairr&E8&bPxbEOXx(oL-b|KyI9QZ#9{69ZAN&kY+ zju#;(DvyFcO8<)fBJDwHnT>uSEkZhqv=XW9CCC>k-ILvcbi=EV6Ve`B?XH-E@*8-M zv=Vo9>37I1Z-Gxl1jY}cA4q-gq8?J$1o%a|18E+{Z4_w{(n@^Uw-TufX&(6XA+;hM zKst_;{(TM~-do>?ypYnrw=;rNfdL#RIMO^b%6JfBXOHQsHHxX*Vme{Y%sje_TL|ch z_-wliyhD~szGY>8!7B6Y&3Qe_c?*_*zO-;5Riyeg`1C;6a2Szz2!1I(+WK6q0{@rtTBsZ#DUri!iFB2loNK0dS$xfN;HePT;pO2>){_lD7fh z1^f_!k~}BnWaPQ>!i;>?&Auv=^w{s!o&))hfIj_pR1O)Gs6hleVEPgd=^g3CZos!9pF;);yhdqEZzNsangb#B(wJt2m`XsYfB?lMB4zkI z2fBHCCnxt10I!UCUWaPyj>@;964!PiXsY)C+TDSAbPFiw1CUFrTa#aTm9gq7j1lRd zGY9{!9YR$-sBe&KCS9n5E>vb7@fB>uX`bE=dg~FQ+=9G8hO6?eS(hY4sB#4%{67PK zC-8|EMN7;x$h-@^+yja4P1pg_p$~w+5BO&jKBUl_b>&fF+B5m(Kz=d2|1nqth8^H-b{AHdm;P0}GbG&wTM@GK6;*orAL_4j&nw(sXynG0L2jGM73DVbv-vdr( 
z&ZiTyJGm}A26*N8)OFzi;40u&!q2cS%z=-v6ZAgmx^NEgx4|d*XVwLp-&dpl1bmp6 zh#$x`v)#|S`>0fl&jeP6DtBO|*);v6cAo>P57#!>{9ecF ziMFvO3A?fi%gq+va>B5zoXu>085DRN^(#rg*t|JE-Um(q5dHvDBH<;+Sm-Y|G?V-ACKm=VptcR1H2XR^mXBFfzRv&)xksZ zoC6cP9rzjn(qy?h;TD`f+aqlz&^Bj?%M=+U*LPZnzL0qxs%8sI4M>q* z4B(5R^xK^|-^Jj8rkF3V(KpgMyp`7B-WfFz($7JT{2QZ_rZPD-g3iA9I6K@6A#k&*Es3Le8x*+s}_==l(KB`B_fxeK~|M=O?+j zkr~Q$GjhK>gUXiRP2l>T?A-gZl_#@Bd&I&$rri5X$`hi!&$aaCVHCeM2N=Jba~`sH z&CUJYT%vH|Yo@bO*9A=a z>hBzx^b$&uaC)U@w?VWI*ONWt*q+iOWA80T^D^7)AS>I03(21WD@&y>T_Ng|pG^<8 zj_hGRIC2|`cv-@CE#hU#=PpqnTOszy_HabO_#a%z;fRK_vg`-kc~;&FUzi>ob+EFe z!=a!3olXMQs5RrOCxkw}Cep(qopBaNKTf2li*%_-SBi9TeqMY>L;Eh6m_=~j_`Q=~n&SQ$a7&q^hyFQ%s=e`ihpt{Q)fb0H_s z?GUN7OZd&fv#mvIH{!P&L)LQqilFVRr6E?bUu$1lY{SnA+7|N{))Je2scosvA!$yR zW|P&E6WxGBzbPo~iUF)-<3DLnQsFs@v^%NrTt&{`RQL=<&ihpOOhwv{RCr#(&ZNR; zDbl{C!jDo0gNb_2hHsMv1KLGz<>o~`(6$}nC(0z6Z{e$Mbu zP{xIPAsvoEIzjYlY5O32sGHi5@vXUxEX{;7d82ok=50u_+VuVP{&uC~Qvf9cl2h8T9Dz@@*rTHyZ%4oN1tQH+h~NINU#Rw3Zh?n=3J2>6Jw z$5L*`!NDRrqxHN&DK|Q!BKW`s9R8Bv=S1*B{T)o}?G`^V_oTtU&)`$-=l=kH4907u zlh-xc_lbsYa0L2NJJr((C_ z3vj^60v?-&&OB@!h)>^n9C53_|2*Ie0k^E;fMW%`9`NH(1KnnioWIu!I+Yi5IGt(H z^9{g{hP{mmJ(u-f6>#M?E5pMyXV069YZXUicLR|9 zCc_t%4axn0lbm}5Ov>$Ph7Y~EoWGzmI(j}7@PR5`?h*QS)?7ZmBQ+c@$Lk^n=P={n zYXGNq4t|35x7_0U4{v8i-_N&gfJ=hIfl;q2}P zvJHTf+@4C4+Z})}G!hp1K|#kN6oKw~(epNg^Stpd{hl_-XHf7T5--n{a3LQ}z%Lc> zo(2w(^1PnGjg)!*cEE}M(KP-KGWb+{3sjTr+hBmdJ4K+B0#1C6+BsmpfUgtq^m%#> zg9{`>@@;`{5qiEwyn9T-{T%Rsfd2z8g1%D13C|Vu;|w4E@U2z`k^DO_aBM!db3|!p zE&?1XG3B{Y;E#)SknUp8^B{xsyz%dAfRlYt1yI_<`N#2gtwPV`j^!)?Pqza#fM*&P z`h`uX#FQrrILUKV$g@CT>=Ja+*V#uI9oRP^2Wfxa7w`?2asqV6gPuij7&5iHY#R6l zfYUhU-DYL=<)21|5C2x^zijs_fKz`JVQ;Z~v**VGUkUKC5Wn)EfR9vi_?#HUZp7rIMD&o=-k{okIZ zpAQQ<>Gpq|;h&&Pr0M5ExRS(Yy1!H*;OYL)1%O-7Zu)##$M89C#=n~ZCwsCXt=(@3 z`h!9r){9Dm)6jWz8u%FCBp;WMk5$kyFUYJvj%nb{3_dkJa|PfuzqSdwa$YM<{$L;& zYH2BMQgnS)^%~u&H8kqF((LhjHnjzU9={&!(3{%5ot^+Nn!Wla_LT~~8Q<#&uU_O-=NjkP^MldBKV*r>?Ko 
z?RH1Ar^Ov=5287bNAK`-c>P;yU0oklr^F`fpb2l2tki)z4MBUUuDhGB2nB+AtHh88L)AdfTzr)?Gw?T+*NK13Mx*CE@`B!4Jh7w(V*<=c%4%QZ{1GHLt z+6lkGrkCrw*B7vt>lzJByC)D}^lNLo8udn--cYAE+V#c~y|^vlcNdpBIG;;(NHys9 z2E6{D-tP7Ozh(8~_-t57tb$L@WO=C^0H?h?*y{Ig);--#9$&D{+o`Q?(8W-e<9k3% zH^ldVAe62P8gLY`iKDp5+ZhP@{9a%2GP|MY0gs=E>C}DACB-h)$Zq$0f^B|J zv+fJ}YYk(VDr-`6CTq>La^srw8%p&C2OAqmzM+PCWNhA+Q9(YY?RF6Tr`o5HuazU8 z6vEPSDcOwEp$zlasMH^cTpCE3H6Mw)ETH?mfgq+=(9@v@f^If1F<++Y;m1kjpArWj z4pK0KEHEO{76d{Ybxm*e8I1}g#J=?PX)VUoO8-Ah#?)5-eKv!XCgWHc#^TeMw@=3F zr!*R6WZ70H&8e$PPV0|u&PG$#?#S?cL~Ru&HGADvHf0l7e1RTvBYb-J~TS$yH;6 zRR7H1gk^KZUh0R7)CRYUaa>(e3ys%PxGpZCb<;qLTCK6Oo*SU{^u@TQB#GkS{lc)= z%g7V5lkeg2V^HiR0U^&CXsOXxlN~fRl`_;N_D#OzP=g{?mChy)`~w>EO`cBpSa7u% zILOBw&Y^Ej_1a`JQ%1s$sn+T8dz;`!dHvFX)HijIuMl*@SAu4)>eAN;SGc;}9SD#g zd`YLfBSmfop~ssAlhx=(%rL+om3>#mJ$?Gd;%QBN_bGxf+x|UFf zDFxix0GzumDY}X_OLTjwXw&a$^7@;TqF~-JRj#(zc64+Qe-2_*4{q^!fJW{*Se)wO ze%yNf@9$VvcQuP2>B9PLG^%m6e!eRVy^zUR+vSVGt!GQWk6v=*Eu9@vxV= zgF$ywtB_JsU%78Y!uZ5PCM9np-v+y+wx&Ss8WLiebat>brA;@QA{j^WBjyX%4PPU( z>**6la8SX9KGYPf-DnU)qRUI!M#rdk)2aoQQjS+*YsGi*H@p2F9o|z@$nW=bHf>4c zz1Y4?(Y3~UJ-JVW0jFJJqkAKn?Q|RBkTW)AY@{5QaX-|^X!@>fip8-dN6;Wa3}#c( z-b^n)DWVcvz!Ma^y5t0`b**%Db>MFo^f_IbQs=7CVhJBo&MSiyO7Slcl-SdpVxc(H z)5dilWnGON2$6jlU8HbNEY@gVAm)NBB=EEZpBp|F@>fqky4dPn#f4EHGPET)yYI))y3Yu*`K7B*oGPm0^2|MX?|t0k_jc3 z+T<|c8%H0@w|$x1WhOTFOm(Ce5b(6OBzZ9`|Nn#_Q+=PLATMdurkJI4la%x%s8drG z`h+Sbe6VtIXRu)S5d~>N-;&cZDMz>(8Rgdybigy{z|aOgnT=zThW%3*v(l#KfWau( z8WT3Oj8?WJi%FzqVjz~)yF1;RJpQGsw;jf;nl@Gf;@=2H6jr7p$jxE=ch_`mLrZC!TSGX&ZY zM}=)-J{DSQ@zxq08QFa?%*@(;8qbb|h<{n9sr5>|1<}!@ElF%e49bB<49n!SZn-e3 zr7)PuIb&du=5-q1Q!EDG8Za9!EFbnCGQO84fJV(I)hVQm(-5ppc|XlsY-f4_#YFp= zyuXA3PveF(yQlczNe4Bx2cFJmgFNJ#^fB!x)Yce?ETh;iZO}^ehO%TZV#*#S&7fg$ zWUJHpy9PBd*a*2Ro#MG=oYskP$}|kdVUn`Vs6z0NMdX!v_-S4FWOkt4yLpNo5E`21 zs2L1&W)ZOMpc6)8ZNSq^+rrgeV^@S+jayyARN!C_g2U?Zl<}ID5 z${eV#5vu+n$SI5Tp`5WMe)dtNDC!gr1GAG1Q#Q77~cVr)#UwjneG!W#Py=Y{WDp= z0vTM%O0+NUvCGs)FY!>jL_?lS;+U5`mH10=+5QF#11Iji%kR~2DBVF!eds;`eaA)C 
zm-pgj`nEuq{7XEU(lIuDXGWF>7xD@+9T)Y9f4Z_G`4?dJ@4C=CSzq3>PozTNvVB=! z_P-N#31x*KD8Kh3Q~5n1s*fY&MEkOSH((?ac|T6x+m|Vhg;O5EP69^P2aNUQ{eGE_ z%8rQ|Vt8=2&Yx@3>dWr|$kZzEWFq@7%QqmC(*76to5cO-YKh5H*{*~}kulbn-z$*m zEQu)6^#0!_>dS7*?;ptY;k5ec?SCh&zWknoOv@zUY1*gz7R0|qliz2M=>c5*Fh1$> z`xRaq>&x#w$aFrvG(2fV<$k;)n%P1y-Qu21_l>;LfT1pB)7$?OV7TJP<}kfauP^nK z-qDj@|4G1b^)69ge&1q1w4aWjUVa&MsZVl#Z5RA+|Ah5pY4zoOmmQ*h58fM}^!DGu zOPYJKefd2PD=8)(d7obDovbhQ=z8E7`Ip}ZvE9t7D28{A+lcywdwA&ti6~yC6`SKh QciqA3e>Kejr?>xq0jLqGF8}}l literal 0 HcmV?d00001 diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/cmake_install.cmake b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/cmake_install.cmake new file mode 100644 index 0000000..f9a9d02 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/cmake_install.cmake @@ -0,0 +1,103 @@ +# Install script for directory: /home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2 + +# Set the install prefix +if(NOT DEFINED CMAKE_INSTALL_PREFIX) + set(CMAKE_INSTALL_PREFIX "/usr/local") +endif() +string(REGEX REPLACE "/$" "" CMAKE_INSTALL_PREFIX "${CMAKE_INSTALL_PREFIX}") + +# Set the install configuration name. +if(NOT DEFINED CMAKE_INSTALL_CONFIG_NAME) + if(BUILD_TYPE) + string(REGEX REPLACE "^[^A-Za-z0-9_]+" "" + CMAKE_INSTALL_CONFIG_NAME "${BUILD_TYPE}") + else() + set(CMAKE_INSTALL_CONFIG_NAME "RELEASE") + endif() + message(STATUS "Install configuration: \"${CMAKE_INSTALL_CONFIG_NAME}\"") +endif() + +# Set the component getting installed. +if(NOT CMAKE_INSTALL_COMPONENT) + if(COMPONENT) + message(STATUS "Install component: \"${COMPONENT}\"") + set(CMAKE_INSTALL_COMPONENT "${COMPONENT}") + else() + set(CMAKE_INSTALL_COMPONENT) + endif() +endif() + +# Install shared libraries without execute permission? 
+if(NOT DEFINED CMAKE_INSTALL_SO_NO_EXE) + set(CMAKE_INSTALL_SO_NO_EXE "1") +endif() + +if(NOT CMAKE_INSTALL_COMPONENT OR "${CMAKE_INSTALL_COMPONENT}" STREQUAL "lib") + if(EXISTS "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/lib64/libcds.so.2.3.2" AND + NOT IS_SYMLINK "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/lib64/libcds.so.2.3.2") + file(RPATH_CHECK + FILE "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/lib64/libcds.so.2.3.2" + RPATH "") + endif() + file(INSTALL DESTINATION "${CMAKE_INSTALL_PREFIX}/lib64" TYPE SHARED_LIBRARY FILES "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/bin/libcds.so.2.3.2") + if(EXISTS "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/lib64/libcds.so.2.3.2" AND + NOT IS_SYMLINK "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/lib64/libcds.so.2.3.2") + if(CMAKE_INSTALL_DO_STRIP) + execute_process(COMMAND "/usr/bin/strip" "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/lib64/libcds.so.2.3.2") + endif() + endif() +endif() + +if(NOT CMAKE_INSTALL_COMPONENT OR "${CMAKE_INSTALL_COMPONENT}" STREQUAL "devel") + if(EXISTS "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/lib64/libcds.so" AND + NOT IS_SYMLINK "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/lib64/libcds.so") + file(RPATH_CHECK + FILE "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/lib64/libcds.so" + RPATH "") + endif() + file(INSTALL DESTINATION "${CMAKE_INSTALL_PREFIX}/lib64" TYPE SHARED_LIBRARY FILES "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/bin/libcds.so") + if(EXISTS "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/lib64/libcds.so" AND + NOT IS_SYMLINK "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/lib64/libcds.so") + if(CMAKE_INSTALL_DO_STRIP) + execute_process(COMMAND "/usr/bin/strip" "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/lib64/libcds.so") + endif() + endif() +endif() + +if(NOT CMAKE_INSTALL_COMPONENT OR "${CMAKE_INSTALL_COMPONENT}" STREQUAL "lib") + file(INSTALL DESTINATION "${CMAKE_INSTALL_PREFIX}/lib64" TYPE STATIC_LIBRARY FILES 
"/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/bin/libcds-s.a") +endif() + +if(NOT CMAKE_INSTALL_COMPONENT OR "${CMAKE_INSTALL_COMPONENT}" STREQUAL "Unspecified") + if(EXISTS "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/lib/cmake/LibCDS/LibCDSConfig.cmake") + file(DIFFERENT EXPORT_FILE_CHANGED FILES + "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/lib/cmake/LibCDS/LibCDSConfig.cmake" + "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/Export/lib/cmake/LibCDS/LibCDSConfig.cmake") + if(EXPORT_FILE_CHANGED) + file(GLOB OLD_CONFIG_FILES "$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/lib/cmake/LibCDS/LibCDSConfig-*.cmake") + if(OLD_CONFIG_FILES) + message(STATUS "Old export file \"$ENV{DESTDIR}${CMAKE_INSTALL_PREFIX}/lib/cmake/LibCDS/LibCDSConfig.cmake\" will be replaced. Removing files [${OLD_CONFIG_FILES}].") + file(REMOVE ${OLD_CONFIG_FILES}) + endif() + endif() + endif() + file(INSTALL DESTINATION "${CMAKE_INSTALL_PREFIX}/lib/cmake/LibCDS" TYPE FILE FILES "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/Export/lib/cmake/LibCDS/LibCDSConfig.cmake") + if("${CMAKE_INSTALL_CONFIG_NAME}" MATCHES "^([Rr][Ee][Ll][Ee][Aa][Ss][Ee])$") + file(INSTALL DESTINATION "${CMAKE_INSTALL_PREFIX}/lib/cmake/LibCDS" TYPE FILE FILES "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/CMakeFiles/Export/lib/cmake/LibCDS/LibCDSConfig-release.cmake") + endif() +endif() + +if(NOT CMAKE_INSTALL_COMPONENT OR "${CMAKE_INSTALL_COMPONENT}" STREQUAL "devel") + file(INSTALL DESTINATION "${CMAKE_INSTALL_PREFIX}/include" TYPE DIRECTORY FILES "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds") +endif() + +if(CMAKE_INSTALL_COMPONENT) + set(CMAKE_INSTALL_MANIFEST "install_manifest_${CMAKE_INSTALL_COMPONENT}.txt") 
+else() + set(CMAKE_INSTALL_MANIFEST "install_manifest.txt") +endif() + +string(REPLACE ";" "\n" CMAKE_INSTALL_MANIFEST_CONTENT + "${CMAKE_INSTALL_MANIFEST_FILES}") +file(WRITE "/home/ahmad/Downloads/benchmark-fuzzying-tool/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build-release/${CMAKE_INSTALL_MANIFEST}" + "${CMAKE_INSTALL_MANIFEST_CONTENT}") diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/VASEx-CI-2/cds-libs b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/VASEx-CI-2/cds-libs new file mode 100644 index 0000000..fbe4dba --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/VASEx-CI-2/cds-libs @@ -0,0 +1,180 @@ +######################################### +# Generic parameters + +workspace: $WORKSPACE +libcds-source: source +make-job: 10 +gtest-include: $GTEST_ROOT/googletest/include + +######################################### +#GCC-4.8 +gcc-4.8-root: $GCC48_ROOT/bin +gcc-4.8-cxx: g++-4.8 +gcc-4.8-cc: gcc-4.8 +gcc-4.8-exe-ldflags: -L$GCC48_ROOT/lib64 -Wl,-rpath=$GCC48_ROOT/lib64 +gcc-4.8-extlib: rt +gcc-4.8-boost: $BOOST_ROOT +gcc-4.8-64-boost-lib: stage64-gcc4.8/lib +gcc-4.8-gtest: $GTEST_ROOT +gcc-4.8-64-gtest-lib: $GTEST_ROOT/lib-gcc4.8/libgtest.a + + +######################################## +#GCC-4.9 +gcc-4.9-root: $GCC49_ROOT/bin +gcc-4.9-cxx: g++-4.9 +gcc-4.9-cc: gcc-4.9 +gcc-4.9-exe-ldflags: -Wl,-rpath=$GCC49_ROOT/lib64 +gcc-4.9-extlib: rt +gcc-4.9-boost: $BOOST_ROOT +gcc-4.9-64-boost-lib: stage64-gcc4.9/lib +gcc-4.9-gtest: $GTEST_ROOT +gcc-4.9-64-gtest-lib: $GTEST_ROOT/lib-gcc4.9/libgtest.a + +######################################## +#GCC-5 +gcc-5-root: $GCC5_ROOT/bin +gcc-5-cxx: g++-5 +gcc-5-cc: gcc-5 +gcc-5-boost: $BOOST_ROOT +gcc-5-exe-ldflags: -Wl,-rpath=$GCC5_ROOT/lib64 +gcc-5-extlib: rt +gcc-5-64-boost-lib: stage64-gcc5/lib +gcc-5-64-asan-boost-lib: stage64-gcc5-asan/lib +gcc-5-64-tsan-boost-lib: stage64-gcc5-tsan/lib +gcc-5-gtest: $GTEST_ROOT +gcc-5-64-gtest-lib: 
$GTEST_ROOT/lib-gcc5/libgtest.a + +######################################## +#GCC-6 +gcc-6-root: $GCC6_ROOT/bin +gcc-6-cxx: g++-6 +gcc-6-cc: gcc-6 +gcc-6-boost: $BOOST_ROOT +gcc-6-cxxflags: -march=native -std=c++14 +gcc-6-exe-ldflags: -Wl,-rpath=$GCC6_ROOT/lib64 +gcc-6-extlib: rt +gcc-6-64-boost-lib: stage64-gcc6/lib +gcc-6-64-asan-boost-lib: stage64-gcc6-asan/lib +gcc-6-64-tsan-boost-lib: stage64-gcc6-tsan/lib +gcc-6-gtest: $GTEST_ROOT +gcc-6-64-gtest-lib: $GTEST_ROOT/lib-gcc6/libgtest.a + +######################################## +#GCC-7 +gcc-7-root: $GCC7_ROOT/bin +gcc-7-cxx: g++-7 +gcc-7-cc: gcc-7 +gcc-7-boost: $BOOST_ROOT +gcc-7-cxxflags: -march=native -std=c++1z +gcc-7-exe-ldflags: -Wl,-rpath=$GCC7_ROOT/lib64 +gcc-7-extlib: rt +gcc-7-64-boost-lib: stage64-gcc7/lib +gcc-7-64-asan-boost-lib: stage64-gcc7-asan/lib +gcc-7-64-tsan-boost-lib: stage64-gcc7-tsan/lib +gcc-7-gtest: $GTEST_ROOT +gcc-7-64-gtest-lib: $GTEST_ROOT/lib-gcc7/libgtest.a + +######################################## +# clang-3.6 +clang-3.6-root: $CLANG36_ROOT/bin +clang-3.6-ld-lib-path: $GCC5_ROOT/lib64 +clang-3.6-cxx: clang++ +clang-3.6-cc: clang +clang-3.6-cxxflags: -Wdocumentation +clang-3.6-exe-ldflags: -L$GCC5_ROOT/lib64 -latomic -Wl,-rpath=$GCC5_ROOT/lib64 +clang-3.6-extlib: rt +clang-3.6-boost: $BOOST_ROOT +clang-3.6-64-boost-lib: stage64-clang3.6/lib +clang-3.6-gtest: $GTEST_ROOT +clang-3.6-64-gtest-lib: $GTEST_ROOT/lib-clang3.6/libgtest.a + +######################################## +# clang-3.7 +clang-3.7-root: $CLANG37_ROOT/bin +clang-3.7-ld-lib-path: $GCC6_ROOT/lib64 +clang-3.7-cxx: clang++ +clang-3.7-cc: clang +clang-3.7-cxxflags: -stdlib=libc++ -Wdocumentation +clang-3.7-exe-ldflags: -L$CLANG37_ROOT/lib -Wl,-rpath=$CLANG37_ROOT/lib -lc++abi +clang-3.7-extlib: rt +clang-3.7-boost: $BOOST_ROOT +clang-3.7-64-boost-lib: stage64-clang3.7/lib +clang-3.7-gtest: $GTEST_ROOT +clang-3.7-64-gtest-lib: $GTEST_ROOT/lib-clang3.7/libgtest.a +clang-3.7-cmake-flags: -DCMAKE_C_COMPILER_WORKS=1 
-DCMAKE_CXX_COMPILER_WORKS=1 + + +######################################## +# clang-3.8 +clang-3.8-root: $CLANG38_ROOT/bin +clang-3.8-ld-lib-path: $GCC6_ROOT/lib64 +clang-3.8-cxx: clang++ +clang-3.8-cc: clang +clang-3.8-cxxflags: -stdlib=libc++ -Wdocumentation +clang-3.8-exe-ldflags: -L$CLANG38_ROOT/lib -Wl,-rpath=$CLANG38_ROOT/lib +clang-3.8-extlib: rt +clang-3.8-boost: $BOOST_ROOT +clang-3.8-64-boost-lib: stage64-clang3.8/lib +clang-3.8-gtest: $GTEST_ROOT +clang-3.8-64-gtest-lib: $GTEST_ROOT/lib-clang3.8/libgtest.a + + +######################################## +# clang-3.9 +clang-3.9-root: $CLANG39_ROOT/bin +clang-3.9-ld-lib-path: $GCC6_ROOT/lib64 +clang-3.9-cxx: clang++ +clang-3.9-cc: clang +clang-3.9-cxxflags: -stdlib=libc++ -Wdocumentation +clang-3.9-exe-ldflags: -L$CLANG39_ROOT/lib -Wl,-rpath=$CLANG39_ROOT/lib +clang-3.9-extlib: rt +clang-3.9-boost: $BOOST_ROOT +clang-3.9-64-boost-lib: stage64-clang3.9/lib +clang-3.9-64-asan-boost-lib: stage64-clang3.9-asan/lib +clang-3.9-64-tsan-boost-lib: stage64-clang3.9-tsan/lib +clang-3.9-gtest: $GTEST_ROOT +clang-3.9-64-gtest-lib: $GTEST_ROOT/lib-clang3.9/libgtest.a + + +######################################## +# clang-4 +clang-4-root: $CLANG4_ROOT/bin +clang-4-cxx: clang++ +clang-4-cc: clang +clang-4-cxxflags: -stdlib=libc++ -Wdocumentation -std=c++14 +clang-4-exe-ldflags: -L$CLANG4_ROOT/lib -Wl,-rpath=$CLANG4_ROOT/lib +clang-4-extlib: rt +clang-4-boost: $BOOST_ROOT +clang-4-64-boost-lib: stage64-clang4/lib +clang-4-64-asan-boost-lib: stage64-clang4-asan/lib +clang-4-64-tsan-boost-lib: stage64-clang4-tsan/lib +clang-4-gtest: $GTEST_ROOT +clang-4-64-gtest-lib: $GTEST_ROOT/lib-clang4/libgtest.a + + +######################################## +# clang-5 +clang-5-root: $CLANG5_ROOT/bin +clang-5-cxx: clang++ +clang-5-cc: clang +clang-5-cxxflags: -stdlib=libc++ -Wdocumentation -std=c++1z +clang-5-exe-ldflags: -L$CLANG5_ROOT/lib -Wl,-rpath=$CLANG5_ROOT/lib +clang-5-extlib: rt +clang-5-boost: $LIB_ROOT/boost_1_65_1 
+clang-5-64-boost-lib: stage64-clang5-std17/lib +clang-5-64-asan-boost-lib: stage64-clang5-asan/lib +clang-5-64-tsan-boost-lib: stage64-clang5-tsan/lib +clang-5-gtest: $GTEST_ROOT +clang-5-64-gtest-lib: $GTEST_ROOT/lib-clang5/libgtest.a + + + + + + + + + + + diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/VASEx-CI-2/ci-build b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/VASEx-CI-2/ci-build new file mode 100755 index 0000000..3f3c194 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/VASEx-CI-2/ci-build @@ -0,0 +1,73 @@ +#! /bin/bash + +# Useful envvars: +# CI_SCRIPT_PATH - path where to find scripts +# TOOLSET - toolset: x64-gcc-5, x64-clang-3.9 and so on +# BUILD_TYPE - build type: 'dbg', 'rel', 'asan', 'tsan' +# WORKSPACE - path where to build + +env|sort + +case "$TOOLSET" in + "x64-gcc-4.8") + echo "GCC-4.8 '$BUILD_TYPE', toolset root: $GCC48_ROOT" + $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-4.8-64 $* + EXIT_CODE=$? + ;; + "x64-gcc-4.9") + echo "GCC-4.9 '$BUILD_TYPE', toolset root: $GCC49_ROOT" + $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-4.9-64 $* + EXIT_CODE=$? + ;; + "x64-gcc-5") + echo "GCC-5 '$BUILD_TYPE', toolset root: $GCC5_ROOT" + $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-5-64 $* + EXIT_CODE=$? + ;; + "x64-gcc-6") + echo "GCC-6 '$BUILD_TYPE', toolset root: $GCC6_ROOT" + $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-6-64 $* + EXIT_CODE=$? + ;; + "x64-gcc-7") + echo "GCC-7 '$BUILD_TYPE', toolset root: $GCC7_ROOT" + $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-7-64 $* + EXIT_CODE=$? + ;; + "x64-clang-3.6") + echo "clang-3.6 '$BUILD_TYPE', toolset root: $CLANG36_ROOT" + $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-3.6-64 $* + EXIT_CODE=$? + ;; + "x64-clang-3.7") + echo "clang-3.7 '$BUILD_TYPE', toolset root: $CLANG37_ROOT" + $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-3.7-64 $* + EXIT_CODE=$? 
+ ;; + "x64-clang-3.8") + echo "clang-3.8 '$BUILD_TYPE', toolset root: $CLANG38_ROOT" + $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-3.8-64 $* + EXIT_CODE=$? + ;; + "x64-clang-3.9") + echo "clang-3.9 '$BUILD_TYPE', toolset root: $CLANG39_ROOT" + $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-3.9-64 $* + EXIT_CODE=$? + ;; + "x64-clang-4") + echo "clang-4 '$BUILD_TYPE', toolset root: $CLANG4_ROOT" + $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-4-64 $* + EXIT_CODE=$? + ;; + "x64-clang-5") + echo "clang-5 '$BUILD_TYPE', toolset root: $CLANG5_ROOT" + $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-5-64 $* + EXIT_CODE=$? + ;; + * ) + echo "Undefined toolset '$TOOLSET'" + exit 1 + ;; +esac + +exit $EXIT_CODE diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/VASEx-CI-2/ci-env b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/VASEx-CI-2/ci-env new file mode 100644 index 0000000..ef935e9 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/VASEx-CI-2/ci-env @@ -0,0 +1,31 @@ +#CMAKE_2_8_12=/home/libcds-ci/bin/cmake/cmake-2.8.12/bin +#CMAKE_3_6_2=/home/libcds-ci/bin/cmake/cmake-3.6.2/bin +#CMAKE3=$CMAKE_3_6_2 + +#PATH=$CMAKE_2_8_12:$PATH:$HOME/.local/bin:$HOME/bin + +TOOLSET_ROOT=$HOME/bin + +GCC48_ROOT=$TOOLSET_ROOT/gcc-4.8 +GCC49_ROOT=$TOOLSET_ROOT/gcc-4.9 +GCC5_ROOT=$TOOLSET_ROOT/gcc-5 +GCC6_ROOT=$TOOLSET_ROOT/gcc-6 +GCC7_ROOT=$TOOLSET_ROOT/gcc-7 + +CLANG36_ROOT=$TOOLSET_ROOT/clang-3.6 +CLANG37_ROOT=$TOOLSET_ROOT/clang-3.7 +CLANG38_ROOT=$TOOLSET_ROOT/clang-3.8 +CLANG39_ROOT=$TOOLSET_ROOT/clang-3.9 +CLANG4_ROOT=$TOOLSET_ROOT/clang-4 +CLANG5_ROOT=$TOOLSET_ROOT/clang-5 + +CLANG_STDLIB="-stdlib=libc++" +CLANG37_CXXFLAGS=$CLANG_STDLIB +CLANG38_CXXFLAGS=$CLANG_STDLIB +CLANG39_CXXFLAGS=$CLANG_STDLIB +CLANG4_CXXFLAGS=$CLANG_STDLIB +CLANG5_CXXFLAGS=$CLANG_STDLIB + +LIB_ROOT=$HOME/lib +BOOST_ROOT=$LIB_ROOT/boost +GTEST_ROOT=$LIB_ROOT/gtest diff --git 
a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/VASEx-CI/cds-libs b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/VASEx-CI/cds-libs new file mode 100644 index 0000000..34d4ce1 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/VASEx-CI/cds-libs @@ -0,0 +1,162 @@ +######################################### +# Generic parameters + +workspace: $WORKSPACE +libcds-source: source +make-job: 10 +gtest-include: $GTEST_ROOT/googletest/include + +######################################### +#GCC-4.8 +gcc-4.8-root: $GCC48_ROOT/bin +gcc-4.8-cxx: g++-4.8 +gcc-4.8-cc: gcc-4.8 +gcc-4.8-exe-ldflags: -L$GCC48_ROOT/lib64 -Wl,-rpath=$GCC48_ROOT/lib64 +gcc-4.8-boost: $BOOST_ROOT +gcc-4.8-64-boost-lib: stage64-gcc4.8/lib +gcc-4.8-gtest: $GTEST_ROOT +gcc-4.8-64-gtest-lib: $GTEST_ROOT/lib-gcc4.8/libgtest.a + + +######################################## +#GCC-4.9 +gcc-4.9-root: $GCC49_ROOT/bin +gcc-4.9-cxx: g++-4.9 +gcc-4.9-cc: gcc-4.9 +gcc-4.9-exe-ldflags: -Wl,-rpath=$GCC49_ROOT/lib64 +gcc-4.9-boost: $BOOST_ROOT +gcc-4.9-64-boost-lib: stage64-gcc4.9/lib +gcc-4.9-gtest: $GTEST_ROOT +gcc-4.9-64-gtest-lib: $GTEST_ROOT/lib-gcc4.9/libgtest.a + +######################################## +#GCC-5 +gcc-5-root: $GCC5_ROOT/bin +gcc-5-cxx: g++-5 +gcc-5-cc: gcc-5 +gcc-5-boost: $BOOST_ROOT +gcc-5-exe-ldflags: -Wl,-rpath=$GCC5_ROOT/lib64 +gcc-5-64-boost-lib: stage64-gcc5/lib +gcc-5-64-asan-boost-lib: stage64-gcc5-asan/lib +gcc-5-64-tsan-boost-lib: stage64-gcc5-tsan/lib +gcc-5-gtest: $GTEST_ROOT +gcc-5-64-gtest-lib: $GTEST_ROOT/lib-gcc5/libgtest.a + +######################################## +#GCC-6 +gcc-6-root: $GCC6_ROOT/bin +gcc-6-cxx: g++-6 +gcc-6-cc: gcc-6 +gcc-6-boost: $BOOST_ROOT +gcc-6-cxxflags: -march=native -std=c++14 +gcc-6-exe-ldflags: -Wl,-rpath=$GCC6_ROOT/lib64 +gcc-6-64-boost-lib: stage64-gcc6/lib +gcc-6-64-asan-boost-lib: stage64-gcc6-asan/lib +gcc-6-64-tsan-boost-lib: stage64-gcc6-tsan/lib +gcc-6-gtest: $GTEST_ROOT +gcc-6-64-gtest-lib: 
$GTEST_ROOT/lib-gcc6/libgtest.a + + +######################################## +#GCC-7 +gcc-7-root: $GCC7_ROOT/bin +gcc-7-cxx: g++-7 +gcc-7-cc: gcc-7 +gcc-7-boost: $BOOST_ROOT +gcc-7-cxxflags: -march=native -std=c++1z +gcc-7-exe-ldflags: -Wl,-rpath=$GCC7_ROOT/lib64 +gcc-7-64-boost-lib: stage64-gcc7/lib +gcc-7-64-asan-boost-lib: stage64-gcc7-asan/lib +gcc-7-64-tsan-boost-lib: stage64-gcc7-tsan/lib +gcc-7-gtest: $GTEST_ROOT +gcc-7-64-gtest-lib: $GTEST_ROOT/lib-gcc7/libgtest.a + + +######################################## +# clang-3.6 +clang-3.6-root: $CLANG36_ROOT/bin +clang-3.6-ld-lib-path: $GCC6_ROOT/lib64 +clang-3.6-cxx: clang++ +clang-3.6-cc: clang +clang-3.6-cxxflags: -Wdocumentation +clang-3.6-exe-ldflags: -L$GCC5_ROOT/lib64 -latomic -Wl,-rpath=$GCC5_ROOT/lib64 +clang-3.6-boost: $BOOST_ROOT +clang-3.6-64-boost-lib: stage64-clang3.6/lib +clang-3.6-gtest: $GTEST_ROOT +clang-3.6-64-gtest-lib: $GTEST_ROOT/lib-clang3.6/libgtest.a + +######################################## +# clang-3.7 +clang-3.7-root: $CLANG37_ROOT/bin +clang-3.7-cxx: clang++ +clang-3.7-cc: clang +clang-3.7-cxxflags: -stdlib=libc++ -Wdocumentation +clang-3.7-exe-ldflags: -L$CLANG37_ROOT/lib -Wl,-rpath=$CLANG37_ROOT/lib -lc++abi +clang-3.7-boost: $BOOST_ROOT +clang-3.7-64-boost-lib: stage64-clang3.7/lib +clang-3.7-gtest: $GTEST_ROOT +clang-3.7-64-gtest-lib: $GTEST_ROOT/lib-clang3.7/libgtest.a +clang-3.7-cmake-flags: -DCMAKE_C_COMPILER_WORKS=1 -DCMAKE_CXX_COMPILER_WORKS=1 + +######################################## +# clang-3.8 +clang-3.8-root: $CLANG38_ROOT/bin +clang-3.8-cxx: clang++ +clang-3.8-cc: clang +clang-3.8-cxxflags: -stdlib=libc++ -Wdocumentation +clang-3.8-exe-ldflags: -L$CLANG38_ROOT/lib -Wl,-rpath=$CLANG38_ROOT/lib +clang-3.8-boost: $BOOST_ROOT +clang-3.8-64-boost-lib: stage64-clang3.8/lib +clang-3.8-gtest: $GTEST_ROOT +clang-3.8-64-gtest-lib: $GTEST_ROOT/lib-clang3.8/libgtest.a + + +######################################## +# clang-3.9 +clang-3.9-root: $CLANG39_ROOT/bin +clang-3.9-cxx: 
clang++ +clang-3.9-cc: clang +clang-3.9-cxxflags: -stdlib=libc++ -Wdocumentation +clang-3.9-exe-ldflags: -L$CLANG39_ROOT/lib -Wl,-rpath=$CLANG39_ROOT/lib +clang-3.9-boost: $BOOST_ROOT +clang-3.9-64-boost-lib: stage64-clang3.9/lib +clang-3.9-64-asan-boost-lib: stage64-clang3.9-asan/lib +clang-3.9-64-tsan-boost-lib: stage64-clang3.9-tsan/lib +clang-3.9-gtest: $GTEST_ROOT +clang-3.9-64-gtest-lib: $GTEST_ROOT/lib-clang3.9/libgtest.a + + +######################################## +# clang-4 +clang-4-root: $CLANG4_ROOT/bin +clang-4-cxx: clang++ +clang-4-cc: clang +clang-4-cxxflags: -stdlib=libc++ -Wdocumentation -std=c++14 +clang-4-exe-ldflags: -L$CLANG4_ROOT/lib -Wl,-rpath=$CLANG4_ROOT/lib +clang-4-boost: $BOOST_ROOT +clang-4-64-boost-lib: stage64-clang4/lib +clang-4-64-asan-boost-lib: stage64-clang4-asan/lib +clang-4-64-tsan-boost-lib: stage64-clang4-tsan/lib +clang-4-gtest: $GTEST_ROOT +clang-4-64-gtest-lib: $GTEST_ROOT/lib-clang4/libgtest.a + +######################################## +# clang-5 +clang-5-root: $CLANG5_ROOT/bin +clang-5-cxx: clang++ +clang-5-cc: clang +clang-5-cxxflags: -stdlib=libc++ -Wdocumentation -std=c++1z +clang-5-exe-ldflags: -L$CLANG5_ROOT/lib -Wl,-rpath=$CLANG5_ROOT/lib +clang-5-boost: $LIB_ROOT/boost_1_65_1 +clang-5-64-boost-lib: stage64-clang5-std17/lib +clang-5-64-asan-boost-lib: stage64-clang5-asan/lib +clang-5-64-tsan-boost-lib: stage64-clang5-tsan/lib +clang-5-gtest: $GTEST_ROOT +clang-5-64-gtest-lib: $GTEST_ROOT/lib-clang5/libgtest.a + + + + + + + diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/VASEx-CI/ci-build b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/VASEx-CI/ci-build new file mode 100755 index 0000000..02e8cce --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/VASEx-CI/ci-build @@ -0,0 +1,71 @@ +#! 
/bin/bash + +# Useful envvars: +# CI_SCRIPT_PATH - path where to find scripts +# TOOLSET - toolset: x64-gcc-5, x64-clang-3.9 and so on +# BUILD_TYPE - build type: 'dbg', 'rel', 'asan', 'tsan' +# WORKSPACE - path where to build + +env|sort + +case "$TOOLSET" in + "x64-gcc-4.8") + echo "GCC-4.8 '$BUILD_TYPE', toolset root: $GCC48_ROOT" + $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-4.8-64 $* + exit $? + ;; + "x64-gcc-4.9") + echo "GCC-4.9 '$BUILD_TYPE', toolset root: $GCC49_ROOT" + $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-4.9-64 $* + exit $? + ;; + "x64-gcc-5") + echo "GCC-5 '$BUILD_TYPE', toolset root: $GCC5_ROOT" + $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-5-64 $* + exit $? + ;; + "x64-gcc-6") + echo "GCC-6 '$BUILD_TYPE', toolset root: $GCC6_ROOT" + $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-6-64 $* + exit $? + ;; + "x64-gcc-7") + echo "GCC-7 '$BUILD_TYPE', toolset root: $GCC7_ROOT" + $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-gcc-7-64 $* + exit $? + ;; + "x64-clang-3.6") + echo "clang-3.6 '$BUILD_TYPE', toolset root: $CLANG36_ROOT" + $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-3.6-64 $* + exit $? + ;; + "x64-clang-3.7") + echo "clang-3.7 '$BUILD_TYPE', toolset root: $CLANG37_ROOT" + $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-3.7-64 $* + exit $? + ;; + "x64-clang-3.8") + echo "clang-3.8 '$BUILD_TYPE', toolset root: $CLANG38_ROOT" + $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-3.8-64 $* + exit $? + ;; + "x64-clang-3.9") + echo "clang-3.9 '$BUILD_TYPE', toolset root: $CLANG39_ROOT" + $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-3.9-64 $* + exit $? + ;; + "x64-clang-4") + echo "clang-4 '$BUILD_TYPE', toolset root: $CLANG4_ROOT" + $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-4-64 $* + exit $? + ;; + "x64-clang-5") + echo "clang-5 '$BUILD_TYPE', toolset root: $CLANG5_ROOT" + $CI_SCRIPT_PATH/ci-script/cds-$BUILD_TYPE-clang-5-64 $* + exit $? 
+ ;; + * ) + echo "Undefined toolset '$TOOLSET'" + exit 1 + ;; +esac diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/VASEx-CI/ci-env b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/VASEx-CI/ci-env new file mode 100644 index 0000000..1b06cea --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/VASEx-CI/ci-env @@ -0,0 +1,32 @@ +CMAKE_2_8_12=/home/libcds-ci/bin/cmake/cmake-2.8.12/bin +CMAKE_3_6_2=/home/libcds-ci/bin/cmake/cmake-3.6.2/bin +CMAKE3=$CMAKE_3_6_2 + +PATH=$CMAKE_2_8_12:$PATH:$HOME/.local/bin:$HOME/bin + +TOOLSET_ROOT=$HOME/bin + +GCC48_ROOT=$TOOLSET_ROOT/gcc-4.8 +GCC49_ROOT=$TOOLSET_ROOT/gcc-4.9 +GCC5_ROOT=$TOOLSET_ROOT/gcc-5 +GCC6_ROOT=$TOOLSET_ROOT/gcc-6 +GCC7_ROOT=$TOOLSET_ROOT/gcc-7 + +CLANG35_ROOT=$TOOLSET_ROOT/clang-3.5 +CLANG36_ROOT=$TOOLSET_ROOT/clang-3.6 +CLANG37_ROOT=$TOOLSET_ROOT/clang-3.7 +CLANG38_ROOT=$TOOLSET_ROOT/clang-3.8 +CLANG39_ROOT=$TOOLSET_ROOT/clang-3.9 +CLANG4_ROOT=$TOOLSET_ROOT/clang-4 +CLANG5_ROOT=$TOOLSET_ROOT/clang-5 + +CLANG_STDLIB="-stdlib=libc++" +CLANG37_CXXFLAGS=$CLANG_STDLIB +CLANG38_CXXFLAGS=$CLANG_STDLIB +CLANG39_CXXFLAGS=$CLANG_STDLIB +CLANG4_CXXFLAGS=$CLANG_STDLIB +CLANG5_CXXFLAGS=$CLANG_STDLIB + +LIB_ROOT=$HOME/lib +BOOST_ROOT=$LIB_ROOT/boost +GTEST_ROOT=$LIB_ROOT/gtest diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/cmake-gen b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/cmake-gen new file mode 100755 index 0000000..d7a8041 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/cmake-gen @@ -0,0 +1,111 @@ +#! 
/usr/bin/perl + +my $compiler=shift; +my $bitness =shift; +my $build =shift; +$build="rel" unless $build; + +my $cmake_build="RELEASE"; +$cmake_build="DEBUG" if $build eq 'dbg'; + +my $cds_libs="cds-libs"; + +# get generic props +my $workspace=get_gen_prop("workspace") || "$HOME"; +my $cds_source=get_gen_prop("libcds-source") || "../libcds"; +my $make_jobs=get_gen_prop("make-job") || 2; + +# get compiler-specific props +my $comp_root=get_prop("root"); +my $boost=get_prop( "boost" ); +my $boost_libs=get_prop( "boost-lib" ); +my $gtest=get_prop("gtest"); +my $gtest_lib=get_prop( "gtest-lib"); +my $gtest_inc=get_prop("gtest-include") || get_gen_prop("gtest-include"); +my $cxx=get_prop("cxx") or $compiler; +my $cc=get_prop("cc") or $compiler; +my $cxxflags=get_prop("cxxflags"); +my $ldflags=get_prop("ldflags"); +my $cmake_exe_ldflags=get_prop("exe-ldflags"); +my $ext_lib=get_prop("extlib"); +my $ld_lib_path=get_prop("ld-lib-path"); +my $cmake_flags=get_prop("cmake-flags"); + +my $filename="cds-$build-$compiler-$bitness"; +open( my $out, ">", $filename ) or die "Cannot open cds-$build-$compiler-$bitness"; + +print $out "#! 
/bin/sh\n\n"; +print $out "root=$workspace\n"; +print $out "CDS_SOURCE=\$root/$cds_source\n"; +print $out "OBJ_ROOT=\$root/obj\n"; +print $out "BIN_ROOT=\$root/bin\n"; +print $out "GTEST_ROOT=$gtest\n" if $gtest; +print $out "\n"; +print $out "rm -fr \$OBJ_ROOT\n"; +print $out "rm -fr \$BIN_ROOT\n"; +print $out "mkdir -p \$OBJ_ROOT\n"; +print $out "#cp -f run-ctest-rel \$OBJ_ROOT/run-ctest\n" if $build eq 'rel'; +print $out "#cp -f run-ctest-dbg \$OBJ_ROOT/run-ctest\n" unless $build eq 'rel'; +print $out "cd \$OBJ_ROOT\n"; +print $out "\n"; +print $out "LD_LIBRARY_PATH=$ld_lib_path:\$LD_LIBRARY_PATH \\\n" if $ld_lib_path; +print $out "LDFLAGS=\"$ldflags\" \\\n" if $ldflags; +print $out "cmake -G \"Unix Makefiles\" \\\n"; +print $out " -DCMAKE_BUILD_TYPE=$cmake_build \\\n"; +print $out " -DCMAKE_C_COMPILER=$comp_root/$cc \\\n"; +print $out " -DCMAKE_CXX_COMPILER=$comp_root/$cxx \\\n"; +print $out " -DCMAKE_CXX_FLAGS=\"$cxxflags\" \\\n" if $cxxflags; +print $out " -DCMAKE_EXE_LINKER_FLAGS=\"$cmake_exe_ldflags\" \\\n" if $cmake_exe_ldflags; +print $out " -DCDS_BIN_DIR=\$BIN_ROOT \\\n"; +print $out " -DWITH_TESTS=ON \\\n"; +print $out " -DWITH_ASAN=ON \\\n" if $build eq 'asan'; +print $out " -DWITH_TSAN=ON \\\n" if $build eq 'tsan'; +print $out " -DBOOST_ROOT=$boost \\\n"; +print $out " -DBOOST_LIBRARYDIR=$boost/$boost_libs \\\n" if $boost_libs; +print $out " -DGTEST_INCLUDE_DIRS=$gtest_inc \\\n" if $gtest_inc; +print $out " -DGTEST_LIBRARIES=$gtest_lib \\\n" if $gtest_lib; +print $out " -DEXTERNAL_SYSTEM_LIBS=\"$ext_lib\" \\\n" if $ext_lib; +print $out " $cmake_flags \\\n" if $cmake_flags; +print $out " \$CDS_SOURCE && \\\n"; +print $out "make -j $make_jobs \$* \n"; + +close $out; +chmod 0755, $filename; + +sub get_prop($@) +{ + my $what=shift; + my $key="$compiler-$bitness-$build-$what:"; + + my $grep = `grep -P $key $cds_libs`; + if ( $grep ) { + my @ret = $grep =~ /^$key\s+(\S.*\S*)\s+/; + return $ret[0] if @ret; + } + + $key = "$compiler-$bitness-$what:"; + my 
$grep = `grep -P $key $cds_libs`; + if ( $grep ) { + my @ret = $grep =~ /^$key\s+(\S.*\S*)\s+/; + return $ret[0] if @ret; + } + + $key = "$compiler-$what:"; + my $grep = `grep -P $key $cds_libs`; + if ( $grep ) { + my @ret = $grep =~ /^$key\s+(\S.*\S*)\s+/; + return $ret[0] if @ret; + } +} + +sub get_gen_prop($@) +{ + my $key=shift; + $key = "$key:"; + + my $grep = `grep -P $key $cds_libs`; + if ( $grep ) { + my @ret = $grep =~ /^$key\s+(\S.*\S*)\s+/; + return $ret[0] if @ret; + } +} \ No newline at end of file diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/gen-all b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/gen-all new file mode 100755 index 0000000..ebd9de3 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/gen-all @@ -0,0 +1,36 @@ +#! /bin/sh + +./cmake-gen gcc-4.8 64 dbg +./cmake-gen gcc-4.8 64 rel +./cmake-gen gcc-4.9 64 dbg +./cmake-gen gcc-4.9 64 rel +./cmake-gen gcc-5 64 dbg +./cmake-gen gcc-5 64 rel +./cmake-gen gcc-5 64 tsan +./cmake-gen gcc-5 64 asan +./cmake-gen gcc-6 64 dbg +./cmake-gen gcc-6 64 rel +./cmake-gen gcc-6 64 tsan +./cmake-gen gcc-6 64 asan +./cmake-gen gcc-7 64 dbg +./cmake-gen gcc-7 64 rel +./cmake-gen gcc-7 64 tsan +./cmake-gen gcc-7 64 asan +./cmake-gen clang-3.6 64 dbg +./cmake-gen clang-3.6 64 rel +./cmake-gen clang-3.7 64 dbg +./cmake-gen clang-3.7 64 rel +./cmake-gen clang-3.8 64 dbg +./cmake-gen clang-3.8 64 rel +./cmake-gen clang-3.9 64 dbg +./cmake-gen clang-3.9 64 rel +./cmake-gen clang-3.9 64 asan +./cmake-gen clang-3.9 64 tsan +./cmake-gen clang-4 64 dbg +./cmake-gen clang-4 64 rel +./cmake-gen clang-4 64 asan +./cmake-gen clang-4 64 tsan +./cmake-gen clang-5 64 dbg +./cmake-gen clang-5 64 rel +./cmake-gen clang-5 64 asan +./cmake-gen clang-5 64 tsan \ No newline at end of file diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/travis-ci/install.sh b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/travis-ci/install.sh new file 
mode 100755 index 0000000..4b6b664 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/travis-ci/install.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +set -e +set -x + +if [[ "$(uname -s)" == 'Darwin' ]]; then + brew update || brew update + brew outdated pyenv || brew upgrade pyenv + brew install pyenv-virtualenv + brew install cmake || true + + if which pyenv > /dev/null; then + eval "$(pyenv init -)" + fi + + pyenv install 2.7.10 + pyenv virtualenv 2.7.10 conan + pyenv rehash + pyenv activate conan + + pip install conan --upgrade + pip install conan_package_tools + + conan user + exit 0 +fi + +pip install --user conan --upgrade +pip install --user conan_package_tools + +conan user diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/travis-ci/run.sh b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/travis-ci/run.sh new file mode 100755 index 0000000..2bff8c5 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/CI/travis-ci/run.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +set -e +set -x + +if [[ "$(uname -s)" == 'Darwin' ]]; then + if which pyenv > /dev/null; then + eval "$(pyenv init -)" + fi + pyenv activate conan +fi + +#export CXX=$CXX_COMPILER +#export CC=$C_COMPILER +mkdir build-test && cd build-test +conan install --build=missing -s build_type=$BUILD_TYPE .. +cmake -DCMAKE_PREFIX_PATH="$TRAVIS_BUILD_DIR/build-test/deps" -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DCMAKE_EXE_LINKER_FLAGS=$LINKER_FLAGS -DWITH_TESTS=ON .. +cmake --build . 
-- -j2 $TARGET + +if [[ "$(uname -s)" == 'Darwin' ]]; then + export DYLD_LIBRARY_PATH=$TRAVIS_BUILD_DIR/build-test/deps/lib +fi +ctest -VV -R $TARGET diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/TargetArch.cmake b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/TargetArch.cmake new file mode 100644 index 0000000..026eace --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/TargetArch.cmake @@ -0,0 +1,141 @@ +# Source: https://github.com/axr/solar-cmake +# Based on the Qt 5 processor detection code, so should be very accurate +# https://qt.gitorious.org/qt/qtbase/blobs/master/src/corelib/global/qprocessordetection.h +# Currently handles arm (v5, v6, v7), x86 (32/64), ia64, and ppc (32/64) + +# Regarding POWER/PowerPC, just as is noted in the Qt source, +# "There are many more known variants/revisions that we do not handle/detect." + +set(archdetect_c_code " +#if defined(__arm__) || defined(__TARGET_ARCH_ARM) + #if defined(__ARM_ARCH_7__) \\ + || defined(__ARM_ARCH_7A__) \\ + || defined(__ARM_ARCH_7R__) \\ + || defined(__ARM_ARCH_7M__) \\ + || (defined(__TARGET_ARCH_ARM) && __TARGET_ARCH_ARM-0 >= 7) + #error cmake_ARCH armv7 + #elif defined(__ARM_ARCH_6__) \\ + || defined(__ARM_ARCH_6J__) \\ + || defined(__ARM_ARCH_6T2__) \\ + || defined(__ARM_ARCH_6Z__) \\ + || defined(__ARM_ARCH_6K__) \\ + || defined(__ARM_ARCH_6ZK__) \\ + || defined(__ARM_ARCH_6M__) \\ + || (defined(__TARGET_ARCH_ARM) && __TARGET_ARCH_ARM-0 >= 6) + #error cmake_ARCH armv6 + #elif defined(__ARM_ARCH_5TEJ__) \\ + || (defined(__TARGET_ARCH_ARM) && __TARGET_ARCH_ARM-0 >= 5) + #error cmake_ARCH armv5 + #else + #error cmake_ARCH arm + #endif +#elif defined(__aarch64__) + #if defined(__ARM_ARCH) && __ARM_ARCH == 8 + #error cmake_ARCH armv8 + #else + #error cmake_ARCH arm64 + #endif +#elif defined(__i386) || defined(__i386__) || defined(_M_IX86) + #error cmake_ARCH i386 +#elif defined(__x86_64) || defined(__x86_64__) || 
defined(__amd64) || defined(_M_X64) + #error cmake_ARCH x86_64 +#elif defined(__ia64) || defined(__ia64__) || defined(_M_IA64) + #error cmake_ARCH ia64 +#elif defined(__ppc__) || defined(__ppc) || defined(__powerpc__) \\ + || defined(_ARCH_COM) || defined(_ARCH_PWR) || defined(_ARCH_PPC) \\ + || defined(_M_MPPC) || defined(_M_PPC) + #if defined(__ppc64__) || defined(__powerpc64__) || defined(__64BIT__) + #error cmake_ARCH ppc64 + #else + #error cmake_ARCH ppc + #endif +#endif + +#error cmake_ARCH unknown +") + +# Set ppc_support to TRUE before including this file or ppc and ppc64 +# will be treated as invalid architectures since they are no longer supported by Apple + +function(target_architecture output_var) + if(APPLE AND CMAKE_OSX_ARCHITECTURES) + # On OS X we use CMAKE_OSX_ARCHITECTURES *if* it was set + # First let's normalize the order of the values + + # Note that it's not possible to compile PowerPC applications if you are using + # the OS X SDK version 10.6 or later - you'll need 10.4/10.5 for that, so we + # disable it by default + # See this page for more information: + # http://stackoverflow.com/questions/5333490/how-can-we-restore-ppc-ppc64-as-well-as-full-10-4-10-5-sdk-support-to-xcode-4 + + # Architecture defaults to i386 or ppc on OS X 10.5 and earlier, depending on the CPU type detected at runtime. + # On OS X 10.6+ the default is x86_64 if the CPU supports it, i386 otherwise. 
+ + foreach(osx_arch ${CMAKE_OSX_ARCHITECTURES}) + if("${osx_arch}" STREQUAL "ppc" AND ppc_support) + set(osx_arch_ppc TRUE) + elseif("${osx_arch}" STREQUAL "i386") + set(osx_arch_i386 TRUE) + elseif("${osx_arch}" STREQUAL "x86_64") + set(osx_arch_x86_64 TRUE) + elseif("${osx_arch}" STREQUAL "ppc64" AND ppc_support) + set(osx_arch_ppc64 TRUE) + else() + message(FATAL_ERROR "Invalid OS X arch name: ${osx_arch}") + endif() + endforeach() + + # Now add all the architectures in our normalized order + if(osx_arch_ppc) + list(APPEND ARCH ppc) + endif() + + if(osx_arch_i386) + list(APPEND ARCH i386) + endif() + + if(osx_arch_x86_64) + list(APPEND ARCH x86_64) + endif() + + if(osx_arch_ppc64) + list(APPEND ARCH ppc64) + endif() + else() + file(WRITE "${CMAKE_BINARY_DIR}/arch.c" "${archdetect_c_code}") + + enable_language(C) + + # Detect the architecture in a rather creative way... + # This compiles a small C program which is a series of ifdefs that selects a + # particular #error preprocessor directive whose message string contains the + # target architecture. The program will always fail to compile (both because + # file is not a valid C program, and obviously because of the presence of the + # #error preprocessor directives... 
but by exploiting the preprocessor in this + # way, we can detect the correct target architecture even when cross-compiling, + # since the program itself never needs to be run (only the compiler/preprocessor) + try_run( + run_result_unused + compile_result_unused + "${CMAKE_BINARY_DIR}" + "${CMAKE_BINARY_DIR}/arch.c" + COMPILE_OUTPUT_VARIABLE ARCH + CMAKE_FLAGS CMAKE_OSX_ARCHITECTURES=${CMAKE_OSX_ARCHITECTURES} + ) + + # Parse the architecture name from the compiler output + string(REGEX MATCH "cmake_ARCH ([a-zA-Z0-9_]+)" ARCH "${ARCH}") + + # Get rid of the value marker leaving just the architecture name + string(REPLACE "cmake_ARCH " "" ARCH "${ARCH}") + + # If we are compiling with an unknown architecture this variable should + # already be set to "unknown" but in the case that it's empty (i.e. due + # to a typo in the code), then set it to unknown + if (NOT ARCH) + set(ARCH unknown) + endif() + endif() + + set(${output_var} "${ARCH}" PARENT_SCOPE) +endfunction() diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/description.txt b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/description.txt new file mode 100644 index 0000000..f4cdcbb --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/description.txt @@ -0,0 +1 @@ +libcds - Concurrent Data Structure C++ library \ No newline at end of file diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/post_install_script.sh b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/post_install_script.sh new file mode 100755 index 0000000..aea3041 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/post_install_script.sh @@ -0,0 +1 @@ +ldconfig \ No newline at end of file diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/post_uninstall_script.sh b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/post_uninstall_script.sh new file mode 100755 index 
0000000..aea3041 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/post_uninstall_script.sh @@ -0,0 +1 @@ +ldconfig \ No newline at end of file diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/readme.md b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/readme.md new file mode 100644 index 0000000..6a5aa6a --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/build/cmake/readme.md @@ -0,0 +1,104 @@ +Building library with CMake +=============== + +CDS suports both in-source and out-of-source cmake build types. Now project uses: + +- CMake: general cross-platform building +- CTest: all unit tests can be run in a standard way by *ctest* command +- CPack: for making rpm/deb/nsys etc. packages + +Compiling and testing +---------- +**Building out-of-source in "RELEASE" mode ("DEBUG" is default)** + +- Wherever create empty directory for building, for instance `libcds-debug` +- Prepare: `cmake -DCMAKE_BUILD_TYPE=RELEASE ` +- Compile: `make -j4` +- As a result you'll see shared and static cds libraries in the build directory + +**Warning**: We strongly recommend not to use static cds library. Static library is not tested and not maintained. You can use it on own risk. + +After using command cmake -L one can see some additional variables, that can activate additional features: + +- `WITH_TESTS:BOOL=OFF`: if you want to build library with unit testing support use *-DWITH_TESTS=ON* on prepare step. 
Be careful with this flag, because compile time will dramatically increase +- `WITH_TESTS_COVERAGE:BOOL=OFF`: Analyze test coverage using gcov (only for gcc) +- `WITH_BOOST_ATOMIC:BOOL=OFF`: Use boost atomics (only for boost >= 1.54) +- `WITH_ASAN:BOOL=OFF`: compile libcds with AddressSanitizer instrumentation +- `WITH_TSAN:BOOL=OFF`: compile libcds with ThreadSanitizer instrumentation + +Additional gtest hints (for unit and stress tests only): +- `GTEST_INCLUDE_DIRS=path`: gives full `path` to gtest include dir. +- `GTEST_LIBRARY=path`: gives full `path` to `libgtest.a`. + + +Packaging +---------- + +In order to package library *CPack* is used, command *cpack -G * should create correspondent packages for particular operating system. Now the project supports building the following package types: + +- *RPM*: redhat-based linux distribs +- *DEB*: debian-based linux distribs +- *TGZ*: simple "*tgz*" archive with library and headers +- *NSYS*: windows installer package (NSYS should be installed) + +"Live" building and packaging example +---------- +- `git clone https://github.com/khizmax/libcds.git` +- `mkdir libcds-release` +- `cd libcds-release` +- `cmake -DWITH\_TESTS=ON -DCMAKE\_BUILD_TYPE=RELEASE ../libcds` +``` + -- The C compiler identification is GNU 4.8.3 + -- The CXX compiler identification is GNU 4.8.3 + ... + -- Found Threads: TRUE + -- Boost version: 1.54.0 + -- Found the following Boost libraries: + -- system + -- thread + Build type -- RELEASE + -- Configuring done + -- Generating done + -- Build files have been written to: <...>/libcds-release +``` +- `make -j4` +``` + Scanning dependencies of target cds + Scanning dependencies of target test-common + Scanning dependencies of target cds-s + Scanning dependencies of target test-hdr-offsetof + [ 1%] Building CXX object CMakeFiles/cds-s.dir/src/hp_gc.cpp.o + ... 
+ [100%] Built target test-hdr +``` + +- `ctest` +``` + Test project /home/kel/projects_cds/libcds-debug + Start 1: test-hdr + 1/7 Test #1: test-hdr ......................... Passed 1352.24 sec + Start 2: cdsu-misc + 2/7 Test #2: cdsu-misc ........................ Passed 0.00 sec + Start 3: cdsu-map + ... +``` + +- `cpack -G RPM` +``` + CPack: Create package using RPM + CPack: Install projects + CPack: - Run preinstall target for: cds + CPack: - Install project: cds + CPack: - Install component: devel + CPack: - Install component: lib + CPack: Create package + CPackRPM:Debug: Adding /usr/local to builtin omit list. + CPackRPM: Will use GENERATED spec file: /home/kel/projects_cds/libcds-debug/_CPack_Packages/Linux/RPM/SPECS/cds-devel.spec + CPackRPM: Will use GENERATED spec file: /home/kel/projects_cds/libcds-debug/_CPack_Packages/Linux/RPM/SPECS/cds-lib.spec + CPack: - package: /home/kel/projects_cds/libcds-debug/cds-2.1.0-1-devel.rpm generated. + CPack: - package: /home/kel/projects_cds/libcds-debug/cds-2.1.0-1-lib.rpm generated. +``` + +Future development +---------- +- CDash: use CI system \ No newline at end of file diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/atomic.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/atomic.h new file mode 100644 index 0000000..f11a71a --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/atomic.h @@ -0,0 +1,521 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CXX11_ATOMIC_H +#define CDSLIB_CXX11_ATOMIC_H + +#include +#include + +namespace cds { + +/// C++11 Atomic library support +/** @anchor cds_cxx11_atomic + \p libcds can use the following implementations of the atomics: + - STL \p <atomic>. This is used by default + - \p boost.atomic for boost 1.54 and above. To use it you should define \p CDS_USE_BOOST_ATOMIC for + your compiler invocation, for example, for gcc specify \p -DCDS_USE_BOOST_ATOMIC + in command line + - \p libcds implementation of atomic operation according to C++11 standard as + specified in N3242, p.29. + \p libcds implementation is not the full standard compliant, it provides only C++ part of standard, + for example, \p libcds has no static initialization of the atomic variables and some other C features. + However, that imlementation is enough for the library purposes. Supported architecture: x86, amd64, + ia64 (Itanium) 64bit, 64bit Sparc. 
To use \p libcds atomic you should define \p CDS_USE_LIBCDS_ATOMIC + in the compiler command line (\p -DCDS_USE_LIBCDS_ATOMIC for gcc/clang). + + @note For Clang compiler \p libcds doesn't use native \p libc++ \p <atomic> due some problems. + Instead, \p libcds atomic is used by default, or you can try to use \p boost.atomic. + + The library defines \p atomics alias for atomic namespace: + - namespace atomics = std for STL + - namespace atomics = boost for \p boost.atomic + - namespace atomics = cds::cxx11_atomic for library-provided atomic implementation +*/ +namespace cxx11_atomic { +}} // namespace cds::cxx11_atomic + +//@cond +#if defined(CDS_USE_BOOST_ATOMIC) + // boost atomic +# include +# if BOOST_VERSION >= 105400 +# include + namespace atomics = boost; +# define CDS_CXX11_ATOMIC_BEGIN_NAMESPACE namespace boost { +# define CDS_CXX11_ATOMIC_END_NAMESPACE } +# else +# error "Boost version 1.54 or above is needed for boost.atomic" +# endif +#elif defined(CDS_USE_LIBCDS_ATOMIC) + // libcds atomic +# include + namespace atomics = cds::cxx11_atomic; +# define CDS_CXX11_ATOMIC_BEGIN_NAMESPACE namespace cds { namespace cxx11_atomic { +# define CDS_CXX11_ATOMIC_END_NAMESPACE }} +#else + // Compiler provided C++11 atomic +# include + namespace atomics = std; +# define CDS_CXX11_ATOMIC_BEGIN_NAMESPACE namespace std { +# define CDS_CXX11_ATOMIC_END_NAMESPACE } +#endif +//@endcond + +namespace cds { + + /// Atomic primitives + /** + This namespace contains useful primitives derived from std::atomic. + */ + namespace atomicity { + + /// Atomic event counter. + /** + This class is based on std::atomic_size_t. + It uses relaxed memory ordering \p memory_order_relaxed and may be used as a statistic counter. 
+ */ + class event_counter + { + //@cond + atomics::atomic_size_t m_counter; + //@endcond + + public: + typedef size_t value_type ; ///< Type of counter + + public: + // Initializes event counter with zero + event_counter() noexcept + : m_counter(size_t(0)) + {} + + /// Assign operator + /** + Returns \p n. + */ + value_type operator =( + value_type n ///< new value of the counter + ) noexcept + { + m_counter.exchange( n, atomics::memory_order_relaxed ); + return n; + } + + /// Addition + /** + Returns new value of the atomic counter. + */ + size_t operator +=( + size_t n ///< addendum + ) noexcept + { + return m_counter.fetch_add( n, atomics::memory_order_relaxed ) + n; + } + + /// Substraction + /** + Returns new value of the atomic counter. + */ + size_t operator -=( + size_t n ///< subtrahend + ) noexcept + { + return m_counter.fetch_sub( n, atomics::memory_order_relaxed ) - n; + } + + /// Get current value of the counter + operator size_t () const noexcept + { + return m_counter.load( atomics::memory_order_relaxed ); + } + + /// Preincrement + size_t operator ++() noexcept + { + return m_counter.fetch_add( 1, atomics::memory_order_relaxed ) + 1; + } + /// Postincrement + size_t operator ++(int) noexcept + { + return m_counter.fetch_add( 1, atomics::memory_order_relaxed ); + } + + /// Predecrement + size_t operator --() noexcept + { + return m_counter.fetch_sub( 1, atomics::memory_order_relaxed ) - 1; + } + /// Postdecrement + size_t operator --(int) noexcept + { + return m_counter.fetch_sub( 1, atomics::memory_order_relaxed ); + } + + /// Get current value of the counter + size_t get() const noexcept + { + return m_counter.load( atomics::memory_order_relaxed ); + } + + /// Resets the counter to 0 + void reset() noexcept + { + m_counter.store( 0, atomics::memory_order_release ); + } + }; + + /// Atomic item counter + /** + This class is simplified interface around \p std::atomic_size_t. 
+ The class supports getting current value of the counter and increment/decrement its value. + + See also: improved version that eliminates false sharing - \p cache_friendly_item_counter. + */ + class item_counter + { + public: + typedef atomics::atomic_size_t atomic_type; ///< atomic type used + typedef size_t counter_type; ///< Integral item counter type (size_t) + + private: + //@cond + atomic_type m_Counter; ///< Atomic item counter + //@endcond + + public: + /// Default ctor initializes the counter to zero. + item_counter() + : m_Counter(counter_type(0)) + {} + + /// Returns current value of the counter + counter_type value(atomics::memory_order order = atomics::memory_order_relaxed) const + { + return m_Counter.load( order ); + } + + /// Same as \ref value() with relaxed memory ordering + operator counter_type() const + { + return value(); + } + + /// Returns underlying atomic interface + atomic_type& getAtomic() + { + return m_Counter; + } + + /// Returns underlying atomic interface (const) + const atomic_type& getAtomic() const + { + return m_Counter; + } + + /// Increments the counter. Semantics: postincrement + counter_type inc(atomics::memory_order order = atomics::memory_order_relaxed ) + { + return m_Counter.fetch_add( 1, order ); + } + + /// Increments the counter. Semantics: postincrement + counter_type inc( counter_type count, atomics::memory_order order = atomics::memory_order_relaxed ) + { + return m_Counter.fetch_add( count, order ); + } + + /// Decrements the counter. Semantics: postdecrement + counter_type dec(atomics::memory_order order = atomics::memory_order_relaxed) + { + return m_Counter.fetch_sub( 1, order ); + } + + /// Decrements the counter. 
Semantics: postdecrement + counter_type dec( counter_type count, atomics::memory_order order = atomics::memory_order_relaxed ) + { + return m_Counter.fetch_sub( count, order ); + } + + /// Preincrement + counter_type operator ++() + { + return inc() + 1; + } + /// Postincrement + counter_type operator ++(int) + { + return inc(); + } + + /// Predecrement + counter_type operator --() + { + return dec() - 1; + } + /// Postdecrement + counter_type operator --(int) + { + return dec(); + } + + /// Increment by \p count + counter_type operator +=( counter_type count ) + { + return inc( count ) + count; + } + + /// Decrement by \p count + counter_type operator -=( counter_type count ) + { + return dec( count ) - count; + } + + /// Resets count to 0 + void reset(atomics::memory_order order = atomics::memory_order_relaxed) + { + m_Counter.store( 0, order ); + } + }; + +#if CDS_COMPILER == CDS_COMPILER_CLANG + // CLang unhappy: pad1_ and pad2_ - unused private field warning +# pragma GCC diagnostic push +# pragma GCC diagnostic ignored "-Wunused-private-field" +#endif + /// Atomic cache-friendly item counter + /** + Atomic item counter with cache-line padding to avoid false sharing. + Adding cache-line padding before and after atomic counter eliminates the contention + in read path of many containers and can notably improve search operations in sets/maps. + */ + class cache_friendly_item_counter + { + public: + typedef atomics::atomic_size_t atomic_type; ///< atomic type used + typedef size_t counter_type; ///< Integral item counter type (size_t) + + private: + //@cond + char pad1_[cds::c_nCacheLineSize]; + atomic_type m_Counter; ///< Atomic item counter + char pad2_[cds::c_nCacheLineSize - sizeof( atomic_type )]; + //@endcond + + public: + /// Default ctor initializes the counter to zero. 
+ cache_friendly_item_counter() + : m_Counter(counter_type(0)) + {} + + /// Returns current value of the counter + counter_type value(atomics::memory_order order = atomics::memory_order_relaxed) const + { + return m_Counter.load( order ); + } + + /// Same as \ref value() with relaxed memory ordering + operator counter_type() const + { + return value(); + } + + /// Returns underlying atomic interface + atomic_type& getAtomic() + { + return m_Counter; + } + + /// Returns underlying atomic interface (const) + const atomic_type& getAtomic() const + { + return m_Counter; + } + + /// Increments the counter. Semantics: postincrement + counter_type inc(atomics::memory_order order = atomics::memory_order_relaxed ) + { + return m_Counter.fetch_add( 1, order ); + } + + /// Increments the counter. Semantics: postincrement + counter_type inc( counter_type count, atomics::memory_order order = atomics::memory_order_relaxed ) + { + return m_Counter.fetch_add( count, order ); + } + + /// Decrements the counter. Semantics: postdecrement + counter_type dec(atomics::memory_order order = atomics::memory_order_relaxed) + { + return m_Counter.fetch_sub( 1, order ); + } + + /// Decrements the counter. 
Semantics: postdecrement + counter_type dec( counter_type count, atomics::memory_order order = atomics::memory_order_relaxed ) + { + return m_Counter.fetch_sub( count, order ); + } + + /// Preincrement + counter_type operator ++() + { + return inc() + 1; + } + /// Postincrement + counter_type operator ++(int) + { + return inc(); + } + + /// Predecrement + counter_type operator --() + { + return dec() - 1; + } + /// Postdecrement + counter_type operator --(int) + { + return dec(); + } + + /// Increment by \p count + counter_type operator +=( counter_type count ) + { + return inc( count ) + count; + } + + /// Decrement by \p count + counter_type operator -=( counter_type count ) + { + return dec( count ) - count; + } + + /// Resets count to 0 + void reset(atomics::memory_order order = atomics::memory_order_relaxed) + { + m_Counter.store( 0, order ); + } + }; +#if CDS_COMPILER == CDS_COMPILER_CLANG +# pragma GCC diagnostic pop +#endif + + /// Empty item counter + /** + This class may be used instead of \ref item_counter when you do not need full \ref item_counter interface. + All methods of the class is empty and returns 0. + + The object of this class should not be used in data structure that behavior significantly depends on item counting + (for example, in many hash map implementation). + */ + class empty_item_counter { + public: + typedef size_t counter_type ; ///< Counter type + public: + /// Returns 0 + static counter_type value(atomics::memory_order /*order*/ = atomics::memory_order_relaxed) + { + return 0; + } + + /// Same as \ref value(), always returns 0. + operator counter_type() const + { + return value(); + } + + /// Dummy increment. Always returns 0 + static counter_type inc(atomics::memory_order /*order*/ = atomics::memory_order_relaxed) + { + return 0; + } + + /// Dummy increment. 
Always returns 0 + static counter_type inc( counter_type /*count*/, atomics::memory_order /*order*/ = atomics::memory_order_relaxed ) + { + return 0; + } + + /// Dummy increment. Always returns 0 + static counter_type dec(atomics::memory_order /*order*/ = atomics::memory_order_relaxed) + { + return 0; + } + + /// Dummy increment. Always returns 0 + static counter_type dec( counter_type /*count*/, atomics::memory_order /*order*/ = atomics::memory_order_relaxed ) + { + return 0; + } + + /// Dummy pre-increment. Always returns 0 + counter_type operator ++() const + { + return 0; + } + /// Dummy post-increment. Always returns 0 + counter_type operator ++(int) const + { + return 0; + } + + /// Dummy pre-decrement. Always returns 0 + counter_type operator --() const + { + return 0; + } + /// Dummy post-decrement. Always returns 0 + counter_type operator --(int) const + { + return 0; + } + + /// Dummy increment by \p count, always returns 0 + counter_type operator +=( counter_type count ) + { + CDS_UNUSED( count ); + return 0; + } + + /// Dummy decrement by \p count, always returns 0 + counter_type operator -=( counter_type count ) + { + CDS_UNUSED( count ); + return 0; + } + + /// Dummy function + static void reset(atomics::memory_order /*order*/ = atomics::memory_order_relaxed) + {} + }; + } // namespace atomicity +} // namespace cds + +#endif // #ifndef CDSLIB_CXX11_ATOMIC_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/backoff_strategy.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/backoff_strategy.h new file mode 100644 index 0000000..b241bf5 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/backoff_strategy.h @@ -0,0 +1,464 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution 
and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_BACKOFF_STRATEGY_H +#define CDSLIB_BACKOFF_STRATEGY_H + +/* + Filename: backoff_strategy.h + Created 2007.03.01 by Maxim Khiszinsky + + Description: + Generic back-off strategies + + Editions: + 2007.03.01 Maxim Khiszinsky Created + 2008.10.02 Maxim Khiszinsky Backoff action transfers from contructor to operator() for all backoff schemas + 2009.09.10 Maxim Khiszinsky reset() function added +*/ + +#include // declval +#include +#include +#include + +namespace cds { + /// Different backoff schemes + /** + Back-off schema may be used in lock-free algorithms when the algorithm cannot perform some action because a conflict + with the other concurrent operation is encountered. 
In this case current thread can do another work or can call + processor's performance hint. + + The interface of back-off strategy is following: + \code + struct backoff_strategy { + void operator()(); + template bool operator()( Predicate pr ); + void reset(); + }; + \endcode + + \p operator() operator calls back-off strategy's action. It is main part of back-off strategy. + + Interruptible back-off template < typename Predicate > bool operator()( Predicate pr ) + allows to interrupt back-off spinning if \p pr predicate returns \p true. + \p Predicate is a functor with the following interface: + \code + struct predicate { + bool operator()(); + }; + \endcode + + \p reset() function resets internal state of back-off strategy to initial state. It is required for some + back-off strategies, for example, exponential back-off. + */ + namespace backoff { + + /// Empty backoff strategy. Do nothing + struct empty { + //@cond + void operator ()() const noexcept + {} + + template + bool operator()(Predicate pr) const noexcept( noexcept(std::declval()())) + { + return pr(); + } + + static void reset() noexcept + {} + //@endcond + }; + + /// Switch to another thread (yield). Good for thread preemption architecture. + struct yield { + //@cond + void operator ()() const noexcept + { + std::this_thread::yield(); + } + + template + bool operator()(Predicate pr) const noexcept( noexcept(std::declval()())) + { + if ( pr()) + return true; + operator()(); + return false; + } + + static void reset() noexcept + {} + //@endcond + }; + + /// Random pause + /** + This back-off strategy calls processor-specific pause hint instruction + if one is available for the processor architecture. 
+ */ + struct pause { + //@cond + void operator ()() const noexcept + { +# ifdef CDS_backoff_hint_defined + platform::backoff_hint(); +# endif + } + + template + bool operator()(Predicate pr) const noexcept( noexcept(std::declval()())) + { + if ( pr()) + return true; + operator()(); + return false; + } + + static void reset() noexcept + {} + //@endcond + }; + + /// Processor hint back-off + /** + This back-off schema calls performance hint instruction if it is available for current processor. + Otherwise, it calls \p nop. + */ + struct hint + { + //@cond + void operator ()() const noexcept + { +# if defined(CDS_backoff_hint_defined) + platform::backoff_hint(); +# elif defined(CDS_backoff_nop_defined) + platform::backoff_nop(); +# endif + } + + template + bool operator()(Predicate pr) const noexcept(noexcept(std::declval()())) + { + if ( pr()) + return true; + operator()(); + return false; + } + + static void reset() noexcept + {} + //@endcond + }; + + /// \p backoff::exponential const traits + struct exponential_const_traits + { + typedef hint fast_path_backoff; ///< Fast-path back-off strategy + typedef yield slow_path_backoff; ///< Slow-path back-off strategy + + enum: size_t { + lower_bound = 16, ///< Minimum spinning limit + upper_bound = 16 * 1024 ///< Maximum spinning limit + }; + }; + + /// \p nackoff::exponential runtime traits + struct exponential_runtime_traits + { + typedef hint fast_path_backoff; ///< Fast-path back-off strategy + typedef yield slow_path_backoff; ///< Slow-path back-off strategy + + static size_t lower_bound; ///< Minimum spinning limit, default is 16 + static size_t upper_bound; ///< Maximum spinning limit, default is 16*1024 + }; + + /// Exponential back-off + /** + This back-off strategy is composite. It consists of \p SpinBkoff and \p YieldBkoff + back-off strategy. In first, the strategy tries to apply repeatedly \p SpinBkoff + (spinning phase) until internal counter of failed attempts reaches its maximum + spinning value. 
Then, the strategy transits to high-contention phase + where it applies \p YieldBkoff until \p reset() is called. + On each spinning iteration the internal spinning counter is doubled. + + Selecting the best value for maximum spinning limit is platform and application specific task. + The limits are described by \p Traits template parameter. + There are two types of \p Traits: + - constant traits \p exponential_const_traits - specifies the lower and upper limits + as a compile-time constants; to change the limits you should recompile your application + - runtime traits \p exponential_runtime_traits - specifies the limits as \p s_nExpMin + and \p s_nExpMax variables which can be changed at runtime to tune back-off strategy. + + The traits class must declare two data member: + - \p lower_bound - the lower bound of spinning loop + - \p upper_bound - the upper boudn of spinning loop + + You may use \p Traits template parameter to separate back-off implementations. + For example, you may define two \p exponential back-offs that is the best for your task A and B: + \code + + #include + namespace bkoff = cds::backoff; + + // the best bounds for task A + struct traits_A: public bkoff::exponential_const_traits + { + static size_t lower_bound; + static size_t upper_bound; + }; + size_t traits_A::lower_bound = 1024; + size_t traits_A::upper_bound = 8 * 1024; + + // the best bounds for task B + struct traits_B: public bkoff::exponential_const_traits + { + static size_t lower_bound; + static size_t upper_bound; + }; + size_t traits_A::lower_bound = 16; + size_t traits_A::upper_bound = 1024; + + // // define your back-off specialization + typedef bkoff::exponential expBackOffA; + typedef bkoff::exponential expBackOffB; + \endcode + */ + template + class exponential + { + public: + typedef Traits traits; ///< Traits + + typedef typename traits::fast_path_backoff spin_backoff ; ///< spin (fast-path) back-off strategy + typedef typename traits::slow_path_backoff yield_backoff ; 
///< yield (slow-path) back-off strategy + + protected: + size_t m_nExpCur ; ///< Current spin counter in range [traits::s_nExpMin, traits::s_nExpMax] + + spin_backoff m_bkSpin ; ///< Spinning (fast-path) phase back-off strategy + yield_backoff m_bkYield ; ///< Yield phase back-off strategy + + public: + /// Default ctor + exponential() noexcept + : m_nExpCur( traits::lower_bound ) + {} + + //@cond + void operator ()() noexcept(noexcept(std::declval()()) && noexcept(std::declval()())) + { + if ( m_nExpCur <= traits::upper_bound ) { + for ( size_t n = 0; n < m_nExpCur; ++n ) + m_bkSpin(); + m_nExpCur *= 2; + } + else + m_bkYield(); + } + + template + bool operator()( Predicate pr ) noexcept( noexcept(std::declval()()) && noexcept(std::declval()()) && noexcept(std::declval()())) + { + if ( m_nExpCur <= traits::upper_bound ) { + for ( size_t n = 0; n < m_nExpCur; ++n ) { + if ( m_bkSpin(pr)) + return true; + } + m_nExpCur *= 2; + } + else + return m_bkYield(pr); + return false; + } + + void reset() noexcept( noexcept( std::declval().reset()) && noexcept( std::declval().reset())) + { + m_nExpCur = traits::lower_bound; + m_bkSpin.reset(); + m_bkYield.reset(); + } + //@endcond + }; + + //@cond + template + struct make_exponential + { + struct traits: public exponential_const_traits + { + typedef FastPathBkOff fast_path_backoff; + typedef SlowPathBkOff slow_path_backoff; + }; + + typedef exponential type; + }; + + template + using make_exponential_t = typename make_exponential::type; + //@endcond + + /// Constant traits for \ref delay back-off strategy + struct delay_const_traits + { + typedef std::chrono::milliseconds duration_type; ///< Timeout type + enum: unsigned { + timeout = 5 ///< Delay timeout + }; + }; + + /// Runtime traits for \ref delay back-off strategy + struct delay_runtime_traits + { + typedef std::chrono::milliseconds duration_type; ///< Timeout type + static unsigned timeout; ///< Delay timeout, default 5 + }; + + /// Delay back-off strategy + /** + 
Template arguments: + - \p Duration - duration type, default is \p std::chrono::milliseconds + - \p Traits - a class that defines default timeout. + + Choosing the best value for th timeout is platform and application specific task. + The default values for timeout is provided by \p Traits class that should + \p timeout data member. There are two predefined \p Traits implementation: + - \p delay_const_traits - defines \p timeout as a constant (enum). + To change timeout you should recompile your application. + - \p delay_runtime_traits - specifies timeout as static data member that can be changed + at runtime to tune the back-off strategy. + + You may use \p Traits template parameter to separate back-off implementations. + For example, you may define two \p delay back-offs for 5 and 10 ms timeout: + \code + + #include + namespace bkoff = cds::backoff; + + // 5ms delay + struct ms5 + { + typedef std::chrono::milliseconds duration_type; + enum: unsigned { timeout = 5 }; + }; + + // 10ms delay, runtime support + struct ms10 + { + typedef std::chrono::milliseconds duration_type; + static unsigned timeout; + }; + unsigned ms10::timeout = 10; + + // define your back-off specialization + typedef bkoff::delay delay5; + typedef bkoff::delay delay10; + + \endcode + */ + template + class delay + { + public: + typedef Traits traits; ///< Traits + typedef typename Traits::duration_type duration_type; ///< Duration type (default \p std::chrono::milliseconds) + + protected: + ///@cond + duration_type const timeout; + ///@endcond + + public: + /// Default ctor takes the timeout from \p traits::timeout + delay() noexcept + : timeout( traits::timeout ) + {} + + /// Initializes timeout from \p nTimeout + constexpr explicit delay( unsigned int nTimeout ) noexcept + : timeout( nTimeout ) + {} + + //@cond + void operator()() const + { + std::this_thread::sleep_for( timeout ); + } + + template + bool operator()(Predicate pr) const + { + for ( unsigned int i = 0; i < traits::timeout; i += 
2 ) { + if ( pr()) + return true; + std::this_thread::sleep_for( duration_type( 2 )); + } + return false; + } + + static void reset() noexcept + {} + //@endcond + }; + + //@cond + template + struct make_delay_of + { + struct traits { + typedef Duration duration_type; + enum: unsigned { timeout = Timeout }; + }; + + typedef delay type; + }; + //@endcond + + /// Delay back-off strategy, template version + /** + This is a simplified version of \p backoff::delay class. + Template parameter \p Timeout sets a delay timeout of \p Duration unit. + */ + template + using delay_of = typename make_delay_of< Timeout, Duration >::type; + + + /// Default backoff strategy + typedef exponential Default; + + /// Default back-off strategy for lock primitives + typedef exponential LockDefault; + + } // namespace backoff +} // namespace cds + + +#endif // #ifndef CDSLIB_BACKOFF_STRATEGY_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/base.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/base.h new file mode 100644 index 0000000..a109e80 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/base.h @@ -0,0 +1,43 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_ALGO_BASE_H +#define CDSLIB_ALGO_BASE_H + +#include + +namespace cds { + + /// Different approaches and techniques for supporting high-concurrent data structure + namespace algo {} + +} // namespace cds + +#endif // #ifndef CDSLIB_ALGO_BASE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/bit_reversal.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/bit_reversal.h new file mode 100644 index 0000000..b094f8d --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/bit_reversal.h @@ -0,0 +1,184 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_ALGO_BIT_REVERSAL_H +#define CDSLIB_ALGO_BIT_REVERSAL_H + +#include + + // Source: http://stackoverflow.com/questions/746171/best-algorithm-for-bit-reversal-from-msb-lsb-to-lsb-msb-in-c +namespace cds { namespace algo { + + /// Bit reversal algorithms + namespace bit_reversal { + + /// SWAR algorithm (source: http://aggregate.org/MAGIC/#Bit%20Reversal) + struct swar { + /// 32bit + uint32_t operator()( uint32_t x ) const + { + x = ( ( ( x & 0xaaaaaaaa ) >> 1 ) | ( ( x & 0x55555555 ) << 1 )); + x = ( ( ( x & 0xcccccccc ) >> 2 ) | ( ( x & 0x33333333 ) << 2 )); + x = ( ( ( x & 0xf0f0f0f0 ) >> 4 ) | ( ( x & 0x0f0f0f0f ) << 4 )); + x = ( ( ( x & 0xff00ff00 ) >> 8 ) | ( ( x & 0x00ff00ff ) << 8 )); + return( ( x >> 16 ) | ( x << 16 )); + } + + /// 64bit + uint64_t operator()( uint64_t x ) const + { + return ( static_cast( operator()( static_cast( x ))) << 32 ) // low 32bit + | ( static_cast( operator()( static_cast( x >> 32 )))); // high 32bit + } + }; + + /// Lookup table 
algorithm + struct lookup { + /// 32bit + uint32_t operator()( uint32_t x ) const + { + static uint8_t const table[] = { + 0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0, 0x30, 0xB0, 0x70, 0xF0, + 0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8, 0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8, + 0x04, 0x84, 0x44, 0xC4, 0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4, + 0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC, 0x3C, 0xBC, 0x7C, 0xFC, + 0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2, 0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2, + 0x0A, 0x8A, 0x4A, 0xCA, 0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA, + 0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6, 0x36, 0xB6, 0x76, 0xF6, + 0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE, 0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE, + 0x01, 0x81, 0x41, 0xC1, 0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1, + 0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9, 0x39, 0xB9, 0x79, 0xF9, + 0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5, 0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5, + 0x0D, 0x8D, 0x4D, 0xCD, 0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD, + 0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3, 0x33, 0xB3, 0x73, 0xF3, + 0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB, 0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB, + 0x07, 0x87, 0x47, 0xC7, 0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7, + 0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF, 0x3F, 0xBF, 0x7F, 0xFF + }; + static_assert( sizeof( table ) / sizeof( table[0] ) == 256, "Table size mismatch" ); + + return ( static_cast( table[x & 0xff] ) << 24 ) | + ( static_cast( table[( x >> 8 ) & 0xff] ) << 16 ) | + ( static_cast( table[( x >> 16 ) & 0xff] ) << 8 ) | + ( static_cast( table[( x >> 24 ) & 0xff] )); + } + + 
/// 64bit + uint64_t operator()( uint64_t x ) const + { + return ( static_cast( operator()( static_cast( x ))) << 32 ) | + static_cast( operator()( static_cast( x >> 32 ))); + } + }; + + + /// Mul-Div algorithm for 32bit architectire + + /// Mul-Div algorithm + struct muldiv { + //@cond + static uint8_t muldiv32_byte( uint8_t b ) + { + return static_cast( ( ( b * 0x0802LU & 0x22110LU ) | ( b * 0x8020LU & 0x88440LU )) * 0x10101LU >> 16 ); + } + + static uint8_t muldiv64_byte( uint8_t b ) + { + return static_cast( ( b * 0x0202020202ULL & 0x010884422010ULL ) % 1023 ); + } + + // for 32bit architecture + static uint32_t muldiv32( uint32_t x ) + { + return static_cast( muldiv32_byte( static_cast( x >> 24 ))) + | ( static_cast( muldiv32_byte( static_cast( x >> 16 ))) << 8 ) + | ( static_cast( muldiv32_byte( static_cast( x >> 8 ))) << 16 ) + | ( static_cast( muldiv32_byte( static_cast( x ))) << 24 ); + } + + static uint64_t muldiv32( uint64_t x ) + { + return static_cast( muldiv32_byte( static_cast( x >> 56 ))) + | ( static_cast( muldiv32_byte( static_cast( x >> 48 ))) << 8 ) + | ( static_cast( muldiv32_byte( static_cast( x >> 40 ))) << 16 ) + | ( static_cast( muldiv32_byte( static_cast( x >> 32 ))) << 24 ) + | ( static_cast( muldiv32_byte( static_cast( x >> 24 ))) << 32 ) + | ( static_cast( muldiv32_byte( static_cast( x >> 16 ))) << 40 ) + | ( static_cast( muldiv32_byte( static_cast( x >> 8 ))) << 48 ) + | ( static_cast( muldiv32_byte( static_cast( x ))) << 56 ); + } + + /// for 64bit architectire + static uint32_t muldiv64( uint32_t x ) + { + return static_cast( muldiv64_byte( static_cast( x >> 24 ))) + | ( static_cast( muldiv64_byte( static_cast( x >> 16 ))) << 8 ) + | ( static_cast( muldiv64_byte( static_cast( x >> 8 ))) << 16 ) + | ( static_cast( muldiv64_byte( static_cast( x ))) << 24 ); + } + + static uint64_t muldiv64( uint64_t x ) + { + return static_cast( muldiv64_byte( static_cast( x >> 56 ))) + | ( static_cast( muldiv64_byte( static_cast( x >> 48 ))) << 8 ) + 
| ( static_cast( muldiv64_byte( static_cast( x >> 40 ))) << 16 ) + | ( static_cast( muldiv64_byte( static_cast( x >> 32 ))) << 24 ) + | ( static_cast( muldiv64_byte( static_cast( x >> 24 ))) << 32 ) + | ( static_cast( muldiv64_byte( static_cast( x >> 16 ))) << 40 ) + | ( static_cast( muldiv64_byte( static_cast( x >> 8 ))) << 48 ) + | ( static_cast( muldiv64_byte( static_cast( x ))) << 56 ); + } + //@endcond + + /// 32bit + uint32_t operator()( uint32_t x ) const + { +# if CDS_BUILD_BITS == 32 + return muldiv32( x ); +# else + return muldiv64( x ); +# endif + } + + /// 64bit + uint64_t operator()( uint64_t x ) const + { +# if CDS_BUILD_BITS == 32 + return muldiv32( x ); +# else + return muldiv64( x ); +# endif + } + }; + + } // namespace bit_reversal +}} // namespace cds::algo + +#endif // #ifndef CDSLIB_ALGO_BIT_REVERSAL_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/bitop.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/bitop.h new file mode 100644 index 0000000..dc5900d --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/bitop.h @@ -0,0 +1,168 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_BITOP_H +#define CDSLIB_BITOP_H + +/* + Different bit algorithms: + LSB get least significant bit number + MSB get most significant bit number + bswap swap byte order of word + RBO reverse bit order of word + + Editions: + 2007.10.08 Maxim.Khiszinsky Created +*/ + +#include +#include + +namespace cds { + /// Bit operations + namespace bitop { + + ///@cond none + namespace details { + template struct BitOps; + + // 32-bit bit ops + template <> struct BitOps<4> { + typedef uint32_t TUInt; + + static int MSB( TUInt x ) { return bitop::platform::msb32( x ); } + static int LSB( TUInt x ) { return bitop::platform::lsb32( x ); } + static int MSBnz( TUInt x ) { return bitop::platform::msb32nz( x ); } + static int LSBnz( TUInt x ) { return bitop::platform::lsb32nz( x ); } + static int SBC( TUInt x ) { return bitop::platform::sbc32( x ) ; } + static int ZBC( TUInt x ) { return bitop::platform::zbc32( x ) ; } + + static TUInt RBO( TUInt x ) { return bitop::platform::rbo32( x ); } + static bool complement( TUInt& x, int nBit ) { return bitop::platform::complement32( &x, nBit ); } + + static TUInt RandXorShift(TUInt x) { return bitop::platform::RandXorShift32(x); } + }; + + // 64-bit bit ops + 
template <> struct BitOps<8> { + typedef uint64_t TUInt; + + static int MSB( TUInt x ) { return bitop::platform::msb64( x ); } + static int LSB( TUInt x ) { return bitop::platform::lsb64( x ); } + static int MSBnz( TUInt x ) { return bitop::platform::msb64nz( x ); } + static int LSBnz( TUInt x ) { return bitop::platform::lsb64nz( x ); } + static int SBC( TUInt x ) { return bitop::platform::sbc64( x ) ; } + static int ZBC( TUInt x ) { return bitop::platform::zbc64( x ) ; } + + static TUInt RBO( TUInt x ) { return bitop::platform::rbo64( x ); } + static bool complement( TUInt& x, int nBit ) { return bitop::platform::complement64( &x, nBit ); } + + static TUInt RandXorShift(TUInt x) { return bitop::platform::RandXorShift64(x); } + }; + } // namespace details + //@endcond + + + /// Get least significant bit (LSB) number (1..32/64), 0 if nArg == 0 + template + static inline int LSB( T nArg ) + { + return details::BitOps< sizeof(T) >::LSB( (typename details::BitOps::TUInt) nArg ); + } + + /// Get least significant bit (LSB) number (0..31/63) + /** + Precondition: nArg != 0 + */ + template + static inline int LSBnz( T nArg ) + { + assert( nArg != 0 ); + return details::BitOps< sizeof(T) >::LSBnz( (typename details::BitOps::TUInt) nArg ); + } + + /// Get most significant bit (MSB) number (1..32/64), 0 if nArg == 0 + template + static inline int MSB( T nArg ) + { + return details::BitOps< sizeof(T) >::MSB( (typename details::BitOps::TUInt) nArg ); + } + + /// Get most significant bit (MSB) number (0..31/63) + /** + Precondition: nArg != 0 + */ + template + static inline int MSBnz( T nArg ) + { + assert( nArg != 0 ); + return details::BitOps< sizeof(T) >::MSBnz( (typename details::BitOps::TUInt) nArg ); + } + + /// Get non-zero bit count of a word + template + static inline int SBC( T nArg ) + { + return details::BitOps< sizeof(T) >::SBC( (typename details::BitOps::TUInt) nArg ); + } + + /// Get zero bit count of a word + template + static inline int ZBC( T nArg ) + { + 
return details::BitOps< sizeof(T) >::ZBC( (typename details::BitOps::TUInt) nArg ); + } + + /// Reverse bit order of \p nArg + template + static inline T RBO( T nArg ) + { + return (T) details::BitOps< sizeof(T) >::RBO( (typename details::BitOps::TUInt) nArg ); + } + + /// Complement bit \p nBit in \p nArg + template + static inline bool complement( T& nArg, int nBit ) + { + return details::BitOps< sizeof(T) >::complement( reinterpret_cast< typename details::BitOps::TUInt& >( nArg ), nBit ); + } + + /// Simple random number generator + template + static inline T RandXorShift( T x) + { + return (T) details::BitOps< sizeof(T) >::RandXorShift(x); + } + + } // namespace bitop +} //namespace cds + +#endif // #ifndef CDSLIB_BITOP_H + diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/elimination.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/elimination.h new file mode 100644 index 0000000..597e0f2 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/elimination.h @@ -0,0 +1,86 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_ALGO_ELIMINATION_H +#define CDSLIB_ALGO_ELIMINATION_H + +#include +#include +#include +#include + +namespace cds { namespace algo { + + /// Elimination technique + /** @anchor cds_elimination_description + Elimination technique allows highly distributed coupling and execution of operations with reverse + semantics like the pushes and pops on a stack. If a push followed by a pop are performed + on a stack, the data structure's state does not change (similarly for a pop followed by a push). + This means that if one can cause pairs of pushes and pops to meet and pair up in + separate locations, the threads can exchange values without having to touch a centralized structure + since they have anyhow "eliminated" each other's effect on it. Elimination can be implemented + by using a collision array in which threads pick random locations in order to try and collide. + Pairs of threads that "collide" in some location run through a synchronization protocol, + and all such disjoint collisions can be performed in parallel. 
If a thread has not met another + in the selected location or if it met a thread with an operation that cannot be eliminated + (such as two push operations), an alternative scheme must be used. + */ + namespace elimination { + + /// Base class describing an operation for eliminating + /** + This class contains some debugng info. + Actual operation descriptor depends on real container and its interface. + */ + struct operation_desc + { + record * pOwner; ///< Owner of the descriptor + }; + + /// Acquires elimination record for the current thread + template + static inline record * init_record( OperationDesc& op ) + { + record& rec = cds::threading::elimination_record(); + assert( rec.is_free()); + op.pOwner = &rec; + rec.pOp = static_cast( &op ); + return &rec; + } + + /// Releases elimination record for the current thread + static inline void clear_record() + { + cds::threading::elimination_record().pOp = nullptr; + } + } // namespace elimination +}} // namespace cds::algo + +#endif // CDSLIB_ALGO_ELIMINATION_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/elimination_opt.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/elimination_opt.h new file mode 100644 index 0000000..ecda1df --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/elimination_opt.h @@ -0,0 +1,65 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_ALGO_ELIMINATION_OPT_H +#define CDSLIB_ALGO_ELIMINATION_OPT_H + +#include + +namespace cds { namespace opt { + + /// Enable \ref cds_elimination_description "elimination back-off" for the container + template + struct enable_elimination { + //@cond + template struct pack: public Base + { + static constexpr const bool enable_elimination = Enable; + }; + //@endcond + }; + + /// \ref cds_elimination_description "Elimination back-off strategy" option setter + /** + Back-off strategy for elimination. + Usually, elimination back-off strategy is \p cds::backoff::delay. 
+ */ + template + struct elimination_backoff { + //@cond + template struct pack: public Base + { + typedef Type elimination_backoff; + }; + //@endcond + }; +}} // namespace cds::opt + +#endif // #ifndef CDSLIB_ALGO_ELIMINATION_OPT_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/elimination_tls.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/elimination_tls.h new file mode 100644 index 0000000..b97efbc --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/elimination_tls.h @@ -0,0 +1,62 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_ALGO_ELIMINATION_TLS_H +#define CDSLIB_ALGO_ELIMINATION_TLS_H + +#include + +namespace cds { namespace algo { namespace elimination { + + // Forwards + struct operation_desc; + + /// Per-thread elimination record + /** @headerfile cds/algo/elimination.h + */ + struct record + { + operation_desc * pOp ; ///< Operation descriptor + + /// Initialization + record() + : pOp( nullptr ) + {} + + /// Checks if the record is free + bool is_free() const + { + return pOp == nullptr; + } + }; + +}}} // cds::algo::elimination + +#endif // #ifndef CDSLIB_ALGO_ELIMINATION_TLS_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/flat_combining.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/flat_combining.h new file mode 100644 index 0000000..10f406e --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/flat_combining.h @@ -0,0 +1,36 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_ALGO_FLAT_COMBINING_H +#define CDSLIB_ALGO_FLAT_COMBINING_H + +#include + +#endif // #ifndef CDSLIB_ALGO_FLAT_COMBINING_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/flat_combining/defs.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/flat_combining/defs.h new file mode 100644 index 0000000..14608ca --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/flat_combining/defs.h @@ -0,0 +1,92 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following 
disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_ALGO_FLAT_COMBINING_DEFS_H +#define CDSLIB_ALGO_FLAT_COMBINING_DEFS_H + +#include + + +namespace cds { namespace algo { namespace flat_combining { + + /// Special values of \p publication_record::nRequest + enum request_value + { + req_EmptyRecord, ///< Publication record is empty + req_Response, ///< Operation is done + + req_Operation ///< First operation id for derived classes + }; + + /// \p publication_record state + enum record_state { + inactive, ///< Record is inactive + active, ///< Record is active + removed ///< Record should be removed + }; + + /// Record of publication list + /** + Each data structure based on flat combining contains a class derived from \p %publication_record + */ + struct publication_record { + atomics::atomic nRequest; ///< Request field (depends on data structure) + atomics::atomic nState; ///< Record state: inactive, active, removed + atomics::atomic nAge; ///< Age of the record + atomics::atomic pNext; ///< Next record in active publication list + atomics::atomic pNextAllocated; ///< Next record in allocated publication list + + /// Initializes publication record + publication_record() + : nRequest( req_EmptyRecord ) + , nAge( 0 ) + , pNext( nullptr ) + , pNextAllocated( nullptr ) + { + nState.store( inactive, atomics::memory_order_release ); + } + + /// Returns the value of \p nRequest field + unsigned int op( atomics::memory_order mo = atomics::memory_order_relaxed ) const + { + return nRequest.load( mo ); + } + + /// Checks if the operation is done + bool is_done() const + { + return nRequest.load( atomics::memory_order_relaxed ) == req_Response; + } + }; + + +}}} // namespace cds::algo::flat_combining + +#endif // CDSLIB_ALGO_FLAT_COMBINING_DEFS_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/flat_combining/kernel.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/flat_combining/kernel.h new file mode 100644 index 0000000..ea05169 --- /dev/null +++ 
b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/flat_combining/kernel.h @@ -0,0 +1,900 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_ALGO_FLAT_COMBINING_KERNEL_H +#define CDSLIB_ALGO_FLAT_COMBINING_KERNEL_H + +#include +#include + +#include +#include +#include +#include + +namespace cds { namespace algo { + + /// @defgroup cds_flat_combining_intrusive Intrusive flat combining containers + /// @defgroup cds_flat_combining_container Non-intrusive flat combining containers + + /// Flat combining + /** + @anchor cds_flat_combining_description + Flat combining (FC) technique is invented by Hendler, Incze, Shavit and Tzafrir in their paper + [2010] "Flat Combining and the Synchronization-Parallelism Tradeoff". + The technique converts a sequential data structure to its concurrent implementation. + A few structures are added to the sequential implementation: a global lock, + a count of the number of combining passes, and a pointer to the head + of a publication list. The publication list is a list of thread-local records + of a size proportional to the number of threads that are concurrently accessing the shared object. + + Each thread \p t accessing the structure to perform an invocation of some method \p f() + on the shared object executes the following sequence of steps: +
    +
  1. Write the invocation opcode and parameters (if any) of the method \p f() to be applied + sequentially to the shared object in the request field of your thread local publication + record (there is no need to use a load-store memory barrier). The request field will later + be used to receive the response. If your thread local publication record is marked as active + continue to step 2, otherwise continue to step 5.
  2. +
  3. Check if the global lock is taken. If so (another thread is an active combiner), spin on the request + field waiting for a response to the invocation (one can add a yield at this point to allow other threads + on the same core to run). Once in a while while spinning check if the lock is still taken and that your + record is active (you may use any of \p wait_strategy instead of spinning). If your record is inactive proceed to step 5. + Once the response is available, reset the request field to null and return the response.
  4. +
  5. If the lock is not taken, attempt to acquire it and become a combiner. If you fail, + return to spinning in step 2.
  6. +
  7. Otherwise, you hold the lock and are a combiner. +
      +
    • Increment the combining pass count by one.
    • +
    • Execute a \p fc_apply() by traversing the publication list from the head, + combining all non-null method call invocations, setting the age of each of these records + to the current count, applying the combined method calls to the structure D, and returning + responses to all the invocations. This traversal is guaranteed to be wait-free.
    • +
    • If the count is such that a cleanup needs to be performed, traverse the publication + list from the head. Starting from the second item (we always leave the item pointed to + by the head in the list), remove from the publication list all records whose age is + much smaller than the current count. This is done by removing the node and marking it + as inactive.
    • +
    • Release the lock.
    • +
    +
  8. If you have no thread local publication record allocate one, marked as active. If you already + have one marked as inactive, mark it as active. Execute a store-load memory barrier. Proceed to insert + the record into the list with a successful CAS to the head. Then proceed to step 1.
  9. +
+ + As the test results show, the flat combining technique is suitable for non-intrusive containers + like stack, queue, deque. For intrusive concurrent containers the flat combining demonstrates + less impressive results. + + \ref cds_flat_combining_container "List of FC-based containers" in libcds. + + \ref cds_flat_combining_intrusive "List of intrusive FC-based containers" in libcds. + */ + namespace flat_combining { + + /// Flat combining internal statistics + template + struct stat + { + typedef Counter counter_type; ///< Event counter type + + counter_type m_nOperationCount ; ///< How many operations have been performed + counter_type m_nCombiningCount ; ///< Combining call count + counter_type m_nCompactPublicationList; ///< Count of publication list compacting + counter_type m_nDeactivatePubRecord; ///< How many publication records were deactivated during compacting + counter_type m_nActivatePubRecord; ///< Count of publication record activating + counter_type m_nPubRecordCreated ; ///< Count of created publication records + counter_type m_nPubRecordDeleted ; ///< Count of deleted publication records + counter_type m_nPassiveWaitCall; ///< Count of passive waiting call (\p kernel::wait_for_combining()) + counter_type m_nPassiveWaitIteration;///< Count of iteration inside passive waiting + counter_type m_nPassiveWaitWakeup; ///< Count of forcing wake-up of passive wait cycle + counter_type m_nInvokeExclusive; ///< Count of call \p kernel::invoke_exclusive() + counter_type m_nWakeupByNotifying; ///< How many times the passive thread be waked up by a notification + counter_type m_nPassiveToCombiner; ///< How many times the passive thread becomes the combiner + + /// Returns current combining factor + /** + Combining factor is how many operations perform in one combine pass: + combining_factor := m_nOperationCount / m_nCombiningCount + */ + double combining_factor() const + { + return m_nCombiningCount.get() ? 
double( m_nOperationCount.get()) / m_nCombiningCount.get() : 0.0; + } + + //@cond + void onOperation() { ++m_nOperationCount; } + void onCombining() { ++m_nCombiningCount; } + void onCompactPublicationList() { ++m_nCompactPublicationList; } + void onDeactivatePubRecord() { ++m_nDeactivatePubRecord; } + void onActivatePubRecord() { ++m_nActivatePubRecord; } + void onCreatePubRecord() { ++m_nPubRecordCreated; } + void onDeletePubRecord() { ++m_nPubRecordDeleted; } + void onPassiveWait() { ++m_nPassiveWaitCall; } + void onPassiveWaitIteration() { ++m_nPassiveWaitIteration; } + void onPassiveWaitWakeup() { ++m_nPassiveWaitWakeup; } + void onInvokeExclusive() { ++m_nInvokeExclusive; } + void onWakeupByNotifying() { ++m_nWakeupByNotifying; } + void onPassiveToCombiner() { ++m_nPassiveToCombiner; } + + //@endcond + }; + + /// Flat combining dummy internal statistics + struct empty_stat + { + //@cond + void onOperation() const {} + void onCombining() const {} + void onCompactPublicationList() const {} + void onDeactivatePubRecord() const {} + void onActivatePubRecord() const {} + void onCreatePubRecord() const {} + void onDeletePubRecord() const {} + void onPassiveWait() const {} + void onPassiveWaitIteration() const {} + void onPassiveWaitWakeup() const {} + void onInvokeExclusive() const {} + void onWakeupByNotifying() const {} + void onPassiveToCombiner() const {} + //@endcond + }; + + /// Type traits of \ref kernel class + /** + You can define different type traits for \ref kernel + by specifying your struct based on \p %traits + or by using \ref make_traits metafunction. 
+ */ + struct traits + { + typedef cds::sync::spin lock_type; ///< Lock type + typedef cds::algo::flat_combining::wait_strategy::backoff< cds::backoff::delay_of<2>> wait_strategy; ///< Wait strategy + typedef CDS_DEFAULT_ALLOCATOR allocator; ///< Allocator used for TLS data (allocating \p publication_record derivatives) + typedef empty_stat stat; ///< Internal statistics + typedef opt::v::relaxed_ordering memory_model; ///< /// C++ memory ordering model + }; + + /// Metafunction converting option list to traits + /** + \p Options are: + - \p opt::lock_type - mutex type, default is \p cds::sync::spin + - \p opt::wait_strategy - wait strategy, see \p wait_strategy namespace, default is \p wait_strategy::backoff. + - \p opt::allocator - allocator type, default is \ref CDS_DEFAULT_ALLOCATOR + - \p opt::stat - internal statistics, possible type: \ref stat, \ref empty_stat (the default) + - \p opt::memory_model - C++ memory ordering model. + List of all available memory ordering see \p opt::memory_model. + Default is \p cds::opt::v::relaxed_ordering + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + ,Options... + >::type type; +# endif + }; + + /// The kernel of flat combining + /** + Template parameters: + - \p PublicationRecord - a type derived from \ref publication_record + - \p Traits - a type traits of flat combining, default is \p flat_combining::traits. + \ref make_traits metafunction can be used to create type traits + + The kernel object should be a member of a container class. The container cooperates with flat combining + kernel object. There are two ways to interact with the kernel: + - One-by-one processing the active records of the publication list. 
This mode provides by \p combine() function: + the container acquires its publication record by \p acquire_record(), fills its fields and calls + \p combine() function of its kernel object. If the current thread becomes a combiner, the kernel + calls \p fc_apply() function of the container for each active non-empty record. Then, the container + should release its publication record by \p release_record(). Only one pass through the publication + list is possible. + - Batch processing - \p batch_combine() function. It this mode the container obtains access + to entire publication list. This mode allows the container to perform an elimination, for example, + the stack can collide \p push() and \p pop() requests. The sequence of invocations is the following: + the container acquires its publication record by \p acquire_record(), fills its field and call + \p batch_combine() function of its kernel object. If the current thread becomes a combiner, + the kernel calls \p fc_process() function of the container passing two iterators pointing to + the begin and the end of publication list (see \ref iterator class). The iterators allow + multiple pass through active records of publication list. For each processed record the container + should call \p operation_done() function. On the end, the container should release + its record by \p release_record(). 
+ */ + template < + typename PublicationRecord + ,typename Traits = traits + > + class kernel + { + public: + typedef Traits traits; ///< Type traits + typedef typename traits::lock_type global_lock_type; ///< Global lock type + typedef typename traits::wait_strategy wait_strategy; ///< Wait strategy type + typedef typename traits::allocator allocator; ///< Allocator type (used for allocating publication_record_type data) + typedef typename traits::stat stat; ///< Internal statistics + typedef typename traits::memory_model memory_model; ///< C++ memory model + + typedef typename wait_strategy::template make_publication_record::type publication_record_type; ///< Publication record type + + protected: + //@cond + typedef cds::details::Allocator< publication_record_type, allocator > cxx11_allocator; ///< internal helper cds::details::Allocator + typedef std::lock_guard lock_guard; + //@endcond + + protected: + atomics::atomic m_nCount; ///< Total count of combining passes. Used as an age. + publication_record_type* m_pHead; ///< Head of active publication list + publication_record_type* m_pAllocatedHead; ///< Head of allocated publication list + boost::thread_specific_ptr< publication_record_type > m_pThreadRec; ///< Thread-local publication record + mutable global_lock_type m_Mutex; ///< Global mutex + mutable stat m_Stat; ///< Internal statistics + unsigned int const m_nCompactFactor; ///< Publication list compacting factor (the list will be compacted through \p %m_nCompactFactor combining passes) + unsigned int const m_nCombinePassCount; ///< Number of combining passes + wait_strategy m_waitStrategy; ///< Wait strategy + + public: + /// Initializes the object + /** + Compact factor = 1024 + + Combiner pass count = 8 + */ + kernel() + : kernel( 1024, 8 ) + {} + + /// Initializes the object + kernel( + unsigned int nCompactFactor ///< Publication list compacting factor (the list will be compacted through \p nCompactFactor combining passes) + ,unsigned int 
nCombinePassCount ///< Number of combining passes for combiner thread + ) + : m_nCount(0) + , m_pHead( nullptr ) + , m_pAllocatedHead( nullptr ) + , m_pThreadRec( tls_cleanup ) + , m_nCompactFactor( static_cast( cds::beans::ceil2( static_cast( nCompactFactor )) - 1 )) // binary mask + , m_nCombinePassCount( nCombinePassCount ) + { + assert( m_pThreadRec.get() == nullptr ); + publication_record_type* pRec = cxx11_allocator().New(); + m_pAllocatedHead = + m_pHead = pRec; + m_pThreadRec.reset( pRec ); + m_Stat.onCreatePubRecord(); + } + + /// Destroys the object and all publication records + ~kernel() + { + m_pThreadRec.reset(); // calls tls_cleanup() + + // delete all publication records + for ( publication_record* p = m_pAllocatedHead; p; ) { + publication_record * pRec = p; + p = p->pNextAllocated.load( memory_model::memory_order_relaxed ); + free_publication_record( static_cast( pRec )); + } + } + + /// Gets publication list record for the current thread + /** + If there is no publication record for the current thread + the function allocates it. 
+ */ + publication_record_type * acquire_record() + { + publication_record_type * pRec = m_pThreadRec.get(); + if ( !pRec ) { + // Allocate new publication record + pRec = cxx11_allocator().New(); + m_pThreadRec.reset( pRec ); + m_Stat.onCreatePubRecord(); + + // Insert in allocated list + assert( m_pAllocatedHead != nullptr ); + publication_record* p = m_pAllocatedHead->pNextAllocated.load( memory_model::memory_order_relaxed ); + do { + pRec->pNextAllocated.store( p, memory_model::memory_order_release ); + } while ( !m_pAllocatedHead->pNextAllocated.compare_exchange_weak( p, pRec, memory_model::memory_order_release, atomics::memory_order_acquire )); + + publish( pRec ); + } + else if ( pRec->nState.load( memory_model::memory_order_acquire ) != active ) + publish( pRec ); + + assert( pRec->op() == req_EmptyRecord ); + + return pRec; + } + + /// Marks publication record for the current thread as empty + void release_record( publication_record_type * pRec ) + { + assert( pRec->is_done()); + pRec->nRequest.store( req_EmptyRecord, memory_model::memory_order_release ); + } + + /// Trying to execute operation \p nOpId + /** + \p pRec is the publication record acquiring by \ref acquire_record earlier. + \p owner is a container that is owner of flat combining kernel object. + As a result the current thread can become a combiner or can wait for + another combiner performs \p pRec operation. + + If the thread becomes a combiner, the kernel calls \p owner.fc_apply + for each active non-empty publication record. + */ + template + void combine( unsigned int nOpId, publication_record_type * pRec, Container& owner ) + { + assert( nOpId >= req_Operation ); + assert( pRec ); + + pRec->nRequest.store( nOpId, memory_model::memory_order_release ); + m_Stat.onOperation(); + + try_combining( owner, pRec ); + } + + /// Trying to execute operation \p nOpId in batch-combine mode + /** + \p pRec is the publication record acquiring by \p acquire_record() earlier. 
+ \p owner is a container that owns flat combining kernel object. + As a result the current thread can become a combiner or can wait for + another combiner performs \p pRec operation. + + If the thread becomes a combiner, the kernel calls \p owner.fc_process() + giving the container the full access over publication list. This function + is useful for an elimination technique if the container supports any kind of + that. The container can perform multiple pass through publication list. + + \p owner.fc_process() has two arguments - forward iterators on begin and end of + publication list, see \ref iterator class. For each processed record the container + should call \p operation_done() function to mark the record as processed. + + On the end of \p %batch_combine the \p combine() function is called + to process rest of publication records. + */ + template + void batch_combine( unsigned int nOpId, publication_record_type* pRec, Container& owner ) + { + assert( nOpId >= req_Operation ); + assert( pRec ); + + pRec->nRequest.store( nOpId, memory_model::memory_order_release ); + m_Stat.onOperation(); + + try_batch_combining( owner, pRec ); + } + + /// Invokes \p Func in exclusive mode + /** + Some operation in flat combining containers should be called in exclusive mode + i.e the current thread should become the combiner to process the operation. + The typical example is \p empty() function. + + \p %invoke_exclusive() allows do that: the current thread becomes the combiner, + invokes \p f exclusively but unlike a typical usage the thread does not process any pending request. + Instead, after end of \p f call the current thread wakes up a pending thread if any. + */ + template + void invoke_exclusive( Func f ) + { + { + lock_guard l( m_Mutex ); + f(); + } + m_waitStrategy.wakeup( *this ); + m_Stat.onInvokeExclusive(); + } + + /// Marks \p rec as executed + /** + This function should be called by container if \p batch_combine() mode is used. 
+ For usual combining (see \p combine()) this function is excess. + */ + void operation_done( publication_record& rec ) + { + rec.nRequest.store( req_Response, memory_model::memory_order_release ); + m_waitStrategy.notify( *this, static_cast( rec )); + } + + /// Internal statistics + stat const& statistics() const + { + return m_Stat; + } + + //@cond + // For container classes based on flat combining + stat& internal_statistics() const + { + return m_Stat; + } + //@endcond + + /// Returns the compact factor + unsigned int compact_factor() const + { + return m_nCompactFactor + 1; + } + + /// Returns number of combining passes for combiner thread + unsigned int combine_pass_count() const + { + return m_nCombinePassCount; + } + + public: + /// Publication list iterator + /** + Iterators are intended for batch processing by container's + \p fc_process function. + The iterator allows iterate through active publication list. + */ + class iterator + { + //@cond + friend class kernel; + publication_record_type * m_pRec; + //@endcond + + protected: + //@cond + iterator( publication_record_type * pRec ) + : m_pRec( pRec ) + { + skip_inactive(); + } + + void skip_inactive() + { + while ( m_pRec && (m_pRec->nState.load( memory_model::memory_order_acquire ) != active + || m_pRec->op( memory_model::memory_order_relaxed) < req_Operation )) + { + m_pRec = static_cast(m_pRec->pNext.load( memory_model::memory_order_acquire )); + } + } + //@endcond + + public: + /// Initializes an empty iterator object + iterator() + : m_pRec( nullptr ) + {} + + /// Copy ctor + iterator( iterator const& src ) + : m_pRec( src.m_pRec ) + {} + + /// Pre-increment + iterator& operator++() + { + assert( m_pRec ); + m_pRec = static_cast( m_pRec->pNext.load( memory_model::memory_order_acquire )); + skip_inactive(); + return *this; + } + + /// Post-increment + iterator operator++(int) + { + assert( m_pRec ); + iterator it(*this); + ++(*this); + return it; + } + + /// Dereference operator, can return \p 
nullptr + publication_record_type* operator ->() + { + return m_pRec; + } + + /// Dereference operator, the iterator should not be an end iterator + publication_record_type& operator*() + { + assert( m_pRec ); + return *m_pRec; + } + + /// Iterator equality + friend bool operator==( iterator it1, iterator it2 ) + { + return it1.m_pRec == it2.m_pRec; + } + + /// Iterator inequality + friend bool operator!=( iterator it1, iterator it2 ) + { + return !( it1 == it2 ); + } + }; + + /// Returns an iterator to the first active publication record + iterator begin() { return iterator(m_pHead); } + + /// Returns an iterator to the end of publication list. Should not be dereferenced. + iterator end() { return iterator(); } + + public: + /// Gets current value of \p rec.nRequest + /** + This function is intended for invoking from a wait strategy + */ + int get_operation( publication_record& rec ) + { + return rec.op( memory_model::memory_order_acquire ); + } + + /// Wakes up any waiting thread + /** + This function is intended for invoking from a wait strategy + */ + void wakeup_any() + { + publication_record* pRec = m_pHead; + while ( pRec ) { + if ( pRec->nState.load( memory_model::memory_order_acquire ) == active + && pRec->op( memory_model::memory_order_acquire ) >= req_Operation ) + { + m_waitStrategy.notify( *this, static_cast( *pRec )); + break; + } + pRec = pRec->pNext.load( memory_model::memory_order_acquire ); + } + } + + private: + //@cond + static void tls_cleanup( publication_record_type* pRec ) + { + // Thread done + // pRec that is TLS data should be excluded from publication list + pRec->nState.store( removed, memory_model::memory_order_release ); + } + + void free_publication_record( publication_record_type* pRec ) + { + cxx11_allocator().Delete( pRec ); + m_Stat.onDeletePubRecord(); + } + + void publish( publication_record_type* pRec ) + { + assert( pRec->nState.load( memory_model::memory_order_relaxed ) == inactive ); + + pRec->nAge.store( 
m_nCount.load(memory_model::memory_order_relaxed), memory_model::memory_order_relaxed ); + pRec->nState.store( active, memory_model::memory_order_relaxed ); + + // Insert record to publication list + if ( m_pHead != static_cast(pRec)) { + publication_record * p = m_pHead->pNext.load( memory_model::memory_order_relaxed ); + if ( p != static_cast( pRec )) { + do { + pRec->pNext.store( p, memory_model::memory_order_release ); + // Failed CAS changes p + } while ( !m_pHead->pNext.compare_exchange_weak( p, static_cast(pRec), + memory_model::memory_order_release, atomics::memory_order_acquire )); + m_Stat.onActivatePubRecord(); + } + } + } + + void republish( publication_record_type* pRec ) + { + if ( pRec->nState.load( memory_model::memory_order_relaxed ) != active ) { + // The record has been excluded from publication list. Reinsert it + publish( pRec ); + } + } + + template + void try_combining( Container& owner, publication_record_type* pRec ) + { + if ( m_Mutex.try_lock()) { + // The thread becomes a combiner + lock_guard l( m_Mutex, std::adopt_lock_t()); + + // The record pRec can be excluded from publication list. Re-publish it + republish( pRec ); + + combining( owner ); + assert( pRec->op( memory_model::memory_order_relaxed ) == req_Response ); + } + else { + // There is another combiner, wait while it executes our request + if ( !wait_for_combining( pRec )) { + // The thread becomes a combiner + lock_guard l( m_Mutex, std::adopt_lock_t()); + + // The record pRec can be excluded from publication list. Re-publish it + republish( pRec ); + + combining( owner ); + assert( pRec->op( memory_model::memory_order_relaxed ) == req_Response ); + } + } + } + + template + void try_batch_combining( Container& owner, publication_record_type * pRec ) + { + if ( m_Mutex.try_lock()) { + // The thread becomes a combiner + lock_guard l( m_Mutex, std::adopt_lock_t()); + + // The record pRec can be excluded from publication list. 
Re-publish it + republish( pRec ); + + batch_combining( owner ); + assert( pRec->op( memory_model::memory_order_relaxed ) == req_Response ); + } + else { + // There is another combiner, wait while it executes our request + if ( !wait_for_combining( pRec )) { + // The thread becomes a combiner + lock_guard l( m_Mutex, std::adopt_lock_t()); + + // The record pRec can be excluded from publication list. Re-publish it + republish( pRec ); + + batch_combining( owner ); + assert( pRec->op( memory_model::memory_order_relaxed ) == req_Response ); + } + } + } + + template + void combining( Container& owner ) + { + // The thread is a combiner + assert( !m_Mutex.try_lock()); + + unsigned int const nCurAge = m_nCount.fetch_add( 1, memory_model::memory_order_relaxed ) + 1; + + unsigned int nEmptyPassCount = 0; + unsigned int nUsefulPassCount = 0; + for ( unsigned int nPass = 0; nPass < m_nCombinePassCount; ++nPass ) { + if ( combining_pass( owner, nCurAge )) + ++nUsefulPassCount; + else if ( ++nEmptyPassCount > nUsefulPassCount ) + break; + } + + m_Stat.onCombining(); + if ( ( nCurAge & m_nCompactFactor ) == 0 ) + compact_list( nCurAge ); + } + + template + bool combining_pass( Container& owner, unsigned int nCurAge ) + { + publication_record* p = m_pHead; + bool bOpDone = false; + while ( p ) { + switch ( p->nState.load( memory_model::memory_order_acquire )) { + case active: + if ( p->op( memory_model::memory_order_acquire ) >= req_Operation ) { + p->nAge.store( nCurAge, memory_model::memory_order_relaxed ); + owner.fc_apply( static_cast( p )); + operation_done( *p ); + bOpDone = true; + } + break; + case inactive: + // Only m_pHead can be inactive in the publication list + assert( p == m_pHead ); + break; + case removed: + // Such record will be removed on compacting phase + break; + default: + /// ??? 
That is impossible + assert( false ); + } + p = p->pNext.load( memory_model::memory_order_acquire ); + } + return bOpDone; + } + + template + void batch_combining( Container& owner ) + { + // The thread is a combiner + assert( !m_Mutex.try_lock()); + + unsigned int const nCurAge = m_nCount.fetch_add( 1, memory_model::memory_order_relaxed ) + 1; + + for ( unsigned int nPass = 0; nPass < m_nCombinePassCount; ++nPass ) + owner.fc_process( begin(), end()); + + combining_pass( owner, nCurAge ); + m_Stat.onCombining(); + if ( ( nCurAge & m_nCompactFactor ) == 0 ) + compact_list( nCurAge ); + } + + bool wait_for_combining( publication_record_type* pRec ) + { + m_waitStrategy.prepare( *pRec ); + m_Stat.onPassiveWait(); + + while ( pRec->op( memory_model::memory_order_acquire ) != req_Response ) { + // The record can be excluded from publication list. Reinsert it + republish( pRec ); + + m_Stat.onPassiveWaitIteration(); + + // Wait while operation processing + if ( m_waitStrategy.wait( *this, *pRec )) + m_Stat.onWakeupByNotifying(); + + if ( m_Mutex.try_lock()) { + if ( pRec->op( memory_model::memory_order_acquire ) == req_Response ) { + // Operation is done + m_Mutex.unlock(); + + // Wake up a pending threads + m_waitStrategy.wakeup( *this ); + m_Stat.onPassiveWaitWakeup(); + + break; + } + // The thread becomes a combiner + m_Stat.onPassiveToCombiner(); + return false; + } + } + return true; + } + + void compact_list( unsigned int nCurAge ) + { + // Compacts publication list + // This function is called only by combiner thread + + try_again: + publication_record * pPrev = m_pHead; + for ( publication_record * p = pPrev->pNext.load( memory_model::memory_order_acquire ); p; ) { + switch ( p->nState.load( memory_model::memory_order_relaxed )) { + case active: + if ( p->nAge.load( memory_model::memory_order_relaxed ) + m_nCompactFactor < nCurAge ) + { + publication_record * pNext = p->pNext.load( memory_model::memory_order_relaxed ); + if ( 
pPrev->pNext.compare_exchange_strong( p, pNext, + memory_model::memory_order_acquire, atomics::memory_order_relaxed )) + { + p->nState.store( inactive, memory_model::memory_order_release ); + p = pNext; + m_Stat.onDeactivatePubRecord(); + continue; + } + } + break; + + case removed: + publication_record * pNext = p->pNext.load( memory_model::memory_order_acquire ); + if ( cds_likely( pPrev->pNext.compare_exchange_strong( p, pNext, memory_model::memory_order_acquire, atomics::memory_order_relaxed ))) { + p = pNext; + continue; + } + else { + // CAS can be failed only in beginning of list + assert( pPrev == m_pHead ); + goto try_again; + } + } + pPrev = p; + p = p->pNext.load( memory_model::memory_order_acquire ); + } + + // Iterate over allocated list to find removed records + pPrev = m_pAllocatedHead; + for ( publication_record * p = pPrev->pNextAllocated.load( memory_model::memory_order_acquire ); p; ) { + if ( p->nState.load( memory_model::memory_order_relaxed ) == removed ) { + publication_record * pNext = p->pNextAllocated.load( memory_model::memory_order_relaxed ); + if ( pPrev->pNextAllocated.compare_exchange_strong( p, pNext, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { + free_publication_record( static_cast( p )); + p = pNext; + continue; + } + } + + pPrev = p; + p = p->pNextAllocated.load( memory_model::memory_order_relaxed ); + } + + m_Stat.onCompactPublicationList(); + } + //@endcond + }; + + //@cond + class container + { + public: + template + void fc_apply( PubRecord * ) + { + assert( false ); + } + + template + void fc_process( Iterator, Iterator ) + { + assert( false ); + } + }; + //@endcond + + } // namespace flat_combining +}} // namespace cds::algo + +/* + CppMem model (http://svr-pes20-cppmem.cl.cam.ac.uk/cppmem/) + + // Combiner thread - slave (waiting) thread +int main() { + atomic_int y = 0; // pRec->op + int x = 0; // pRec->data + {{{ + { // slave thread (not combiner) + // Op data + x = 1; + // Annotate request (op) 
+ y.store(1, release); + // Wait while request done + y.load(acquire).readsvalue(2); + // Read result + r2=x; + } + ||| + { // Combiner thread + // Read request (op) + r1=y.load(acquire).readsvalue(1); + // Execute request - change request data + x = 2; + // store "request processed" flag (pRec->op := req_Response) + y.store(2, release); + } + }}}; + return 0; +} + +*/ + +#endif // #ifndef CDSLIB_ALGO_FLAT_COMBINING_KERNEL_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/flat_combining/wait_strategy.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/flat_combining/wait_strategy.h new file mode 100644 index 0000000..680a215 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/flat_combining/wait_strategy.h @@ -0,0 +1,442 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_ALGO_FLAT_COMBINING_WAIT_STRATEGY_H +#define CDSLIB_ALGO_FLAT_COMBINING_WAIT_STRATEGY_H + +#include +#include +#include +#include +#include // thread_specific_ptr + + +namespace cds { namespace opt { + + /// Wait strategy option for \p flat_combining::kernel + template + struct wait_strategy { + //@cond + template struct pack: public Base + { + typedef Strategy wait_strategy; + }; + //@endcond + }; + +}} // namespace cds::opt + +namespace cds { namespace algo { namespace flat_combining { + + /// Wait strategies for \p flat_combining technique + /** + Wait strategy specifies how a thread waits until its request is performed by the combiner. + See \p wait_strategy::empty wait strategy to explain the interface. + */ + namespace wait_strategy { + + /// Empty wait strategy + /** + Empty wait strategy is just spinning on request field. + All functions are empty. + */ + struct empty + { + /// Metafunction for defining a publication record for flat combining technique + /** + Any wait strategy may expand the publication record for storing + its own private data. + \p PublicationRecord is the type specified by \p flat_combining::kernel. + - If the strategy has no thread-private data, it should typedef \p PublicationRecord + as a return \p type of metafunction. 
+ - Otherwise, if the strategy wants to store anything in thread-local data, + it should expand \p PublicationRecord, for example: + \code + template + struct make_publication_record { + struct type: public PublicationRecord + { + int strategy_data; + }; + }; + \endcode + */ + template + struct make_publication_record { + typedef PublicationRecord type; ///< Metafunction result + }; + + /// Prepares the strategy + /** + This function is called before enter to waiting cycle. + Some strategies need to prepare its thread-local data in \p rec. + + \p PublicationRecord is thread's publication record of type \p make_publication_record::type + */ + template + void prepare( PublicationRecord& rec ) + { + CDS_UNUSED( rec ); + } + + /// Waits for the combiner + /** + The thread calls this function to wait for the combiner process + the request. + The function returns \p true if the thread was waked up by the combiner, + otherwise it should return \p false. + + \p FCKernel is a \p flat_combining::kernel object, + \p PublicationRecord is thread's publication record of type \p make_publication_record::type + */ + template + bool wait( FCKernel& fc, PublicationRecord& rec ) + { + CDS_UNUSED( fc ); + CDS_UNUSED( rec ); + return false; + } + + /// Wakes up the thread + /** + The combiner calls \p %notify() when it has been processed the request. + + \p FCKernel is a \p flat_combining::kernel object, + \p PublicationRecord is thread's publication record of type \p make_publication_record::type + */ + template + void notify( FCKernel& fc, PublicationRecord& rec ) + { + CDS_UNUSED( fc ); + CDS_UNUSED( rec ); + } + + /// Moves control to other thread + /** + This function is called when the thread becomes the combiner + but the request of the thread is already processed. + The strategy may call \p fc.wakeup_any() instructs the kernel + to wake up any pending thread. 
+ + \p FCKernel is a \p flat_combining::kernel object, + */ + template + void wakeup( FCKernel& fc ) + { + CDS_UNUSED( fc ); + } + }; + + /// Back-off wait strategy + /** + Template argument \p Backoff specifies back-off strategy, default is cds::backoff::delay_of<2> + */ + template > + struct backoff + { + typedef BackOff back_off; ///< Back-off strategy + + /// Incorporates back-off strategy into publication record + template + struct make_publication_record + { + //@cond + struct type: public PublicationRecord + { + back_off bkoff; + }; + //@endcond + }; + + /// Resets back-off strategy in \p rec + template + void prepare( PublicationRecord& rec ) + { + rec.bkoff.reset(); + } + + /// Calls back-off strategy + template + bool wait( FCKernel& /*fc*/, PublicationRecord& rec ) + { + rec.bkoff(); + return false; + } + + /// Does nothing + template + void notify( FCKernel& /*fc*/, PublicationRecord& /*rec*/ ) + {} + + /// Does nothing + template + void wakeup( FCKernel& ) + {} + }; + + /// Wait strategy based on the single mutex and the condition variable + /** + The strategy shares the mutex and conditional variable for all thread. + + Template parameter \p Milliseconds specifies waiting duration; + the minimal value is 1. + */ + template + class single_mutex_single_condvar + { + //@cond + std::mutex m_mutex; + std::condition_variable m_condvar; + bool m_wakeup; + + typedef std::unique_lock< std::mutex > unique_lock; + //@endcond + + public: + enum { + c_nWaitMilliseconds = Milliseconds < 1 ? 
1 : Milliseconds ///< Waiting duration + }; + + /// Empty metafunction + template + struct make_publication_record { + typedef PublicationRecord type; ///< publication record type + }; + + /// Default ctor + single_mutex_single_condvar() + : m_wakeup( false ) + {} + + /// Does nothing + template + void prepare( PublicationRecord& /*rec*/ ) + {} + + /// Sleeps on condition variable waiting for notification from combiner + template + bool wait( FCKernel& fc, PublicationRecord& rec ) + { + if ( fc.get_operation( rec ) >= req_Operation ) { + unique_lock lock( m_mutex ); + if ( fc.get_operation( rec ) >= req_Operation ) { + if ( m_wakeup ) { + m_wakeup = false; + return true; + } + + bool ret = m_condvar.wait_for( lock, std::chrono::milliseconds( c_nWaitMilliseconds )) == std::cv_status::no_timeout; + m_wakeup = false; + return ret; + } + } + return false; + } + + /// Calls condition variable function \p notify_all() + template + void notify( FCKernel& fc, PublicationRecord& /*rec*/ ) + { + wakeup( fc ); + } + + /// Calls condition variable function \p notify_all() + template + void wakeup( FCKernel& /*fc*/ ) + { + unique_lock lock( m_mutex ); + m_wakeup = true; + m_condvar.notify_all(); + } + }; + + /// Wait strategy based on the single mutex and thread-local condition variables + /** + The strategy shares the mutex, but each thread has its own conditional variable + + Template parameter \p Milliseconds specifies waiting duration; + the minimal value is 1. + */ + template + class single_mutex_multi_condvar + { + //@cond + std::mutex m_mutex; + bool m_wakeup; + + typedef std::unique_lock< std::mutex > unique_lock; + //@endcond + + public: + enum { + c_nWaitMilliseconds = Milliseconds < 1 ? 
1 : Milliseconds ///< Waiting duration + }; + + /// Incorporates a condition variable into \p PublicationRecord + template + struct make_publication_record { + /// Metafunction result + struct type: public PublicationRecord + { + //@cond + std::condition_variable m_condvar; + //@endcond + }; + }; + + /// Default ctor + single_mutex_multi_condvar() + : m_wakeup( false ) + {} + + /// Does nothing + template + void prepare( PublicationRecord& /*rec*/ ) + {} + + /// Sleeps on condition variable waiting for notification from combiner + template + bool wait( FCKernel& fc, PublicationRecord& rec ) + { + if ( fc.get_operation( rec ) >= req_Operation ) { + unique_lock lock( m_mutex ); + + if ( fc.get_operation( rec ) >= req_Operation ) { + if ( m_wakeup ) { + m_wakeup = false; + return true; + } + + bool ret = rec.m_condvar.wait_for( lock, std::chrono::milliseconds( c_nWaitMilliseconds )) == std::cv_status::no_timeout; + m_wakeup = false; + return ret; + } + } + return false; + } + + /// Calls condition variable function \p notify_one() + template + void notify( FCKernel& /*fc*/, PublicationRecord& rec ) + { + unique_lock lock( m_mutex ); + m_wakeup = true; + rec.m_condvar.notify_one(); + } + + /// Calls \p fc.wakeup_any() to wake up any pending thread + template + void wakeup( FCKernel& fc ) + { + fc.wakeup_any(); + } + }; + + /// Wait strategy where each thread has a mutex and a condition variable + /** + Template parameter \p Milliseconds specifies waiting duration; + the minimal value is 1. + */ + template + class multi_mutex_multi_condvar + { + //@cond + typedef std::unique_lock< std::mutex > unique_lock; + //@endcond + public: + enum { + c_nWaitMilliseconds = Milliseconds < 1 ? 
1 : Milliseconds ///< Waiting duration + }; + + /// Incorporates a condition variable and a mutex into \p PublicationRecord + template + struct make_publication_record { + /// Metafunction result + struct type: public PublicationRecord + { + //@cond + std::mutex m_mutex; + std::condition_variable m_condvar; + bool m_wakeup; + + type() + : m_wakeup( false ) + {} + //@endcond + }; + }; + + /// Does nothing + template + void prepare( PublicationRecord& /*rec*/ ) + {} + + /// Sleeps on condition variable waiting for notification from combiner + template + bool wait( FCKernel& fc, PublicationRecord& rec ) + { + if ( fc.get_operation( rec ) >= req_Operation ) { + unique_lock lock( rec.m_mutex ); + + if ( fc.get_operation( rec ) >= req_Operation ) { + if ( rec.m_wakeup ) { + rec.m_wakeup = false; + return true; + } + + bool ret = rec.m_condvar.wait_for( lock, std::chrono::milliseconds( c_nWaitMilliseconds )) == std::cv_status::no_timeout; + rec.m_wakeup = false; + return ret; + } + } + return false; + } + + /// Calls condition variable function \p notify_one() + template + void notify( FCKernel& /*fc*/, PublicationRecord& rec ) + { + unique_lock lock( rec.m_mutex ); + rec.m_wakeup = true; + rec.m_condvar.notify_one(); + } + + /// Calls \p fc.wakeup_any() to wake up any pending thread + template + void wakeup( FCKernel& fc ) + { + fc.wakeup_any(); + } + }; + + } // namespace wait_strategy +}}} // namespace cds::algo::flat_combining + +#endif //CDSLIB_ALGO_FLAT_COMBINING_WAIT_STRATEGY_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/int_algo.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/int_algo.h new file mode 100644 index 0000000..c8dcddf --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/int_algo.h @@ -0,0 +1,172 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: 
http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INT_ALGO_H +#define CDSLIB_INT_ALGO_H + +#include + +namespace cds { namespace beans { + /// Returns largest previous integer for log2( n ) + static inline size_t log2floor( size_t n ) + { + return n ? cds::bitop::MSBnz( n ) : 0; + } + + /// Returns smallest following integer for log2( n ) + static inline size_t log2ceil( size_t n ) + { + size_t i = log2floor( n ); + return ( size_t( 1 ) << i ) < n ? i + 1 : i; + } + + /// Returns largest previous power of 2 for \p n + /** + Examples: + \code + floor2(0) == 1 // !!! 
+ floor2(1) == 1 + floor2(2) == 2 + floor2(3) == 2 + floor2(4) == 4 + floor2(15) == 8 + floor2(16) == 16 + floor2(17) == 16 + \endcode + */ + static inline size_t floor2( size_t n ) + { + return size_t(1) << log2floor( n ); + } + + /// Returns smallest following power of 2 for \p n + /** + Examples: + \code + ceil2(0) == 1 // !!! + ceil2(1) == 1 + ceil2(2) == 2 + ceil2(3) == 4 + ceil2(4) == 4 + ceil2(15) == 16 + ceil2(16) == 16 + ceil2(17) == 32 + \endcode + */ + static inline size_t ceil2( size_t n ) + { + return size_t(1) << log2ceil( n ); + } + + /// Checks if \p n is power of 2 + constexpr static inline bool is_power2( size_t n ) noexcept + { + return (n & (n - 1)) == 0 && n; + } + + /// Returns binary logarithm of \p n if \p n is power of two, otherwise returns 0 + static inline size_t log2( size_t n ) + { + return is_power2(n) ? log2floor(n) : 0; + } + +#if CDS_BUILD_BITS == 32 + //@cond + // 64bit specializations + +/// Returns largest previous integer for log2( n ) + static inline uint64_t log2floor( uint64_t n ) + { + return n ? cds::bitop::MSBnz( n ) : 0; + } + +/// Returns smallest following integer for log2( n ) + static inline uint64_t log2ceil( uint64_t n ) + { + uint64_t i = log2floor( n ); + return (uint64_t( 1 ) << i) < n ? i + 1 : i; + } + +/// Returns largest previous power of 2 for \p n + /** + Examples: + \code + floor2(0) == 1 // !!! + floor2(1) == 1 + floor2(2) == 2 + floor2(3) == 2 + floor2(4) == 4 + floor2(15) == 8 + floor2(16) == 16 + floor2(17) == 16 + \endcode + */ + static inline uint64_t floor2( uint64_t n ) + { + return uint64_t( 1 ) << log2floor( n ); + } + +/// Returns smallest following power of 2 for \p n + /** + Examples: + \code + ceil2(0) == 1 // !!! 
+ ceil2(1) == 1 + ceil2(2) == 2 + ceil2(3) == 4 + ceil2(4) == 4 + ceil2(15) == 16 + ceil2(16) == 16 + ceil2(17) == 32 + \endcode + */ + static inline uint64_t ceil2( uint64_t n ) + { + return uint64_t( 1 ) << log2ceil( n ); + } + +/// Checks if \p n is power of 2 + constexpr static inline bool is_power2( uint64_t n ) noexcept + { + return (n & (n - 1)) == 0 && n; + } + +/// Returns binary logarithm of \p n if \p n is power of two, otherwise returns 0 + static inline uint64_t log2( uint64_t n ) + { + return is_power2( n ) ? log2floor( n ) : 0; + } + + //@endcond +#endif //#if CDS_BUILD_BITS == 32 + +}} // namespace cds::beans + +#endif // #ifndef CDSLIB_INT_ALGO_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/split_bitstring.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/split_bitstring.h new file mode 100644 index 0000000..18dfdc2 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/algo/split_bitstring.h @@ -0,0 +1,470 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_ALGO_SPLIT_BITSTRING_H +#define CDSLIB_ALGO_SPLIT_BITSTRING_H + +#include + +namespace cds { namespace algo { + + /// Cuts a bit sequence from fixed-size bit-string + /** + The splitter can be used as an iterator over bit-string. + Each call of \p cut() or \p safe_cut() cuts the bit count specified + and keeps the position inside bit-string for the next call. + + The splitter stores a const reference to bit-string, not a copy. + The maximum count of bits that can be cut in a single call is sizeof(UInt) * 8 + + The splitter keeps byte order. + + Template parameters: + - \p BitString - a fixed-sized type that interprets as bit string + - \p BitStringSize - the size of \p BitString in bytes, default is sizeof( BitString ). + You can specify 0 for default. + - \p UInt - an unsigned integer, return type for \p cut(), default is \p unsigned + + There are specialized splitters: + - a simplified \p byte_splitter algorithm that is suitable when count is multiple of 8. 
+ - \p number_splitter algorithm is suitable for a number + */ + template + class split_bitstring + { + public: + typedef BitString bitstring; ///< Bit-string type + typedef UInt uint_type; ///< Result type of \p cut() function + static constexpr size_t const c_bitstring_size = BitStringSize ? BitStringSize : sizeof( BitString ); ///< size of \p BitString in bytes + + //@cond + static constexpr unsigned const c_nBitPerByte = 8; + //@endcond + + public: + /// Initializises the splitter with reference to \p h and zero start bit offset + explicit split_bitstring( bitstring const& h ) + : cur_( reinterpret_cast( &h )) + , offset_( 0 ) + , first_( cur_ ) + , last_( cur_ + c_bitstring_size ) + {} + + /// Initializises the splitter with reference to \p h and start bit offset \p nBitOffset + split_bitstring( bitstring const& h, size_t nBitOffset ) + : cur_( reinterpret_cast( &h ) + nBitOffset / c_nBitPerByte ) + , offset_( nBitOffset % c_nBitPerByte ) + , first_( reinterpret_cast( &h )) + , last_( first_ + c_bitstring_size ) + {} + + /// Returns \p true if end-of-string is not reached yet + explicit operator bool() const + { + return !eos(); + } + + /// Returns \p true if end-of-stream encountered + bool eos() const + { + return cur_ >= last_; + } + + /// Cuts next \p count bits from bit-string + /** + For performance reason, the function does not manage out-of-bound condition. + To control that use \p safe_cut(). 
+ */ + uint_type cut( unsigned count ) + { + assert( !eos()); + + uint_type result = 0; +# if defined( CDS_ARCH_LITTLE_ENDIAN ) + for ( unsigned done = 0; done < count; ) { + assert( cur_ < last_ ); + unsigned bits = count - done; + if ( bits > c_nBitPerByte - offset_ ) + bits = c_nBitPerByte - offset_; + + result |= static_cast(( *cur_ >> offset_ ) & (( 1 << bits ) - 1 )) << done; + + offset_ += bits; + assert( offset_ <= c_nBitPerByte ); + if ( offset_ == c_nBitPerByte ) { + offset_ = 0; + ++cur_; + } + done += bits; + } +# else + while ( count ) { + assert( cur_ < last_ ); + + unsigned bits = count <= ( c_nBitPerByte - offset_ ) ? count : c_nBitPerByte - offset_; + + result = ( result << bits ) | (( *cur_ >> offset_ ) & ( ( 1 << bits ) - 1 )); + + offset_ += bits; + assert( offset_ <= c_nBitPerByte ); + if ( offset_ == c_nBitPerByte ) { + offset_ = 0; + ++cur_; + } + count -= bits; + } +# endif + + return result; + } + + /// Cuts up to \p count from the bit-string + /** + Safe analog of \p cut() but if \p count is more than the rest of bit-string, + only the rest is returned. + When \p eos() condition is met the function returns 0. + */ + uint_type safe_cut( unsigned count ) + { + if ( eos()) + return 0; + + unsigned const rest = static_cast( last_ - cur_ - 1 ) * c_nBitPerByte + ( c_nBitPerByte - offset_ ); + if ( rest < count ) + count = rest; + return count ? 
cut( count ) : 0; + } + + /// Resets the splitter + void reset() noexcept + { + cur_ = first_; + offset_ = 0; + } + + /// Returns pointer to source bitstring + bitstring const * source() const + { + return reinterpret_cast( first_ ); + } + + /// Returns current bit offset from beginning of bit-string + size_t bit_offset() const + { + return offset_ + (cur_ - first_) * c_nBitPerByte; + } + + /// Returns how many bits remain + size_t rest_count() const + { + return c_bitstring_size * c_nBitPerByte - bit_offset(); + } + + /// Returns \p true for any argument + static constexpr bool is_correct( unsigned /*count*/ ) + { + return true; + } + + private: + //@cond + uint8_t const* cur_; + unsigned offset_; + uint8_t const* const first_; + uint8_t const* const last_; + //@endcond + }; + + /// Simplified \p split_bitstring algorithm when \p count is multiple of 8 + template + class byte_splitter + { + public: + typedef BitString bitstring; ///< Bit-string type + typedef UInt uint_type; ///< Result type of \p cut() function + static constexpr size_t const c_bitstring_size = BitStringSize ? 
BitStringSize : sizeof( BitString ); ///< size of \p BitString in bytes + + //@cond + static constexpr unsigned const c_nBitPerByte = 8; + //@endcond + + public: + /// Initializises the splitter with reference to \p h and zero start bit offset + explicit byte_splitter( bitstring const& h ) + : cur_( reinterpret_cast( &h )) + , first_( cur_ ) + , last_( cur_ + c_bitstring_size ) + {} + + /// Initializises the splitter with reference to \p h and start bit offset \p nBitOffset + byte_splitter( bitstring const& h, size_t nBitOffset ) + : cur_( reinterpret_cast( &h ) + nBitOffset / c_nBitPerByte ) + , first_( reinterpret_cast( &h )) + , last_( first_ + c_bitstring_size ) + { + assert( is_correct( static_cast( nBitOffset ))); + assert( !eos()); + } + + /// Returns \p true if end-of-string is not reached yet + explicit operator bool() const + { + return !eos(); + } + + /// Returns \p true if end-of-stream encountered + bool eos() const + { + return cur_ >= last_; + } + + /// Cuts next \p count bits (must be multiplier of 8) from bit-string + /** + For performance reason, the function does not manage out-of-bound condition. + To control that use \p safe_cut(). + */ + uint_type cut( unsigned count ) + { + assert( !eos()); + assert( is_correct( count )); + + uint_type result = 0; + +# if defined( CDS_ARCH_LITTLE_ENDIAN ) + for ( unsigned i = 0; i < count; i += c_nBitPerByte ) { + result |= static_cast( *cur_ ) << i; + ++cur_; + } +# else + for ( ; count; count -= c_nBitPerByte ) { + result = ( result << c_nBitPerByte ) | *cur_; + ++cur_; + } +# endif + + return result; + } + + /// Cuts up to \p count from the bit-string + /** + Safe analog of \p cut(): if \p count is more than the rest of bit-string, + only the rest is returned. + When \p eos() condition is met the function returns 0. 
+ */ + uint_type safe_cut( unsigned count ) + { + if ( eos()) + return 0; + + unsigned const rest = static_cast( last_ - cur_ - 1 ) * c_nBitPerByte; + if ( rest < count ) + count = rest; + return count ? cut( count ) : 0; + } + + /// Resets the splitter + void reset() noexcept + { + cur_ = first_; + } + + /// Returns pointer to source bitstring + bitstring const* source() const + { + return reinterpret_cast( first_ ); + } + + /// Returns current bit offset from beginning of bit-string + size_t bit_offset() const + { + return (cur_ - first_) * c_nBitPerByte; + } + + /// Returns how many bits remain + size_t rest_count() const + { + return c_bitstring_size * c_nBitPerByte - bit_offset(); + } + + /// Checks if \p count is multiple of 8 + static constexpr bool is_correct( unsigned count ) + { + return count % 8 == 0; + } + + private: + //@cond + uint8_t const* cur_; + uint8_t const* const first_; + uint8_t const* const last_; + //@endcond + }; + + + /// Cuts a bit sequence from a number + /** + The splitter can be used as an iterator over bit representation of the number of type \p Int. + Each call of \p cut() or \p safe_cut() cuts the bit count specified + and keeps the position inside the number for the next call. 
+ */ + template + class number_splitter + { + public: + typedef Int int_type; ///< Number type + typedef Int uint_type; ///< Result type of \p cut() function + + //@cond + static constexpr unsigned const c_nBitPerByte = 8; + //@endcond + + public: + /// Initalizes the splitter with nymber \p n and initial bit offset 0 + explicit number_splitter( int_type n ) + : number_( n ) + , shift_( 0 ) + {} + + /// Initalizes the splitter with nymber \p n and initial bit offset \p initial_offset + number_splitter( int_type n, size_t initial_offset ) + : number_( n ) + , shift_( static_cast( initial_offset )) + { + assert( initial_offset < sizeof( int_type ) * c_nBitPerByte ); + } + + /// Returns \p true if end-of-string is not reached yet + explicit operator bool() const + { + return !eos(); + } + + /// Returns \p true if end-of-stream encountered + bool eos() const + { + return shift_ >= sizeof( int_type ) * c_nBitPerByte; + } + + /// Cuts next \p count bits (must be multiplier of 8) from the number + /** + For performance reason, the function does not manage out-of-bound condition. + To control that use \p safe_cut(). + */ + int_type cut( unsigned count ) + { + assert( !eos()); + assert( is_correct( count )); + + int_type result = ( number_ >> shift_ ) & (( 1 << count ) - 1 ); + shift_ += count; + + return result; + } + + /// Cuts up to \p count from the bit-string + /** + Safe analog of \p cut(): if \p count is more than the rest of \p int_type, + only the rest is returned. + When \p eos() condition is met the function returns 0. + */ + int_type safe_cut( unsigned count ) + { + if ( eos()) + return 0; + + unsigned rest = static_cast( rest_count()); + if ( rest < count ) + count = rest; + return count ? 
cut( count ) : 0; + } + + /// Resets the splitter + void reset() noexcept + { + shift_ = 0; + } + + /// Returns initial number + int_type source() const + { + return number_; + } + + /// Returns current bit offset from beginning of the number + size_t bit_offset() const + { + return shift_; + } + + /// Returns how many bits remain + size_t rest_count() const + { + return sizeof( int_type ) * c_nBitPerByte - shift_; + } + + /// Checks if \p count is multiple of 8 + static constexpr bool is_correct( unsigned count ) + { + return count < sizeof( int_type ) * c_nBitPerByte; + } + + private: + //@cond + int_type const number_; + unsigned shift_; + //@endcond + }; + + /// Metafunctin to select a most suitable splitter for type \p BitString of size \p BitStringSize + template + struct select_splitter + { + typedef split_bitstring< BitString, BitStringSize > type; ///< metafunction result + }; + + //@cond +# define CDS_SELECT_NUMBER_SPLITTER( num_type ) \ + template <> struct select_splitter { typedef number_splitter type; } + + CDS_SELECT_NUMBER_SPLITTER( int ); + CDS_SELECT_NUMBER_SPLITTER( unsigned ); + CDS_SELECT_NUMBER_SPLITTER( short ); + CDS_SELECT_NUMBER_SPLITTER( unsigned short ); + CDS_SELECT_NUMBER_SPLITTER( long ); + CDS_SELECT_NUMBER_SPLITTER( unsigned long ); + CDS_SELECT_NUMBER_SPLITTER( long long ); + CDS_SELECT_NUMBER_SPLITTER( unsigned long long ); + +# undef CDS_SELECT_NUMBER_SPLITTER + //@endcond + +}} // namespace cds::algo + +#endif // #ifndef CDSLIB_ALGO_SPLIT_BITSTRING_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/backoff.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/backoff.h new file mode 100644 index 0000000..cdb51fd --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/backoff.h @@ -0,0 +1,64 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: 
http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_COMPILER_BACKOFF_IMPL_H +#define CDSLIB_COMPILER_BACKOFF_IMPL_H + +#include + +#if CDS_COMPILER == CDS_COMPILER_MSVC || (CDS_COMPILER == CDS_COMPILER_INTEL && CDS_OS_INTERFACE == CDS_OSI_WINDOWS) +# if CDS_PROCESSOR_ARCH == CDS_PROCESSOR_X86 +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_AMD64 +# include +# else +# error "MS VC++ compiler: unsupported processor architecture" +# endif +#elif CDS_COMPILER == CDS_COMPILER_GCC || CDS_COMPILER == CDS_COMPILER_CLANG || CDS_COMPILER == CDS_COMPILER_INTEL +# if CDS_PROCESSOR_ARCH == CDS_PROCESSOR_X86 +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_AMD64 +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_IA64 +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_SPARC +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_PPC64 +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_ARM7 +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_ARM8 +# include +# endif +#else +# error "Undefined compiler" +#endif + +#endif // #ifndef CDSLIB_COMPILER_BACKOFF_IMPL_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/bitop.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/bitop.h new file mode 100644 index 0000000..338cf09 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/bitop.h @@ -0,0 +1,68 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_COMPILER_BITOP_H +#define CDSLIB_COMPILER_BITOP_H + +// Choose appropriate header for current architecture and compiler + +#if CDS_COMPILER == CDS_COMPILER_MSVC || (CDS_COMPILER == CDS_COMPILER_INTEL && CDS_OS_INTERFACE == CDS_OSI_WINDOWS) +/************************************************************************/ +/* MS Visual C++ */ +/************************************************************************/ + +# if CDS_PROCESSOR_ARCH == CDS_PROCESSOR_X86 +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_AMD64 +# include +# endif + +#elif CDS_COMPILER == CDS_COMPILER_GCC || CDS_COMPILER == CDS_COMPILER_CLANG || CDS_COMPILER == CDS_COMPILER_INTEL +/************************************************************************/ +/* GCC */ +/************************************************************************/ + +# if CDS_PROCESSOR_ARCH == CDS_PROCESSOR_X86 +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_AMD64 +# include +# elif CDS_PROCESSOR_ARCH == 
CDS_PROCESSOR_SPARC +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_IA64 +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_PPC64 +# include +# endif +#endif // Compiler choice + +// Generic (C) implementation +#include + +#endif // #ifndef CDSLIB_COMPILER_BITOP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/clang/defs.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/clang/defs.h new file mode 100644 index 0000000..5496365 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/clang/defs.h @@ -0,0 +1,152 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_COMPILER_CLANG_DEFS_H +#define CDSLIB_COMPILER_CLANG_DEFS_H + +// Compiler version +#define CDS_COMPILER_VERSION (__clang_major__ * 10000 + __clang_minor__ * 100 + __clang_patchlevel__) + +// Compiler name +#define CDS_COMPILER__NAME ("clang " __clang_version__) +#define CDS_COMPILER__NICK "clang" + +#if CDS_COMPILER_VERSION < 30600 +# error "Compiler version error. Clang version 3.6.0 and above is supported" +#endif + +#if __cplusplus < CDS_CPLUSPLUS_11 +# error C++11 and above is required +#endif + +#if defined(_LIBCPP_VERSION) && !defined(CDS_USE_BOOST_ATOMIC) && CDS_COMPILER_VERSION < 30700 + // Note: Clang libc++ atomic leads to program crash. + // So, we use libcds atomic implementation +# define CDS_USE_LIBCDS_ATOMIC +#endif + +// clang for Windows +#if defined( _MSC_VER ) +# define CDS_OS_INTERFACE CDS_OSI_WINDOWS +# if defined(_WIN64) +# define CDS_OS_TYPE CDS_OS_WIN64 +# define CDS_OS__NAME "Win64" +# define CDS_OS__NICK "Win64" +# elif defined(_WIN32) +# define CDS_OS_TYPE CDS_OS_WIN32 +# define CDS_OS__NAME "Win32" +# define CDS_OS__NICK "Win32" +# endif +#endif + +#include + +#define alignof __alignof__ + +// C++11 thread_local keyword +#if !(CDS_OS_TYPE == CDS_OS_OSX && CDS_COMPILER_VERSION < 30600) + // OS X error? 
+ // See http://stackoverflow.com/questions/23791060/c-thread-local-storage-clang-503-0-40-mac-osx + // http://stackoverflow.com/questions/28094794/why-does-apple-clang-disallow-c11-thread-local-when-official-clang-supports + // clang 3.6 ok?.. +# define CDS_CXX11_THREAD_LOCAL_SUPPORT +#endif + +// Attributes +#if CDS_COMPILER_VERSION >= 30600 +# if __cplusplus == CDS_CPLUSPLUS_11 // C++11 +# define CDS_DEPRECATED( reason ) [[gnu::deprecated(reason)]] +# else // C++14 +# define CDS_DEPRECATED( reason ) [[deprecated(reason)]] +# endif +#endif + +#define CDS_NORETURN __attribute__((__noreturn__)) + +// ************************************************* +// Features +#if defined(__has_feature) && __has_feature(thread_sanitizer) +# ifndef CDS_THREAD_SANITIZER_ENABLED +# define CDS_THREAD_SANITIZER_ENABLED +# endif +#endif + +#if defined(__has_feature) && __has_feature(address_sanitizer) +# ifndef CDS_ADDRESS_SANITIZER_ENABLED +# define CDS_ADDRESS_SANITIZER_ENABLED +# endif +#endif + + +// ************************************************* +// Alignment macro + +#define CDS_TYPE_ALIGNMENT(n) __attribute__ ((aligned (n))) +#define CDS_CLASS_ALIGNMENT(n) __attribute__ ((aligned (n))) +#define CDS_DATA_ALIGNMENT(n) __attribute__ ((aligned (n))) + + +// likely/unlikely + +#define cds_likely( expr ) __builtin_expect( !!( expr ), 1 ) +#define cds_unlikely( expr ) __builtin_expect( !!( expr ), 0 ) + +// Exceptions +#if defined( __EXCEPTIONS ) && __EXCEPTIONS == 1 +# define CDS_EXCEPTION_ENABLED +#endif + + +// double-width CAS support - only for libc++ +// You can manually suppress wide-atomic support by defining in compiler command line: +// for 64bit platform: -DCDS_DISABLE_128BIT_ATOMIC +// for 32bit platform: -DCDS_DISABLE_64BIT_ATOMIC +#ifdef _LIBCPP_VERSION +# if CDS_BUILD_BITS == 64 +# if !defined( CDS_DISABLE_128BIT_ATOMIC ) && defined( __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16 ) +# define CDS_DCAS_SUPPORT +# endif +# else +# if !defined( CDS_DISABLE_64BIT_ATOMIC ) && 
defined( __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 ) +# define CDS_DCAS_SUPPORT +# endif +# endif +#endif + +//if constexpr support (C++17) +#ifndef constexpr_if +# if defined( __cpp_if_constexpr ) && __cpp_if_constexpr >= 201606 +# define constexpr_if if constexpr +# endif +#endif + +#include + +#endif // #ifndef CDSLIB_COMPILER_GCC_DEFS_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/cxx11_atomic.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/cxx11_atomic.h new file mode 100644 index 0000000..a46fa12 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/cxx11_atomic.h @@ -0,0 +1,2232 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_COMPILER_CXX11_ATOMIC_H +#define CDSLIB_COMPILER_CXX11_ATOMIC_H +//@cond + +#include // make_unsigned +#include +#include + +namespace cds { namespace cxx11_atomic { + typedef enum memory_order { + memory_order_relaxed, + memory_order_consume, + memory_order_acquire, + memory_order_release, + memory_order_acq_rel, + memory_order_seq_cst + } memory_order; + +}} // namespace cds::cxx11_atomic + + +#if CDS_COMPILER == CDS_COMPILER_MSVC || (CDS_COMPILER == CDS_COMPILER_INTEL && CDS_OS_INTERFACE == CDS_OSI_WINDOWS) +# if CDS_PROCESSOR_ARCH == CDS_PROCESSOR_X86 +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_AMD64 +# include +# else +# error "MS VC++ compiler: unsupported processor architecture" +# endif +#elif CDS_COMPILER == CDS_COMPILER_GCC || CDS_COMPILER == CDS_COMPILER_CLANG || CDS_COMPILER == CDS_COMPILER_INTEL +# if CDS_PROCESSOR_ARCH == CDS_PROCESSOR_X86 +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_AMD64 +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_IA64 +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_SPARC +# include +# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_PPC64 +# include +//# elif CDS_PROCESSOR_ARCH == CDS_PROCESSOR_ARM7 +//# include +# else +# error "GCC compiler: unsupported processor architecture. 
Try to use native C++11 atomic or boost.atomic" +# endif +#else +# error "Undefined compiler" +#endif + +namespace cds { namespace cxx11_atomic { + + // forward declarations + template + struct atomic; + + namespace details { + + template + struct atomic_generic_ops; + + template + struct atomic_integral_ops; + + template + struct primary_type; + + template <> + struct primary_type<1> + { + typedef std::uint8_t type; + }; + template <> + struct primary_type<2> + { + typedef std::uint16_t type; + }; + template <> + struct primary_type<4> + { + typedef std::uint32_t type; + }; + template <> + struct primary_type<8> + { + typedef std::uint64_t type; + }; +#if CDS_BUILD_BITS == 64 && CDS_DCAS_SUPPORT + template <> + struct primary_type<16> + { + typedef unsigned __int128_t type; + }; +#endif + + template + struct make_atomic_primary + { + typedef T source_type; + typedef Primary primary_type; + + static primary_type volatile * ptr( source_type volatile * p ) noexcept + { + return reinterpret_cast(p); + } + static primary_type const volatile * ptr( source_type const volatile * p ) noexcept + { + return reinterpret_cast(p); + } + + static primary_type val( source_type v ) noexcept + { + return *reinterpret_cast(&v); + } + + static primary_type& ref( source_type& v ) noexcept + { + return reinterpret_cast(v); + } + + static primary_type const& ref( source_type const& v ) noexcept + { + return reinterpret_cast(v); + } + + static source_type ret( primary_type r ) noexcept + { + return *reinterpret_cast(&r); + } + }; + + template + struct make_atomic_primary + { + typedef T source_type; + typedef T primary_type; + + static primary_type volatile * ptr( source_type volatile * p ) noexcept + { + return p; + } + static primary_type const volatile * ptr( source_type const volatile * p ) noexcept + { + return p; + } + + static primary_type val( source_type v ) noexcept + { + return v; + } + + static primary_type& ref( source_type& v ) noexcept + { + return v; + } + + static 
source_type ret( primary_type r ) noexcept + { + return r; + } + }; + + template + struct atomic_integral_bitwise_ops + { + public: + typedef typename std::make_unsigned::type unsigned_type; + typedef atomic_generic_ops atomic_ops; + + static T fetch_and(T volatile * pDest, T val, memory_order order) noexcept + { + unsigned_type cur = atomic_ops::atomic_load_explicit( reinterpret_cast(pDest), memory_order_relaxed ); + do {} while ( !atomic_ops::atomic_compare_exchange_weak_explicit( + reinterpret_cast(pDest), &cur, cur & unsigned_type(val), order, memory_order_relaxed )); + return T(cur); + } + + static T fetch_or(T volatile * pDest, T val, memory_order order) noexcept + { + unsigned_type cur = atomic_ops::atomic_load_explicit( reinterpret_cast(pDest), memory_order_relaxed ); + do {} while ( !atomic_ops::atomic_compare_exchange_weak_explicit( + reinterpret_cast(pDest), &cur, cur | unsigned_type(val), order, memory_order_relaxed )); + return T(cur); + } + + static T fetch_xor(T volatile * pDest, T val, memory_order order) noexcept + { + unsigned_type cur = atomic_ops::atomic_load_explicit( reinterpret_cast(pDest), memory_order_relaxed ); + do {} while ( !atomic_ops::atomic_compare_exchange_weak_explicit( + reinterpret_cast(pDest), &cur, cur ^ unsigned_type(val), order, memory_order_relaxed )); + return T(cur); + } + }; + + + // 8-bit atomic operations + + template + struct atomic_generic_ops< T, 1, Primary > + { + typedef make_atomic_primary primary; + + // store + static void atomic_store_explicit( T volatile * pDest, T v, memory_order order ) noexcept + { + platform::store8( primary::ptr(pDest), primary::val(v), order ); + } + static void atomic_store_explicit( T * pDest, T v, memory_order order ) noexcept + { + platform::store8( primary::ptr(pDest), primary::val(v), order ); + } + static void atomic_store( T volatile * pDest, T v ) noexcept + { + atomic_store_explicit( pDest, v, memory_order_seq_cst ); + } + static void atomic_store( T * pDest, T v ) noexcept + { 
+ atomic_store_explicit( pDest, v, memory_order_seq_cst ); + } + + // load + static T atomic_load_explicit( T volatile const * pSrc, memory_order order ) noexcept + { + return primary::ret( platform::load8( primary::ptr(pSrc), order )); + } + static T atomic_load_explicit( T const * pSrc, memory_order order ) noexcept + { + return primary::ret( platform::load8( primary::ptr(pSrc), order )); + } + static T atomic_load( T volatile const * pSrc ) noexcept + { + return atomic_load_explicit( pSrc, memory_order_seq_cst ); + } + static T atomic_load( T const * pSrc ) noexcept + { + return atomic_load_explicit( pSrc, memory_order_seq_cst ); + } + + // exchange + static T atomic_exchange_explicit( T volatile * pDest, T val, memory_order order ) noexcept + { + return primary::ret( platform::exchange8( primary::ptr(pDest), primary::val(val), order )); + } + static T atomic_exchange_explicit( T * pDest, T val, memory_order order ) noexcept + { + return primary::ret( platform::exchange8( primary::ptr(pDest), primary::val(val), order )); + } + static T atomic_exchange( T volatile * pDest, T val ) noexcept + { + return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_exchange( T * pDest, T val ) noexcept + { + return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); + } + + // cas + static bool atomic_compare_exchange_weak_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + assert( expected ); + return platform::cas8_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_weak_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + assert( expected ); + return platform::cas8_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_weak( T volatile * 
pDest, T * expected, T desired ) noexcept + { + return atomic_compare_exchange_weak_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_weak( T * pDest, T * expected, T desired ) noexcept + { + return atomic_compare_exchange_weak_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_strong_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + assert( expected ); + return platform::cas8_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_strong_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + assert( expected ); + return platform::cas8_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_strong( T volatile * pDest, T * expected, T desired ) noexcept + { + return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_strong( T * pDest, T * expected, T desired ) noexcept + { + return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + }; + + template + struct atomic_integral_ops< T, 1 > + : atomic_generic_ops + , atomic_integral_bitwise_ops + { + typedef atomic_integral_bitwise_ops bitwise_ops; + + // fetch_add + static T atomic_fetch_add_explicit(T volatile * pDest, T val, memory_order order) noexcept + { +# ifdef CDS_ATOMIC_fetch8_add_defined + return platform::fetch8_add( pDest, val, order ); +# else + T cur = atomic_load_explicit( pDest, memory_order_relaxed ); + do {} while ( !atomic_compare_exchange_weak_explicit( pDest, &cur, cur + val, order, 
memory_order_relaxed )); + return cur; +# endif + } + static T atomic_fetch_add_explicit(T * pDest, T val , memory_order order) noexcept + { + return atomic_fetch_add_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_add( T volatile * pDest, T val ) noexcept + { + return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_add( T * pDest, T val ) noexcept + { + return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_sub + static T atomic_fetch_sub_explicit(T volatile * pDest, T val, memory_order order) noexcept + { +# ifdef CDS_ATOMIC_fetch8_sub_defined + return platform::fetch8_sub( pDest, val, order ); +# else + T cur = atomic_load_explicit( pDest, memory_order_relaxed ); + do {} while ( !atomic_compare_exchange_weak_explicit( pDest, &cur, cur - val, order, memory_order_relaxed )); + return cur; +# endif + } + static T atomic_fetch_sub_explicit(T * pDest, T val , memory_order order) noexcept + { + return atomic_fetch_sub_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_sub( T volatile * pDest, T val ) noexcept + { + return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_sub( T * pDest, T val ) noexcept + { + return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_and + static T atomic_fetch_and_explicit(T volatile * pDest, T val, memory_order order) noexcept + { +# ifdef CDS_ATOMIC_fetch8_and_defined + return platform::fetch8_and( pDest, val, order ); +# else + return bitwise_ops::fetch_and( pDest, val, order ); +# endif + } + static T atomic_fetch_and_explicit(T * pDest, T val , memory_order order) noexcept + { + return atomic_fetch_and_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_and( T volatile * pDest, T val ) noexcept + { + return atomic_fetch_and_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_and( T 
* pDest, T val ) noexcept + { + return atomic_fetch_and_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_or + static T atomic_fetch_or_explicit(T volatile * pDest, T val, memory_order order) noexcept + { +# ifdef CDS_ATOMIC_fetch8_or_defined + return platform::fetch8_or( pDest, val, order ); +# else + return bitwise_ops::fetch_or( pDest, val, order ); +# endif + } + static T atomic_fetch_or_explicit(T * pDest, T val , memory_order order) noexcept + { + return atomic_fetch_or_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_or( T volatile * pDest, T val ) noexcept + { + return atomic_fetch_or_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_or( T * pDest, T val ) noexcept + { + return atomic_fetch_or_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_xor + static T atomic_fetch_xor_explicit(T volatile * pDest, T val, memory_order order) noexcept + { +# ifdef CDS_ATOMIC_fetch8_xor_defined + return platform::fetch8_xor( pDest, val, order ); +# else + return bitwise_ops::fetch_xor( pDest, val, order ); +# endif + } + static T atomic_fetch_xor_explicit(T * pDest, T val , memory_order order) noexcept + { + return atomic_fetch_xor_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_xor( T volatile * pDest, T val ) noexcept + { + return atomic_fetch_xor_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_xor( T * pDest, T val ) noexcept + { + return atomic_fetch_xor_explicit( pDest, val, memory_order_seq_cst ); + } + }; + + // 16-bit atomic operations + + template + struct atomic_generic_ops< T, 2, Primary > + { + typedef make_atomic_primary primary; + + // store + static void atomic_store_explicit( T volatile * pDest, T v, memory_order order ) noexcept + { + platform::store16( primary::ptr(pDest), primary::val(v), order ); + } + static void atomic_store_explicit( T * pDest, T v, memory_order order ) noexcept + { + platform::store16( 
primary::ptr(pDest), primary::val(v), order ); + } + static void atomic_store( T volatile * pDest, T v ) noexcept + { + atomic_store_explicit( pDest, v, memory_order_seq_cst ); + } + static void atomic_store( T * pDest, T v ) noexcept + { + atomic_store_explicit( pDest, v, memory_order_seq_cst ); + } + + // load + static T atomic_load_explicit( T volatile const * pSrc, memory_order order ) noexcept + { + return primary::ret( platform::load16( primary::ptr(pSrc), order )); + } + static T atomic_load_explicit( T const * pSrc, memory_order order ) noexcept + { + return primary::ret( platform::load16( primary::ptr(pSrc), order )); + } + static T atomic_load( T volatile const * pSrc ) noexcept + { + return atomic_load_explicit( pSrc, memory_order_seq_cst ); + } + static T atomic_load( T const * pSrc ) noexcept + { + return atomic_load_explicit( pSrc, memory_order_seq_cst ); + } + + // exchange + static T atomic_exchange_explicit( T volatile * pDest, T val, memory_order order ) noexcept + { + return primary::ret( platform::exchange16( primary::ptr(pDest), primary::val(val), order )); + } + static T atomic_exchange_explicit( T * pDest, T val, memory_order order ) noexcept + { + return primary::ret( platform::exchange16( primary::ptr(pDest), primary::val(val), order )); + } + static T atomic_exchange( T volatile * pDest, T val ) noexcept + { + return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_exchange( T * pDest, T val ) noexcept + { + return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); + } + + // cas + static bool atomic_compare_exchange_weak_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + assert( expected ); + return platform::cas16_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_weak_explicit( T * pDest, T * expected, T desired, memory_order mo_success, 
memory_order mo_fail ) noexcept + { + assert( expected ); + return platform::cas16_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_weak( T volatile * pDest, T * expected, T desired ) noexcept + { + return atomic_compare_exchange_weak_explicit( pDest, expected, primary::val(desired), memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_weak( T * pDest, T * expected, T desired ) noexcept + { + return atomic_compare_exchange_weak_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_strong_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + assert( expected ); + return platform::cas16_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_strong_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + assert( expected ); + return platform::cas16_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_strong( T volatile * pDest, T * expected, T desired ) noexcept + { + return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_strong( T * pDest, T * expected, T desired ) noexcept + { + return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + }; + + template + struct atomic_integral_ops< T, 2 > + : atomic_generic_ops< T, 2, T > + , atomic_integral_bitwise_ops + { + typedef atomic_integral_bitwise_ops bitwise_ops; + + // fetch_add + static T atomic_fetch_add_explicit(T volatile * pDest, T val, memory_order order) noexcept + 
{ +# ifdef CDS_ATOMIC_fetch16_add_defined + return platform::fetch16_add( pDest, val, order ); +# else + T cur = atomic_load_explicit( pDest, memory_order_relaxed ); + do {} while ( !atomic_compare_exchange_weak_explicit( pDest, &cur, cur + val, order, memory_order_relaxed )); + return cur; +# endif + } + static T atomic_fetch_add_explicit(T * pDest, T val , memory_order order) noexcept + { + return atomic_fetch_add_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_add( T volatile * pDest, T val ) noexcept + { + return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_add( T * pDest, T val ) noexcept + { + return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_sub + static T atomic_fetch_sub_explicit(T volatile * pDest, T val, memory_order order) noexcept + { +# ifdef CDS_ATOMIC_fetch16_sub_defined + return platform::fetch16_sub( pDest, val, order ); +# else + T cur = atomic_load_explicit( pDest, memory_order_relaxed ); + do {} while ( !atomic_compare_exchange_weak_explicit( pDest, &cur, cur - val, order, memory_order_relaxed )); + return cur; +# endif + } + static T atomic_fetch_sub_explicit(T * pDest, T val , memory_order order) noexcept + { + return atomic_fetch_sub_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_sub( T volatile * pDest, T val ) noexcept + { + return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_sub( T * pDest, T val ) noexcept + { + return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_and + static T atomic_fetch_and_explicit(T volatile * pDest, T val, memory_order order) noexcept + { +# ifdef CDS_ATOMIC_fetch16_and_defined + return platform::fetch16_and( pDest, val, order ); +# else + return bitwise_ops::fetch_and( pDest, val, order ); +# endif + } + static T atomic_fetch_and_explicit(T * pDest, T val , memory_order order) noexcept + { 
+ return atomic_fetch_and_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_and( T volatile * pDest, T val ) noexcept + { + return atomic_fetch_and_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_and( T * pDest, T val ) noexcept + { + return atomic_fetch_and_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_or + static T atomic_fetch_or_explicit(T volatile * pDest, T val, memory_order order) noexcept + { +# ifdef CDS_ATOMIC_fetch16_or_defined + return platform::fetch16_or( pDest, val, order ); +# else + return bitwise_ops::fetch_or( pDest, val, order ); +# endif + } + static T atomic_fetch_or_explicit(T * pDest, T val , memory_order order) noexcept + { + return atomic_fetch_or_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_or( T volatile * pDest, T val ) noexcept + { + return atomic_fetch_or_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_or( T * pDest, T val ) noexcept + { + return atomic_fetch_or_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_xor + static T atomic_fetch_xor_explicit(T volatile * pDest, T val, memory_order order) noexcept + { +# ifdef CDS_ATOMIC_fetch16_xor_defined + return platform::fetch16_xor( pDest, val, order ); +# else + return bitwise_ops::fetch_xor( pDest, val, order ); +# endif + } + static T atomic_fetch_xor_explicit(T * pDest, T val , memory_order order) noexcept + { + return atomic_fetch_xor_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_xor( T volatile * pDest, T val ) noexcept + { + return atomic_fetch_xor_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_xor( T * pDest, T val ) noexcept + { + return atomic_fetch_xor_explicit( pDest, val, memory_order_seq_cst ); + } + }; + + // 32-bit atomic operations + + template + struct atomic_generic_ops< T, 4, Primary > + { + typedef make_atomic_primary primary; + + // store + static void 
atomic_store_explicit( T volatile * pDest, T v, memory_order order ) noexcept + { + platform::store32( primary::ptr(pDest), primary::val(v), order ); + } + static void atomic_store_explicit( T * pDest, T v, memory_order order ) noexcept + { + platform::store32( primary::ptr(pDest), primary::val(v), order ); + } + static void atomic_store( T volatile * pDest, T v ) noexcept + { + atomic_store_explicit( pDest, v, memory_order_seq_cst ); + } + static void atomic_store( T * pDest, T v ) noexcept + { + atomic_store_explicit( pDest, v, memory_order_seq_cst ); + } + + // load + static T atomic_load_explicit( T volatile const * pSrc, memory_order order ) noexcept + { + return primary::ret( platform::load32( primary::ptr(pSrc), order )); + } + static T atomic_load_explicit( T const * pSrc, memory_order order ) noexcept + { + return primary::ret( platform::load32( primary::ptr(pSrc), order )); + } + static T atomic_load( T volatile const * pSrc ) noexcept + { + return atomic_load_explicit( pSrc, memory_order_seq_cst ); + } + static T atomic_load( T const * pSrc ) noexcept + { + return atomic_load_explicit( pSrc, memory_order_seq_cst ); + } + + // exchange + static T atomic_exchange_explicit( T volatile * pDest, T val, memory_order order ) noexcept + { + return primary::ret( platform::exchange32( primary::ptr(pDest), primary::val(val), order )); + } + static T atomic_exchange_explicit( T * pDest, T val, memory_order order ) noexcept + { + return primary::ret( platform::exchange32( primary::ptr(pDest), primary::val(val), order )); + } + static T atomic_exchange( T volatile * pDest, T val ) noexcept + { + return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_exchange( T * pDest, T val ) noexcept + { + return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); + } + + // cas + static bool atomic_compare_exchange_weak_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + 
assert( expected ); + return platform::cas32_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_weak_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + assert( expected ); + return platform::cas32_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_weak( T volatile * pDest, T * expected, T desired ) noexcept + { + return atomic_compare_exchange_weak_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_weak( T * pDest, T * expected, T desired ) noexcept + { + return atomic_compare_exchange_weak_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_strong_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + assert( expected ); + return platform::cas32_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_strong_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + assert( expected ); + return platform::cas32_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_strong( T volatile * pDest, T * expected, T desired ) noexcept + { + return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_strong( T * pDest, T * expected, T desired ) noexcept + { + return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + }; + + template + struct 
atomic_integral_ops< T, 4 > + : atomic_generic_ops< T, 4, T > + , atomic_integral_bitwise_ops + { + typedef atomic_integral_bitwise_ops bitwise_ops; + // fetch_add + static T atomic_fetch_add_explicit(T volatile * pDest, T val, memory_order order) noexcept + { +# ifdef CDS_ATOMIC_fetch32_add_defined + return platform::fetch32_add( pDest, val, order ); +# else + T cur = atomic_load_explicit( pDest, memory_order_relaxed ); + do {} while ( !atomic_compare_exchange_weak_explicit( pDest, &cur, cur + val, order, memory_order_relaxed )); + return cur; +# endif + } + static T atomic_fetch_add_explicit(T * pDest, T val , memory_order order) noexcept + { + return atomic_fetch_add_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_add( T volatile * pDest, T val ) noexcept + { + return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_add( T * pDest, T val ) noexcept + { + return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_sub + static T atomic_fetch_sub_explicit(T volatile * pDest, T val, memory_order order) noexcept + { +# ifdef CDS_ATOMIC_fetch32_sub_defined + return platform::fetch32_sub( pDest, val, order ); +# else + T cur = atomic_load_explicit( pDest, memory_order_relaxed ); + do {} while ( !atomic_compare_exchange_weak_explicit( pDest, &cur, cur - val, order, memory_order_relaxed )); + return cur; +# endif + } + static T atomic_fetch_sub_explicit(T * pDest, T val , memory_order order) noexcept + { + return atomic_fetch_sub_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_sub( T volatile * pDest, T val ) noexcept + { + return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_sub( T * pDest, T val ) noexcept + { + return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_and + static T atomic_fetch_and_explicit(T volatile * pDest, T val, memory_order order) noexcept + 
{ +# ifdef CDS_ATOMIC_fetch32_and_defined + return platform::fetch32_and( pDest, val, order ); +# else + return bitwise_ops::fetch_and( pDest, val, order ); +# endif + } + static T atomic_fetch_and_explicit(T * pDest, T val , memory_order order) noexcept + { + return atomic_fetch_and_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_and( T volatile * pDest, T val ) noexcept + { + return atomic_fetch_and_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_and( T * pDest, T val ) noexcept + { + return atomic_fetch_and_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_or + static T atomic_fetch_or_explicit(T volatile * pDest, T val, memory_order order) noexcept + { +# ifdef CDS_ATOMIC_fetch32_or_defined + return platform::fetch32_or( pDest, val, order ); +# else + return bitwise_ops::fetch_or( pDest, val, order ); +# endif + } + static T atomic_fetch_or_explicit(T * pDest, T val , memory_order order) noexcept + { + return atomic_fetch_or_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_or( T volatile * pDest, T val ) noexcept + { + return atomic_fetch_or_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_or( T * pDest, T val ) noexcept + { + return atomic_fetch_or_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_xor + static T atomic_fetch_xor_explicit(T volatile * pDest, T val, memory_order order) noexcept + { +# ifdef CDS_ATOMIC_fetch32_xor_defined + return platform::fetch32_xor( pDest, val, order ); +# else + return bitwise_ops::fetch_xor( pDest, val, order ); +# endif + } + static T atomic_fetch_xor_explicit(T * pDest, T val , memory_order order) noexcept + { + return atomic_fetch_xor_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_xor( T volatile * pDest, T val ) noexcept + { + return atomic_fetch_xor_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_xor( T * pDest, T val ) 
noexcept + { + return atomic_fetch_xor_explicit( pDest, val, memory_order_seq_cst ); + } + }; + + + // 64-bit atomic operations + + template + struct atomic_generic_ops< T, 8, Primary > + { + typedef make_atomic_primary primary; + + // store + static void atomic_store_explicit( T volatile * pDest, T v, memory_order order ) noexcept + { + platform::store64( primary::ptr(pDest), primary::val(v), order ); + } + static void atomic_store_explicit( T * pDest, T v, memory_order order ) noexcept + { + platform::store64( primary::ptr(pDest), primary::val(v), order ); + } + static void atomic_store( T volatile * pDest, T v ) noexcept + { + atomic_store_explicit( pDest, v, memory_order_seq_cst ); + } + static void atomic_store( T * pDest, T v ) noexcept + { + atomic_store_explicit( pDest, v, memory_order_seq_cst ); + } + + // load + static T atomic_load_explicit( T volatile const * pSrc, memory_order order ) noexcept + { + return primary::ret( platform::load64( primary::ptr(pSrc), order )); + } + static T atomic_load_explicit( T const * pSrc, memory_order order ) noexcept + { + return primary::ret( platform::load64( primary::ptr(pSrc), order )); + } + static T atomic_load( T volatile const * pSrc ) noexcept + { + return atomic_load_explicit( pSrc, memory_order_seq_cst ); + } + static T atomic_load( T const * pSrc ) noexcept + { + return atomic_load_explicit( pSrc, memory_order_seq_cst ); + } + + // exchange + static T atomic_exchange_explicit( T volatile * pDest, T val, memory_order order ) noexcept + { + return primary::ret( platform::exchange64( primary::ptr(pDest), primary::val(val), order )); + } + static T atomic_exchange_explicit( T * pDest, T val, memory_order order ) noexcept + { + return primary::ret( platform::exchange64( primary::ptr(pDest), primary::val(val), order )); + } + static T atomic_exchange( T volatile * pDest, T val ) noexcept + { + return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_exchange( T * pDest, T val ) 
noexcept + { + return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); + } + + // cas + static bool atomic_compare_exchange_weak_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + assert( expected ); + return platform::cas64_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_weak_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + assert( expected ); + return platform::cas64_weak( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_weak( T volatile * pDest, T * expected, T desired ) noexcept + { + return atomic_compare_exchange_weak_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_weak( T * pDest, T * expected, T desired ) noexcept + { + return atomic_compare_exchange_weak_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_strong_explicit( T volatile * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + assert( expected ); + return platform::cas64_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_strong_explicit( T * pDest, T * expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + assert( expected ); + return platform::cas64_strong( primary::ptr(pDest), primary::ref(*expected), primary::val(desired), mo_success, mo_fail ); + } + static bool atomic_compare_exchange_strong( T volatile * pDest, T * expected, T desired ) noexcept + { + return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + 
static bool atomic_compare_exchange_strong( T * pDest, T * expected, T desired ) noexcept + { + return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + }; + + + template + struct atomic_integral_ops< T, 8 > + : atomic_generic_ops< T, 8, T > + , atomic_integral_bitwise_ops + { + typedef atomic_integral_bitwise_ops bitwise_ops; + typedef atomic_generic_ops general_ops; + + // fetch_add + static T atomic_fetch_add_explicit(T volatile * pDest, T val, memory_order order) noexcept + { +# ifdef CDS_ATOMIC_fetch64_add_defined + return platform::fetch64_add( pDest, val, order ); +# else + T cur = general_ops::atomic_load_explicit( pDest, memory_order_relaxed ); + do {} while ( !general_ops::atomic_compare_exchange_weak_explicit( pDest, &cur, cur + val, order, memory_order_relaxed )); + return cur; +# endif + } + static T atomic_fetch_add_explicit(T * pDest, T val , memory_order order) noexcept + { + return atomic_fetch_add_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_add( T volatile * pDest, T val ) noexcept + { + return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_add( T * pDest, T val ) noexcept + { + return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_sub + static T atomic_fetch_sub_explicit(T volatile * pDest, T val, memory_order order) noexcept + { +# ifdef CDS_ATOMIC_fetch64_sub_defined + return platform::fetch64_sub( pDest, val, order ); +# else + T cur = general_ops::atomic_load_explicit( pDest, memory_order_relaxed ); + do {} while ( !general_ops::atomic_compare_exchange_weak_explicit( pDest, &cur, cur - val, order, memory_order_relaxed )); + return cur; +# endif + } + static T atomic_fetch_sub_explicit(T * pDest, T val , memory_order order) noexcept + { + return atomic_fetch_sub_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_sub( T volatile * pDest, T 
val ) noexcept + { + return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_sub( T * pDest, T val ) noexcept + { + return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_and + static T atomic_fetch_and_explicit(T volatile * pDest, T val, memory_order order) noexcept + { +# ifdef CDS_ATOMIC_fetch64_and_defined + return platform::fetch64_and( pDest, val, order ); +# else + return bitwise_ops::fetch_and( pDest, val, order ); +# endif + } + static T atomic_fetch_and_explicit(T * pDest, T val , memory_order order) noexcept + { + return atomic_fetch_and_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_and( T volatile * pDest, T val ) noexcept + { + return atomic_fetch_and_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_and( T * pDest, T val ) noexcept + { + return atomic_fetch_and_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_or + static T atomic_fetch_or_explicit(T volatile * pDest, T val, memory_order order) noexcept + { +# ifdef CDS_ATOMIC_fetch64_or_defined + return platform::fetch64_or( pDest, val, order ); +# else + return bitwise_ops::fetch_or( pDest, val, order ); +# endif + } + static T atomic_fetch_or_explicit(T * pDest, T val , memory_order order) noexcept + { + return atomic_fetch_or_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_or( T volatile * pDest, T val ) noexcept + { + return atomic_fetch_or_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_or( T * pDest, T val ) noexcept + { + return atomic_fetch_or_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_xor + static T atomic_fetch_xor_explicit(T volatile * pDest, T val, memory_order order) noexcept + { +# ifdef CDS_ATOMIC_fetch64_xor_defined + return platform::fetch64_xor( pDest, val, order ); +# else + return bitwise_ops::fetch_xor( pDest, val, order ); +# endif + } + static T 
atomic_fetch_xor_explicit(T * pDest, T val , memory_order order) noexcept + { + return atomic_fetch_xor_explicit( reinterpret_cast( pDest ), val, order ); + } + static T atomic_fetch_xor( T volatile * pDest, T val ) noexcept + { + return atomic_fetch_xor_explicit( pDest, val, memory_order_seq_cst ); + } + static T atomic_fetch_xor( T * pDest, T val ) noexcept + { + return atomic_fetch_xor_explicit( pDest, val, memory_order_seq_cst ); + } + }; + + + // atomic pointer operations + template + struct atomic_pointer_base + { + // store + static void atomic_store_explicit( T * volatile * pDest, T * v, memory_order order ) noexcept + { + platform::store_ptr( pDest, v, order ); + } + static void atomic_store_explicit( T * * pDest, T * v, memory_order order ) noexcept + { + platform::store_ptr( pDest, v, order ); + } + static void atomic_store( T * volatile * pDest, T * v ) noexcept + { + atomic_store_explicit( pDest, v, memory_order_seq_cst ); + } + static void atomic_store( T * * pDest, T * v ) noexcept + { + atomic_store_explicit( pDest, v, memory_order_seq_cst ); + } + + // load + static T * atomic_load_explicit( T * volatile const * pSrc, memory_order order ) noexcept + { + return platform::load_ptr( pSrc, order ); + } + static T * atomic_load_explicit( T * const * pSrc, memory_order order ) noexcept + { + return platform::load_ptr( pSrc, order ); + } + static T * atomic_load( T * volatile const * pSrc ) noexcept + { + return atomic_load_explicit( pSrc, memory_order_seq_cst ); + } + static T * atomic_load( T * const * pSrc ) noexcept + { + return atomic_load_explicit( pSrc, memory_order_seq_cst ); + } + + // exchange + static T * atomic_exchange_explicit( T * volatile * pDest, T * val, memory_order order ) noexcept + { + return platform::exchange_ptr( pDest, val, order ); + } + static T * atomic_exchange_explicit( T * * pDest, T * val, memory_order order ) noexcept + { + return platform::exchange_ptr( pDest, val, order ); + } + static T * atomic_exchange( T * volatile 
* pDest, T * val ) noexcept + { + return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); + } + static T * atomic_exchange( T * * pDest, T * val ) noexcept + { + return atomic_exchange_explicit( pDest, val, memory_order_seq_cst ); + } + + // cas + static bool atomic_compare_exchange_weak_explicit( T * volatile * pDest, T * * expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + assert( expected ); + return platform::cas_ptr_weak( pDest, *expected, desired, mo_success, mo_fail ); + } + static bool atomic_compare_exchange_weak_explicit( T * * pDest, T * * expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + assert( expected ); + return platform::cas_ptr_weak( pDest, *expected, desired, mo_success, mo_fail ); + } + static bool atomic_compare_exchange_weak( T * volatile * pDest, T ** expected, T * desired ) noexcept + { + return atomic_compare_exchange_weak_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_weak( T ** pDest, T ** expected, T * desired ) noexcept + { + return atomic_compare_exchange_weak_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_strong_explicit( T * volatile * pDest, T ** expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + assert( expected ); + return platform::cas_ptr_strong( pDest, *expected, desired, mo_success, mo_fail ); + } + static bool atomic_compare_exchange_strong_explicit( T ** pDest, T ** expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + assert( expected ); + return platform::cas_ptr_strong( pDest, *expected, desired, mo_success, mo_fail ); + } + static bool atomic_compare_exchange_strong( T * volatile * pDest, T ** expected, T * desired ) noexcept + { + return atomic_compare_exchange_strong_explicit( pDest, expected, desired, 
memory_order_seq_cst, memory_order_relaxed ); + } + static bool atomic_compare_exchange_strong( T ** pDest, T ** expected, T * desired ) noexcept + { + return atomic_compare_exchange_strong_explicit( pDest, expected, desired, memory_order_seq_cst, memory_order_relaxed ); + } + }; + + template + struct atomic_pointer: public atomic_pointer_base + { + typedef atomic_pointer_base base_class; + // fetch_add + static T * atomic_fetch_add_explicit(T * volatile * pDest, ptrdiff_t val, memory_order order) noexcept + { +# ifdef CDS_ATOMIC_fetch_ptr_add_defined + platform::fetch_ptr_add( pDest, val, order ); +# else + T * cur = base_class::atomic_load_explicit( pDest, memory_order_relaxed ); + do {} while ( !base_class::atomic_compare_exchange_weak_explicit( pDest, &cur, cur + val, order, memory_order_relaxed )); + return cur; +# endif + } + static T * atomic_fetch_add_explicit(T * * pDest, ptrdiff_t val , memory_order order) noexcept + { + return atomic_fetch_add_explicit( reinterpret_cast( pDest ), val, order ); + } + static T * atomic_fetch_add( T * volatile * pDest, ptrdiff_t val ) noexcept + { + return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); + } + static T * atomic_fetch_add( T ** pDest, ptrdiff_t val ) noexcept + { + return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_sub + static T * atomic_fetch_sub_explicit(T * volatile * pDest, ptrdiff_t val, memory_order order) noexcept + { +# ifdef CDS_ATOMIC_fetch_ptr_sub_defined + platform::fetch_ptr_sub( pDest, val, order ); +# else + T * cur = base_class::atomic_load_explicit( pDest, memory_order_relaxed ); + do {} while ( !base_class::atomic_compare_exchange_weak_explicit( pDest, &cur, cur - val, order, memory_order_relaxed )); + return cur; +# endif + } + static T * atomic_fetch_sub_explicit(T ** pDest, ptrdiff_t val , memory_order order) noexcept + { + return atomic_fetch_sub_explicit( reinterpret_cast( pDest ), val, order ); + } + static T * atomic_fetch_sub( T 
volatile * pDest, ptrdiff_t val ) noexcept + { + return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); + } + static T * atomic_fetch_sub( T * pDest, ptrdiff_t val ) noexcept + { + return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); + } + }; + + template <> + struct atomic_pointer: public atomic_pointer_base + { + typedef atomic_pointer_base base_class; + + // fetch_add + static void * atomic_fetch_add_explicit(void * volatile * pDest, ptrdiff_t val, memory_order order) noexcept + { + void * cur = base_class::atomic_load_explicit( pDest, memory_order_relaxed ); + do {} while ( !base_class::atomic_compare_exchange_weak_explicit( pDest, &cur, reinterpret_cast(cur) + val, order, memory_order_relaxed )); + return cur; + } + static void * atomic_fetch_add_explicit(void * * pDest, ptrdiff_t val , memory_order order) noexcept + { + return atomic_fetch_add_explicit( reinterpret_cast( pDest ), val, order ); + } + static void * atomic_fetch_add( void * volatile * pDest, ptrdiff_t val ) noexcept + { + return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); + } + static void * atomic_fetch_add( void ** pDest, ptrdiff_t val ) noexcept + { + return atomic_fetch_add_explicit( pDest, val, memory_order_seq_cst ); + } + + // fetch_sub + static void * atomic_fetch_sub_explicit(void * volatile * pDest, ptrdiff_t val, memory_order order) noexcept + { + void * cur = base_class::atomic_load_explicit( pDest, memory_order_relaxed ); + do {} while ( !base_class::atomic_compare_exchange_weak_explicit( pDest, &cur, reinterpret_cast(cur) - val, order, memory_order_relaxed )); + return cur; + } + static void * atomic_fetch_sub_explicit(void ** pDest, ptrdiff_t val , memory_order order) noexcept + { + return atomic_fetch_sub_explicit( reinterpret_cast( pDest ), val, order ); + } + static void * atomic_fetch_sub( void * volatile * pDest, ptrdiff_t val ) noexcept + { + return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); + } + static 
void * atomic_fetch_sub( void ** pDest, ptrdiff_t val ) noexcept + { + return atomic_fetch_sub_explicit( pDest, val, memory_order_seq_cst ); + } + }; + + template + struct atomic_integral + { + private: + typename cds::details::aligned_type::type volatile m_val; + //T volatile m_val; + typedef atomic_integral_ops atomic_ops; + public: + typedef T atomic_type; + public: + bool is_lock_free() const volatile noexcept + { + return true; + } + bool is_lock_free() const noexcept + { + return true; + } + void store(T val, memory_order order = memory_order_seq_cst) volatile noexcept + { + atomic_ops::atomic_store_explicit( &m_val, val, order ); + } + void store(T val, memory_order order = memory_order_seq_cst) noexcept + { + atomic_ops::atomic_store_explicit( &m_val, val, order ); + } + + T load(memory_order order = memory_order_seq_cst) const volatile noexcept + { + return atomic_ops::atomic_load_explicit( &m_val, order ); + } + T load(memory_order order = memory_order_seq_cst) const noexcept + { + return atomic_ops::atomic_load_explicit( &m_val, order ); + } + + operator T() const volatile noexcept + { + return load(); + } + operator T() const noexcept + { + return load(); + } + + T exchange(T val, memory_order order = memory_order_seq_cst) volatile noexcept + { + return atomic_ops::atomic_exchange_explicit( &m_val, val, order ); + } + T exchange(T val, memory_order order = memory_order_seq_cst) noexcept + { + return atomic_ops::atomic_exchange_explicit( &m_val, val, order ); + } + + bool compare_exchange_weak(T& expected, T desired , memory_order success_order, memory_order failure_order) volatile noexcept + { + return atomic_ops::atomic_compare_exchange_weak_explicit( &m_val, &expected, desired, success_order, failure_order ); + } + bool compare_exchange_weak(T& expected, T desired , memory_order success_order, memory_order failure_order) noexcept + { + return atomic_ops::atomic_compare_exchange_weak_explicit( &m_val, &expected, desired, success_order, failure_order ); 
+ } + bool compare_exchange_strong(T& expected, T desired , memory_order success_order, memory_order failure_order) volatile noexcept + { + return atomic_ops::atomic_compare_exchange_strong_explicit( &m_val, &expected, desired, success_order, failure_order ); + } + bool compare_exchange_strong(T& expected, T desired , memory_order success_order, memory_order failure_order) noexcept + { + return atomic_ops::atomic_compare_exchange_strong_explicit( &m_val, &expected, desired, success_order, failure_order ); + } + bool compare_exchange_weak(T& expected, T desired , memory_order success_order = memory_order_seq_cst) volatile noexcept + { + return compare_exchange_weak( expected, desired, success_order, memory_order_relaxed ); + } + bool compare_exchange_weak(T& expected, T desired , memory_order success_order = memory_order_seq_cst) noexcept + { + return compare_exchange_weak( expected, desired, success_order, memory_order_relaxed ); + } + bool compare_exchange_strong(T& expected, T desired , memory_order success_order = memory_order_seq_cst) volatile noexcept + { + return compare_exchange_strong( expected, desired, success_order, memory_order_relaxed ); + } + bool compare_exchange_strong(T& expected, T desired , memory_order success_order = memory_order_seq_cst) noexcept + { + return compare_exchange_strong( expected, desired, success_order, memory_order_relaxed ); + } + + T fetch_add(T val, memory_order order = memory_order_seq_cst) volatile noexcept + { + return atomic_ops::atomic_fetch_add_explicit( &m_val, val, order ); + } + T fetch_add(T val, memory_order order = memory_order_seq_cst) noexcept + { + return atomic_ops::atomic_fetch_add_explicit( &m_val, val, order ); + } + T fetch_sub(T val, memory_order order = memory_order_seq_cst) volatile noexcept + { + return atomic_ops::atomic_fetch_sub_explicit( &m_val, val, order ); + } + T fetch_sub(T val, memory_order order = memory_order_seq_cst) noexcept + { + return atomic_ops::atomic_fetch_sub_explicit( &m_val, val, 
order ); + } + T fetch_and(T val, memory_order order = memory_order_seq_cst) volatile noexcept + { + return atomic_ops::atomic_fetch_and_explicit( &m_val, val, order ); + } + T fetch_and(T val, memory_order order = memory_order_seq_cst) noexcept + { + return atomic_ops::atomic_fetch_and_explicit( &m_val, val, order ); + } + + T fetch_or(T val, memory_order order = memory_order_seq_cst) volatile noexcept + { + return atomic_ops::atomic_fetch_or_explicit( &m_val, val, order ); + } + T fetch_or(T val, memory_order order = memory_order_seq_cst) noexcept + { + return atomic_ops::atomic_fetch_or_explicit( &m_val, val, order ); + } + T fetch_xor(T val, memory_order order = memory_order_seq_cst) volatile noexcept + { + return atomic_ops::atomic_fetch_xor_explicit( &m_val, val, order ); + } + T fetch_xor(T val, memory_order order = memory_order_seq_cst) noexcept + { + return atomic_ops::atomic_fetch_xor_explicit( &m_val, val, order ); + } + + atomic_integral() = default; + constexpr atomic_integral(T val) noexcept + : m_val(val) + {} + + atomic_integral(const atomic_integral&) = delete; + atomic_integral& operator=(const atomic_integral&) = delete; + atomic_integral& operator=(const atomic_integral&) volatile = delete; + + T operator=(T val) volatile noexcept + { + store(val); + return val; + } + T operator=(T val) noexcept + { + store(val); + return val; + } + + // Post inc/dec + T operator++(int) volatile noexcept + { + return fetch_add( 1 ); + } + T operator++(int) noexcept + { + return fetch_add( 1 ); + } + T operator--(int) volatile noexcept + { + return fetch_sub( 1 ); + } + T operator--(int) noexcept + { + return fetch_sub( 1 ); + } + + // Pre inc/dec + T operator++() volatile noexcept + { + return fetch_add( 1 ) + 1; + } + T operator++() noexcept + { + return fetch_add( 1 ) + 1; + } + T operator--() volatile noexcept + { + return fetch_sub( 1 ) - 1; + } + T operator--() noexcept + { + return fetch_sub( 1 ) - 1; + } + + // op= + T operator+=(T val) volatile noexcept 
+ { + return fetch_add( val ) + val; + } + T operator+=(T val) noexcept + { + return fetch_add( val ) + val; + } + T operator-=(T val) volatile noexcept + { + return fetch_sub( val ) - val; + } + T operator-=(T val) noexcept + { + return fetch_sub( val ) - val; + } + T operator&=(T val) volatile noexcept + { + return fetch_and( val ) & val; + } + T operator&=(T val) noexcept + { + return fetch_and( val ) & val; + } + T operator|=(T val) volatile noexcept + { + return fetch_or( val ) | val; + } + T operator|=(T val) noexcept + { + return fetch_or( val ) | val; + } + T operator^=(T val) volatile noexcept + { + return fetch_xor( val ) ^ val; + } + T operator^=(T val) noexcept + { + return fetch_xor( val ) ^ val; + } + }; + + template + struct select_primary_type { + typedef typename details::primary_type::type type; + }; + template <> + struct select_primary_type { + typedef bool type; + }; + + } // namespace details + + template + struct atomic + { + private: + typedef details::atomic_generic_ops::type > atomic_ops; + + T volatile m_data; + public: + bool is_lock_free() const volatile noexcept + { + return true; + } + bool is_lock_free() const noexcept + { + return true; + } + + void store(T val, memory_order order = memory_order_seq_cst) volatile noexcept + { + atomic_ops::atomic_store_explicit( &m_data, val, order ); + } + void store(T val, memory_order order = memory_order_seq_cst) noexcept + { + atomic_ops::atomic_store_explicit( &m_data, val, order ); + } + + T load(memory_order order = memory_order_seq_cst) const volatile noexcept + { + return atomic_ops::atomic_load_explicit( &m_data, order ); + } + T load(memory_order order = memory_order_seq_cst) const noexcept + { + return atomic_ops::atomic_load_explicit( &m_data, order ); + } + + operator T() const volatile noexcept + { + return load(); + } + operator T() const noexcept + { + return load(); + } + + T exchange(T val, memory_order order = memory_order_seq_cst) volatile noexcept + { + return 
atomic_ops::atomic_exchange_explicit( &m_data, val, order ); + } + T exchange(T val, memory_order order = memory_order_seq_cst) noexcept + { + return atomic_ops::atomic_exchange_explicit( &m_data, val, order ); + } + + bool compare_exchange_weak(T& expected, T desired, memory_order success_order, memory_order failure_order) volatile noexcept + { + return atomic_ops::atomic_compare_exchange_weak_explicit( &m_data, &expected, desired, success_order, failure_order ); + } + bool compare_exchange_weak(T& expected, T desired, memory_order success_order, memory_order failure_order) noexcept + { + return atomic_ops::atomic_compare_exchange_weak_explicit( &m_data, &expected, desired, success_order, failure_order ); + } + bool compare_exchange_strong(T& expected, T desired, memory_order success_order, memory_order failure_order) volatile noexcept + { + return atomic_ops::atomic_compare_exchange_strong_explicit( &m_data, &expected, desired, success_order, failure_order ); + } + bool compare_exchange_strong(T& expected, T desired, memory_order success_order, memory_order failure_order) noexcept + { + return atomic_ops::atomic_compare_exchange_strong_explicit( &m_data, &expected, desired, success_order, failure_order ); + } + bool compare_exchange_weak(T& expected, T desired, memory_order success_order = memory_order_seq_cst) volatile noexcept + { + return compare_exchange_weak( expected, desired, success_order, memory_order_relaxed ); + } + bool compare_exchange_weak(T& expected, T desired, memory_order success_order = memory_order_seq_cst) noexcept + { + return compare_exchange_weak( expected, desired, success_order, memory_order_relaxed ); + } + bool compare_exchange_strong(T& expected, T desired, memory_order success_order = memory_order_seq_cst) volatile noexcept + { + return compare_exchange_strong( expected, desired, success_order, memory_order_relaxed ); + } + bool compare_exchange_strong(T& expected, T desired, memory_order success_order = memory_order_seq_cst) 
noexcept + { + return compare_exchange_strong( expected, desired, success_order, memory_order_relaxed ); + } + + atomic() = default; + constexpr atomic(T val) + : m_data( val ) + {} + + atomic(const atomic&) = delete; + atomic& operator=(const atomic&) = delete; + atomic& operator=(const atomic&) volatile = delete; + + T operator=(T val) volatile noexcept + { + store( val ); + return val; + } + T operator=(T val) noexcept + { + store( val ); + return val; + } + }; + +# define CDS_DECLARE_ATOMIC_INTEGRAL( _type ) \ + template <> \ + struct atomic<_type>: public details::atomic_integral<_type> \ + { \ + private: \ + typedef details::atomic_integral<_type> base_class ; \ + public: \ + atomic() = default; \ + atomic(_type val) noexcept : base_class(val) {} \ + atomic(const atomic&) = delete; \ + atomic& operator=(const atomic&) = delete; \ + atomic& operator=(const atomic&) volatile = delete; \ + _type operator=(_type val) volatile noexcept { return base_class::operator=(val); } \ + _type operator=(_type val) noexcept { return base_class::operator=(val); } \ + }; + + CDS_DECLARE_ATOMIC_INTEGRAL(char) + CDS_DECLARE_ATOMIC_INTEGRAL(signed char) + CDS_DECLARE_ATOMIC_INTEGRAL(unsigned char) + CDS_DECLARE_ATOMIC_INTEGRAL(short) + CDS_DECLARE_ATOMIC_INTEGRAL(unsigned short) + CDS_DECLARE_ATOMIC_INTEGRAL(int) + CDS_DECLARE_ATOMIC_INTEGRAL(unsigned int) + CDS_DECLARE_ATOMIC_INTEGRAL(long) + CDS_DECLARE_ATOMIC_INTEGRAL(unsigned long) + CDS_DECLARE_ATOMIC_INTEGRAL(long long) + CDS_DECLARE_ATOMIC_INTEGRAL(unsigned long long) +//#if CDS_COMPILER == CDS_COMPILER_GCC && CDS_COMPILER_VERSION >= 40400 +// CDS_DECLARE_ATOMIC_INTEGRAL(char16_t) +// CDS_DECLARE_ATOMIC_INTEGRAL(char32_t) +//#endif +// CDS_DECLARE_ATOMIC_INTEGRAL(wchar_t) + +# undef CDS_DECLARE_ATOMIC_INTEGRAL + + + template + class atomic + { + private: + T * volatile m_ptr; + typedef details::atomic_pointer atomic_ops; + public: + bool is_lock_free() const volatile noexcept + { + return true; + } + bool is_lock_free() 
const noexcept + { + return true; + } + + void store(T * val, memory_order order = memory_order_seq_cst) volatile noexcept + { + atomic_ops::atomic_store_explicit( &m_ptr, val, order ); + } + void store(T * val, memory_order order = memory_order_seq_cst) noexcept + { + atomic_ops::atomic_store_explicit( &m_ptr, val, order ); + } + + T * load(memory_order order = memory_order_seq_cst) const volatile noexcept + { + return atomic_ops::atomic_load_explicit( &m_ptr, order ); + } + T * load(memory_order order = memory_order_seq_cst) const noexcept + { + return atomic_ops::atomic_load_explicit( &m_ptr, order ); + } + + operator T *() const volatile noexcept + { + return load(); + } + operator T *() const noexcept + { + return load(); + } + + T * exchange(T * val, memory_order order = memory_order_seq_cst) volatile noexcept + { + return atomic_ops::atomic_exchange_explicit( &m_ptr, val, order ); + } + T * exchange(T * val, memory_order order = memory_order_seq_cst) noexcept + { + return atomic_ops::atomic_exchange_explicit( &m_ptr, val, order ); + } + + bool compare_exchange_weak(T *& expected, T * desired, memory_order success_order, memory_order failure_order) volatile noexcept + { + return atomic_ops::atomic_compare_exchange_weak_explicit( &m_ptr, &expected, desired, success_order, failure_order ); + } + bool compare_exchange_weak(T *& expected, T * desired, memory_order success_order, memory_order failure_order) noexcept + { + return atomic_ops::atomic_compare_exchange_weak_explicit( &m_ptr, &expected, desired, success_order, failure_order ); + } + bool compare_exchange_strong(T *& expected, T * desired, memory_order success_order, memory_order failure_order) volatile noexcept + { + return atomic_ops::atomic_compare_exchange_strong_explicit( &m_ptr, &expected, desired, success_order, failure_order ); + } + bool compare_exchange_strong(T *& expected, T * desired, memory_order success_order, memory_order failure_order) noexcept + { + return 
atomic_ops::atomic_compare_exchange_strong_explicit( &m_ptr, &expected, desired, success_order, failure_order ); + } + bool compare_exchange_weak(T *& expected, T * desired, memory_order success_order = memory_order_seq_cst) volatile noexcept + { + return compare_exchange_weak( expected, desired, success_order, memory_order_relaxed ); + } + bool compare_exchange_weak(T *& expected, T * desired, memory_order success_order = memory_order_seq_cst) noexcept + { + return compare_exchange_weak( expected, desired, success_order, memory_order_relaxed ); + } + bool compare_exchange_strong(T *& expected, T * desired, memory_order success_order = memory_order_seq_cst) volatile noexcept + { + return compare_exchange_strong( expected, desired, success_order, memory_order_relaxed ); + } + bool compare_exchange_strong(T *& expected, T * desired, memory_order success_order = memory_order_seq_cst) noexcept + { + return compare_exchange_strong( expected, desired, success_order, memory_order_relaxed ); + } + + T * fetch_add(ptrdiff_t offset, memory_order order = memory_order_seq_cst) volatile noexcept + { + return atomic_ops::atomic_fetch_add_explicit( &m_ptr, offset, order ); + } + T * fetch_add(ptrdiff_t offset, memory_order order = memory_order_seq_cst) noexcept + { + return atomic_ops::atomic_fetch_add_explicit( &m_ptr, offset, order ); + } + + T * fetch_sub(ptrdiff_t offset, memory_order order = memory_order_seq_cst) volatile noexcept + { + return atomic_ops::atomic_fetch_sub_explicit( &m_ptr, offset, order ); + } + T * fetch_sub(ptrdiff_t offset, memory_order order = memory_order_seq_cst) noexcept + { + return atomic_ops::atomic_fetch_sub_explicit( &m_ptr, offset, order ); + } + + atomic() = default; + constexpr atomic(T * val) noexcept + : m_ptr( val ) + {} + + atomic(const atomic&) = delete; + atomic& operator=(const atomic&) = delete; + atomic& operator=(const atomic&) volatile = delete; + + T * operator=(T * val) volatile noexcept + { + store( val ); + return val; + } + T * 
operator=(T * val) noexcept + { + store( val ); + return val; + } + }; + + // Atomic typedefs + typedef atomic atomic_bool; + typedef atomic atomic_char; + typedef atomic atomic_schar; + typedef atomic atomic_uchar; + typedef atomic atomic_short; + typedef atomic atomic_ushort; + typedef atomic atomic_int; + typedef atomic atomic_uint; + typedef atomic atomic_long; + typedef atomic atomic_ulong; + typedef atomic atomic_llong; + typedef atomic atomic_ullong; +#if ( CDS_COMPILER == CDS_COMPILER_GCC && CDS_COMPILER_VERSION >= 40400 ) || CDS_COMPILER == CDS_COMPILER_CLANG + typedef atomic atomic_char16_t; + typedef atomic atomic_char32_t; +#endif + typedef atomic atomic_wchar_t; + + + typedef atomic atomic_int_least8_t; + typedef atomic atomic_uint_least8_t; + typedef atomic atomic_int_least16_t; + typedef atomic atomic_uint_least16_t; + typedef atomic atomic_int_least32_t; + typedef atomic atomic_uint_least32_t; + typedef atomic atomic_int_least64_t; + typedef atomic atomic_uint_least64_t; + typedef atomic atomic_int_fast8_t; + typedef atomic atomic_uint_fast8_t; + typedef atomic atomic_int_fast16_t; + typedef atomic atomic_uint_fast16_t; + typedef atomic atomic_int_fast32_t; + typedef atomic atomic_uint_fast32_t; + typedef atomic atomic_int_fast64_t; + typedef atomic atomic_uint_fast64_t; + typedef atomic atomic_intptr_t; + typedef atomic atomic_uintptr_t; + typedef atomic atomic_size_t; + typedef atomic atomic_ptrdiff_t; + typedef atomic atomic_intmax_t; + typedef atomic atomic_uintmax_t; + + template + static inline bool atomic_is_lock_free(const volatile atomic * p) noexcept + { + return p->is_lock_free(); + } + + template + static inline bool atomic_is_lock_free(const atomic * p ) noexcept + { + return p->is_lock_free(); + } + + /* + template + static inline void atomic_init(volatile atomic * p, T val) noexcept + { + p->init( val ); + } + + template + static inline void atomic_init( atomic * p, T val) noexcept + { + p->init( val ); + } + */ + + template + static 
inline void atomic_store(volatile atomic* p, T val) noexcept + { + p->store(val); + } + template + static inline void atomic_store(atomic* p, T val) noexcept + { + p->store( val ); + } + + template + static inline void atomic_store_explicit(volatile atomic* p, T val, memory_order order) noexcept + { + p->store( val, order ); + } + template + static inline void atomic_store_explicit(atomic* p, T val, memory_order order) noexcept + { + p->store( val, order ); + } + + template + static inline T atomic_load(const volatile atomic* p) noexcept + { + return p->load(); + } + template + static inline T atomic_load(const atomic* p) noexcept + { + return p->load(); + } + + template + static inline T atomic_load_explicit(const volatile atomic* p, memory_order order) noexcept + { + return p->load( order ); + } + template + static inline T atomic_load_explicit(const atomic* p, memory_order order) noexcept + { + return p->load( order ); + } + + template + static inline T atomic_exchange(volatile atomic* p, T val) noexcept + { + return p->exchange( val ); + } + template + static inline T atomic_exchange(atomic* p, T val ) noexcept + { + return p->exchange( val ); + } + + template + static inline T atomic_exchange_explicit(volatile atomic* p, T val, memory_order order) noexcept + { + return p->exchange( val, order ); + } + template + static inline T atomic_exchange_explicit(atomic* p, T val, memory_order order) noexcept + { + return p->exchange( val, order ); + } + + template + static inline bool atomic_compare_exchange_weak(volatile atomic* p, T* expected, T desired) noexcept + { + return p->compare_exchange_weak( *expected, desired ); + } + template + static inline bool atomic_compare_exchange_weak(atomic* p, T* expected, T desired) noexcept + { + return p->compare_exchange_weak( *expected, desired ); + } + + template + static inline bool atomic_compare_exchange_strong(volatile atomic* p, T* expected, T desired) noexcept + { + return p->compare_exchange_strong( *expected, desired 
); + } + template + static inline bool atomic_compare_exchange_strong(atomic* p, T* expected, T desired) noexcept + { + return p->compare_exchange_strong( *expected, desired ); + } + + template + static inline bool atomic_compare_exchange_weak_explicit(volatile atomic* p, T* expected, T desired, memory_order success_order, memory_order failure_order) noexcept + { + return p->compare_exchange_weak( *expected, desired, success_order, failure_order ); + } + template + static inline bool atomic_compare_exchange_weak_explicit(atomic* p, T* expected, T desired, memory_order success_order, memory_order failure_order) noexcept + { + return p->compare_exchange_weak( *expected, desired, success_order, failure_order ); + } + + template + static inline bool atomic_compare_exchange_strong_explicit(volatile atomic* p, T* expected, T desired, memory_order success_order, memory_order failure_order) noexcept + { + return p->compare_exchange_strong( *expected, desired, success_order, failure_order ); + } + template + static inline bool atomic_compare_exchange_strong_explicit(atomic* p, T* expected, T desired, memory_order success_order, memory_order failure_order) noexcept + { + return p->compare_exchange_strong( *expected, desired, success_order, failure_order ); + } + + template + static inline T atomic_fetch_add(volatile atomic* p, T val) noexcept + { + return p->fetch_add( val ); + } + template + static inline T atomic_fetch_add(atomic* p, T val) noexcept + { + return p->fetch_add( val ); + } + template + static inline T * atomic_fetch_add(volatile atomic* p, ptrdiff_t offset) noexcept + { + return p->fetch_add( offset ); + } + template + static inline T * atomic_fetch_add(atomic* p, ptrdiff_t offset) noexcept + { + return p->fetch_add( offset ); + } + + template + static inline T atomic_fetch_add_explicit(volatile atomic* p, T val, memory_order order) noexcept + { + return p->fetch_add( val, order ); + } + template + static inline T atomic_fetch_add_explicit(atomic* p, T val, 
memory_order order) noexcept + { + return p->fetch_add( val, order ); + } + template + static inline T * atomic_fetch_add_explicit(volatile atomic* p, ptrdiff_t offset, memory_order order) noexcept + { + return p->fetch_add( offset, order ); + } + template + static inline T * atomic_fetch_add_explicit(atomic* p, ptrdiff_t offset, memory_order order) noexcept + { + return p->fetch_add( offset, order ); + } + + template + static inline T atomic_fetch_sub(volatile atomic* p, T val) noexcept + { + return p->fetch_sub( val ); + } + template + static inline T atomic_fetch_sub(atomic* p, T val) noexcept + { + return p->fetch_sub( val ); + } + template + static inline T * atomic_fetch_sub(volatile atomic* p, ptrdiff_t offset) noexcept + { + return p->fetch_sub( offset ); + } + template + static inline T * atomic_fetch_sub(atomic* p, ptrdiff_t offset) noexcept + { + return p->fetch_sub( offset ); + } + + template + static inline T atomic_fetch_sub_explicit(volatile atomic* p, T val, memory_order order) noexcept + { + return p->fetch_sub( val, order ); + } + template + static inline T atomic_fetch_sub_explicit(atomic* p, T val, memory_order order) noexcept + { + return p->fetch_sub( val, order ); + } + template + static inline T * atomic_fetch_sub_explicit(volatile atomic* p, ptrdiff_t offset, memory_order order) noexcept + { + return p->fetch_sub( offset, order ); + } + template + static inline T * atomic_fetch_sub_explicit(atomic* p, ptrdiff_t offset, memory_order order) noexcept + { + return p->fetch_sub( offset, order ); + } + + template + static inline T atomic_fetch_and(volatile atomic* p, T val) noexcept + { + return p->fetch_and( val ); + } + template + static inline T atomic_fetch_and(atomic* p, T val) noexcept + { + return p->fetch_and( val ); + } + + template + static inline T atomic_fetch_and_explicit(volatile atomic* p, T val, memory_order order) noexcept + { + return p->fetch_and( val, order ); + } + template + static inline T atomic_fetch_and_explicit(atomic* 
p, T val, memory_order order) noexcept + { + return p->fetch_and( val, order ); + } + + template + static inline T atomic_fetch_or(volatile atomic* p, T val) noexcept + { + return p->fetch_or( val ); + } + template + static inline T atomic_fetch_or(atomic* p, T val) noexcept + { + return p->fetch_or( val ); + } + + template + static inline T atomic_fetch_or_explicit(volatile atomic* p, T val, memory_order order) noexcept + { + return p->fetch_or( val, order ); + } + template + static inline T atomic_fetch_or_explicit(atomic* p, T val, memory_order order) noexcept + { + return p->fetch_or( val, order ); + } + + template + static inline T atomic_fetch_xor(volatile atomic* p, T val) noexcept + { + return p->fetch_xor( val ); + } + template + static inline T atomic_fetch_xor(atomic* p, T val) noexcept + { + return p->fetch_xor( val ); + } + + template + static inline T atomic_fetch_xor_explicit(volatile atomic* p, T val, memory_order order) noexcept + { + return p->fetch_xor( val, order ); + } + template + static inline T atomic_fetch_xor_explicit(atomic* p, T val, memory_order order) noexcept + { + return p->fetch_xor( val, order ); + } + + // Atomic flag type + typedef struct atomic_flag + { + void clear( memory_order order = memory_order_seq_cst ) volatile noexcept + { + assert( order != memory_order_acquire + && order != memory_order_acq_rel + && order != memory_order_consume + ); + platform::atomic_flag_clear( &m_Flag, order ); + } + void clear( memory_order order = memory_order_seq_cst ) noexcept + { + assert( order != memory_order_acquire + && order != memory_order_acq_rel + && order != memory_order_consume + ); + platform::atomic_flag_clear( &m_Flag, order ); + } + + bool test_and_set( memory_order order = memory_order_seq_cst ) volatile noexcept + { + return platform::atomic_flag_tas( &m_Flag, order ); + } + bool test_and_set( memory_order order = memory_order_seq_cst ) noexcept + { + return platform::atomic_flag_tas( &m_Flag, order ); + } + + atomic_flag() = 
default; + + atomic_flag(const atomic_flag&) = delete; + atomic_flag& operator=(const atomic_flag&) = delete; + atomic_flag& operator=(const atomic_flag&) volatile = delete; + + platform::atomic_flag_type volatile m_Flag; + } atomic_flag; + + static inline bool atomic_flag_test_and_set(volatile atomic_flag* p) noexcept + { + return p->test_and_set(); + } + static inline bool atomic_flag_test_and_set(atomic_flag * p) noexcept + { + return p->test_and_set(); + } + static inline bool atomic_flag_test_and_set_explicit(volatile atomic_flag* p, memory_order order) noexcept + { + return p->test_and_set( order ); + } + static inline bool atomic_flag_test_and_set_explicit(atomic_flag* p, memory_order order) noexcept + { + return p->test_and_set( order ); + } + static inline void atomic_flag_clear(volatile atomic_flag* p) noexcept + { + return p->clear(); + } + static inline void atomic_flag_clear(atomic_flag* p) noexcept + { + return p->clear(); + } + static inline void atomic_flag_clear_explicit(volatile atomic_flag* p, memory_order order) noexcept + { + return p->clear( order ); + } + static inline void atomic_flag_clear_explicit(atomic_flag* p, memory_order order) noexcept + { + return p->clear( order ); + } + + // Fences + static inline void atomic_thread_fence(memory_order order) noexcept + { + platform::thread_fence( order ); + CDS_COMPILER_RW_BARRIER; + } + static inline void atomic_signal_fence(memory_order order) noexcept + { + platform::signal_fence( order ); + } + +}} // namespace cds::cxx11_atomic + +//@endcond +#endif // #ifndef CDSLIB_COMPILER_CXX11_ATOMIC_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/defs.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/defs.h new file mode 100644 index 0000000..3e437ac --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/defs.h @@ -0,0 +1,76 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim 
Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_COMPILER_DEFS_H +#define CDSLIB_COMPILER_DEFS_H + +// __cplusplus values +#define CDS_CPLUSPLUS_11 201103L +#define CDS_CPLUSPLUS_14 201402L +#define CDS_CPLUSPLUS_17 201703L + +// VC 2017 is not full C++11-compatible yet +//#if __cplusplus < CDS_CPLUSPLUS_11 +//# error C++11 and above is required +//#endif + + +#if CDS_COMPILER == CDS_COMPILER_MSVC +# include +#elif CDS_COMPILER == CDS_COMPILER_GCC +# include +#elif CDS_COMPILER == CDS_COMPILER_INTEL +# include +#elif CDS_COMPILER == CDS_COMPILER_CLANG +# include +#elif CDS_COMPILER == CDS_COMPILER_UNKNOWN +# error Unknown compiler. Compilation aborted +#else +# error Unknown value of CDS_COMPILER macro +#endif + +#ifndef CDS_EXPORT_API +# define CDS_EXPORT_API +#endif + +#ifndef cds_likely +# define cds_likely( expr ) expr +# define cds_unlikely( expr ) expr +#endif + +//if constexpr support (C++17) +#ifndef constexpr_if +# define constexpr_if if +#endif + +// Features +#include + +#endif // #ifndef CDSLIB_COMPILER_DEFS_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/feature_tsan.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/feature_tsan.h new file mode 100644 index 0000000..7599ee3 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/feature_tsan.h @@ -0,0 +1,116 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_COMPILER_FEATURE_TSAN_H +#define CDSLIB_COMPILER_FEATURE_TSAN_H + +// Thread Sanitizer annotations. 
+// From http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/test/tsan/annotate_happens_before.cc?view=markup + +//@cond + +#ifdef CDS_THREAD_SANITIZER_ENABLED +# define CDS_TSAN_ANNOTATE_HAPPENS_BEFORE(addr) AnnotateHappensBefore(__FILE__, __LINE__, reinterpret_cast(addr)) +# define CDS_TSAN_ANNOTATE_HAPPENS_AFTER(addr) AnnotateHappensAfter(__FILE__, __LINE__, reinterpret_cast(addr)) + +# define CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN AnnotateIgnoreReadsBegin(__FILE__, __LINE__) +# define CDS_TSAN_ANNOTATE_IGNORE_READS_END AnnotateIgnoreReadsEnd(__FILE__, __LINE__) +# define CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN AnnotateIgnoreWritesBegin(__FILE__, __LINE__) +# define CDS_TSAN_ANNOTATE_IGNORE_WRITES_END AnnotateIgnoreWritesEnd(__FILE__, __LINE__) +# define CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN \ + CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN; \ + CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN +# define CDS_TSAN_ANNOTATE_IGNORE_RW_END \ + CDS_TSAN_ANNOTATE_IGNORE_WRITES_END;\ + CDS_TSAN_ANNOTATE_IGNORE_READS_END +# define CDS_TSAN_ANNOTATE_NEW_MEMORY( addr, sz ) AnnotateNewMemory( __FILE__, __LINE__, reinterpret_cast(addr), sz ) + +// Publish/unpublish - DEPRECATED +#if 0 +# define CDS_TSAN_ANNOTATE_PUBLISH_MEMORY_RANGE( addr, sz ) AnnotatePublishMemoryRange( __FILE__, __LINE__, reinterpret_cast(addr), sz ) +# define CDS_TSAN_ANNOTATE_UNPUBLISH_MEMORY_RANGE( addr, sz ) AnnotateUnpublishMemoryRange( __FILE__, __LINE__, reinterpret_cast(addr), sz ) +#endif + +# define CDS_TSAN_ANNOTATE_MUTEX_CREATE( addr ) AnnotateRWLockCreate( __FILE__, __LINE__, reinterpret_cast(addr)) +# define CDS_TSAN_ANNOTATE_MUTEX_DESTROY( addr ) AnnotateRWLockDestroy( __FILE__, __LINE__, reinterpret_cast(addr)) + // must be called after actual acquire +# define CDS_TSAN_ANNOTATE_MUTEX_ACQUIRED( addr ) AnnotateRWLockAcquired( __FILE__, __LINE__, reinterpret_cast(addr), 1 ) + // must be called before actual release +# define CDS_TSAN_ANNOTATE_MUTEX_RELEASED( addr ) AnnotateRWLockReleased( __FILE__, __LINE__, 
reinterpret_cast(addr), 1 ) + + // provided by TSan + extern "C" { + void AnnotateHappensBefore(const char *f, int l, void *addr); + void AnnotateHappensAfter(const char *f, int l, void *addr); + + void AnnotateIgnoreReadsBegin(const char *f, int l); + void AnnotateIgnoreReadsEnd(const char *f, int l); + void AnnotateIgnoreWritesBegin(const char *f, int l); + void AnnotateIgnoreWritesEnd(const char *f, int l); + +#if 0 + void AnnotatePublishMemoryRange( const char *f, int l, void * mem, size_t size ); + void AnnotateUnpublishMemoryRange( const char *f, int l, void * addr, size_t size ); +#endif + void AnnotateNewMemory( const char *f, int l, void * mem, size_t size ); + + void AnnotateRWLockCreate( const char *f, int l, void* m ); + void AnnotateRWLockDestroy( const char *f, int l, void* m ); + void AnnotateRWLockAcquired( const char *f, int l, void *m, long is_w ); + void AnnotateRWLockReleased( const char *f, int l, void *m, long is_w ); + } + +#else // CDS_THREAD_SANITIZER_ENABLED + +# define CDS_TSAN_ANNOTATE_HAPPENS_BEFORE(addr) +# define CDS_TSAN_ANNOTATE_HAPPENS_AFTER(addr) + +# define CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN +# define CDS_TSAN_ANNOTATE_IGNORE_READS_END +# define CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN +# define CDS_TSAN_ANNOTATE_IGNORE_WRITES_END +# define CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN +# define CDS_TSAN_ANNOTATE_IGNORE_RW_END + +#if 0 +# define CDS_TSAN_ANNOTATE_PUBLISH_MEMORY_RANGE( addr, sz ) +# define CDS_TSAN_ANNOTATE_UNPUBLISH_MEMORY_RANGE( addr, sz ) +#endif +# define CDS_TSAN_ANNOTATE_NEW_MEMORY( addr, sz ) + +# define CDS_TSAN_ANNOTATE_MUTEX_CREATE( addr ) +# define CDS_TSAN_ANNOTATE_MUTEX_DESTROY( addr ) +# define CDS_TSAN_ANNOTATE_MUTEX_ACQUIRED( addr ) +# define CDS_TSAN_ANNOTATE_MUTEX_RELEASED( addr ) + +#endif + +//@endcond +#endif // #ifndef CDSLIB_COMPILER_FEATURE_TSAN_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/amd64/backoff.h 
b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/amd64/backoff.h new file mode 100644 index 0000000..9896260 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/amd64/backoff.h @@ -0,0 +1,60 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_COMPILER_GCC_AMD64_BACKOFF_H +#define CDSLIB_COMPILER_GCC_AMD64_BACKOFF_H + +//@cond none + +namespace cds { namespace backoff { + namespace gcc { namespace amd64 { + +# define CDS_backoff_nop_defined + static inline void backoff_nop() + { + asm volatile ( "nop;" ); + } + +# define CDS_backoff_hint_defined + static inline void backoff_hint() + { + asm volatile ( "pause;" ); + } + + + }} // namespace gcc::amd64 + + namespace platform { + using namespace gcc::amd64; + } +}} // namespace cds::backoff + +//@endcond +#endif // #ifndef CDSLIB_COMPILER_GCC_AMD64_BACKOFF_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/amd64/bitop.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/amd64/bitop.h new file mode 100644 index 0000000..e57859a --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/amd64/bitop.h @@ -0,0 +1,184 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_COMPILER_GCC_AMD64_BITOP_H +#define CDSLIB_COMPILER_GCC_AMD64_BITOP_H + +//@cond none +namespace cds { + namespace bitop { namespace platform { namespace gcc { namespace amd64 { + // MSB - return index (1..32) of most significant bit in nArg. If nArg == 0 return 0 +# define cds_bitop_msb32_DEFINED + static inline int msb32( uint32_t nArg ) + { + int nRet; + __asm__ __volatile__ ( + "bsrl %[nArg], %[nRet] ;\n\t" + "jnz 1f ;\n\t" + "xorl %[nRet], %[nRet] ;\n\t" + "subl $1, %[nRet] ;\n\t" + "1:" + "addl $1, %[nRet] ;\n\t" + : [nRet] "=a" (nRet) + : [nArg] "r" (nArg) + : "cc" + ); + return nRet; + } + +# define cds_bitop_msb32nz_DEFINED + static inline int msb32nz( uint32_t nArg ) + { + assert( nArg != 0 ); + int nRet; + __asm__ __volatile__ ( + "bsrl %[nArg], %[nRet] ;" + : [nRet] "=a" (nRet) + : [nArg] "r" (nArg) + : "cc" + ); + return nRet; + } + + // LSB - return index (0..31) of least significant bit in nArg. If nArg == 0 return -1U +# define cds_bitop_lsb32_DEFINED + static inline int lsb32( uint32_t nArg ) + { + + int nRet; + __asm__ __volatile__ ( + "bsfl %[nArg], %[nRet] ;" + "jnz 1f ;" + "xorl %[nRet], %[nRet] ;" + "subl $1, %[nRet] ;" + "1:" + "addl $1, %[nRet] ;" + : [nRet] "=a" (nRet) + : [nArg] "r" (nArg) + : "cc" + ); + return nRet; + + } + + // LSB - return index (0..31) of least significant bit in nArg. 
+ // Condition: nArg != 0 +# define cds_bitop_lsb32nz_DEFINED + static inline int lsb32nz( uint32_t nArg ) + { + assert( nArg != 0 ); + int nRet; + __asm__ __volatile__ ( + "bsfl %[nArg], %[nRet] ;" + : [nRet] "=a" (nRet) + : [nArg] "r" (nArg) + : "cc" + ); + return nRet; + } + +# define cds_bitop_msb64_DEFINED + static inline int msb64( uint64_t nArg ) + { + uint64_t nRet; + asm volatile ( + "bsrq %[nArg], %[nRet] ;\n\t" + "jnz 1f ;\n\t" + "xorq %[nRet], %[nRet] ;\n\t" + "subq $1, %[nRet] ;\n\t" + "1:" + "addq $1, %[nRet] ;\n\t" + : [nRet] "=a" (nRet) + : [nArg] "r" (nArg) + : "cc" + ); + return (int) nRet; + } + +# define cds_bitop_msb64nz_DEFINED + static inline int msb64nz( uint64_t nArg ) + { + assert( nArg != 0 ); + uint64_t nRet; + __asm__ __volatile__ ( + "bsrq %[nArg], %[nRet] ;" + : [nRet] "=a" (nRet) + : [nArg] "r" (nArg) + : "cc" + ); + return (int) nRet; + } + + // LSB - return index (0..31) of least significant bit in nArg. If nArg == 0 return -1U +# define cds_bitop_lsb64_DEFINED + static inline int lsb64( uint64_t nArg ) + { + uint64_t nRet; + __asm__ __volatile__ ( + "bsfq %[nArg], %[nRet] ;" + "jnz 1f ;" + "xorq %[nRet], %[nRet] ;" + "subq $1, %[nRet] ;" + "1:" + "addq $1, %[nRet] ;" + : [nRet] "=a" (nRet) + : [nArg] "r" (nArg) + : "cc" + ); + return (int) nRet; + + } + + // LSB - return index (0..31) of least significant bit in nArg. 
+ // Condition: nArg != 0 +# define cds_bitop_lsb64nz_DEFINED + static inline int lsb64nz( uint64_t nArg ) + { + assert( nArg != 0 ); + uint64_t nRet; + __asm__ __volatile__ ( + "bsfq %[nArg], %[nRet] ;" + : [nRet] "=a" (nRet) + : [nArg] "r" (nArg) + : "cc" + ); + return (int) nRet; + } + + + }} // namespace gcc::amd64 + + using namespace gcc::amd64; + + }}} // namespace cds::bitop::platform + +//@endcond + +#endif // #ifndef CDSLIB_COMPILER_GCC_AMD64_BITOP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/amd64/cxx11_atomic.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/amd64/cxx11_atomic.h new file mode 100644 index 0000000..ddcedef --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/amd64/cxx11_atomic.h @@ -0,0 +1,228 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_COMPILER_GCC_AMD64_CXX11_ATOMIC_H +#define CDSLIB_COMPILER_GCC_AMD64_CXX11_ATOMIC_H + +#include +#include + +//@cond +namespace cds { namespace cxx11_atomic { + namespace platform { inline namespace gcc { inline namespace amd64 { + + //----------------------------------------------------------------------------- + // 64bit primitives + //----------------------------------------------------------------------------- + + template + static inline bool cas64_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 8 )); + + T prev = expected; + fence_before(mo_success); + __asm__ __volatile__ ( + "lock ; cmpxchgq %[desired], %[pDest]" + : [prev] "+a" (prev), [pDest] "+m" (*pDest) + : [desired] "r" (desired) + ); + bool success = (prev == expected); + expected = prev; + if (success) + fence_after(mo_success); + else + fence_after(mo_fail); + return success; + } + + template + static inline bool cas64_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + return cas64_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + template + static inline T load64( T volatile const * pSrc, memory_order order ) noexcept + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( order == 
memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc ); + assert( cds::details::is_aligned( pSrc, 8 )); + + T v = *pSrc; + fence_after_load( order ); + return v; + } + + + template + static inline T exchange64( T volatile * pDest, T v, memory_order order ) noexcept + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 8 )); + + fence_before(order); + __asm__ __volatile__ ( + "xchgq %[v], %[pDest]" + : [v] "+r" (v), [pDest] "+m" (*pDest) + ); + fence_after(order); + return v; + } + + template + static inline void store64( T volatile * pDest, T val, memory_order order ) noexcept + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest ); + assert( cds::details::is_aligned( pDest, 8 )); + + if (order != memory_order_seq_cst) { + fence_before(order); + *pDest = val; + } + else { + exchange64( pDest, val, order); + } + } + +# define CDS_ATOMIC_fetch64_add_defined + template + static inline T fetch64_add( T volatile * pDest, T v, memory_order order) noexcept + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 8 )); + + fence_before(order); + __asm__ __volatile__ ( + "lock ; xaddq %[v], %[pDest]" + : [v] "+r" (v), [pDest] "+m" (*pDest) + ); + fence_after(order); + return v; + } + +# define CDS_ATOMIC_fetch64_sub_defined + template + static inline T fetch64_sub( T volatile * pDest, T v, memory_order order) noexcept + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 8 )); + + fence_before(order); + __asm__ __volatile__ ( + "negq %[v] ; \n" + "lock ; xaddq %[v], %[pDest]" + : [v] "+r" (v), [pDest] "+m" (*pDest) + ); + fence_after(order); + return v; + } + + + 
//----------------------------------------------------------------------------- + // pointer primitives + //----------------------------------------------------------------------------- + + template + static inline T * exchange_ptr( T * volatile * pDest, T * v, memory_order order ) noexcept + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + + return (T *) exchange64( (uint64_t volatile *) pDest, (uint64_t) v, order ); + } + + template + static inline void store_ptr( T * volatile * pDest, T * src, memory_order order ) noexcept + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest ); + + if ( order != memory_order_seq_cst ) { + fence_before( order ); + *pDest = src; + } + else { + exchange_ptr( pDest, src, order ); + } + } + + template + static inline T * load_ptr( T * volatile const * pSrc, memory_order order ) noexcept + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc ); + + T * v = *pSrc; + fence_after_load( order ); + return v; + } + + template + static inline bool cas_ptr_strong( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + + return cas64_strong( (uint64_t volatile *) pDest, *reinterpret_cast( &expected ), (uint64_t) desired, mo_success, mo_fail ); + } + + template + static inline bool cas_ptr_weak( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + return cas_ptr_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + }} // namespace gcc::amd64 + + } // namespace 
platform + +}} // namespace cds::cxx11_atomic +//@endcond + +#endif // #ifndef CDSLIB_COMPILER_GCC_AMD64_CXX11_ATOMIC_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/arm7/backoff.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/arm7/backoff.h new file mode 100644 index 0000000..c5e3a57 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/arm7/backoff.h @@ -0,0 +1,52 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_COMPILER_GCC_ARM7_BACKOFF_H +#define CDSLIB_COMPILER_GCC_ARM7_BACKOFF_H + +//@cond none + +namespace cds { namespace backoff { + namespace gcc { namespace arm7 { + +# define CDS_backoff_hint_defined + static inline void backoff_hint() + { + asm volatile( "yield" ::: "memory" ); + } + }} // namespace gcc::arm7 + + namespace platform { + using namespace gcc::arm7; + } +}} // namespace cds::backoff + +//@endcond +#endif // #ifndef CDSLIB_COMPILER_GCC_ARM7_BACKOFF_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/arm8/backoff.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/arm8/backoff.h new file mode 100644 index 0000000..72a6083 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/arm8/backoff.h @@ -0,0 +1,52 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_COMPILER_GCC_ARM8_BACKOFF_H +#define CDSLIB_COMPILER_GCC_ARM8_BACKOFF_H + +//@cond none + +namespace cds { namespace backoff { + namespace gcc { namespace arm8 { + +# define CDS_backoff_hint_defined + static inline void backoff_hint() + { + asm volatile( "yield" ::: "memory" ); + } + }} // namespace gcc::arm8 + + namespace platform { + using namespace gcc::arm8; + } +}} // namespace cds::backoff + +//@endcond +#endif // #ifndef CDSLIB_COMPILER_GCC_ARM8_BACKOFF_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/compiler_barriers.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/compiler_barriers.h new file mode 100644 index 0000000..88133a3 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/compiler_barriers.h @@ -0,0 +1,38 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_COMPILER_GCC_COMPILER_BARRIERS_H +#define CDSLIB_COMPILER_GCC_COMPILER_BARRIERS_H + +#define CDS_COMPILER_RW_BARRIER __asm__ __volatile__ ( "" ::: "memory" ) +#define CDS_COMPILER_R_BARRIER CDS_COMPILER_RW_BARRIER +#define CDS_COMPILER_W_BARRIER CDS_COMPILER_RW_BARRIER + +#endif // #ifndef CDSLIB_COMPILER_GCC_COMPILER_BARRIERS_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/compiler_macro.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/compiler_macro.h new file mode 100644 index 0000000..18178c8 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/compiler_macro.h @@ -0,0 +1,186 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary 
forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_COMPILER_GCC_COMPILER_MACRO_H +#define CDSLIB_COMPILER_GCC_COMPILER_MACRO_H + +// OS interface && OS name +#ifndef CDS_OS_TYPE +# if defined( __linux__ ) +# define CDS_OS_INTERFACE CDS_OSI_UNIX +# define CDS_OS_TYPE CDS_OS_LINUX +# define CDS_OS__NAME "linux" +# define CDS_OS__NICK "linux" +# elif defined( __sun__ ) +# define CDS_OS_INTERFACE CDS_OSI_UNIX +# define CDS_OS_TYPE CDS_OS_SUN_SOLARIS +# define CDS_OS__NAME "Sun Solaris" +# define CDS_OS__NICK "sun" +# elif defined( __hpux__ ) +# define CDS_OS_INTERFACE CDS_OSI_UNIX +# define CDS_OS_TYPE CDS_OS_HPUX +# define CDS_OS__NAME "HP-UX" +# define CDS_OS__NICK "hpux" +# elif defined( _AIX ) +# define CDS_OS_INTERFACE CDS_OSI_UNIX +# define CDS_OS_TYPE CDS_OS_AIX +# define CDS_OS__NAME "AIX" +# define CDS_OS__NICK "aix" +# elif defined( __FreeBSD__ ) +# define CDS_OS_INTERFACE CDS_OSI_UNIX +# define CDS_OS_TYPE CDS_OS_FREE_BSD +# define CDS_OS__NAME "FreeBSD" +# define CDS_OS__NICK "freebsd" +# elif defined( __OpenBSD__ ) +# define CDS_OS_INTERFACE CDS_OSI_UNIX +# define CDS_OS_TYPE CDS_OS_OPEN_BSD +# define CDS_OS__NAME "OpenBSD" +# define CDS_OS__NICK "openbsd" +# elif defined( __NetBSD__ ) +# define CDS_OS_INTERFACE CDS_OSI_UNIX +# define CDS_OS_TYPE CDS_OS_NET_BSD +# define CDS_OS__NAME "NetBSD" +# define CDS_OS__NICK "netbsd" +# elif defined(__MINGW32__) || defined( __MINGW64__) +# define CDS_OS_INTERFACE CDS_OSI_WINDOWS +# define CDS_OS_TYPE CDS_OS_MINGW +# define CDS_OS__NAME "MinGW" +# define CDS_OS__NICK "mingw" +# elif defined(__MACH__) +# define CDS_OS_INTERFACE CDS_OSI_UNIX +# define CDS_OS_TYPE CDS_OS_OSX +# define CDS_OS__NAME "OS X" +# define CDS_OS__NICK "osx" +# else +# define CDS_OS_INTERFACE CDS_OSI_UNIX +# define CDS_OS_TYPE CDS_OS_PTHREAD +# define CDS_OS__NAME "pthread" +# define CDS_OS__NICK "pthread" +# endif +#endif // #ifndef CDS_OS_TYPE + +// Processor architecture + +#if defined(__arm__) && !defined(__ARM_ARCH) +// GCC 4.6 does not defined __ARM_ARCH +# if 
defined(__ARM_ARCH_8A__) || defined(__ARM_ARCH_8S__) || defined(__aarch64__) || defined(__ARM_ARCH_ISA_A64) +# define __ARM_ARCH 8 +# elif defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7S__) +# define __ARM_ARCH 7 +# else +# define __ARM_ARCH 5 +# endif +#endif + +#if defined(__x86_64__) || defined(__amd64__) || defined(__amd64) +# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_AMD64 +# define CDS_BUILD_BITS 64 +# define CDS_PROCESSOR__NAME "Intel x86-64" +# define CDS_PROCESSOR__NICK "amd64" +#elif defined(__i386__) +# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_X86 +# define CDS_BUILD_BITS 32 +# define CDS_PROCESSOR__NAME "Intel x86" +# define CDS_PROCESSOR__NICK "x86" +#elif defined(sparc) || defined (__sparc__) +# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_SPARC +# define CDS_PROCESSOR__NAME "Sparc" +# define CDS_PROCESSOR__NICK "sparc" +# ifdef __arch64__ +# define CDS_BUILD_BITS 64 +# else +# error Sparc 32bit is not supported +# endif +#elif defined( __ia64__) +# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_IA64 +# define CDS_BUILD_BITS 64 +# define CDS_PROCESSOR__NAME "Intel IA64" +# define CDS_PROCESSOR__NICK "ia64" +#elif defined(_ARCH_PPC64) +# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_PPC64 +# define CDS_BUILD_BITS 64 +# define CDS_PROCESSOR__NAME "IBM PowerPC64" +# define CDS_PROCESSOR__NICK "ppc64" +#elif defined(__arm__) && __SIZEOF_POINTER__ == 4 && __ARM_ARCH >= 7 && __ARM_ARCH < 8 +# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_ARM7 +# define CDS_BUILD_BITS 32 +# define CDS_PROCESSOR__NAME "ARM v7" +# define CDS_PROCESSOR__NICK "arm7" +#elif ( defined(__arm__) || defined(__aarch64__)) && __ARM_ARCH >= 8 +# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_ARM8 +# define CDS_BUILD_BITS 64 +# define CDS_PROCESSOR__NAME "ARM v8" +# define CDS_PROCESSOR__NICK "arm8" +#else +# if defined(CDS_USE_LIBCDS_ATOMIC) +# error "Libcds does not support atomic implementation for the processor architecture. 
Try to use C++11-compatible compiler and remove CDS_USE_LIBCDS_ATOMIC flag from compiler command line" +# else +# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_UNKNOWN +# define CDS_BUILD_BITS 32 +# define CDS_PROCESSOR__NAME "unknown" +# define CDS_PROCESSOR__NICK "unknown" +# endif +#endif + +#if CDS_OS_TYPE == CDS_OS_MINGW +# ifdef CDS_BUILD_LIB +# define CDS_EXPORT_API __declspec(dllexport) +# else +# define CDS_EXPORT_API __declspec(dllimport) +# endif +#else +# ifndef __declspec +# define __declspec(_x) +# endif +#endif + +// Byte order +#if !defined(CDS_ARCH_LITTLE_ENDIAN) && !defined(CDS_ARCH_BIG_ENDIAN) +# ifdef __BYTE_ORDER__ +# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +# define CDS_ARCH_LITTLE_ENDIAN +# elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +# define CDS_ARCH_BIG_ENDIAN +# endif +# else +# warning "Undefined byte order for current architecture (no __BYTE_ORDER__ preprocessor definition)" +# endif +#endif + +// Sanitizer attributes +// Example: CDS_DISABLE_SANITIZE( "function" ) +#ifdef CDS_ADDRESS_SANITIZER_ENABLED +# define CDS_SUPPRESS_SANITIZE( ... ) __attribute__(( no_sanitize( __VA_ARGS__ ))) +#else +# define CDS_SUPPRESS_SANITIZE( ... 
) +#endif + + +#endif // #ifndef CDSLIB_COMPILER_GCC_COMPILER_MACRO_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/defs.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/defs.h new file mode 100644 index 0000000..fc456a8 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/defs.h @@ -0,0 +1,132 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_COMPILER_GCC_DEFS_H +#define CDSLIB_COMPILER_GCC_DEFS_H + +// Compiler version +#define CDS_COMPILER_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) + +#if CDS_COMPILER_VERSION < 40800 +# error "Compiler version error. GCC version 4.8.0 and above is supported" +#endif + +// Compiler name +#ifdef __VERSION__ +# define CDS_COMPILER__NAME ("GNU C++ " __VERSION__) +#else +# define CDS_COMPILER__NAME "GNU C++" +#endif +#define CDS_COMPILER__NICK "gcc" + +#if __cplusplus < CDS_CPLUSPLUS_11 +# error C++11 and above is required +#endif + + +#include + +#define alignof __alignof__ + +// *************************************** +// C++11 features + +// C++11 thread_local keyword +#define CDS_CXX11_THREAD_LOCAL_SUPPORT + +// ************************************************* +// Features +// If you run under Thread Sanitizer, pass -DCDS_THREAD_SANITIZER_ENABLED in compiler command line +// UPD: Seems, GCC 5+ has predefined macro __SANITIZE_THREAD__, see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=64354 +#if defined(__SANITIZE_THREAD__) && !defined(CDS_THREAD_SANITIZER_ENABLED) +# define CDS_THREAD_SANITIZER_ENABLED +#endif + +// ************************************************* +// Alignment macro + +#define CDS_TYPE_ALIGNMENT(n) __attribute__ ((aligned (n))) +#define CDS_CLASS_ALIGNMENT(n) __attribute__ ((aligned (n))) +#define CDS_DATA_ALIGNMENT(n) __attribute__ ((aligned (n))) + +// Attributes +#if CDS_COMPILER_VERSION >= 40900 +# if __cplusplus == CDS_CPLUSPLUS_11 // C++11 +# define CDS_DEPRECATED( reason ) [[gnu::deprecated(reason)]] +# else // C++14 +# define CDS_DEPRECATED( reason ) [[deprecated(reason)]] +# endif +#else + // GCC 4.8 +# define CDS_DEPRECATED( reason ) __attribute__((deprecated( reason ))) +#endif + +#define CDS_NORETURN __attribute__((__noreturn__)) + +// likely/unlikely + +#define cds_likely( expr ) __builtin_expect( !!( expr ), 1 ) +#define cds_unlikely( expr ) __builtin_expect( !!( expr ), 0 ) + +// 
Exceptions +#if defined( __EXCEPTIONS ) && __EXCEPTIONS == 1 +# define CDS_EXCEPTION_ENABLED +#endif + +// double-width CAS support +// note: gcc-4.8 does not support double-word atomics +// gcc-4.9: a lot of crashes when use DCAS +// gcc-7: 128-bit atomic is not lock-free, see https://gcc.gnu.org/ml/gcc/2017-01/msg00167.html +// You can manually suppress wide-atomic support by defining in compiler command line: +// for 64bit platform: -DCDS_DISABLE_128BIT_ATOMIC +// for 32bit platform: -DCDS_DISABLE_64BIT_ATOMIC +#if CDS_COMPILER_VERSION >= 50000 +# if CDS_BUILD_BITS == 64 +# if !defined( CDS_DISABLE_128BIT_ATOMIC ) && defined( __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16 ) && CDS_COMPILER_VERSION < 70000 +# define CDS_DCAS_SUPPORT +# endif +# else +# if !defined( CDS_DISABLE_64BIT_ATOMIC ) && defined( __GCC_HAVE_SYNC_COMPARE_AND_SWAP_8 ) +# define CDS_DCAS_SUPPORT +# endif +# endif +#endif + +//if constexpr support (C++17) +#ifndef constexpr_if +# if defined( __cpp_if_constexpr ) && __cpp_if_constexpr >= 201606 +# define constexpr_if if constexpr +# endif +#endif + + +#include + +#endif // #ifndef CDSLIB_COMPILER_GCC_DEFS_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/ia64/backoff.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/ia64/backoff.h new file mode 100644 index 0000000..5a0304e --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/ia64/backoff.h @@ -0,0 +1,59 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the 
following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_COMPILER_GCC_IA64_BACKOFF_H +#define CDSLIB_COMPILER_GCC_IA64_BACKOFF_H + +//@cond none + +namespace cds { namespace backoff { + namespace gcc { namespace ia64 { + +# define CDS_backoff_hint_defined + static inline void backoff_hint() + { + asm volatile ( "hint @pause;;" ); + } + +# define CDS_backoff_nop_defined + static inline void backoff_nop() + { + asm volatile ( "nop;;" ); + } + + }} // namespace gcc::ia64 + + namespace platform { + using namespace gcc::ia64; + } +}} // namespace cds::backoff + +//@endcond +#endif // #ifndef CDSLIB_COMPILER_GCC_IA64_BACKOFF_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/ia64/bitop.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/ia64/bitop.h new file mode 100644 index 0000000..b8bef41 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/ia64/bitop.h @@ -0,0 +1,90 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_COMPILER_GCC_IA64_BITOP_H +#define CDSLIB_COMPILER_GCC_IA64_BITOP_H + +//@cond none +namespace cds { + namespace bitop { namespace platform { namespace gcc { namespace ia64 { + + // MSB - return index (1..32) of most significant bit in x. If x == 0 return 0 +# define cds_bitop_msb32_DEFINED + static inline int msb32( uint32_t nArg ) + { + if ( !nArg ) + return 0; + uint64_t x = nArg; + x |= x >> 1; + x |= x >> 2; + x |= x >> 4; + x |= x >> 8; + x |= x >> 16; + + uint64_t nRes; + asm __volatile__( "popcnt %0=%1\n\t" : "=r" (nRes) : "r" (x)); + return (int) nRes; + } + + // It is not compiled on HP-UX. Why?.. +#if CDS_OS_TYPE != CDS_OS_HPUX + // MSB - return index (0..31) of most significant bit in nArg. + // !!! nArg != 0 +# define cds_bitop_msb32nz_DEFINED + static inline int msb32nz( uint32_t nArg ) + { + assert( nArg != 0 ); + long double d = nArg; + long nExp; + asm __volatile__("getf.exp %0=%1\n\t" : "=r"(nExp) : "f"(d)); + return (int) (nExp - 0xffff); + } + + // MSB - return index (0..63) of most significant bit in nArg. + // !!! 
nArg != 0 +# define cds_bitop_msb64nz_DEFINED + static inline int msb64nz( uint64_t nArg ) + { + assert( nArg != 0 ); + long double d = nArg; + long nExp; + asm __volatile__("getf.exp %0=%1\n\t" : "=r" (nExp) : "f" (d)); + return (int) (nExp - 0xffff); + } +#endif // #if CDS_OS_TYPE != CDS_OS_HPUX + + }} // namespace gcc::ia64 + + using namespace gcc::ia64; + +}}} // namespace cds::bitop::platform +//@endcond + +#endif // #ifndef CDSLIB_COMPILER_GCC_IA64_BITOP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/ia64/cxx11_atomic.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/ia64/cxx11_atomic.h new file mode 100644 index 0000000..fc65352 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/ia64/cxx11_atomic.h @@ -0,0 +1,678 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_COMPILER_GCC_IA64_CXX11_ATOMIC_H +#define CDSLIB_COMPILER_GCC_IA64_CXX11_ATOMIC_H + +/* + Source: + 1. load/store: http://www.decadent.org.uk/pipermail/cpp-threads/2008-December/001932.html + 2. Mapping to C++ Memory Model: http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html +*/ + +#include + +//@cond +namespace cds { namespace cxx11_atomic { + namespace platform { inline namespace gcc { inline namespace ia64 { + + static inline void itanium_full_fence() noexcept + { + __asm__ __volatile__ ( "mf \n\t" ::: "memory" ); + } + + static inline void fence_before( memory_order order ) noexcept + { + switch(order) { + case memory_order_relaxed: + case memory_order_consume: + case memory_order_acquire: + break; + case memory_order_release: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_seq_cst: + itanium_full_fence(); + break; + } + } + + static inline void fence_after( memory_order order ) noexcept + { + switch(order) { + case memory_order_acquire: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_relaxed: + case memory_order_consume: + case memory_order_release: + break; + case memory_order_seq_cst: + itanium_full_fence(); + break; + } + } + + + //----------------------------------------------------------------------------- + // fences + //----------------------------------------------------------------------------- + static inline void 
thread_fence(memory_order order) noexcept + { + switch(order) + { + case memory_order_relaxed: + case memory_order_consume: + break; + case memory_order_release: + case memory_order_acquire: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_seq_cst: + itanium_full_fence(); + break; + default:; + } + } + + static inline void signal_fence(memory_order order) noexcept + { + // C++11: 29.8.8: only compiler optimization, no hardware instructions + switch(order) + { + case memory_order_relaxed: + break; + case memory_order_consume: + case memory_order_release: + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_seq_cst: + CDS_COMPILER_RW_BARRIER; + break; + default:; + } + } + +#define CDS_ITANIUM_ATOMIC_LOAD( n_bytes, n_bits ) \ + template \ + static inline T load##n_bits( T volatile const * pSrc, memory_order order ) noexcept \ + { \ + static_assert( sizeof(T) == n_bytes, "Illegal size of operand" ) ; \ + assert( order == memory_order_relaxed \ + || order == memory_order_consume \ + || order == memory_order_acquire \ + || order == memory_order_seq_cst \ + ) ; \ + assert( pSrc ) ; \ + T val ; \ + __asm__ __volatile__ ( \ + "ld" #n_bytes ".acq %[val] = [%[pSrc]] \n\t" \ + : [val] "=r" (val) \ + : [pSrc] "r" (pSrc) \ + : "memory" \ + ) ; \ + return val ; \ + } + +#define CDS_ITANIUM_ATOMIC_STORE( n_bytes, n_bits ) \ + template \ + static inline void store##n_bits( T volatile * pDest, T val, memory_order order ) noexcept \ + { \ + static_assert( sizeof(T) == n_bytes, "Illegal size of operand" ) ; \ + assert( order == memory_order_relaxed \ + || order == memory_order_release \ + || order == memory_order_seq_cst \ + ) ; \ + assert( pDest ) ; \ + if ( order == memory_order_seq_cst ) { \ + __asm__ __volatile__ ( \ + "st" #n_bytes ".rel [%[pDest]] = %[val] \n\t" \ + "mf \n\t" \ + :: [pDest] "r" (pDest), [val] "r" (val) \ + : "memory" \ + ) ; \ + } \ + else { \ + __asm__ __volatile__ ( \ + "st" #n_bytes ".rel 
[%[pDest]] = %[val] \n\t" \ + :: [pDest] "r" (pDest), [val] "r" (val) \ + : "memory" \ + ) ; \ + fence_after(order) ; \ + } \ + } + +#define CDS_ITANIUM_ATOMIC_CAS( n_bytes, n_bits ) \ + template \ + static inline bool cas##n_bits##_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order /*mo_fail*/ ) noexcept \ + { \ + static_assert( sizeof(T) == n_bytes, "Illegal size of operand" ) ; \ + T current ; \ + switch(mo_success) { \ + case memory_order_relaxed: \ + case memory_order_consume: \ + case memory_order_acquire: \ + __asm__ __volatile__ ( \ + "mov ar.ccv = %[expected] ;;\n\t" \ + "cmpxchg" #n_bytes ".acq %[current] = [%[pDest]], %[desired], ar.ccv\n\t" \ + : [current] "=r" (current) \ + : [pDest] "r" (pDest), [expected] "r" (expected), [desired] "r" (desired) \ + : "ar.ccv", "memory" \ + ); \ + break ; \ + case memory_order_release: \ + __asm__ __volatile__ ( \ + "mov ar.ccv = %[expected] ;;\n\t" \ + "cmpxchg" #n_bytes ".rel %[current] = [%[pDest]], %[desired], ar.ccv\n\t" \ + : [current] "=r" (current) \ + : [pDest] "r" (pDest), [expected] "r" (expected), [desired] "r" (desired) \ + : "ar.ccv", "memory" \ + ); \ + break ; \ + case memory_order_acq_rel: \ + case memory_order_seq_cst: \ + __asm__ __volatile__ ( \ + "mov ar.ccv = %[expected] ;;\n\t" \ + "cmpxchg" #n_bytes ".rel %[current] = [%[pDest]], %[desired], ar.ccv\n\t" \ + "mf \n\t" \ + : [current] "=r" (current) \ + : [pDest] "r" (pDest), [expected] "r" (expected), [desired] "r" (desired) \ + : "ar.ccv", "memory" \ + ); \ + break; \ + default: \ + assert(false); \ + } \ + bool bSuccess = expected == current ; \ + expected = current ; \ + return bSuccess ; \ + } \ + template \ + static inline bool cas##n_bits##_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept \ + { return cas##n_bits##_strong( pDest, expected, desired, mo_success, mo_fail ); } + + // xchg is performed with acquire semantics +#define 
CDS_ITANIUM_ATOMIC_EXCHANGE( n_bytes, n_bits ) \ + template \ + static inline T exchange##n_bits( T volatile * pDest, T val, memory_order order ) noexcept \ + { \ + static_assert( sizeof(T) == n_bytes, "Illegal size of operand" ) ; \ + assert( pDest ) ; \ + T current ; \ + switch(order) \ + { \ + case memory_order_relaxed: \ + case memory_order_consume: \ + case memory_order_acquire: \ + __asm__ __volatile__ ( \ + "xchg" #n_bytes " %[current] = [%[pDest]], %[val]\n\t" \ + : [current] "=r" (current) \ + : [pDest] "r" (pDest), [val] "r" (val) \ + : "memory" \ + ); \ + break; \ + case memory_order_acq_rel: \ + case memory_order_release: \ + case memory_order_seq_cst: \ + __asm__ __volatile__ ( \ + "mf \n\t" \ + "xchg" #n_bytes " %[current] = [%[pDest]], %[val]\n\t" \ + : [current] "=r" (current) \ + : [pDest] "r" (pDest), [val] "r" (val) \ + : "memory" \ + ); \ + break; \ + default: assert(false); \ + } \ + return current ; \ + } + +#define CDS_ITANIUM_ATOMIC_FETCH_ADD( n_bytes, n_add ) \ + switch (order) { \ + case memory_order_relaxed: \ + case memory_order_consume: \ + case memory_order_acquire: \ + __asm__ __volatile__ ( \ + "fetchadd" #n_bytes ".acq %[cur] = [%[pDest]], " #n_add " \n\t" \ + : [cur] "=r" (cur) \ + : [pDest] "r" (pDest) \ + : "memory" \ + ); \ + break ; \ + case memory_order_release: \ + __asm__ __volatile__ ( \ + "fetchadd" #n_bytes ".rel %[cur] = [%[pDest]], " #n_add " \n\t" \ + : [cur] "=r" (cur) \ + : [pDest] "r" (pDest) \ + : "memory" \ + ); \ + break ; \ + case memory_order_acq_rel: \ + case memory_order_seq_cst: \ + __asm__ __volatile__ ( \ + "fetchadd" #n_bytes ".rel %[cur] = [%[pDest]], " #n_add " \n\t" \ + "mf \n\t" \ + : [cur] "=r" (cur) \ + : [pDest] "r" (pDest) \ + : "memory" \ + ); \ + break ; \ + default: \ + assert(false); \ + } + + //----------------------------------------------------------------------------- + // 8bit primitives + //----------------------------------------------------------------------------- + + 
CDS_ITANIUM_ATOMIC_LOAD( 1, 8 ) + CDS_ITANIUM_ATOMIC_STORE( 1, 8 ) + CDS_ITANIUM_ATOMIC_CAS( 1, 8 ) + CDS_ITANIUM_ATOMIC_EXCHANGE( 1, 8 ) + + //----------------------------------------------------------------------------- + // 16bit primitives + //----------------------------------------------------------------------------- + + CDS_ITANIUM_ATOMIC_LOAD( 2, 16 ) + CDS_ITANIUM_ATOMIC_STORE( 2, 16 ) + CDS_ITANIUM_ATOMIC_CAS( 2, 16 ) + CDS_ITANIUM_ATOMIC_EXCHANGE( 2, 16 ) + + //----------------------------------------------------------------------------- + // 32bit primitives + //----------------------------------------------------------------------------- + + CDS_ITANIUM_ATOMIC_LOAD( 4, 32 ) + CDS_ITANIUM_ATOMIC_STORE( 4, 32 ) + CDS_ITANIUM_ATOMIC_CAS( 4, 32 ) + CDS_ITANIUM_ATOMIC_EXCHANGE( 4, 32 ) + +# define CDS_ATOMIC_fetch32_add_defined + template + static inline T fetch32_add( T volatile * pDest, T val, memory_order order) noexcept + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( pDest ); + + T cur; + switch ( val ) { + case 1: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, 1 ); + break; + case 4: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, 4 ); + break; + case 8: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, 8 ); + break; + case 16: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, 16 ); + break; + default: + cur = load32( pDest, memory_order_relaxed ); + do {} while ( !cas32_strong( pDest, cur, cur + val, order, memory_order_relaxed )); + break; + } + return cur; + } + +# define CDS_ATOMIC_fetch32_sub_defined + template + static inline T fetch32_sub( T volatile * pDest, T val, memory_order order) noexcept + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( pDest ); + T cur; + switch ( val ) { + case 1: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, -1 ); + break; + case 4: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, -4 ); + break; + case 8: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, -8 ); + break; + case 16: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 4, -16 ); + break; + default: + cur = load32( 
pDest, memory_order_relaxed ); + do {} while ( !cas32_strong( pDest, cur, cur - val, order, memory_order_relaxed )); + break; + } + return cur; + } + + //----------------------------------------------------------------------------- + // 64bit primitives + //----------------------------------------------------------------------------- + + CDS_ITANIUM_ATOMIC_LOAD( 8, 64 ) + CDS_ITANIUM_ATOMIC_STORE( 8, 64 ) + CDS_ITANIUM_ATOMIC_CAS( 8, 64 ) + CDS_ITANIUM_ATOMIC_EXCHANGE( 8, 64 ) + +# define CDS_ATOMIC_fetch64_add_defined + template + static inline T fetch64_add( T volatile * pDest, T val, memory_order order) noexcept + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( pDest ); + + T cur; + switch ( val ) { + case 1: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 1 ); + break; + case 4: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 4 ); + break; + case 8: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 8 ); + break; + case 16: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 16 ); + break; + default: + cur = load64( pDest, memory_order_relaxed ); + do {} while ( !cas64_strong( pDest, cur, cur + val, order, memory_order_relaxed )); + break; + } + return cur; + } + +# define CDS_ATOMIC_fetch64_sub_defined + template + static inline T fetch64_sub( T volatile * pDest, T val, memory_order order) noexcept + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( pDest ); + T cur; + switch ( val ) { + case 1: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -1 ); + break; + case 4: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -4 ); + break; + case 8: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -8 ); + break; + case 16: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -16 ); + break; + default: + cur = load64( pDest, memory_order_relaxed ); + do {} while ( !cas64_strong( pDest, cur, cur - val, order, memory_order_relaxed )); + break; + } + return cur; + } + + //----------------------------------------------------------------------------- + // pointer primitives + 
//----------------------------------------------------------------------------- + template + static inline T * load_ptr( T * volatile const * pSrc, memory_order order ) noexcept + { + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc ); + T * val; + __asm__ __volatile__ ( + "ld8.acq %[val] = [%[pSrc]] \n\t" + : [val] "=r" (val) + : [pSrc] "r" (pSrc) + : "memory" + ); + return val; + } + + template + static inline void store_ptr( T * volatile * pDest, T * val, memory_order order ) noexcept + { + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest ); + + if ( order == memory_order_seq_cst ) { + __asm__ __volatile__ ( + "st8.rel [%[pDest]] = %[val] \n\t" + "mf \n\t" + :: [pDest] "r" (pDest), [val] "r" (val) + : "memory" + ); + } + else { + __asm__ __volatile__ ( + "st8.rel [%[pDest]] = %[val] \n\t" + :: [pDest] "r" (pDest), [val] "r" (val) + : "memory" + ); + fence_after(order); + } + } + + template + static inline bool cas_ptr_strong( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + static_assert( sizeof(T *) == 8, "Illegal size of operand" ); + assert( pDest ); + + T * current; + + switch(mo_success) { + case memory_order_relaxed: + case memory_order_consume: + case memory_order_acquire: + __asm__ __volatile__ ( + "mov ar.ccv = %[expected] ;;\n\t" + "cmpxchg8.acq %[current] = [%[pDest]], %[desired], ar.ccv\n\t" + : [current] "=r" (current) + : [pDest] "r" (pDest), [expected] "r" (expected), [desired] "r" (desired) + : "ar.ccv", "memory" + ); + break; + case memory_order_release: + __asm__ __volatile__ ( + "mov ar.ccv = %[expected] ;;\n\t" + "cmpxchg8.rel %[current] = [%[pDest]], %[desired], ar.ccv\n\t" + : [current] "=r" (current) + : [pDest] "r" (pDest), [expected] "r" (expected), [desired] "r" (desired) + : 
"ar.ccv", "memory" + ); + break; + case memory_order_acq_rel: + case memory_order_seq_cst: + __asm__ __volatile__ ( + "mov ar.ccv = %[expected] ;;\n\t" + "cmpxchg8.rel %[current] = [%[pDest]], %[desired], ar.ccv\n\t" + "mf \n\t" + : [current] "=r" (current) + : [pDest] "r" (pDest), [expected] "r" (expected), [desired] "r" (desired) + : "ar.ccv", "memory" + ); + break; + default: + assert(false); + } + + bool bSuccess = expected == current; + expected = current; + if ( !bSuccess ) + fence_after( mo_fail ); + return bSuccess; + } + + template + static inline bool cas_ptr_weak( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + return cas_ptr_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + template + static inline T * exchange_ptr( T * volatile * pDest, T * val, memory_order order ) noexcept + { + static_assert( sizeof(T *) == 8, "Illegal size of operand" ); + assert( pDest ); + + T * current; + switch(order) { + case memory_order_relaxed: + case memory_order_consume: + case memory_order_acquire: + __asm__ __volatile__ ( + "xchg8 %[current] = [%[pDest]], %[val]\n\t" + : [current] "=r" (current) + : [pDest] "r" (pDest), [val] "r" (val) + : "memory" + ); + break; + case memory_order_acq_rel: + case memory_order_release: + case memory_order_seq_cst: + __asm__ __volatile__ ( + "mf \n\t" + "xchg8 %[current] = [%[pDest]], %[val]\n\t" + : [current] "=r" (current) + : [pDest] "r" (pDest), [val] "r" (val) + : "memory" + ); + break; + default: assert(false); + } + return current; + } + + + template struct atomic_pointer_sizeof { enum { value = sizeof(T) }; }; + template <> struct atomic_pointer_sizeof { enum { value = 1 }; }; + + // It does not work properly + // atomic.fetch_add( ... ) returns nullptr, why?.. 
+//# define CDS_ATOMIC_fetch_ptr_add_defined + template + static inline T * fetch_ptr_add( T * volatile * pDest, ptrdiff_t val, memory_order order) noexcept + { + static_assert( sizeof(T *) == 8, "Illegal size of operand" ); + assert( pDest ); + + T * cur; + val *= atomic_pointer_sizeof::value; + switch ( val ) { + case 1: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 1 ); + break; + case 4: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 4 ); + break; + case 8: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 8 ); + break; + case 16: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, 16 ); + break; + default: + cur = load_ptr( pDest, memory_order_relaxed ); + do {} while ( !cas_ptr_strong( pDest, cur, reinterpret_cast(reinterpret_cast(cur) + val), order, memory_order_relaxed )); + break; + } + return cur; + } + + // It does not work properly + // atomic.fetch_sub( ... ) returns nullptr, why?.. +//# define CDS_ATOMIC_fetch_ptr_sub_defined + template + static inline T * fetch_ptr_sub( T * volatile * pDest, ptrdiff_t val, memory_order order) noexcept + { + static_assert( sizeof(T *) == 8, "Illegal size of operand" ); + assert( pDest ); + T * cur; + val *= atomic_pointer_sizeof::value; + switch ( val ) { + case 1: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -1 ); + break; + case 4: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -4 ); + break; + case 8: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -8 ); + break; + case 16: + CDS_ITANIUM_ATOMIC_FETCH_ADD( 8, -16 ); + break; + default: + cur = load_ptr( pDest, memory_order_relaxed ); + do {} while ( !cas_ptr_strong( pDest, cur, reinterpret_cast(reinterpret_cast(cur) - val), order, memory_order_relaxed )); + break; + } + return cur; + } + + //----------------------------------------------------------------------------- + // atomic flag primitives + //----------------------------------------------------------------------------- + + typedef bool atomic_flag_type; + static inline bool atomic_flag_tas( atomic_flag_type volatile * pFlag, memory_order order ) noexcept + { + return exchange8( pFlag, true, order 
); + } + + static inline void atomic_flag_clear( atomic_flag_type volatile * pFlag, memory_order order ) noexcept + { + store8( pFlag, false, order ); + } + +#undef CDS_ITANIUM_ATOMIC_LOAD +#undef CDS_ITANIUM_ATOMIC_STORE +#undef CDS_ITANIUM_ATOMIC_CAS +#undef CDS_ITANIUM_ATOMIC_EXCHANGE +#undef CDS_ITANIUM_ATOMIC_FETCH_ADD + + }} // namespace gcc::ia64 + + } // namespace platform +}} // namespace cds::cxx11_atomic +//@endcond + +#endif // #ifndef CDSLIB_COMPILER_GCC_IA64_CXX11_ATOMIC_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/ppc64/backoff.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/ppc64/backoff.h new file mode 100644 index 0000000..75e4574 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/ppc64/backoff.h @@ -0,0 +1,54 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_COMPILER_GCC_PPC64_BACKOFF_H +#define CDSLIB_COMPILER_GCC_PPC64_BACKOFF_H + +//@cond none + +namespace cds { namespace backoff { + namespace gcc { namespace ppc64 { + +# define CDS_backoff_hint_defined + static inline void backoff_hint() + { + // Provide a hint that performance will probably be improved + // if shared resources dedicated to the executing processor are released for use by other processors + asm volatile( "or 27,27,27 # yield" ); + } + }} // namespace gcc::ppc64 + + namespace platform { + using namespace gcc::ppc64; + } +}} // namespace cds::backoff + +//@endcond +#endif // #ifndef CDSLIB_COMPILER_GCC_PPC64_BACKOFF_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/ppc64/bitop.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/ppc64/bitop.h new file mode 100644 index 0000000..8872623 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/ppc64/bitop.h @@ -0,0 +1,45 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above 
copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_COMPILER_GCC_PPC64_BITOP_H +#define CDSLIB_COMPILER_GCC_PPC64_BITOP_H + +//@cond none +namespace cds { + namespace bitop { namespace platform { namespace gcc { namespace ppc64 { + + }} // namespace gcc::ppc64 + + using namespace gcc::ppc64; + +}}} // namespace cds::bitop::platform +//@endcond + +#endif // #ifndef CDSLIB_COMPILER_GCC_PPC64_BITOP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/sparc/backoff.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/sparc/backoff.h new file mode 100644 index 0000000..41f7ac9 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/sparc/backoff.h @@ -0,0 +1,54 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_COMPILER_GCC_SPARC_BACKOFF_H +#define CDSLIB_COMPILER_GCC_SPARC_BACKOFF_H + +//@cond none + +namespace cds { namespace backoff { + namespace gcc { namespace Sparc { + +# define CDS_backoff_nop_defined + static inline void backoff_nop() + { + asm volatile ( "nop;" ); + } + + + }} // namespace gcc::Sparc + + namespace platform { + using namespace gcc::Sparc; + } +}} // namespace cds::backoff + +//@endcond +#endif // #ifndef CDSLIB_COMPILER_GCC_SPARC_BACKOFF_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/sparc/bitop.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/sparc/bitop.h new file mode 100644 index 0000000..f2b9a97 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/sparc/bitop.h @@ -0,0 +1,70 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_COMPILER_GCC_SPARC_BITOP_H +#define CDSLIB_COMPILER_GCC_SPARC_BITOP_H + +//@cond none +namespace cds { + namespace bitop { namespace platform { namespace gcc { namespace Sparc { + + // MSB - return index (1..64) of most significant bit in nArg. If nArg == 0 return 0 + // Source: UltraSPARC Architecture 2007 + // + // Test result: this variant and its variation about 100 times slower then generic implementation :-( + static inline int sparc_msb64( uint64_t nArg ) + { + uint64_t result; + asm volatile ( + "neg %[nArg], %[result] \n\t" + "xnor %[nArg], %[result], %%g5 \n\t" + "popc %%g5, %[result] \n\t" + "movrz %[nArg], %%g0, %[result] \n\t" + : [result] "=r" (result) + : [nArg] "r" (nArg) + : "g5" + ); + return result; + } + + // MSB - return index (1..32) of most significant bit in nArg. 
If nArg == 0 return 0 + static inline int sparc_msb32( uint32_t nArg ) + { + return sparc_msb64( (uint64_t) nArg ); + } + + }} // namespace gcc::Sparc + + using namespace gcc::Sparc; + +}}} // namespace cds::bitop::platform +//@endcond + +#endif // #ifndef CDSLIB_COMPILER_GCC_SPARC_BITOP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/sparc/cxx11_atomic.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/sparc/cxx11_atomic.h new file mode 100644 index 0000000..5b53991 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/sparc/cxx11_atomic.h @@ -0,0 +1,635 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_COMPILER_GCC_SPARC_CXX11_ATOMIC_H +#define CDSLIB_COMPILER_GCC_SPARC_CXX11_ATOMIC_H + +#include + +/* + Source: + + 1. [Doug Lea "JSR-133 Cookbook for Compiler Writers]: + + Acquire semantics: load; LoadLoad+LoadStore + Release semantics: LoadStore+StoreStore; store + + 2. boost::atomic library by Helge Bahman + 3. OpenSparc source code +*/ + +#if CDS_OS_TYPE == CDS_OS_LINUX +# define CDS_SPARC_RMO_MEMORY_MODEL +#endif + +#define CDS_SPARC_MB_FULL "membar #Sync \n\t" +#ifdef CDS_SPARC_RMO_MEMORY_MODEL + // RMO memory model (Linux only?..) 
Untested +# define CDS_SPARC_MB_LL_LS "membar #LoadLoad|#LoadStore \n\t" +# define CDS_SPARC_MB_LS_SS "membar #LoadStore|#StoreStore \n\t" +# define CDS_SPARC_MB_LL_LS_SS "membar #LoadLoad|#LoadStore|#StoreStore \n\t" +#else + // TSO memory model (default; Solaris uses this model) +# define CDS_SPARC_MB_LL_LS +# define CDS_SPARC_MB_LS_SS +# define CDS_SPARC_MB_LL_LS_SS +#endif + +#define CDS_SPARC_MB_ACQ CDS_SPARC_MB_LL_LS +#define CDS_SPARC_MB_REL CDS_SPARC_MB_LS_SS +#define CDS_SPARC_MB_ACQ_REL CDS_SPARC_MB_LL_LS_SS +#define CDS_SPARC_MB_SEQ_CST CDS_SPARC_MB_FULL + +//@cond +namespace cds { namespace cxx11_atomic { + namespace platform { inline namespace gcc { inline namespace Sparc { + + static inline void fence_before( memory_order order ) noexcept + { + switch(order) { + case memory_order_relaxed: + case memory_order_acquire: + case memory_order_consume: + break; + case memory_order_release: + case memory_order_acq_rel: + __asm__ __volatile__ ( "" CDS_SPARC_MB_REL ::: "memory" ); + break; + case memory_order_seq_cst: + __asm__ __volatile__ ( "" CDS_SPARC_MB_FULL ::: "memory" ); + break; + } + } + + static inline void fence_after( memory_order order ) noexcept + { + switch(order) { + case memory_order_relaxed: + case memory_order_consume: + case memory_order_release: + break; + case memory_order_acquire: + case memory_order_acq_rel: + __asm__ __volatile__ ( "" CDS_SPARC_MB_ACQ ::: "memory" ); + break; + case memory_order_seq_cst: + __asm__ __volatile__ ( "" CDS_SPARC_MB_FULL ::: "memory" ); + break; + } + } + + + //----------------------------------------------------------------------------- + // fences + //----------------------------------------------------------------------------- + static inline void thread_fence(memory_order order) noexcept + { + switch(order) + { + case memory_order_relaxed: + case memory_order_consume: + break; + case memory_order_acquire: + __asm__ __volatile__ ( "" CDS_SPARC_MB_ACQ ::: "memory" ); + break; + case memory_order_release: 
+ __asm__ __volatile__ ( "" CDS_SPARC_MB_REL ::: "memory" ); + break; + case memory_order_acq_rel: + __asm__ __volatile__ ( "" CDS_SPARC_MB_ACQ_REL ::: "memory" ); + break; + case memory_order_seq_cst: + __asm__ __volatile__ ( "" CDS_SPARC_MB_SEQ_CST ::: "memory" ); + break; + default:; + } + } + + static inline void signal_fence(memory_order order) noexcept + { + // C++11: 29.8.8: only compiler optimization, no hardware instructions + switch(order) + { + case memory_order_relaxed: + break; + case memory_order_consume: + case memory_order_release: + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_seq_cst: + CDS_COMPILER_RW_BARRIER; + break; + default:; + } + } + + //----------------------------------------------------------------------------- + // atomic flag primitives + //----------------------------------------------------------------------------- + + typedef unsigned char atomic_flag_type; + static inline bool atomic_flag_tas( atomic_flag_type volatile * pFlag, memory_order order ) noexcept + { + atomic_flag_type fCur; + fence_before( order ); + __asm__ __volatile__( + "ldstub [%[pFlag]], %[fCur] \n\t" + : [fCur] "=r"(fCur) + : [pFlag] "r"(pFlag) + : "memory", "cc" + ); + fence_after( order ); + return fCur != 0; + } + + static inline void atomic_flag_clear( atomic_flag_type volatile * pFlag, memory_order order ) noexcept + { + fence_before( order ); + __asm__ __volatile__( + CDS_SPARC_MB_REL + "stub %%g0, [%[pFlag]] \n\t" + :: [pFlag] "r"(pFlag) + : "memory" + ); + fence_after( order ); + } + + //----------------------------------------------------------------------------- + // 32bit primitives + //----------------------------------------------------------------------------- + + template + static inline void store32( T volatile * pDest, T src, memory_order order ) noexcept + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == 
memory_order_seq_cst + ); + assert( pDest ); + + fence_before(order); + *pDest = src; + fence_after(order); + } + + template + static inline T load32( T volatile const * pSrc, memory_order order ) noexcept + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc ); + + fence_before(order); + T v = *pSrc; + fence_after(order); + return v; + } + + template + static inline bool cas32_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( pDest ); + + fence_before( mo_success ); + __asm__ __volatile__( + "cas [%[pDest]], %[expected], %[desired]" + : [desired] "+r" (desired) + : [pDest] "r" (pDest), [expected] "r" (expected) + : "memory" + ); + + // desired contains current value + + bool bSuccess = desired == expected; + if ( bSuccess ) + fence_after( mo_success ); + else { + fence_after(mo_fail); + expected = desired; + } + + return bSuccess; + } + + template + static inline bool cas32_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + return cas32_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + template + static inline T exchange32( T volatile * pDest, T v, memory_order order ) noexcept + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( pDest ); + + // This primitive could be implemented via "swap" instruction but "swap" is deprecated in UltraSparc + + T cur = load32( pDest, memory_order_relaxed ); + do {} while ( !cas32_strong( pDest, cur, v, order, memory_order_relaxed )); + return cur; + } + + //----------------------------------------------------------------------------- + // 64bit primitives + 
//----------------------------------------------------------------------------- + + template + static inline T load64( T volatile const * pSrc, memory_order order ) noexcept + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc ); + + fence_before(order); + T v = *pSrc; + fence_after(order); + return v; + } + + template + static inline void store64( T volatile * pDest, T val, memory_order order ) noexcept + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest ); + + fence_before(order); + *pDest = val; + fence_after(order); + + } + + template + static inline bool cas64_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( pDest ); + + fence_before( mo_success ); + __asm__ __volatile__( + "casx [%[pDest]], %[expected], %[desired]" + : [desired] "+r" (desired) + : [pDest] "r" (pDest), [expected] "r" (expected) + : "memory" + ); + + // desired contains current value + + bool bSuccess = desired == expected; + if ( bSuccess ) { + fence_after( mo_success ); + } + else { + fence_after(mo_fail); + expected = desired; + } + + return bSuccess; + } + + template + static inline bool cas64_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + return cas64_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + template + static inline T exchange64( T volatile * pDest, T v, memory_order order ) noexcept + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( pDest ); + + T cur = load64( pDest, memory_order_relaxed ); + do {} while ( !cas64_strong( 
pDest, cur, v, order, memory_order_relaxed )); + return cur; + } + + //----------------------------------------------------------------------------- + // 8bit primitives + //----------------------------------------------------------------------------- + + template + static inline void store8( T volatile * pDest, T src, memory_order order ) noexcept + { + static_assert( sizeof(T) == 1, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest ); + + fence_before( order ); + *pDest = src; + fence_after( order ); + } + + template + static inline T load8( T volatile const * pSrc, memory_order order ) noexcept + { + static_assert( sizeof(T) == 1, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc ); + + fence_before( order ); + T v = *pSrc; + fence_after( order ); + return v; + } + + template + static inline bool cas8_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + static_assert( sizeof(T) == 1, "Illegal size of operand" ); + assert( pDest ); + + union u32 { + uint32_t w; + T c[4]; + }; + static_assert( sizeof(u32) == sizeof(uint32_t), "Argument size error" ); + + u32 volatile * pDest32 = (u32 *)( uintptr_t( pDest ) & ~0x03 ); + size_t const nCharIdx = (size_t)( uintptr_t( pDest ) & 0x03 ); + u32 uExpected; + u32 uDesired; + + bool bSuccess; + for (;;) { + uExpected.w = + uDesired.w = pDest32->w; + uExpected.c[nCharIdx] = expected; + uDesired.c[nCharIdx] = desired; + + bSuccess = cas32_weak( reinterpret_cast(pDest32), uExpected.w, uDesired.w, mo_success, mo_fail ); + if ( bSuccess || uExpected.c[nCharIdx] != expected ) + break; + } + + expected = uExpected.c[nCharIdx]; + return bSuccess; + } + + template + static inline bool cas8_weak( T volatile * pDest, T& 
expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + static_assert( sizeof(T) == 1, "Illegal size of operand" ); + assert( pDest ); + + union u32 { + uint32_t w; + T c[4]; + }; + static_assert( sizeof(u32) == sizeof(uint32_t), "Argument size error" ); + + u32 volatile * pDest32 = (u32 *)( uintptr_t( pDest ) & ~0x03 ); + size_t const nCharIdx = (size_t)( uintptr_t( pDest ) & 0x03 ); + u32 uExpected; + u32 uDesired; + + uExpected.w = + uDesired.w = pDest32->w; + uExpected.c[nCharIdx] = expected; + uDesired.c[nCharIdx] = desired; + + bool bSuccess = cas32_weak( reinterpret_cast(pDest32), uExpected.w, uDesired.w, mo_success, mo_fail ); + + expected = uExpected.c[nCharIdx]; + return bSuccess; + } + + template + static inline T exchange8( T volatile * pDest, T v, memory_order order ) noexcept + { + static_assert( sizeof(T) == 1, "Illegal size of operand" ); + assert( pDest ); + + T cur = load8( pDest, memory_order_relaxed ); + do {} while ( !cas8_strong( pDest, cur, v, order, memory_order_relaxed )); + return cur; + } + + //----------------------------------------------------------------------------- + // 16bit primitives + //----------------------------------------------------------------------------- + + template + static inline T load16( T volatile const * pSrc, memory_order order ) noexcept + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc ); + + fence_before( order ); + T v = *pSrc; + fence_after( order ); + return v; + } + + template + static inline void store16( T volatile * pDest, T src, memory_order order ) noexcept + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest ); + + fence_before(order); + *pDest = src; + 
fence_after(order); + } + + template + static inline bool cas16_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( pDest ); + + union u32 { + uint32_t w; + T c[2]; + }; + static_assert( sizeof(u32) == sizeof(uint32_t), "Argument size error" ); + + u32 volatile * pDest32 = (u32 *)( uintptr_t( pDest ) & ~0x03 ); + size_t const nIdx = (size_t)( (uintptr_t( pDest ) >> 1) & 0x01 ); + u32 uExpected; + u32 uDesired; + + bool bSuccess; + for (;;) { + uExpected.w = + uDesired.w = pDest32->w; + uExpected.c[nIdx] = expected; + uDesired.c[nIdx] = desired; + + bSuccess = cas32_weak( reinterpret_cast(pDest32), uExpected.w, uDesired.w, mo_success, mo_fail ); + if ( bSuccess || uExpected.c[nIdx] != expected ) + break; + } + + expected = uExpected.c[nIdx]; + return bSuccess; + } + + template + static inline bool cas16_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( pDest ); + + union u32 { + uint32_t w; + T c[2]; + }; + static_assert( sizeof(u32) == sizeof(uint32_t), "Argument size error" ); + + u32 volatile * pDest32 = (u32 *)( uintptr_t( pDest ) & ~0x03 ); + size_t const nIdx = (size_t)( (uintptr_t( pDest ) >> 1) & 0x01 ); + u32 uExpected; + u32 uDesired; + + uExpected.w = + uDesired.w = pDest32->w; + uExpected.c[nIdx] = expected; + uDesired.c[nIdx] = desired; + + bool bSuccess = cas32_weak( reinterpret_cast(pDest32), uExpected.w, uDesired.w, mo_success, mo_fail ); + + expected = uExpected.c[nIdx]; + return bSuccess; + } + + template + static inline T exchange16( T volatile * pDest, T v, memory_order order ) noexcept + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( pDest ); + + T cur = load16( pDest, memory_order_relaxed ); + do {} while ( !cas16_strong( pDest, cur, v, order, 
memory_order_relaxed )); + return cur; + } + + //----------------------------------------------------------------------------- + // pointer primitives + //----------------------------------------------------------------------------- + + template + static inline void store_ptr( T * volatile * pDest, T * src, memory_order order ) noexcept + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest ); + + fence_before(order); + *pDest = src; + fence_after(order); + } + + template + static inline T * load_ptr( T * volatile const * pSrc, memory_order order ) noexcept + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc ); + + fence_before( order ); + T * v = *pSrc; + fence_after( order ); + return v; + } + + template + static inline bool cas_ptr_strong( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + + return cas64_strong( (uint64_t volatile *) pDest, *reinterpret_cast( &expected ), (uint64_t) desired, mo_success, mo_fail ); + } + + template + static inline bool cas_ptr_weak( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + return cas_ptr_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + template + static inline T * exchange_ptr( T * volatile * pDest, T * v, memory_order order ) noexcept + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + return (T *) exchange64( (uint64_t volatile *) pDest, (uint64_t) v, order ); + } + + }} // namespace gcc::Sparc + + } // namespace platform +}} // 
namespace cds::cxx11_atomic +//@endcond + +#undef CDS_SPARC_MB_ACQ +#undef CDS_SPARC_MB_REL +#undef CDS_SPARC_MB_SEQ_CST +#undef CDS_SPARC_MB_FULL +#undef CDS_SPARC_MB_LL_LS +#undef CDS_SPARC_MB_LS_SS +#undef CDS_SPARC_MB_LL_LS_SS + +#endif // #ifndef CDSLIB_COMPILER_GCC_AMD64_CXX11_ATOMIC_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/x86/backoff.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/x86/backoff.h new file mode 100644 index 0000000..3032406 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/x86/backoff.h @@ -0,0 +1,60 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_COMPILER_GCC_X86_BACKOFF_H +#define CDSLIB_COMPILER_GCC_X86_BACKOFF_H + +//@cond none + +namespace cds { namespace backoff { + namespace gcc { namespace x86 { + +# define CDS_backoff_nop_defined + static inline void backoff_nop() + { + asm volatile ( "nop;" ); + } + +# define CDS_backoff_hint_defined + static inline void backoff_hint() + { + asm volatile ( "pause;" ); + } + + + }} // namespace gcc::x86 + + namespace platform { + using namespace gcc::x86; + } +}} // namespace cds::backoff + +//@endcond +#endif // #ifndef CDSLIB_COMPILER_GCC_X86_BACKOFF_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/x86/bitop.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/x86/bitop.h new file mode 100644 index 0000000..6bf2460 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/x86/bitop.h @@ -0,0 +1,114 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_COMPILER_GCC_X86_BITOP_H +#define CDSLIB_COMPILER_GCC_X86_BITOP_H + +//@cond none +namespace cds { + namespace bitop { namespace platform { namespace gcc { namespace x86 { + // MSB - return index (1..32) of most significant bit in nArg. If nArg == 0 return 0 +# define cds_bitop_msb32_DEFINED + static inline int msb32( uint32_t nArg ) + { + int nRet; + __asm__ __volatile__ ( + "bsrl %[nArg], %[nRet] ;\n\t" + "jnz 1f ;\n\t" + "xorl %[nRet], %[nRet] ;\n\t" + "subl $1, %[nRet] ;\n\t" + "1:" + "addl $1, %[nRet] ;\n\t" + : [nRet] "=a" (nRet) + : [nArg] "r" (nArg) + : "cc" + ); + return nRet; + } + +# define cds_bitop_msb32nz_DEFINED + static inline int msb32nz( uint32_t nArg ) + { + assert( nArg != 0 ); + int nRet; + __asm__ __volatile__ ( + "bsrl %[nArg], %[nRet] ;" + : [nRet] "=a" (nRet) + : [nArg] "r" (nArg) + : "cc" + ); + return nRet; + } + + // LSB - return index (0..31) of least significant bit in nArg. 
If nArg == 0 return -1U +# define cds_bitop_lsb32_DEFINED + static inline int lsb32( uint32_t nArg ) + { + + int nRet; + __asm__ __volatile__ ( + "bsfl %[nArg], %[nRet] ;" + "jnz 1f ;" + "xorl %[nRet], %[nRet] ;" + "subl $1, %[nRet] ;" + "1:" + "addl $1, %[nRet] ;" + : [nRet] "=a" (nRet) + : [nArg] "r" (nArg) + : "cc" + ); + return nRet; + + } + + // LSB - return index (0..31) of least significant bit in nArg. + // Condition: nArg != 0 +# define cds_bitop_lsb32nz_DEFINED + static inline int lsb32nz( uint32_t nArg ) + { + assert( nArg != 0 ); + int nRet; + __asm__ __volatile__ ( + "bsfl %[nArg], %[nRet] ;" + : [nRet] "=a" (nRet) + : [nArg] "r" (nArg) + : "cc" + ); + return nRet; + } + + }} // namespace gcc::x86 + + using namespace gcc::x86; + +}}} // namespace cds::bitop::platform +//@endcond + +#endif // #ifndef CDSLIB_ARH_X86_GCC_BITOP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/x86/cxx11_atomic.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/x86/cxx11_atomic.h new file mode 100644 index 0000000..27bcdec --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/x86/cxx11_atomic.h @@ -0,0 +1,210 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_COMPILER_GCC_X86_CXX11_ATOMIC_H +#define CDSLIB_COMPILER_GCC_X86_CXX11_ATOMIC_H + +#include +#include + +//@cond +namespace cds { namespace cxx11_atomic { + namespace platform { inline namespace gcc { inline namespace x86 { + + //----------------------------------------------------------------------------- + // 64bit primitives + //----------------------------------------------------------------------------- + + template + static inline bool cas64_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 8 )); + + uint32_t ebxStore; + T prev = expected; + + fence_before(mo_success); + + // We must save EBX in PIC mode + __asm__ __volatile__ ( + "movl %%ebx, %[ebxStore]\n" + "movl %[desiredLo], %%ebx\n" + "lock; cmpxchg8b 0(%[pDest])\n" + "movl %[ebxStore], %%ebx\n" + : [prev] "=A" (prev), [ebxStore] "=m" (ebxStore) + : [desiredLo] "D" ((int)desired), [desiredHi] "c" ((int)(desired >> 32)), [pDest] "S" (pDest), "0" (prev) + : "memory"); + bool success = (prev == expected); + if (success) + fence_after(mo_success); + else { + 
fence_after(mo_fail); + expected = prev; + } + return success; + } + + template + static inline bool cas64_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + return cas64_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + template + static inline T load64( T volatile const * pSrc, memory_order order ) noexcept + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc ); + assert( cds::details::is_aligned( pSrc, 8 )); + CDS_UNUSED( order ); + + T CDS_DATA_ALIGNMENT(8) v; + __asm__ __volatile__( + "movq (%[pSrc]), %[v] ; \n\t" + : [v] "=x" (v) + : [pSrc] "r" (pSrc) + : + ); + return v; + } + + + template + static inline T exchange64( T volatile * pDest, T v, memory_order order ) noexcept + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 8 )); + + T cur = load64( pDest, memory_order_relaxed ); + do { + } while (!cas64_weak( pDest, cur, v, order, memory_order_relaxed )); + return cur; + } + + template + static inline void store64( T volatile * pDest, T val, memory_order order ) noexcept + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest ); + assert( cds::details::is_aligned( pDest, 8 )); + + if ( order != memory_order_seq_cst ) { + fence_before( order ); + // Atomically stores 64bit value by SSE instruction + __asm__ __volatile__( + "movq %[val], (%[pDest]) ; \n\t" + : + : [val] "x" (val), [pDest] "r" (pDest) + : "memory" + ); + } + else { + exchange64( pDest, val, order ); + } + } + + + //----------------------------------------------------------------------------- + // pointer primitives + 
//----------------------------------------------------------------------------- + + template + static inline T * exchange_ptr( T * volatile * pDest, T * v, memory_order order ) noexcept + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + + return (T *) exchange32( (uint32_t volatile *) pDest, (uint32_t) v, order ); + } + + template + static inline void store_ptr( T * volatile * pDest, T * src, memory_order order ) noexcept + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest ); + + if ( order != memory_order_seq_cst ) { + fence_before( order ); + *pDest = src; + } + else { + exchange_ptr( pDest, src, order ); + } + } + + template + static inline T * load_ptr( T * volatile const * pSrc, memory_order order ) noexcept + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc ); + + T * v = *pSrc; + fence_after_load( order ); + return v; + } + + template + static inline bool cas_ptr_strong( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + + return cas32_strong( (uint32_t volatile *) pDest, *reinterpret_cast( &expected ), (uint32_t) desired, mo_success, mo_fail ); + } + + template + static inline bool cas_ptr_weak( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + return cas_ptr_strong( pDest, expected, desired, mo_success, mo_fail ); + } + }} // namespace gcc::x86 + + } // namespace platform +}} // namespace cds::cxx11_atomic +//@endcond + +#endif // #ifndef CDSLIB_COMPILER_GCC_X86_CXX11_ATOMIC_H 
diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/x86/cxx11_atomic32.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/x86/cxx11_atomic32.h new file mode 100644 index 0000000..e4dee89 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/gcc/x86/cxx11_atomic32.h @@ -0,0 +1,502 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_COMPILER_GCC_X86_CXX11_ATOMIC32_H +#define CDSLIB_COMPILER_GCC_X86_CXX11_ATOMIC32_H + +#include +#include + +//@cond +namespace cds { namespace cxx11_atomic { + namespace platform { inline namespace gcc { inline namespace x86 { + + static inline void fence_before( memory_order order ) noexcept + { + switch(order) { + case memory_order_relaxed: + case memory_order_acquire: + case memory_order_consume: + break; + case memory_order_release: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_seq_cst: + CDS_COMPILER_RW_BARRIER; + break; + } + } + + static inline void fence_after( memory_order order ) noexcept + { + switch(order) { + case memory_order_acquire: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_relaxed: + case memory_order_consume: + case memory_order_release: + break; + case memory_order_seq_cst: + CDS_COMPILER_RW_BARRIER; + break; + } + } + + + static inline void fence_after_load(memory_order order) noexcept + { + switch(order) { + case memory_order_relaxed: + case memory_order_release: + break; + case memory_order_acquire: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_consume: + break; + case memory_order_seq_cst: + __asm__ __volatile__ ( "mfence" ::: "memory" ); + break; + default:; + } + } + + //----------------------------------------------------------------------------- + // fences + //----------------------------------------------------------------------------- + static inline void thread_fence(memory_order order) noexcept + { + switch(order) + { + case memory_order_relaxed: + case memory_order_consume: + break; + case memory_order_release: + case memory_order_acquire: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_seq_cst: + __asm__ __volatile__ ( "mfence" ::: "memory" ); + break; + default:; + } + } + + static inline void signal_fence(memory_order order) noexcept + { + // C++11: 
29.8.8: only compiler optimization, no hardware instructions + switch(order) + { + case memory_order_relaxed: + break; + case memory_order_consume: + case memory_order_release: + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_seq_cst: + CDS_COMPILER_RW_BARRIER; + break; + default:; + } + } + + //----------------------------------------------------------------------------- + // 8bit primitives + //----------------------------------------------------------------------------- + + template + static inline bool cas8_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + static_assert( sizeof(T) == 1, "Illegal size of operand" ); + + T prev = expected; + fence_before(mo_success); + __asm__ __volatile__ ( + "lock ; cmpxchgb %[desired], %[pDest]" + : [prev] "+a" (prev), [pDest] "+m" (*pDest) + : [desired] "q" (desired) + ); + bool success = (prev == expected); + expected = prev; + if (success) + fence_after(mo_success); + else + fence_after(mo_fail); + return success; + } + + template + static inline bool cas8_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + return cas8_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + template + static inline T exchange8( T volatile * pDest, T v, memory_order order ) noexcept + { + static_assert( sizeof(T) == 1, "Illegal size of operand" ); + + fence_before(order); + __asm__ __volatile__ ( + "xchgb %[v], %[pDest]" + : [v] "+q" (v), [pDest] "+m" (*pDest) + ); + fence_after(order); + return v; + } + + template + static inline void store8( T volatile * pDest, T src, memory_order order ) noexcept + { + static_assert( sizeof(T) == 1, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest != NULL ); + + if ( order != memory_order_seq_cst ) { + fence_before( order ); + *pDest = 
src; + } + else { + exchange8( pDest, src, order ); + } + } + + template + static inline T load8( T volatile const * pSrc, memory_order order ) noexcept + { + static_assert( sizeof(T) == 1, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc != NULL ); + + T v = *pSrc; + fence_after_load( order ); + return v; + } + +# define CDS_ATOMIC_fetch8_add_defined + template + static inline T fetch8_add( T volatile * pDest, T val, memory_order order ) noexcept + { + fence_before(order); + __asm__ __volatile__ ( + "lock ; xaddb %[val], %[pDest]" + : [val] "+q" (val), [pDest] "+m" (*pDest) + ); + fence_after(order); + return val; + } + +# define CDS_ATOMIC_fetch8_sub_defined + template + static inline T fetch8_sub( T volatile * pDest, T val, memory_order order ) noexcept + { + fence_before(order); + __asm__ __volatile__ ( + "negb %[val] ; \n" + "lock ; xaddb %[val], %[pDest]" + : [val] "+q" (val), [pDest] "+m" (*pDest) + ); + fence_after(order); + return val; + } + + //----------------------------------------------------------------------------- + // atomic flag primitives + //----------------------------------------------------------------------------- + + typedef bool atomic_flag_type; + static inline bool atomic_flag_tas( atomic_flag_type volatile * pFlag, memory_order order ) noexcept + { + return exchange8( pFlag, true, order ); + } + + static inline void atomic_flag_clear( atomic_flag_type volatile * pFlag, memory_order order ) noexcept + { + store8( pFlag, false, order ); + } + + //----------------------------------------------------------------------------- + // 16bit primitives + //----------------------------------------------------------------------------- + + template + static inline T exchange16( T volatile * pDest, T v, memory_order order ) noexcept + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( 
cds::details::is_aligned( pDest, 2 )); + + fence_before(order); + __asm__ __volatile__ ( + "xchgw %[v], %[pDest]" + : [v] "+q" (v), [pDest] "+m" (*pDest) + ); + fence_after(order); + return v; + } + + template + static inline void store16( T volatile * pDest, T src, memory_order order ) noexcept + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest != NULL ); + assert( cds::details::is_aligned( pDest, 2 )); + + if ( order != memory_order_seq_cst ) { + fence_before( order ); + *pDest = src; + } + else { + exchange16( pDest, src, order ); + } + } + + template + static inline T load16( T volatile const * pSrc, memory_order order ) noexcept + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc != NULL ); + assert( cds::details::is_aligned( pSrc, 2 )); + + T v = *pSrc; + fence_after_load( order ); + return v; + } + + template + static inline bool cas16_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 2 )); + + T prev = expected; + fence_before(mo_success); + __asm__ __volatile__ ( + "lock ; cmpxchgw %[desired], %[pDest]" + : [prev] "+a" (prev), [pDest] "+m" (*pDest) + : [desired] "q" (desired) + ); + bool success = prev == expected; + if (success) + fence_after(mo_success); + else { + fence_after(mo_fail); + expected = prev; + } + + return success; + } + + template + static inline bool cas16_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + return cas16_strong( pDest, expected, desired, mo_success, mo_fail ); + } + +# define 
CDS_ATOMIC_fetch16_add_defined + template + static inline T fetch16_add( T volatile * pDest, T val, memory_order order ) noexcept + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 2 )); + + fence_before(order); + __asm__ __volatile__ ( + "lock ; xaddw %[val], %[pDest]" + : [val] "+q" (val), [pDest] "+m" (*pDest) + ); + fence_after(order); + return val; + } + +# define CDS_ATOMIC_fetch16_sub_defined + template + static inline T fetch16_sub( T volatile * pDest, T val, memory_order order ) noexcept + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 2 )); + + fence_before(order); + __asm__ __volatile__ ( + "negw %[val] ; \n" + "lock ; xaddw %[val], %[pDest]" + : [val] "+q" (val), [pDest] "+m" (*pDest) + ); + fence_after(order); + return val; + } + + //----------------------------------------------------------------------------- + // 32bit primitives + //----------------------------------------------------------------------------- + + template + static inline T exchange32( T volatile * pDest, T v, memory_order order ) noexcept + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 4 )); + + fence_before(order); + __asm__ __volatile__ ( + "xchgl %[v], %[pDest]" + : [v] "+r" (v), [pDest] "+m" (*pDest) + ); + fence_after(order); + return v; + } + + template + static inline void store32( T volatile * pDest, T src, memory_order order ) noexcept + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest != NULL ); + assert( cds::details::is_aligned( pDest, 4 )); + + if ( order != memory_order_seq_cst ) { + fence_before( order ); + *pDest = src; + } + else { + exchange32( pDest, src, order ); + } + } + + template + static inline T load32( T volatile const * pSrc, 
memory_order order ) noexcept + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc != NULL ); + assert( cds::details::is_aligned( pSrc, 4 )); + + T v = *pSrc; + fence_after_load( order ); + return v; + } + + template + static inline bool cas32_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 4 )); + + T prev = expected; + fence_before(mo_success); + __asm__ __volatile__ ( + "lock ; cmpxchgl %[desired], %[pDest]" + : [prev] "+a" (prev), [pDest] "+m" (*pDest) + : [desired] "r" (desired) + ); + bool success = prev == expected; + if (success) + fence_after(mo_success); + else { + fence_after(mo_fail); + expected = prev; + } + return success; + } + + template + static inline bool cas32_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + return cas32_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + // fetch_xxx may be emulated via cas32 + // If the platform has special fetch_xxx instruction + // then it should define CDS_ATOMIC_fetch32_xxx_defined macro + +# define CDS_ATOMIC_fetch32_add_defined + template + static inline T fetch32_add( T volatile * pDest, T v, memory_order order) noexcept + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 4 )); + + fence_before(order); + __asm__ __volatile__ ( + "lock ; xaddl %[v], %[pDest]" + : [v] "+r" (v), [pDest] "+m" (*pDest) + ); + fence_after(order); + return v; + } + +# define CDS_ATOMIC_fetch32_sub_defined + template + static inline T fetch32_sub( T volatile * pDest, T v, memory_order order) noexcept + { + static_assert( sizeof(T) == 4, "Illegal size of 
operand" ); + assert( cds::details::is_aligned( pDest, 4 )); + + fence_before(order); + __asm__ __volatile__ ( + "negl %[v] ; \n" + "lock ; xaddl %[v], %[pDest]" + : [v] "+r" (v), [pDest] "+m" (*pDest) + ); + fence_after(order); + return v; + } + + }}} // namespace platform::gcc::x86 +}} // namespace cds::cxx11_atomic +//@endcond + +#endif // #ifndef CDSLIB_COMPILER_GCC_X86_CXX11_ATOMIC32_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/icl/compiler_barriers.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/icl/compiler_barriers.h new file mode 100644 index 0000000..e8b35bb --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/icl/compiler_barriers.h @@ -0,0 +1,55 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_COMPILER_ICL_COMPILER_BARRIERS_H +#define CDSLIB_COMPILER_ICL_COMPILER_BARRIERS_H + +#if defined(_MSC_VER) && _MSC_VER < 1700 + // VC++ up to vc10 +# include + +# pragma intrinsic(_ReadWriteBarrier) +# pragma intrinsic(_ReadBarrier) +# pragma intrinsic(_WriteBarrier) + +# define CDS_COMPILER_RW_BARRIER _ReadWriteBarrier() +# define CDS_COMPILER_R_BARRIER _ReadBarrier() +# define CDS_COMPILER_W_BARRIER _WriteBarrier() + +#else + // MS VC11+, linux +# include + +# define CDS_COMPILER_RW_BARRIER std::atomic_thread_fence( std::memory_order_acq_rel ) +# define CDS_COMPILER_R_BARRIER CDS_COMPILER_RW_BARRIER +# define CDS_COMPILER_W_BARRIER CDS_COMPILER_RW_BARRIER +#endif + +#endif // #ifndef CDSLIB_COMPILER_ICL_COMPILER_BARRIERS_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/icl/defs.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/icl/defs.h new file mode 100644 index 0000000..a1b3fe7 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/icl/defs.h @@ -0,0 +1,158 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions 
are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_COMPILER_ICL_DEFS_H +#define CDSLIB_COMPILER_ICL_DEFS_H +//@cond + +// Compiler version +#ifdef __ICL +# define CDS_COMPILER_VERSION __ICL +#else +# define CDS_COMPILER_VERSION __INTEL_COMPILER +#endif + +// Compiler name +// Supported compilers: MS VC 2008, 2010, 2012 +// +# define CDS_COMPILER__NAME "Intel C++" +# define CDS_COMPILER__NICK "icl" + +// OS name +#if defined(_WIN64) +# define CDS_OS_INTERFACE CDS_OSI_WINDOWS +# define CDS_OS_TYPE CDS_OS_WIN64 +# define CDS_OS__NAME "Win64" +# define CDS_OS__NICK "Win64" +#elif defined(_WIN32) +# define CDS_OS_INTERFACE CDS_OSI_WINDOWS +# define CDS_OS_TYPE CDS_OS_WIN32 +# define CDS_OS__NAME "Win32" +# define CDS_OS__NICK "Win32" +#elif defined( __linux__ ) +# define CDS_OS_INTERFACE CDS_OSI_UNIX +# define CDS_OS_TYPE CDS_OS_LINUX +# define CDS_OS__NAME "linux" +# define CDS_OS__NICK "linux" +#endif + +// Processor architecture +#if defined(_M_X64) || defined(_M_AMD64) || defined(__amd64__) || defined(__amd64) +# define CDS_BUILD_BITS 64 +# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_AMD64 +# define CDS_PROCESSOR__NAME "AMD64" +# define CDS_PROCESSOR__NICK "amd64" +#elif defined(_M_IX86) || defined(__i386__) || defined(__i386) +# define CDS_BUILD_BITS 32 +# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_X86 +# define CDS_PROCESSOR__NAME "Intel x86" +# define CDS_PROCESSOR__NICK "x86" +#else +# define CDS_BUILD_BITS -1 +# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_UNKNOWN +# define CDS_PROCESSOR__NAME "<>" +# error Intel C++ compiler is supported for x86 only +#endif + +#if CDS_OS_INTERFACE == CDS_OSI_WINDOWS +# define __attribute__( _x ) +#endif + +#if CDS_OS_INTERFACE == CDS_OSI_WINDOWS +# ifdef CDS_BUILD_LIB +# define CDS_EXPORT_API __declspec(dllexport) +# else +# define CDS_EXPORT_API __declspec(dllimport) +# endif +#endif + +#if CDS_OS_INTERFACE == CDS_OSI_WINDOWS +# define alignof __alignof +#else +# define alignof __alignof__ +#endif + +// ************************************************* +// 
Alignment macro + +#if CDS_OS_INTERFACE == CDS_OSI_WINDOWS +# define CDS_TYPE_ALIGNMENT(n) __declspec( align(n)) +# define CDS_DATA_ALIGNMENT(n) __declspec( align(n)) +# define CDS_CLASS_ALIGNMENT(n) __declspec( align(n)) +#else +# define CDS_TYPE_ALIGNMENT(n) __attribute__ ((aligned (n))) +# define CDS_CLASS_ALIGNMENT(n) __attribute__ ((aligned (n))) +# define CDS_DATA_ALIGNMENT(n) __attribute__ ((aligned (n))) +#endif + +// Attributes +#if CDS_OS_INTERFACE == CDS_OSI_WINDOWS +# define CDS_DEPRECATED( reason ) __declspec(deprecated( reason )) +# define CDS_NORETURN __declspec(noreturn) +#else +# define CDS_DEPRECATED( reason ) __attribute__((deprecated( reason ))) +# define CDS_NORETURN __attribute__((__noreturn__)) +#endif + +// Exceptions +#if CDS_OS_INTERFACE == CDS_OSI_WINDOWS +# if defined( _CPPUNWIND ) +# define CDS_EXCEPTION_ENABLED +# endif +#else +# if defined( __EXCEPTIONS ) && __EXCEPTIONS == 1 +# define CDS_EXCEPTION_ENABLED +# endif +#endif + +// Byte order +#if !defined(CDS_ARCH_LITTLE_ENDIAN) && !defined(CDS_ARCH_BIG_ENDIAN) +# if CDS_OS_INTERFACE == CDS_OSI_WINDOWS +# define CDS_ARCH_LITTLE_ENDIAN +# else +# ifdef __BYTE_ORDER__ +# if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ +# define CDS_ARCH_LITTLE_ENDIAN +# elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ +# define CDS_ARCH_BIG_ENDIAN +# endif +# else +# warning "Undefined byte order for current architecture (no __BYTE_ORDER__ preprocessor definition)" +# endif +# endif +#endif + +// Sanitizer attributes (not supported) +#define CDS_SUPPRESS_SANITIZE( ... 
) + +#include + +//@endcond +#endif // #ifndef CDSLIB_COMPILER_VC_DEFS_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/amd64/backoff.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/amd64/backoff.h new file mode 100644 index 0000000..87eec6a --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/amd64/backoff.h @@ -0,0 +1,60 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_COMPILER_VC_AMD64_BACKOFF_H +#define CDSLIB_COMPILER_VC_AMD64_BACKOFF_H + +//@cond none +#include + +namespace cds { namespace backoff { + namespace vc { namespace amd64 { + +# define CDS_backoff_hint_defined + static inline void backoff_hint() + { + _mm_pause(); + } + +# define CDS_backoff_nop_defined + static inline void backoff_nop() + { + __nop(); + } + + }} // namespace vc::amd64 + + namespace platform { + using namespace vc::amd64; + } +}} // namespace cds::backoff + +//@endcond +#endif // #ifndef CDSLIB_COMPILER_VC_AMD64_BACKOFF_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/amd64/bitop.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/amd64/bitop.h new file mode 100644 index 0000000..04e1eea --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/amd64/bitop.h @@ -0,0 +1,154 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_COMPILER_VC_AMD64_BITOP_H +#define CDSLIB_COMPILER_VC_AMD64_BITOP_H + +#if _MSC_VER == 1500 + /* + VC 2008 bug: + math.h(136) : warning C4985: 'ceil': attributes not present on previous declaration. + intrin.h(142) : see declaration of 'ceil' + + See http://connect.microsoft.com/VisualStudio/feedback/details/381422/warning-of-attributes-not-present-on-previous-declaration-on-ceil-using-both-math-h-and-intrin-h + */ +# pragma warning(push) +# pragma warning(disable: 4985) +# include +# pragma warning(pop) +#else +# include +#endif + +#pragma intrinsic(_BitScanReverse) +#pragma intrinsic(_BitScanForward) +#pragma intrinsic(_BitScanReverse64) +#pragma intrinsic(_BitScanForward64) + +//@cond none +namespace cds { + namespace bitop { namespace platform { namespace vc { namespace amd64 { + + // MSB - return index (1..32) of most significant bit in nArg. If nArg == 0 return 0 +# define cds_bitop_msb32_DEFINED + static inline int msb32( uint32_t nArg ) + { + unsigned long nIndex; + if ( _BitScanReverse( &nIndex, nArg )) + return (int) nIndex + 1; + return 0; + } + +# define cds_bitop_msb32nz_DEFINED + static inline int msb32nz( uint32_t nArg ) + { + assert( nArg != 0 ); + unsigned long nIndex; + _BitScanReverse( &nIndex, nArg ); + return (int) nIndex; + } + + // LSB - return index (1..32) of least significant bit in nArg. 
If nArg == 0 return -1U +# define cds_bitop_lsb32_DEFINED + static inline int lsb32( uint32_t nArg ) + { + unsigned long nIndex; + if ( _BitScanForward( &nIndex, nArg )) + return (int) nIndex + 1; + return 0; + } + +# define cds_bitop_lsb32nz_DEFINED + static inline int lsb32nz( uint32_t nArg ) + { + assert( nArg != 0 ); + unsigned long nIndex; + _BitScanForward( &nIndex, nArg ); + return (int) nIndex; + } + + +# define cds_bitop_msb64_DEFINED + static inline int msb64( uint64_t nArg ) + { + unsigned long nIndex; + if ( _BitScanReverse64( &nIndex, nArg )) + return (int) nIndex + 1; + return 0; + } + +# define cds_bitop_msb64nz_DEFINED + static inline int msb64nz( uint64_t nArg ) + { + assert( nArg != 0 ); + unsigned long nIndex; + _BitScanReverse64( &nIndex, nArg ); + return (int) nIndex; + } + +# define cds_bitop_lsb64_DEFINED + static inline int lsb64( uint64_t nArg ) + { + unsigned long nIndex; + if ( _BitScanForward64( &nIndex, nArg )) + return (int) nIndex + 1; + return 0; + } + +# define cds_bitop_lsb64nz_DEFINED + static inline int lsb64nz( uint64_t nArg ) + { + assert( nArg != 0 ); + unsigned long nIndex; + _BitScanForward64( &nIndex, nArg ); + return (int) nIndex; + } + +# define cds_bitop_complement32_DEFINED + static inline bool complement32( uint32_t * pArg, unsigned int nBit ) + { + return _bittestandcomplement( reinterpret_cast( pArg ), nBit ) != 0; + } + +# define cds_bitop_complement64_DEFINED + static inline bool complement64( uint64_t * pArg, unsigned int nBit ) + { + return _bittestandcomplement64( reinterpret_cast<__int64 *>( pArg ), nBit ) != 0; + } + + + }} // namespace vc::amd64 + + using namespace vc::amd64; + +}}} // namespace cds::bitop::platform +//@endcond + +#endif // #ifndef CDSLIB_COMPILER_VC_AMD64_BITOP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/amd64/cxx11_atomic.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/amd64/cxx11_atomic.h new file mode 100644 index 0000000..449ba9a 
--- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/amd64/cxx11_atomic.h @@ -0,0 +1,609 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_COMPILER_VC_AMD64_CXX11_ATOMIC_H +#define CDSLIB_COMPILER_VC_AMD64_CXX11_ATOMIC_H + +#include +#include // for 128bit atomic load/store +#include + +#pragma intrinsic( _InterlockedIncrement ) +#pragma intrinsic( _InterlockedDecrement ) +#pragma intrinsic( _InterlockedCompareExchange ) +#pragma intrinsic( _InterlockedCompareExchangePointer ) +#pragma intrinsic( _InterlockedCompareExchange16 ) +#pragma intrinsic( _InterlockedCompareExchange64 ) +#pragma intrinsic( _InterlockedExchange ) +#pragma intrinsic( _InterlockedExchange64 ) +#pragma intrinsic( _InterlockedExchangePointer ) +#pragma intrinsic( _InterlockedExchangeAdd ) +#pragma intrinsic( _InterlockedExchangeAdd64 ) +//#pragma intrinsic( _InterlockedAnd ) +//#pragma intrinsic( _InterlockedOr ) +//#pragma intrinsic( _InterlockedXor ) +//#pragma intrinsic( _InterlockedAnd64 ) +//#pragma intrinsic( _InterlockedOr64 ) +//#pragma intrinsic( _InterlockedXor64 ) +#pragma intrinsic( _interlockedbittestandset ) +#if _MSC_VER >= 1600 +# pragma intrinsic( _InterlockedCompareExchange8 ) +# pragma intrinsic( _InterlockedExchange8 ) +# pragma intrinsic( _InterlockedExchange16 ) +#endif + +//@cond +namespace cds { namespace cxx11_atomic { + namespace platform { inline namespace vc { inline namespace amd64 { + + static inline void fence_before( memory_order order ) noexcept + { + switch(order) { + case memory_order_relaxed: + case memory_order_acquire: + case memory_order_consume: + break; + case memory_order_release: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_seq_cst: + CDS_COMPILER_RW_BARRIER; + break; + } + } + + static inline void fence_after( memory_order order ) noexcept + { + switch(order) { + case memory_order_acquire: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_relaxed: + case memory_order_consume: + case memory_order_release: + break; + case memory_order_seq_cst: + CDS_COMPILER_RW_BARRIER; + break; + } + } + + 
static inline void full_fence() + { + // MS VC does not support inline assembler in C code. + // So, we use InterlockedExchange for full fence instead of mfence inst + long t; + _InterlockedExchange( &t, 0 ); + } + + static inline void fence_after_load(memory_order order) noexcept + { + switch(order) { + case memory_order_relaxed: + case memory_order_release: + break; + case memory_order_acquire: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_consume: + break; + case memory_order_seq_cst: + full_fence(); + break; + default:; + } + } + + //----------------------------------------------------------------------------- + // fences + //----------------------------------------------------------------------------- + static inline void thread_fence(memory_order order) noexcept + { + switch(order) + { + case memory_order_relaxed: + case memory_order_consume: + break; + case memory_order_release: + case memory_order_acquire: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_seq_cst: + full_fence(); + break; + default:; + } + } + + static inline void signal_fence(memory_order order) noexcept + { + // C++11: 29.8.8: only compiler optimization, no hardware instructions + switch(order) + { + case memory_order_relaxed: + break; + case memory_order_consume: + case memory_order_release: + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_seq_cst: + CDS_COMPILER_RW_BARRIER; + break; + default:; + } + } + + //----------------------------------------------------------------------------- + // atomic flag primitives + //----------------------------------------------------------------------------- + + typedef unsigned char atomic_flag_type; + static inline bool atomic_flag_tas( atomic_flag_type volatile * pFlag, memory_order /*order*/ ) noexcept + { + return _interlockedbittestandset( (long volatile *) pFlag, 0 ) != 0; + } + + static inline void atomic_flag_clear( atomic_flag_type volatile 
* pFlag, memory_order order ) noexcept + { + assert( order != memory_order_acquire + && order != memory_order_acq_rel + ); + + fence_before( order ); + *pFlag = 0; + fence_after( order ); + } + + //----------------------------------------------------------------------------- + // 8bit primitives + //----------------------------------------------------------------------------- + +#if _MSC_VER >= 1600 +# pragma warning(push) + // Disable warning C4800: 'char' : forcing value to bool 'true' or 'false' (performance warning) +# pragma warning( disable: 4800 ) +#endif + template + static inline bool cas8_strong( T volatile * pDest, T& expected, T desired, memory_order /*mo_success*/, memory_order /*mo_fail*/ ) noexcept + { + static_assert( sizeof(T) == 1, "Illegal size of operand" ); + +# if _MSC_VER >= 1600 + // VC 2010 + + T prev = expected; + expected = (T) _InterlockedCompareExchange8( (char volatile*) pDest, (char) desired, (char) expected ); + return expected == prev; +# else + // VC 2008 + unsigned int * pnDest = (unsigned int *)( ((unsigned __int64) pDest) & ~(unsigned __int64(3))); + unsigned int nOffset = ((unsigned __int64) pDest) & 3; + unsigned int nExpected; + unsigned int nDesired; + + for (;;) { + nExpected = + nDesired = *pnDest; + memcpy( reinterpret_cast(&nExpected) + nOffset, &expected, sizeof(T)); + memcpy( reinterpret_cast(&nDesired) + nOffset, &desired, sizeof(T)); + + unsigned int nPrev = (unsigned int) _InterlockedCompareExchange( (long *) pnDest, (long) nDesired, (long) nExpected ); + if ( nPrev == nExpected ) + return true; + T nByte; + memcpy( &nByte, reinterpret_cast(&nPrev) + nOffset, sizeof(T)); + if ( nByte != expected ) { + expected = nByte; + return false; + } + } +# endif + } +#if _MSC_VER >= 1600 +# pragma warning(pop) +#endif + + template + static inline bool cas8_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + return cas8_strong( pDest, expected, desired, mo_success, 
mo_fail ); + } + +#if _MSC_VER >= 1600 +# pragma warning(push) + // Disable warning C4800: 'char' : forcing value to bool 'true' or 'false' (performance warning) +# pragma warning( disable: 4800 ) +#endif + template + static inline T exchange8( T volatile * pDest, T v, memory_order order ) noexcept + { + static_assert( sizeof(T) == 1, "Illegal size of operand" ); + +# if _MSC_VER >= 1600 + CDS_UNUSED(order); + return (T) _InterlockedExchange8( (char volatile *) pDest, (char) v ); +# else + T expected = *pDest; + do {} while ( !cas8_strong( pDest, expected, v, order, memory_order_relaxed )); + return expected; +# endif + } +#if _MSC_VER >= 1600 +# pragma warning(pop) +#endif + + template + static inline void store8( T volatile * pDest, T src, memory_order order ) noexcept + { + static_assert( sizeof(T) == 1, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest ); + + if ( order != memory_order_seq_cst ) { + fence_before( order ); + *pDest = src; + } + else { + exchange8( pDest, src, order ); + } + } + + template + static inline T load8( T volatile const * pSrc, memory_order order ) noexcept + { + static_assert( sizeof(T) == 1, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc ); + + T v = *pSrc; + fence_after_load( order ); + return v; + } + + //----------------------------------------------------------------------------- + // 16bit primitives + //----------------------------------------------------------------------------- + + template + static inline bool cas16_strong( T volatile * pDest, T& expected, T desired, memory_order /*mo_success*/, memory_order /*mo_fail*/ ) noexcept + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 2 )); + + // 
_InterlockedCompareExchange behave as read-write memory barriers + T prev = expected; + expected = (T) _InterlockedCompareExchange16( (short *) pDest, (short) desired, (short) expected ); + return expected == prev; + } + + template + static inline bool cas16_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + return cas16_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + template + static inline T exchange16( T volatile * pDest, T v, memory_order order ) noexcept + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 2 )); + +# if _MSC_VER >= 1600 + order; + return (T) _InterlockedExchange16( (short volatile *) pDest, (short) v ); +# else + T expected = *pDest; + do {} while ( !cas16_strong( pDest, expected, v, order, memory_order_relaxed )); + return expected; +# endif + } + + template + static inline void store16( T volatile * pDest, T src, memory_order order ) noexcept + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest ); + assert( cds::details::is_aligned( pDest, 2 )); + + if ( order != memory_order_seq_cst ) { + fence_before( order ); + *pDest = src; + } + else { + exchange16( pDest, src, order ); + } + } + + template + static inline T load16( T volatile const * pSrc, memory_order order ) noexcept + { + static_assert( sizeof(T) == 2, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc ); + assert( cds::details::is_aligned( pSrc, 2 )); + + T v = *pSrc; + fence_after_load( order ); + return v; + } + + //----------------------------------------------------------------------------- + // 32bit primitives + 
//----------------------------------------------------------------------------- + + template + static inline T exchange32( T volatile * pDest, T v, memory_order /*order*/ ) noexcept + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 4 )); + + return (T) _InterlockedExchange( (long *) pDest, (long) v ); + } + + template + static inline void store32( T volatile * pDest, T src, memory_order order ) noexcept + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest ); + assert( cds::details::is_aligned( pDest, 4 )); + + if ( order != memory_order_seq_cst ) { + fence_before( order ); + *pDest = src; + } + else { + exchange32( pDest, src, order ); + } + } + + template + static inline T load32( T volatile const * pSrc, memory_order order ) noexcept + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc ); + assert( cds::details::is_aligned( pSrc, 4 )); + + T v = *pSrc; + fence_after_load( order ); + return v; + } + + template + static inline bool cas32_strong( T volatile * pDest, T& expected, T desired, memory_order /*mo_success*/, memory_order /*mo_fail*/ ) noexcept + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 4 )); + + // _InterlockedCompareExchange behave as read-write memory barriers + T prev = expected; + expected = (T) _InterlockedCompareExchange( (long *) pDest, (long) desired, (long) expected ); + return expected == prev; + } + + template + static inline bool cas32_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + return cas32_strong( pDest, expected, desired, mo_success, 
mo_fail ); + } + + // fetch_xxx may be emulated via cas32 + // If the platform has special fetch_xxx instruction + // then it should define CDS_ATOMIC_fetch32_xxx_defined macro + +# define CDS_ATOMIC_fetch32_add_defined + template + static inline T fetch32_add( T volatile * pDest, T v, memory_order /*order*/) noexcept + { + static_assert( sizeof(T) == 4, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 4 )); + + // _InterlockedExchangeAdd behave as read-write memory barriers + return (T) _InterlockedExchangeAdd( (long *) pDest, (long) v ); + } + + //----------------------------------------------------------------------------- + // 64bit primitives + //----------------------------------------------------------------------------- + + template + static inline bool cas64_strong( T volatile * pDest, T& expected, T desired, memory_order /*mo_success*/, memory_order /*mo_fail*/ ) noexcept + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 8 )); + + // _InterlockedCompareExchange behave as read-write memory barriers + T prev = expected; + expected = (T) _InterlockedCompareExchange64( (__int64 *) pDest, (__int64) desired, (__int64) expected ); + return expected == prev; + } + + template + static inline bool cas64_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + return cas64_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + template + static inline T load64( T volatile const * pSrc, memory_order order ) noexcept + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc ); + assert( cds::details::is_aligned( pSrc, 8 )); + + T v = *pSrc; + fence_after_load( order ); + return v; + } + + + template + static inline T exchange64( T volatile * pDest, T v, 
memory_order order ) noexcept + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + + T cur = load64( pDest, memory_order_relaxed ); + do { + } while (!cas64_weak( pDest, cur, v, order, memory_order_relaxed )); + return cur; + } + + template + static inline void store64( T volatile * pDest, T val, memory_order order ) noexcept + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest ); + assert( cds::details::is_aligned( pDest, 8 )); + + if ( order != memory_order_seq_cst ) { + fence_before( order ); + *pDest = val; + } + else { + exchange64( pDest, val, order ); + } + } + +# define CDS_ATOMIC_fetch64_add_defined + template + static inline T fetch64_add( T volatile * pDest, T v, memory_order /*order*/) noexcept + { + static_assert( sizeof(T) == 8, "Illegal size of operand" ); + assert( cds::details::is_aligned( pDest, 8 )); + + // _InterlockedExchangeAdd64 behave as read-write memory barriers + return (T) _InterlockedExchangeAdd64( (__int64 *) pDest, (__int64) v ); + } + + //----------------------------------------------------------------------------- + // pointer primitives + //----------------------------------------------------------------------------- + + template + static inline T * exchange_ptr( T * volatile * pDest, T * v, memory_order /*order*/ ) noexcept + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + return (T *) _InterlockedExchangePointer( (void * volatile *) pDest, reinterpret_cast(v)); + } + + template + static inline void store_ptr( T * volatile * pDest, T * src, memory_order order ) noexcept + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest ); + + if ( order != memory_order_seq_cst ) { + fence_before( 
order ); + *pDest = src; + } + else { + exchange_ptr( pDest, src, order ); + } + } + + template + static inline T * load_ptr( T * volatile const * pSrc, memory_order order ) noexcept + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc ); + + T * v = *pSrc; + fence_after_load( order ); + return v; + } + + template + static inline bool cas_ptr_strong( T * volatile * pDest, T *& expected, T * desired, memory_order /*mo_success*/, memory_order /*mo_fail*/ ) noexcept + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal size of operand" ); + + // _InterlockedCompareExchangePointer behave as read-write memory barriers + T * prev = expected; + expected = (T *) _InterlockedCompareExchangePointer( (void * volatile *) pDest, (void *) desired, (void *) expected ); + return expected == prev; + } + + template + static inline bool cas_ptr_weak( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + return cas_ptr_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + }} // namespace vc::amd64 + + } // namespace platform +}} // namespace cds::cxx11_atomic +//@endcond + +#endif // #ifndef CDSLIB_COMPILER_VC_AMD64_CXX11_ATOMIC_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/compiler_barriers.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/compiler_barriers.h new file mode 100644 index 0000000..23e273d --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/compiler_barriers.h @@ -0,0 +1,57 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: 
http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_COMPILER_VC_COMPILER_BARRIERS_H +#define CDSLIB_COMPILER_VC_COMPILER_BARRIERS_H + +#if CDS_COMPILER_VERSION < 1700 + // VC++ up to vc10 + +# include + +# pragma intrinsic(_ReadWriteBarrier) +# pragma intrinsic(_ReadBarrier) +# pragma intrinsic(_WriteBarrier) + +# define CDS_COMPILER_RW_BARRIER _ReadWriteBarrier() +# define CDS_COMPILER_R_BARRIER _ReadBarrier() +# define CDS_COMPILER_W_BARRIER _WriteBarrier() + +#else + // MS VC11+ +# include + +# define CDS_COMPILER_RW_BARRIER std::atomic_thread_fence( std::memory_order_acq_rel ) +# define CDS_COMPILER_R_BARRIER CDS_COMPILER_RW_BARRIER +# define CDS_COMPILER_W_BARRIER CDS_COMPILER_RW_BARRIER + +#endif + +#endif // #ifndef CDSLIB_COMPILER_VC_COMPILER_BARRIERS_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/defs.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/defs.h new file mode 100644 index 0000000..b915520 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/defs.h @@ -0,0 +1,164 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_COMPILER_VC_DEFS_H +#define CDSLIB_COMPILER_VC_DEFS_H +//@cond + +// Compiler version +#define CDS_COMPILER_VERSION _MSC_VER + +// Compiler name +// Supported compilers: MS VC 2015 + +// C++ compiler versions: +#define CDS_COMPILER_MSVC14 1900 // 2015 vc14 +#define CDS_COMPILER_MSVC14_1 1910 // 2017 vc14.1 +#define CDS_COMPILER_MSVC14_1_3 1911 // 2017 vc14.1 (VS 15.3) +#define CDS_COMPILER_MSVC14_1_5 1912 // 2017 vc14.1 (VS 15.5) +#define CDS_COMPILER_MSVC15 2000 // next Visual Studio + +#if CDS_COMPILER_VERSION < CDS_COMPILER_MSVC14 +# error "Only MS Visual C++ 14 (2015) and above is supported" +#endif + +#if _MSC_VER == 1900 +# define CDS_COMPILER__NAME "MS Visual C++ 2015" +# define CDS_COMPILER__NICK "vc14" +# define CDS_COMPILER_LIBCDS_SUFFIX "vcv140" +#elif _MSC_VER < 2000 +# define CDS_COMPILER__NAME "MS Visual C++ 2017" +# define CDS_COMPILER__NICK "vc141" +# define CDS_COMPILER_LIBCDS_SUFFIX "vcv141" +#else +# define CDS_COMPILER__NAME "MS Visual C++" +# define CDS_COMPILER__NICK "msvc" +# define CDS_COMPILER_LIBCDS_SUFFIX "vc" +#endif + +// OS interface +#define CDS_OS_INTERFACE CDS_OSI_WINDOWS + +// OS name +#if defined(_WIN64) +# define CDS_OS_TYPE CDS_OS_WIN64 +# 
define CDS_OS__NAME "Win64" +# define CDS_OS__NICK "Win64" +#elif defined(_WIN32) +# define CDS_OS_TYPE CDS_OS_WIN32 +# define CDS_OS__NAME "Win32" +# define CDS_OS__NICK "Win32" +#endif + +// Processor architecture +#ifdef _M_IX86 +# define CDS_BUILD_BITS 32 +# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_X86 +# define CDS_PROCESSOR__NAME "Intel x86" +# define CDS_PROCESSOR__NICK "x86" +#elif _M_X64 +# define CDS_BUILD_BITS 64 +# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_AMD64 +# define CDS_PROCESSOR__NAME "AMD64" +# define CDS_PROCESSOR__NICK "amd64" +#else +# define CDS_BUILD_BITS -1 +# define CDS_PROCESSOR_ARCH CDS_PROCESSOR_UNKNOWN +# define CDS_PROCESSOR__NAME "<>" +# error Microsoft Visual C++ compiler is supported for x86 only +#endif + +#define __attribute__( _x ) + +#ifdef CDS_BUILD_LIB +# define CDS_EXPORT_API __declspec(dllexport) +#else +# define CDS_EXPORT_API __declspec(dllimport) +#endif + +#define alignof __alignof + +// Memory leaks detection (debug build only) +#ifdef _DEBUG +# define _CRTDBG_MAP_ALLOC +# define _CRTDBG_MAPALLOC +# include +# include +# define CDS_MSVC_MEMORY_LEAKS_DETECTING_ENABLED +#endif + +// ************************************************* +// Alignment macro + +#define CDS_TYPE_ALIGNMENT(n) __declspec( align(n)) +#define CDS_DATA_ALIGNMENT(n) __declspec( align(n)) +#define CDS_CLASS_ALIGNMENT(n) __declspec( align(n)) + +// Attributes +#define CDS_DEPRECATED( reason ) [[deprecated( reason )]] + +#define CDS_NORETURN __declspec(noreturn) + +// Exceptions +#if defined( _CPPUNWIND ) +# define CDS_EXCEPTION_ENABLED +#endif + + +// double-width CAS support +//#define CDS_DCAS_SUPPORT + +// Byte order +// It seems, MSVC works only on little-endian architecture?.. 
+#if !defined(CDS_ARCH_LITTLE_ENDIAN) && !defined(CDS_ARCH_BIG_ENDIAN) +# define CDS_ARCH_LITTLE_ENDIAN +#endif + +//if constexpr support (C++17) +#ifndef constexpr_if + // Standard way to check if the compiler supports "if constexpr" + // Of course, MS VC doesn't support any standard way +# if defined __cpp_if_constexpr +# if __cpp_if_constexpr >= 201606 +# define constexpr_if if constexpr +# endif +# elif CDS_COMPILER_VERSION >= CDS_COMPILER_MSVC14_1_3 && _MSVC_LANG > CDS_CPLUSPLUS_14 + // MS-specific WTF. + // Don't work in /std:c++17 because /std:c++17 key defines _MSVC_LANG=201402 (c++14) in VC 15.3 +# define constexpr_if if constexpr +# endif +#endif + +// Sanitizer attributes (not supported) +#define CDS_SUPPRESS_SANITIZE( ... ) + +#include + +//@endcond +#endif // #ifndef CDSLIB_COMPILER_VC_DEFS_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/x86/backoff.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/x86/backoff.h new file mode 100644 index 0000000..e2113e9 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/x86/backoff.h @@ -0,0 +1,60 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_COMPILER_VC_X86_BACKOFF_H +#define CDSLIB_COMPILER_VC_X86_BACKOFF_H + +//@cond none +#include + +namespace cds { namespace backoff { + namespace vc { namespace x86 { + +# define CDS_backoff_hint_defined + static inline void backoff_hint() + { + _mm_pause(); + } + +# define CDS_backoff_nop_defined + static inline void backoff_nop() + { + __nop(); + } + + }} // namespace vc::x86 + + namespace platform { + using namespace vc::x86; + } +}} // namespace cds::backoff + +//@endcond +#endif // #ifndef CDSLIB_COMPILER_VC_X86_BACKOFF_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/x86/bitop.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/x86/bitop.h new file mode 100644 index 0000000..e6d714a --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/x86/bitop.h @@ -0,0 +1,111 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + 
modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_COMPILER_VC_X86_BITOP_H +#define CDSLIB_COMPILER_VC_X86_BITOP_H + +#include +#pragma intrinsic(_BitScanReverse) +#pragma intrinsic(_BitScanForward) + +//@cond none +namespace cds { + namespace bitop { namespace platform { namespace vc { namespace x86 { + // MSB - return index (1..32) of most significant bit in nArg. 
If nArg == 0 return 0 +# define cds_bitop_msb32_DEFINED + static inline int msb32( uint32_t nArg ) + { + unsigned long nIndex; + if ( _BitScanReverse( &nIndex, nArg )) + return (int) nIndex + 1; + return 0; + } + +# define cds_bitop_msb32nz_DEFINED + static inline int msb32nz( uint32_t nArg ) + { + assert( nArg != 0 ); + unsigned long nIndex; + _BitScanReverse( &nIndex, nArg ); + return (int) nIndex; + } + + // LSB - return index (1..32) of least significant bit in nArg. If nArg == 0 return -1U +# define cds_bitop_lsb32_DEFINED + static inline int lsb32( uint32_t nArg ) + { + unsigned long nIndex; + if ( _BitScanForward( &nIndex, nArg )) + return (int) nIndex + 1; + return 0; + } + +# define cds_bitop_lsb32nz_DEFINED + static inline int lsb32nz( uint32_t nArg ) + { + assert( nArg != 0 ); + unsigned long nIndex; + _BitScanForward( &nIndex, nArg ); + return (int) nIndex; + } + + // bswap - Reverses the byte order of a 32-bit word +# define cds_bitop_bswap32_DEFINED + static inline uint32_t bswap32( uint32_t nArg ) + { + __asm { + mov eax, nArg; + bswap eax; + } + } + +# define cds_bitop_complement32_DEFINED + static inline bool complement32( uint32_t * pArg, unsigned int nBit ) + { + return _bittestandcomplement( reinterpret_cast( pArg ), nBit ) != 0; + } + +# define cds_bitop_complement64_DEFINED + static inline bool complement64( uint64_t * pArg, unsigned int nBit ) + { + if ( nBit < 32 ) + return _bittestandcomplement( reinterpret_cast( pArg ), nBit ) != 0; + else + return _bittestandcomplement( reinterpret_cast( pArg ) + 1, nBit - 32 ) != 0; + } + + }} // namespace vc::x86 + + using namespace vc::x86; + +}}} // namespace cds::bitop::platform +//@endcond + +#endif // #ifndef CDSLIB_COMPILER_VC_X86_BITOP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/x86/cxx11_atomic.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/x86/cxx11_atomic.h new file mode 100644 index 0000000..aa8fda5 --- /dev/null +++ 
b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/compiler/vc/x86/cxx11_atomic.h @@ -0,0 +1,581 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_COMPILER_VC_X86_CXX11_ATOMIC_H +#define CDSLIB_COMPILER_VC_X86_CXX11_ATOMIC_H + +#include +#include // for 64bit atomic load/store +#include + +#pragma intrinsic( _InterlockedIncrement ) +#pragma intrinsic( _InterlockedDecrement ) +#pragma intrinsic( _InterlockedCompareExchange ) +//#pragma intrinsic( _InterlockedCompareExchangePointer ) // On the x86 architecture, _InterlockedCompareExchangePointer is a macro that calls _InterlockedCompareExchange +#pragma intrinsic( _InterlockedCompareExchange16 ) +#pragma intrinsic( _InterlockedCompareExchange64 ) +#pragma intrinsic( _InterlockedExchange ) +//#pragma intrinsic( _InterlockedExchangePointer ) // On the x86 architecture, _InterlockedExchangePointer is a macro that calls _InterlockedExchange +#pragma intrinsic( _InterlockedExchangeAdd ) +#pragma intrinsic( _InterlockedXor ) +#pragma intrinsic( _InterlockedOr ) +#pragma intrinsic( _InterlockedAnd ) +#pragma intrinsic( _interlockedbittestandset ) +#if _MSC_VER >= 1600 +# pragma intrinsic( _InterlockedCompareExchange8 ) +# pragma intrinsic( _InterlockedExchange8 ) +# pragma intrinsic( _InterlockedExchange16 ) +#endif + +//@cond +namespace cds { namespace cxx11_atomic { + namespace platform { inline namespace vc { inline namespace x86 { + + static inline void fence_before( memory_order order ) noexcept + { + switch(order) { + case memory_order_relaxed: + case memory_order_acquire: + case memory_order_consume: + break; + case memory_order_release: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_seq_cst: + CDS_COMPILER_RW_BARRIER; + break; + } + } + + static inline void fence_after( memory_order order ) noexcept + { + switch(order) { + case memory_order_acquire: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_relaxed: + case memory_order_consume: + case memory_order_release: + break; + case memory_order_seq_cst: + CDS_COMPILER_RW_BARRIER; + break; + } + } + + + static inline 
void fence_after_load(memory_order order) noexcept + { + switch(order) { + case memory_order_relaxed: + case memory_order_release: + break; + case memory_order_acquire: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_consume: + break; + case memory_order_seq_cst: + __asm { mfence }; + break; + default:; + } + } + + //----------------------------------------------------------------------------- + // fences + //----------------------------------------------------------------------------- + static inline void thread_fence(memory_order order) noexcept + { + switch(order) + { + case memory_order_relaxed: + case memory_order_consume: + break; + case memory_order_release: + case memory_order_acquire: + case memory_order_acq_rel: + CDS_COMPILER_RW_BARRIER; + break; + case memory_order_seq_cst: + __asm { mfence }; + break; + default:; + } + } + + static inline void signal_fence(memory_order order) noexcept + { + // C++11: 29.8.8: only compiler optimization, no hardware instructions + switch(order) + { + case memory_order_relaxed: + break; + case memory_order_consume: + case memory_order_release: + case memory_order_acquire: + case memory_order_acq_rel: + case memory_order_seq_cst: + CDS_COMPILER_RW_BARRIER; + break; + default:; + } + } + + //----------------------------------------------------------------------------- + // atomic flag primitives + //----------------------------------------------------------------------------- + + typedef unsigned char atomic_flag_type; + static inline bool atomic_flag_tas( atomic_flag_type volatile * pFlag, memory_order /*order*/ ) noexcept + { + return _interlockedbittestandset( (long volatile *) pFlag, 0 ) != 0; + } + + static inline void atomic_flag_clear( atomic_flag_type volatile * pFlag, memory_order order ) noexcept + { + assert( order != memory_order_acquire + && order != memory_order_acq_rel + ); + + fence_before( order ); + *pFlag = 0; + fence_after( order ); + } + + + 
//----------------------------------------------------------------------------- + // 8bit primitives + //----------------------------------------------------------------------------- + +#if _MSC_VER >= 1600 +# pragma warning(push) + // Disable warning C4800: 'char' : forcing value to bool 'true' or 'false' (performance warning) +# pragma warning( disable: 4800 ) +#endif + template + static inline bool cas8_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + static_assert( sizeof(T) == 1, "Illegal operand size" ); + +# if _MSC_VER >= 1600 + T prev = expected; + expected = (T) _InterlockedCompareExchange8( reinterpret_cast(pDest), (char) desired, (char) expected ); + return expected == prev; +# else + bool bRet = false; + __asm { + mov ecx, pDest; + mov edx, expected; + mov al, byte ptr [edx]; + mov ah, desired; + lock cmpxchg byte ptr [ecx], ah; + mov byte ptr [edx], al; + setz bRet; + } + return bRet; +# endif + } +#if _MSC_VER >= 1600 +# pragma warning(pop) +#endif + + template + static inline bool cas8_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + return cas8_strong( pDest, expected, desired, mo_success, mo_fail ); + } + +#if _MSC_VER >= 1600 +# pragma warning(push) + // Disable warning C4800: 'char' : forcing value to bool 'true' or 'false' (performance warning) +# pragma warning( disable: 4800 ) +#endif + template + static inline T exchange8( T volatile * pDest, T v, memory_order order ) noexcept + { + static_assert( sizeof(T) == 1, "Illegal operand size" ); + +# if _MSC_VER >= 1600 + return (T) _InterlockedExchange8( reinterpret_cast(pDest), (char) v ); +# else + __asm { + mov al, v; + mov ecx, pDest; + lock xchg byte ptr [ecx], al; + } +# endif + } +#if _MSC_VER >= 1600 +# pragma warning(pop) +#endif + + template + static inline void store8( T volatile * pDest, T src, memory_order order ) noexcept + { + static_assert( sizeof(T) == 
1, "Illegal operand size" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest ); + + if ( order != memory_order_seq_cst ) { + fence_before( order ); + *pDest = src; + } + else { + exchange8( pDest, src, order ); + } + } + + template + static inline T load8( T volatile const * pSrc, memory_order order ) noexcept + { + static_assert( sizeof(T) == 1, "Illegal operand size" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc ); + + T v = *pSrc; + fence_after_load( order ); + return v; + } + + //----------------------------------------------------------------------------- + // 16bit primitives + //----------------------------------------------------------------------------- + + template + static inline T exchange16( T volatile * pDest, T v, memory_order /*order*/ ) noexcept + { + static_assert( sizeof(T) == 2, "Illegal operand size" ); + assert( cds::details::is_aligned( pDest, 2 )); + +# if _MSC_VER >= 1600 + return (T) _InterlockedExchange16( (short volatile *) pDest, (short) v ); +# else + __asm { + mov ax, v; + mov ecx, pDest; + lock xchg word ptr [ecx], ax; + } +# endif + } + + template + static inline void store16( T volatile * pDest, T src, memory_order order ) noexcept + { + static_assert( sizeof(T) == 2, "Illegal operand size" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest ); + assert( cds::details::is_aligned( pDest, 2 )); + + if ( order != memory_order_seq_cst ) { + fence_before( order ); + *pDest = src; + } + else { + exchange16( pDest, src, order ); + } + } + + template + static inline T load16( T volatile const * pSrc, memory_order order ) noexcept + { + static_assert( sizeof(T) == 2, "Illegal operand size" ); + assert( order == memory_order_relaxed + || order == 
memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc ); + assert( cds::details::is_aligned( pSrc, 2 )); + + T v = *pSrc; + fence_after_load( order ); + return v; + } + + template + static inline bool cas16_strong( T volatile * pDest, T& expected, T desired, memory_order /*mo_success*/, memory_order /*mo_fail*/ ) noexcept + { + static_assert( sizeof(T) == 2, "Illegal operand size" ); + assert( cds::details::is_aligned( pDest, 2 )); + + // _InterlockedCompareExchange behave as read-write memory barriers + T prev = expected; + expected = (T) _InterlockedCompareExchange16( (short *) pDest, (short) desired, (short) expected ); + return expected == prev; + } + + template + static inline bool cas16_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + return cas16_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + //----------------------------------------------------------------------------- + // 32bit primitives + //----------------------------------------------------------------------------- + + template + static inline T exchange32( T volatile * pDest, T v, memory_order /*order*/ ) noexcept + { + static_assert( sizeof(T) == 4, "Illegal operand size" ); + assert( cds::details::is_aligned( pDest, 4 )); + + return (T) _InterlockedExchange( (long *) pDest, (long) v ); + } + + template + static inline void store32( T volatile * pDest, T src, memory_order order ) noexcept + { + static_assert( sizeof(T) == 4, "Illegal operand size" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest ); + assert( cds::details::is_aligned( pDest, 4 )); + + if ( order != memory_order_seq_cst ) { + fence_before( order ); + *pDest = src; + } + else { + exchange32( pDest, src, order ); + } + } + + template + static inline T load32( T volatile const * pSrc, memory_order order ) noexcept + 
{ + static_assert( sizeof(T) == 4, "Illegal operand size" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc ); + assert( cds::details::is_aligned( pSrc, 4 )); + + T v( *pSrc ); + fence_after_load( order ); + return v; + } + + template + static inline bool cas32_strong( T volatile * pDest, T& expected, T desired, memory_order /*mo_success*/, memory_order /*mo_fail*/ ) noexcept + { + static_assert( sizeof(T) == 4, "Illegal operand size" ); + assert( cds::details::is_aligned( pDest, 4 )); + + // _InterlockedCompareExchange behave as read-write memory barriers + T prev = expected; + expected = (T) _InterlockedCompareExchange( (long *) pDest, (long) desired, (long) expected ); + return expected == prev; + } + + template + static inline bool cas32_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + return cas32_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + // fetch_xxx may be emulated via cas32 + // If the platform has special fetch_xxx instruction + // then it should define CDS_ATOMIC_fetch32_xxx_defined macro + +# define CDS_ATOMIC_fetch32_add_defined + template + static inline T fetch32_add( T volatile * pDest, T v, memory_order /*order*/) noexcept + { + static_assert( sizeof(T) == 4, "Illegal operand size" ); + assert( cds::details::is_aligned( pDest, 4 )); + + // _InterlockedExchangeAdd behave as read-write memory barriers + return (T) _InterlockedExchangeAdd( (long *) pDest, (long) v ); + } + + //----------------------------------------------------------------------------- + // 64bit primitives + //----------------------------------------------------------------------------- + + template + static inline bool cas64_strong( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + static_assert( sizeof(T) == 8, "Illegal 
operand size" ); + assert( cds::details::is_aligned( pDest, 8 )); + + // _InterlockedCompareExchange behave as read-write memory barriers + T prev = expected; + expected = (T) _InterlockedCompareExchange64( (__int64 *) pDest, (__int64) desired, (__int64) expected ); + return expected == prev; + } + + template + static inline bool cas64_weak( T volatile * pDest, T& expected, T desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + return cas64_strong( pDest, expected, desired, mo_success, mo_fail ); + } + + template + static inline T load64( T volatile const * pSrc, memory_order order ) noexcept + { + static_assert( sizeof(T) == 8, "Illegal operand size" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc ); + assert( cds::details::is_aligned( pSrc, 8 )); + + // Atomically loads 64bit value by SSE intrinsics + __m128i volatile v = _mm_loadl_epi64( (__m128i const *) pSrc ); + fence_after_load( order ); + return (T) v.m128i_i64[0]; + } + + + template + static inline T exchange64( T volatile * pDest, T v, memory_order order ) noexcept + { + static_assert( sizeof(T) == 8, "Illegal operand size" ); + + T cur = load64( pDest, memory_order_relaxed ); + do { + } while (!cas64_weak( pDest, cur, v, order, memory_order_relaxed )); + return cur; + } + + template + static inline void store64( T volatile * pDest, T val, memory_order order ) noexcept + { + static_assert( sizeof(T) == 8, "Illegal operand size" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest ); + assert( cds::details::is_aligned( pDest, 8 )); + + if ( order != memory_order_seq_cst ) { + __m128i v; + v.m128i_i64[0] = val; + fence_before( order ); + _mm_storel_epi64( (__m128i *) pDest, v ); + } + else { + exchange64( pDest, val, order ); + } + } + + + 
//----------------------------------------------------------------------------- + // pointer primitives + //----------------------------------------------------------------------------- + + template + static inline T * exchange_ptr( T * volatile * pDest, T * v, memory_order order ) noexcept + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal operand size" ); + return (T *) _InterlockedExchange( (long volatile *) pDest, (uintptr_t) v ); + //return (T *) _InterlockedExchangePointer( (void * volatile *) pDest, reinterpret_cast(v)); + } + + template + static inline void store_ptr( T * volatile * pDest, T * src, memory_order order ) noexcept + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal operand size" ); + assert( order == memory_order_relaxed + || order == memory_order_release + || order == memory_order_seq_cst + ); + assert( pDest ); + + if ( order != memory_order_seq_cst ) { + fence_before( order ); + *pDest = src; + } + else { + exchange_ptr( pDest, src, order ); + } + } + + template + static inline T * load_ptr( T * volatile const * pSrc, memory_order order ) noexcept + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal operand size" ); + assert( order == memory_order_relaxed + || order == memory_order_consume + || order == memory_order_acquire + || order == memory_order_seq_cst + ); + assert( pSrc ); + + T * v = *pSrc; + fence_after_load( order ); + return v; + } + + template + static inline bool cas_ptr_strong( T * volatile * pDest, T *& expected, T * desired, memory_order mo_success, memory_order mo_fail ) noexcept + { + static_assert( sizeof(T *) == sizeof(void *), "Illegal operand size" ); + + // _InterlockedCompareExchangePointer behave as read-write memory barriers + T * prev = expected; + expected = (T *) _InterlockedCompareExchange( (long volatile *) pDest, (uintptr_t) desired, (uintptr_t) prev ); + return expected == prev; + } + + template + static inline bool cas_ptr_weak( T * volatile * pDest, T *& expected, T * desired, 
memory_order mo_success, memory_order mo_fail ) noexcept + { + return cas_ptr_strong( pDest, expected, desired, mo_success, mo_fail ); + } + }} // namespace vc::x86 + + } // namespace platform +}} // namespace cds::cxx11_atomic +//@endcond + +#endif // #ifndef CDSLIB_COMPILER_VC_X86_CXX11_ATOMIC_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/basket_queue.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/basket_queue.h new file mode 100644 index 0000000..7de2a13 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/basket_queue.h @@ -0,0 +1,481 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_BASKET_QUEUE_H +#define CDSLIB_CONTAINER_BASKET_QUEUE_H + +#include +#include +#include +//#include + +namespace cds { namespace container { + + /// BasketQueue related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace basket_queue { + + /// Internal statistics + template ::counter_type > + using stat = cds::intrusive::basket_queue::stat< Counter >; + + /// Dummy internal statistics + typedef cds::intrusive::basket_queue::empty_stat empty_stat; + + /// BasketQueue default type traits + struct traits + { + /// Node allocator + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// Back-off strategy + typedef cds::backoff::empty back_off; + + /// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter to enable item counting + typedef atomicity::empty_item_counter item_counter; + + /// Internal statistics (by default, disabled) + /** + Possible option value are: \p basket_queue::stat, \p basket_queue::empty_stat (the default), + user-provided class that supports \p %basket_queue::stat interface. + */ + typedef basket_queue::empty_stat stat; + + /// C++ memory ordering model + /** + Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + */ + typedef opt::v::relaxed_ordering memory_model; + + /// Padding for internal critical atomic data. 
Default is \p opt::cache_line_padding + enum { padding = opt::cache_line_padding }; + }; + + /// Metafunction converting option list to \p basket_queue::traits + /** + Supported \p Options are: + - \p opt::allocator - allocator (like \p std::allocator) used for allocating queue nodes. Default is \ref CDS_DEFAULT_ALLOCATOR + - \p opt::back_off - back-off strategy used, default is \p cds::backoff::empty. + - \p opt::item_counter - the type of item counting feature. Default is \p cds::atomicity::empty_item_counter (item counting disabled) + To enable item counting use \p cds::atomicity::item_counter + - \ opt::stat - the type to gather internal statistics. + Possible statistics types are: \p basket_queue::stat, \p basket_queue::empty_stat, user-provided class that supports \p %basket_queue::stat interface. + Default is \p %basket_queue::empty_stat. + - \p opt::padding - padding for internal critical atomic data. Default is \p opt::cache_line_padding + - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + + Example: declare \p %BasketQueue with item counting and internal statistics + \code + typedef cds::container::BasketQueue< cds::gc::HP, Foo, + typename cds::container::basket_queue::make_traits< + cds::opt::item_counte< cds::atomicity::item_counter >, + cds::opt::stat< cds::intrusive::basket_queue::stat<> > + >::type + > myQueue; + \endcode + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + , Options... 
+ >::type type; +# endif + }; + } // namespace basket_queue + + //@cond + namespace details { + template + struct make_basket_queue + { + typedef GC gc; + typedef T value_type; + typedef Traits traits; + + struct node_type: public intrusive::basket_queue::node< gc > + { + value_type m_value; + + node_type( const value_type& val ) + : m_value( val ) + {} + template + node_type( Args&&... args ) + : m_value( std::forward(args)...) + {} + }; + + typedef typename traits::allocator::template rebind::other allocator_type; + typedef cds::details::Allocator< node_type, allocator_type > cxx_allocator; + + struct node_deallocator + { + void operator ()( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + }; + + struct intrusive_traits : public traits + { + typedef cds::intrusive::basket_queue::base_hook< opt::gc > hook; + typedef node_deallocator disposer; + static constexpr const cds::intrusive::opt::link_check_type link_checker = cds::intrusive::basket_queue::traits::link_checker; + }; + + typedef cds::intrusive::BasketQueue< gc, node_type, intrusive_traits > type; + }; + } + //@endcond + + /// Basket lock-free queue (non-intrusive variant) + /** @ingroup cds_nonintrusive_queue + It is non-intrusive version of basket queue algorithm based on intrusive::BasketQueue counterpart. + + \par Source: + [2007] Moshe Hoffman, Ori Shalev, Nir Shavit "The Baskets Queue" + + Key idea + + In the 'basket' approach, instead of + the traditional ordered list of nodes, the queue consists of an ordered list of groups + of nodes (logical baskets). The order of nodes in each basket need not be specified, and in + fact, it is easiest to maintain them in LIFO order. The baskets fulfill the following basic + rules: + - Each basket has a time interval in which all its nodes' enqueue operations overlap. + - The baskets are ordered by the order of their respective time intervals. + - For each basket, its nodes' dequeue operations occur after its time interval. 
+ - The dequeue operations are performed according to the order of baskets. + + Two properties define the FIFO order of nodes: + - The order of nodes in a basket is not specified. + - The order of nodes in different baskets is the FIFO-order of their respective baskets. + + In algorithms such as the MS-queue or optimistic + queue, threads enqueue items by applying a Compare-and-swap (CAS) operation to the + queue's tail pointer, and all the threads that fail on a particular CAS operation (and also + the winner of that CAS) overlap in time. In particular, they share the time interval of + the CAS operation itself. Hence, all the threads that fail to CAS on the tail-node of + the queue may be inserted into the same basket. By integrating the basket-mechanism + as the back-off mechanism, the time usually spent on backing-off before trying to link + onto the new tail, can now be utilized to insert the failed operations into the basket, + allowing enqueues to complete sooner. In the meantime, the next successful CAS operations + by enqueues allow new baskets to be formed down the list, and these can be + filled concurrently. Moreover, the failed operations don't retry their link attempt on the + new tail, lowering the overall contention on it. This leads to a queue + algorithm that unlike all former concurrent queue algorithms requires virtually no tuning + of the backoff mechanisms to reduce contention, making the algorithm an attractive + out-of-the-box queue. + + In order to enqueue, just as in MSQueue, a thread first tries to link the new node to + the last node. If it failed to do so, then another thread has already succeeded. Thus it + tries to insert the new node into the new basket that was created by the winner thread. + To dequeue a node, a thread first reads the head of the queue to obtain the + oldest basket. It may then dequeue any node in the oldest basket. 
+ + + Template arguments: + - \p GC - garbage collector type: \p gc::HP, \p gc::DHP + - \p T - type of value to be stored in the queue + - \p Traits - queue traits, default is \p basket_queue::traits. You can use \p basket_queue::make_traits + metafunction to make your traits or just derive your traits from \p %basket_queue::traits: + \code + struct myTraits: public cds::container::basket_queue::traits { + typedef cds::intrusive::basket_queue::stat<> stat; + typedef cds::atomicity::item_counter item_counter; + }; + typedef cds::container::BasketQueue< cds::gc::HP, Foo, myTraits > myQueue; + + // Equivalent make_traits example: + typedef cds::container::BasketQueue< cds::gc::HP, Foo, + typename cds::container::basket_queue::make_traits< + cds::opt::stat< cds::container::basket_queue::stat<> >, + cds::opt::item_counter< cds::atomicity::item_counter > + >::type + > myQueue; + \endcode + */ + template + class BasketQueue: +#ifdef CDS_DOXYGEN_INVOKED + private intrusive::BasketQueue< GC, intrusive::basket_queue::node< T >, Traits > +#else + protected details::make_basket_queue< GC, T, Traits >::type +#endif + { + //@cond + typedef details::make_basket_queue< GC, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + /// Rebind template arguments + template + struct rebind { + typedef BasketQueue< GC2, T2, Traits2> other ; ///< Rebinding result + }; + + public: + typedef GC gc; ///< Garbage collector + typedef T value_type; ///< Type of value to be stored in the queue + typedef Traits traits; ///< Queue's traits + + typedef typename base_class::back_off back_off; ///< Back-off strategy used + typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter; ///< Item counting policy used + typedef typename base_class::stat stat; ///< Internal statistics policy used + typedef typename base_class::memory_model memory_model; ///< Memory 
ordering. See cds::opt::memory_model option + + static constexpr const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the algorithm + + protected: + typedef typename maker::node_type node_type; ///< queue node type (derived from intrusive::basket_queue::node) + + //@cond + typedef typename maker::cxx_allocator cxx_allocator; + typedef typename maker::node_deallocator node_deallocator; // deallocate node + typedef typename base_class::node_traits node_traits; + //@endcond + + protected: + ///@cond + static node_type * alloc_node() + { + return cxx_allocator().New(); + } + static node_type * alloc_node( const value_type& val ) + { + return cxx_allocator().New( val ); + } + template + static node_type * alloc_node_move( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward( args )... ); + } + static void free_node( node_type * p ) + { + node_deallocator()( p ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + //@endcond + + public: + /// Initializes empty queue + BasketQueue() + {} + + /// Destructor clears the queue + ~BasketQueue() + {} + + /// Enqueues \p val value into the queue. + /** + The function makes queue node in dynamic memory calling copy constructor for \p val + and then it calls \p intrusive::BasketQueue::enqueue(). + Returns \p true if success, \p false otherwise. 
+ */ + bool enqueue( value_type const& val ) + { + scoped_node_ptr p( alloc_node(val)); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + + /// Enqueues \p val value into the queue, move semantics + bool enqueue( value_type&& val ) + { + scoped_node_ptr p( alloc_node_move( std::move( val ))); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + + /// Enqueues \p data to queue using a functor + /** + \p Func is a functor called to create node. + The functor \p f takes one argument - a reference to a new node of type \ref value_type : + \code + cds::container::BasketQueue< cds::gc::HP, Foo > myQueue; + Bar bar; + myQueue.enqueue_with( [&bar]( Foo& dest ) { dest = bar; } ); + \endcode + */ + template + bool enqueue_with( Func f ) + { + scoped_node_ptr p( alloc_node()); + f( p->m_value ); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + + /// Synonym for \p enqueue() function + bool push( value_type const& val ) + { + return enqueue( val ); + } + + /// Synonym for \p enqueue() function, move semantics + bool push( value_type&& val ) + { + return enqueue( std::move( val )); + } + + /// Synonym for \p enqueue_with() function + template + bool push_with( Func f ) + { + return enqueue_with( f ); + } + + /// Enqueues data of type \ref value_type constructed with std::forward(args)... + template + bool emplace( Args&&... args ) + { + scoped_node_ptr p( alloc_node_move( std::forward(args)...)); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + + /// Dequeues a value from the queue + /** + If queue is not empty, the function returns \p true, \p dest contains copy of + dequeued value. The assignment operator for \p value_type is invoked. + If queue is empty, the function returns \p false, \p dest is unchanged. 
+ */ + bool dequeue( value_type& dest ) + { + return dequeue_with( [&dest]( value_type& src ) { + // TSan finds a race between this read of \p src and node_type constructor + // I think, it is wrong + CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN; + dest = std::move( src ); + CDS_TSAN_ANNOTATE_IGNORE_READS_END; + }); + } + + /// Dequeues a value using a functor + /** + \p Func is a functor called to copy dequeued value. + The functor takes one argument - a reference to removed node: + \code + cds:container::BasketQueue< cds::gc::HP, Foo > myQueue; + Bar bar; + myQueue.dequeue_with( [&bar]( Foo& src ) { bar = std::move( src );}); + \endcode + The functor is called only if the queue is not empty. + */ + template + bool dequeue_with( Func f ) + { + typename base_class::dequeue_result res; + if ( base_class::do_dequeue( res, true )) { + f( node_traits::to_value_ptr( *res.pNext )->m_value ); + return true; + } + return false; + } + + /// Synonym for \p dequeue() function + bool pop( value_type& dest ) + { + return dequeue( dest ); + } + + /// Synonym for \p dequeue_with() function + template + bool pop_with( Func f ) + { + return dequeue_with( f ); + } + + /// Checks if the queue is empty + /** + Note that this function is not \p const. + The function is based on \p dequeue() algorithm. + */ + bool empty() + { + return base_class::empty(); + } + + /// Clear the queue + /** + The function repeatedly calls \ref dequeue until it returns \p nullptr. 
+ */ + void clear() + { + base_class::clear(); + } + + /// Returns queue's item count + /** \copydetails cds::intrusive::BasketQueue::size() + */ + size_t size() const + { + return base_class::size(); + } + + /// Returns reference to internal statistics + const stat& statistics() const + { + return base_class::statistics(); + } + + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_BASKET_QUEUE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/bronson_avltree_map_rcu.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/bronson_avltree_map_rcu.h new file mode 100644 index 0000000..0de4b98 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/bronson_avltree_map_rcu.h @@ -0,0 +1,712 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_BRONSON_AVLTREE_MAP_RCU_H +#define CDSLIB_CONTAINER_BRONSON_AVLTREE_MAP_RCU_H + +#include +#include + +namespace cds { namespace container { + + namespace bronson_avltree { + //@cond + namespace details { + template < class RCU, typename Key, typename T, typename Traits> + struct make_map + { + typedef Key key_type; + typedef T mapped_type; + typedef Traits original_traits; + + typedef cds::details::Allocator< mapped_type, typename original_traits::allocator > cxx_allocator; + + struct traits : public original_traits + { + struct disposer { + void operator()( mapped_type * p ) const + { + cxx_allocator().Delete( p ); + } + }; + }; + + // Metafunction result + typedef BronsonAVLTreeMap< RCU, Key, mapped_type *, traits > type; + }; + } // namespace details + //@endcond + } // namespace bronson_avltree + + /// Bronson et al AVL-tree (RCU specialization) + /** @ingroup cds_nonintrusive_map + @ingroup cds_nonintrusive_tree + @anchor cds_container_BronsonAVLTreeMap_rcu + + Source: + - [2010] N.Bronson, J.Casper, H.Chafi, K.Olukotun "A Practical Concurrent Binary Search Tree" + - Java implementation + + This is a concurrent AVL tree algorithm that uses hand-over-hand optimistic validation, + a concurrency control mechanism for searching and navigating a binary search tree. + This mechanism minimizes spurious retries when concurrent structural changes cannot + affect the correctness of the search or navigation result. 
+ The algorithm is based on partially external trees, a simple scheme that simplifies deletions + by leaving a routing node in the tree when deleting a node that has two children, + then opportunistically unlinking routing nodes during rebalancing. As in external trees, + which store values only in leaf nodes, deletions can be performed locally while holding + a fixed number of locks. Partially external trees, however, require far fewer routing nodes + than an external tree for most sequences of insertions and deletions. + The algorithm uses optimistic concurrency control, but carefully manage the + tree in such a way that all atomic regions have fixed read and write sets + that are known ahead of time. This allows to reduce practical overheads by embedding + the concurrency control directly. To perform tree operations using only fixed sized + atomic regions the algo uses the following mechanisms: search operations overlap atomic blocks as + in the hand-over-hand locking technique; mutations perform rebalancing separately; + and deletions occasionally leave a routing node in the tree. + + Template arguments: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p Key - key type + - \p T - value type to be stored in tree's nodes. + - \p Traits - tree traits, default is \p bronson_avltree::traits + It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction + instead of \p Traits template argument. + + There is \ref cds_container_BronsonAVLTreeMap_rcu_ptr "a specialization" for "key -> value pointer" map. + + @note Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. 
+ */ + template < + typename RCU, + typename Key, + typename T, +# ifdef CDS_DOXYGEN_INVOKED + typename Traits = bronson_avltree::traits +#else + typename Traits +#endif + > + class BronsonAVLTreeMap< cds::urcu::gc, Key, T, Traits > +#ifdef CDS_DOXYGEN_INVOKED + : private BronsonAVLTreeMap< cds::urcu::gc, Key, T*, Traits > +#else + : private bronson_avltree::details::make_map< cds::urcu::gc, Key, T, Traits >::type +#endif + { + //@cond + typedef bronson_avltree::details::make_map< cds::urcu::gc, Key, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + typedef cds::urcu::gc gc; ///< RCU Garbage collector + typedef Key key_type; ///< type of a key stored in the map + typedef T mapped_type; ///< type of value stored in the map + typedef Traits traits; ///< Traits template parameter + + typedef typename base_class::key_comparator key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less + typedef typename traits::item_counter item_counter; ///< Item counting policy + typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option + typedef typename traits::allocator allocator_type; ///< allocator for value + typedef typename traits::node_allocator node_allocator_type;///< allocator for maintaining internal nodes + typedef typename traits::stat stat; ///< internal statistics + typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy + typedef typename traits::back_off back_off; ///< Back-off strategy + typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking + + /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion" + static bool const c_bRelaxedInsert = traits::relaxed_insert; + + /// Group of \p extract_xxx functions does not require external locking + static constexpr const bool c_bExtractLockExternal = 
base_class::c_bExtractLockExternal; + + typedef typename base_class::rcu_lock rcu_lock; ///< RCU scoped lock + + /// Returned pointer to \p mapped_type of extracted node + typedef typename base_class::exempt_ptr exempt_ptr; + + protected: + //@cond + typedef typename base_class::node_type node_type; + typedef typename base_class::node_scoped_lock node_scoped_lock; + typedef typename maker::cxx_allocator cxx_allocator; + + typedef typename base_class::update_flags update_flags; + //@endcond + + public: + /// Creates empty map + BronsonAVLTreeMap() + {} + + /// Destroys the map + ~BronsonAVLTreeMap() + {} + + /// Inserts new node with \p key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the map. + + Preconditions: + - The \p key_type should be constructible from a value of type \p K. + - The \p mapped_type should be default-constructible. + + RCU \p synchronize() can be called. RCU should not be locked. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( K const& key ) + { + return base_class::do_update(key, key_comparator(), + []( node_type * pNode ) -> mapped_type* + { + assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr ); + CDS_UNUSED( pNode ); + return cxx_allocator().New(); + }, + update_flags::allow_insert + ) == update_flags::result_inserted; + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the map. + + Preconditions: + - The \p key_type should be constructible from \p key of type \p K. + - The \p mapped_type should be constructible from \p val of type \p V. + + RCU \p synchronize() method can be called. RCU should not be locked. + + Returns \p true if \p val is inserted into the map, \p false otherwise. 
+ */ + template + bool insert( K const& key, V const& val ) + { + return base_class::do_update( key, key_comparator(), + [&val]( node_type * pNode ) -> mapped_type* + { + assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr ); + CDS_UNUSED( pNode ); + return cxx_allocator().New( val ); + }, + update_flags::allow_insert + ) == update_flags::result_inserted; + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( key_type const& key, mapped_type& item ); + }; + \endcode + + The key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p value_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + The functor is called under the node lock. + + RCU \p synchronize() method can be called. RCU should not be locked. + */ + template + bool insert_with( K const& key, Func func ) + { + return base_class::do_update( key, key_comparator(), + [&func]( node_type * pNode ) -> mapped_type* + { + assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr ); + mapped_type * pVal = cxx_allocator().New(); + func( pNode->m_key, *pVal ); + return pVal; + }, + update_flags::allow_insert + ) == update_flags::result_inserted; + } + + /// For \p key inserts data of type \p mapped_type created in-place from \p args + /** + Returns \p true if inserting successful, \p false otherwise. + + RCU \p synchronize() method can be called. RCU should not be locked. + */ + template + bool emplace( K&& key, Args&&... 
args ) + { + struct scoped_ptr + { + mapped_type * pVal; + scoped_ptr( mapped_type * p ): pVal( p ) {} + ~scoped_ptr() { if ( pVal ) cxx_allocator().Delete( pVal ); } + void release() { pVal = nullptr; } + }; + + scoped_ptr p( cxx_allocator().MoveNew( std::forward( args )... )); + if ( base_class::insert( std::forward( key ), p.pVal )) { + p.release(); + return true; + } + return false; + } + + /// Updates the value for \p key + /** + The operation performs inserting or changing data with lock-free manner. + + If the \p key not found in the map, then the new item created from \p key + will be inserted into the map iff \p bAllowInsert is \p true + (note that in this case the \ref key_type should be constructible from type \p K). + Otherwise, the functor \p func is called with item found. + The functor \p Func signature is: + \code + struct my_functor { + void operator()( bool bNew, key_type const& key, mapped_type& item ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - value + + The functor may change any fields of the \p item. The functor is called under the node lock, + the caller can change any field of \p item. + + RCU \p synchronize() method can be called. RCU should not be locked. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if new item has been added or \p false if the item with \p key + already exists. + */ + template + std::pair update( K const& key, Func func, bool bAllowInsert = true ) + { + int result = base_class::do_update( key, key_comparator(), + [&func]( node_type * pNode ) -> mapped_type* + { + mapped_type * pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed ); + if ( !pVal ) { + pVal = cxx_allocator().New(); + func( true, pNode->m_key, *pVal ); + } + else + func( false, pNode->m_key, *pVal ); + return pVal; + }, + (bAllowInsert ? 
update_flags::allow_insert : 0) | update_flags::allow_update + ); + return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 ); + } + + + /// Delete \p key from the map + /** + RCU \p synchronize() method can be called. RCU should not be locked. + + Return \p true if \p key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + return base_class::erase( key ); + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \p erase(K const&) + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( K const& key, Less pred ) + { + return base_class::erase_with( key, pred ); + } + + /// Delete \p key from the map + /** \anchor cds_nonintrusive_BronsonAVLTreeMap_rcu_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(key_type const& key, mapped_type& item) { ... } + }; + \endcode + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key, Func f ) + { + return base_class::erase( key, f ); + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_BronsonAVLTreeMap_rcu_erase_func "erase(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. 
+ */ + template + bool erase_with( K const& key, Less pred, Func f ) + { + return base_class::erase_with( key, pred, f ); + } + + /// Extracts a value with minimal key from the map + /** + Returns \p exempt_ptr pointer to the leftmost item. + If the set is empty, returns empty \p exempt_ptr. + + Note that the function returns only the value for minimal key. + To retrieve its key use \p extract_min( Func ) member function. + + @note Due the concurrent nature of the map, the function extracts nearly minimum key. + It means that the function gets leftmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. + So, the function returns the item with minimum key at the moment of tree traversing. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not free the item. + The deallocator will be implicitly invoked when the returned object is destroyed or when + its \p release() member function is called. + */ + exempt_ptr extract_min() + { + return base_class::extract_min(); + } + + /// Extracts minimal key and corresponding value + /** + Returns \p exempt_ptr to the leftmost item. + If the tree is empty, returns empty \p exempt_ptr. + + \p Func functor is used to store minimal key. + \p Func has the following signature: + \code + struct functor { + void operator()( key_type const& key ); + }; + \endcode + If the tree is empty, \p f is not called. + Otherwise, is it called with minimal key, the pointer to corresponding value is returned + as \p exempt_ptr. + + @note Due the concurrent nature of the map, the function extracts nearly minimum key. + It means that the function gets leftmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. + So, the function returns the item with minimum key at the moment of tree traversing. 
+ + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not free the item. + The deallocator will be implicitly invoked when the returned object is destroyed or when + its \p release() member function is called. + */ + template + exempt_ptr extract_min( Func f ) + { + return base_class::extract_min( f ); + } + + /// Extracts minimal key and corresponding value + /** + This function is a shortcut for the following call: + \code + key_type key; + exempt_ptr xp = theTree.extract_min( [&key]( key_type const& k ) { key = k; } ); + \endcode + \p key_type should be copy-assignable. The copy of minimal key + is returned in \p min_key argument. + */ + typename std::enable_if< std::is_copy_assignable::value, exempt_ptr >::type + extract_min_key( key_type& min_key ) + { + return base_class::extract_min_key( min_key ); + } + + /// Extracts an item with maximal key from the map + /** + Returns \p exempt_ptr pointer to the rightmost item. + If the set is empty, returns empty \p exempt_ptr. + + Note that the function returns only the value for maximal key. + To retrieve its key use \p extract_max( Func ) or \p extract_max_key(key_type&) member function. + + @note Due the concurrent nature of the map, the function extracts nearly maximal key. + It means that the function gets rightmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key greater than rightmost item's key. + So, the function returns the item with maximum key at the moment of tree traversing. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not free the item. + The deallocator will be implicitly invoked when the returned object is destroyed or when + its \p release() is called. + */ + exempt_ptr extract_max() + { + return base_class::extract_max(); + } + + /// Extracts the maximal key and corresponding value + /** + Returns \p exempt_ptr pointer to the rightmost item. 
+ If the set is empty, returns empty \p exempt_ptr. + + \p Func functor is used to store maximal key. + \p Func has the following signature: + \code + struct functor { + void operator()( key_type const& key ); + }; + \endcode + If the tree is empty, \p f is not called. + Otherwise, is it called with maximal key, the pointer to corresponding value is returned + as \p exempt_ptr. + + @note Due the concurrent nature of the map, the function extracts nearly maximal key. + It means that the function gets rightmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key greater than rightmost item's key. + So, the function returns the item with maximum key at the moment of tree traversing. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not free the item. + The deallocator will be implicitly invoked when the returned object is destroyed or when + its \p release() is called. + */ + template + exempt_ptr extract_max( Func f ) + { + return base_class::extract_max( f ); + } + + /// Extracts the maximal key and corresponding value + /** + This function is a shortcut for the following call: + \code + key_type key; + exempt_ptr xp = theTree.extract_max( [&key]( key_type const& k ) { key = k; } ); + \endcode + \p key_type should be copy-assignable. The copy of maximal key + is returned in \p max_key argument. + */ + typename std::enable_if< std::is_copy_assignable::value, exempt_ptr >::type + extract_max_key( key_type& max_key ) + { + return base_class::extract_max_key( max_key ); + } + + /// Extracts an item from the map + /** + The function searches an item with key equal to \p key in the tree, + unlinks it, and returns \p exempt_ptr pointer to a value found. + If \p key is not found the function returns an empty \p exempt_ptr. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not destroy the value found. 
+ The dealloctor will be implicitly invoked when the returned object is destroyed or when + its \p release() member function is called. + */ + template + exempt_ptr extract( Q const& key ) + { + return base_class::extract( key ); + } + + /// Extracts an item from the map using \p pred for searching + /** + The function is an analog of \p extract(Q const&) + but \p pred is used for key compare. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the map. + */ + template + exempt_ptr extract_with( Q const& key, Less pred ) + { + return base_class::extract_with( key, pred ); + } + + /// Find the key \p key + /** + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( key_type const& key, mapped_type& val ); + }; + \endcode + where \p val is the item found for \p key + The functor is called under node-level lock. + + The function applies RCU lock internally. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( K const& key, Func f ) + { + return base_class::find( key, f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \p find(K const&, Func) + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool find_with( K const& key, Less pred, Func f ) + { + return base_class::find_with( key, pred, f ); + } + + /// Checks whether the map contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + The function applies RCU lock internally. 
+ */ + template + bool contains( K const& key ) + { + return base_class::contains( key ); + } + + /// Checks whether the map contains \p key using \p pred predicate for searching + /** + The function is similar to contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool contains( K const& key, Less pred ) + { + return base_class::contains( key, pred ); + } + + /// Clears the map + void clear() + { + base_class::clear(); + } + + /// Checks if the map is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the map + /** + Only leaf nodes containing user data are counted. + + The value returned depends on item counter type provided by \p Traits template parameter. + If it is \p atomicity::empty_item_counter this function always returns 0. + + The function is not suitable for checking the tree emptiness, use \p empty() + member function for this purpose. + */ + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Returns reference to \p sync_monitor object + sync_monitor& monitor() + { + return base_class::monitor(); + } + //@cond + sync_monitor const& monitor() const + { + return base_class::monitor(); + } + //@endcond + + /// Checks internal consistency (not atomic, not thread-safe) + /** + The debugging function to check internal consistency of the tree. + */ + bool check_consistency() const + { + return base_class::check_consistency(); + } + + /// Checks internal consistency (not atomic, not thread-safe) + /** + The debugging function to check internal consistency of the tree. 
+ The functor \p Func is called if a violation of internal tree structure + is found: + \code + struct functor { + void operator()( size_t nLevel, size_t hLeft, size_t hRight ); + }; + \endcode + where + - \p nLevel - the level where the violation is found + - \p hLeft - the height of left subtree + - \p hRight - the height of right subtree + + The functor is called for each violation found. + */ + template + bool check_consistency( Func f ) const + { + return base_class::check_consistency( f ); + } + }; +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/cuckoo_map.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/cuckoo_map.h new file mode 100644 index 0000000..d06083e --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/cuckoo_map.h @@ -0,0 +1,772 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_CUCKOO_MAP_H +#define CDSLIB_CONTAINER_CUCKOO_MAP_H + +#include +#include + +namespace cds { namespace container { + + //@cond + namespace details { + template + struct make_cuckoo_map + { + typedef Key key_type; ///< key type + typedef T mapped_type; ///< type of value stored in the map + typedef std::pair value_type; ///< Pair type + + typedef Traits original_traits; + typedef typename original_traits::probeset_type probeset_type; + static bool const store_hash = original_traits::store_hash; + static unsigned int const store_hash_count = store_hash ? ((unsigned int) std::tuple_size< typename original_traits::hash::hash_tuple_type >::value) : 0; + + struct node_type: public intrusive::cuckoo::node + { + value_type m_val; + + template + node_type( K const& key ) + : m_val( std::make_pair( key_type(key), mapped_type())) + {} + + template + node_type( K const& key, Q const& v ) + : m_val( std::make_pair( key_type(key), mapped_type(v))) + {} + + template + node_type( K&& key, Args&&... 
args ) + : m_val( std::forward(key), std::move( mapped_type(std::forward(args)...))) + {} + }; + + struct key_accessor { + key_type const& operator()( node_type const& node ) const + { + return node.m_val.first; + } + }; + + struct intrusive_traits: public original_traits + { + typedef intrusive::cuckoo::base_hook< + cds::intrusive::cuckoo::probeset_type< probeset_type > + ,cds::intrusive::cuckoo::store_hash< store_hash_count > + > hook; + + typedef cds::intrusive::cuckoo::traits::disposer disposer; + + typedef typename std::conditional< + std::is_same< typename original_traits::equal_to, opt::none >::value + , opt::none + , cds::details::predicate_wrapper< node_type, typename original_traits::equal_to, key_accessor > + >::type equal_to; + + typedef typename std::conditional< + std::is_same< typename original_traits::compare, opt::none >::value + , opt::none + , cds::details::compare_wrapper< node_type, typename original_traits::compare, key_accessor > + >::type compare; + + typedef typename std::conditional< + std::is_same< typename original_traits::less, opt::none >::value + ,opt::none + ,cds::details::predicate_wrapper< node_type, typename original_traits::less, key_accessor > + >::type less; + + typedef opt::details::hash_list_wrapper< typename original_traits::hash, node_type, key_accessor > hash; + }; + + typedef intrusive::CuckooSet< node_type, intrusive_traits > type; + }; + } // namespace details + //@endcond + + /// Cuckoo hash map + /** @ingroup cds_nonintrusive_map + + Source + - [2007] M.Herlihy, N.Shavit, M.Tzafrir "Concurrent Cuckoo Hashing. Technical report" + - [2008] Maurice Herlihy, Nir Shavit "The Art of Multiprocessor Programming" + + About Cuckoo hashing + + [From "The Art of Multiprocessor Programming"] + Cuckoo hashing is a hashing algorithm in which a newly added item displaces any earlier item + occupying the same slot. For brevity, a table is a k-entry array of items. 
For a hash set f size + N = 2k we use a two-entry array of tables, and two independent hash functions, + h0, h1: KeyRange -> 0,...,k-1 + mapping the set of possible keys to entries in he array. To test whether a value \p x is in the set, + find(x) tests whether either table[0][h0(x)] or table[1][h1(x)] is + equal to \p x. Similarly, erase(x)checks whether \p x is in either table[0][h0(x)] + or table[1][h1(x)], ad removes it if found. + + The insert(x) successively "kicks out" conflicting items until every key has a slot. + To add \p x, the method swaps \p x with \p y, the current occupant of table[0][h0(x)]. + If the prior value was \p nullptr, it is done. Otherwise, it swaps the newly nest-less value \p y + for the current occupant of table[1][h1(y)] in the same way. As before, if the prior value + was \p nullptr, it is done. Otherwise, the method continues swapping entries (alternating tables) + until it finds an empty slot. We might not find an empty slot, either because the table is full, + or because the sequence of displacement forms a cycle. We therefore need an upper limit on the + number of successive displacements we are willing to undertake. When this limit is exceeded, + we resize the hash table, choose new hash functions and start over. + + For concurrent cuckoo hashing, rather than organizing the set as a two-dimensional table of + items, we use two-dimensional table of probe sets, where a probe set is a constant-sized set + of items with the same hash code. Each probe set holds at most \p PROBE_SIZE items, but the algorithm + tries to ensure that when the set is quiescent (i.e no method call in progress) each probe set + holds no more than THRESHOLD < PROBE_SET items. While method calls are in-flight, a probe + set may temporarily hold more than \p THRESHOLD but never more than \p PROBE_SET items. + + In current implementation, a probe set can be defined either as a (single-linked) list + or as a fixed-sized vector, optionally ordered. 
+ + In description above two-table cuckoo hashing (k = 2) has been considered. + We can generalize this approach for k >= 2 when we have \p k hash functions + h[0], ... h[k-1] and \p k tables table[0], ... table[k-1]. + + The search in probe set is linear, the complexity is O(PROBE_SET) . + The probe set may be ordered or not. Ordered probe set can be a little better since + the average search complexity is O(PROBE_SET/2). + However, the overhead of sorting can eliminate a gain of ordered search. + + The probe set is ordered if \p compare or \p less is specified in \p Traits + template parameter. Otherwise, the probe set is unordered and \p Traits must contain + \p equal_to predicate. + + Template arguments: + - \p Key - key type + - \p T - the type stored in the map. + - \p Traits - map traits, default is \p cuckoo::traits. + It is possible to declare option-based set with \p cuckoo::make_traits metafunction + result as \p Traits template argument. + + Examples + + Declares cuckoo mapping from \p std::string to struct \p foo. 
+ For cuckoo hashing we should provide at least two hash functions: + \code + struct hash1 { + size_t operator()(std::string const& s) const + { + return cds::opt::v::hash( s ); + } + }; + + struct hash2: private hash1 { + size_t operator()(std::string const& s) const + { + size_t h = ~( hash1::operator()(s)); + return ~h + 0x9e3779b9 + (h << 6) + (h >> 2); + } + }; + \endcode + + Cuckoo-map with list-based unordered probe set and storing hash values + \code + #include + + // Declare type traits + struct my_traits: public cds::container::cuckoo::traits + { + typedef std::equal_to< std::string > equal_to; + typedef std::tuple< hash1, hash2 > hash; + + static bool const store_hash = true; + }; + + // Declare CuckooMap type + typedef cds::container::CuckooMap< std::string, foo, my_traits > my_cuckoo_map; + + // Equal option-based declaration + typedef cds::container::CuckooMap< std::string, foo, + cds::container::cuckoo::make_traits< + cds::opt::hash< std::tuple< hash1, hash2 > > + ,cds::opt::equal_to< std::equal_to< std::string > > + ,cds::container::cuckoo::store_hash< true > + >::type + > opt_cuckoo_map; + \endcode + + If we provide \p less functor instead of \p equal_to + we get as a result a cuckoo map with ordered probe set that may improve + performance. 
+ Example for ordered vector-based probe-set: + + \code + #include + + // Declare type traits + // We use a vector of capacity 4 as probe-set container and store hash values in the node + struct my_traits: public cds::container::cuckoo::traits + { + typedef std::less< std::string > less; + typedef std::tuple< hash1, hash2 > hash; + typedef cds::container::cuckoo::vector<4> probeset_type; + + static bool const store_hash = true; + }; + + // Declare CuckooMap type + typedef cds::container::CuckooMap< std::string, foo, my_traits > my_cuckoo_map; + + // Equal option-based declaration + typedef cds::container::CuckooMap< std::string, foo, + cds::container::cuckoo::make_traits< + cds::opt::hash< std::tuple< hash1, hash2 > > + ,cds::opt::less< std::less< std::string > > + ,cds::container::cuckoo::probeset_type< cds::container::cuckoo::vector<4> > + ,cds::container::cuckoo::store_hash< true > + >::type + > opt_cuckoo_map; + \endcode + + */ + template + class CuckooMap: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::CuckooSet< std::pair< Key const, T>, Traits> +#else + protected details::make_cuckoo_map::type +#endif + { + //@cond + typedef details::make_cuckoo_map maker; + typedef typename maker::type base_class; + //@endcond + public: + typedef Key key_type; ///< key type + typedef T mapped_type; ///< value type stored in the container + typedef std::pair value_type; ///< Key-value pair type stored in the map + typedef Traits traits; ///< Map traits + + typedef typename traits::hash hash; ///< hash functor tuple wrapped for internal use + typedef typename base_class::hash_tuple_type hash_tuple_type; ///< hash tuple type + + typedef typename base_class::mutex_policy mutex_policy; ///< Concurrent access policy, see \p cuckoo::traits::mutex_policy + typedef typename base_class::stat stat; ///< internal statistics type + + static bool const c_isSorted = base_class::c_isSorted; ///< whether the probe set should be ordered + static size_t const c_nArity = 
base_class::c_nArity; ///< the arity of cuckoo hashing: the number of hash functors provided; minimum 2. + + typedef typename base_class::key_equal_to key_equal_to; ///< Key equality functor; used only for unordered probe-set + + typedef typename base_class::key_comparator key_comparator; ///< key comparing functor based on opt::compare and opt::less option setter. Used only for ordered probe set + + typedef typename base_class::allocator allocator; ///< allocator type used for internal bucket table allocations + + /// Node allocator type + typedef typename std::conditional< + std::is_same< typename traits::node_allocator, opt::none >::value, + allocator, + typename traits::node_allocator + >::type node_allocator; + + /// item counter type + typedef typename traits::item_counter item_counter; + + protected: + //@cond + typedef typename base_class::scoped_cell_lock scoped_cell_lock; + typedef typename base_class::scoped_full_lock scoped_full_lock; + typedef typename base_class::scoped_resize_lock scoped_resize_lock; + typedef typename maker::key_accessor key_accessor; + + typedef typename base_class::value_type node_type; + typedef cds::details::Allocator< node_type, node_allocator > cxx_node_allocator; + //@endcond + + public: + static unsigned int const c_nDefaultProbesetSize = base_class::c_nDefaultProbesetSize; ///< default probeset size + static size_t const c_nDefaultInitialSize = base_class::c_nDefaultInitialSize; ///< default initial size + static unsigned int const c_nRelocateLimit = base_class::c_nRelocateLimit; ///< Count of attempts to relocate before giving up + + protected: + //@cond + template + static node_type * alloc_node( K const& key ) + { + return cxx_node_allocator().New( key ); + } + template + static node_type * alloc_node( K&& key, Args&&... args ) + { + return cxx_node_allocator().MoveNew( std::forward( key ), std::forward(args)... 
); + } + + static void free_node( node_type * pNode ) + { + cxx_node_allocator().Delete( pNode ); + } + //@endcond + + protected: + //@cond + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + + //@endcond + + public: + /// Default constructor + /** + Initial size = \ref c_nDefaultInitialSize + + Probe set size: + - \ref c_nDefaultProbesetSize if \p probeset_type is \p cuckoo::list + - \p Capacity if \p probeset_type is cuckoo::vector + + Probe set threshold = probe set size - 1 + */ + CuckooMap() + {} + + /// Constructs an object with given probe set size and threshold + /** + If probe set type is cuckoo::vector vector + then \p nProbesetSize should be equal to vector's \p Capacity. + */ + CuckooMap( + size_t nInitialSize ///< Initial map size; if 0 - use default initial size \ref c_nDefaultInitialSize + , unsigned int nProbesetSize ///< probe set size + , unsigned int nProbesetThreshold = 0 ///< probe set threshold, nProbesetThreshold < nProbesetSize. If 0, nProbesetThreshold = nProbesetSize - 1 + ) + : base_class( nInitialSize, nProbesetSize, nProbesetThreshold ) + {} + + /// Constructs an object with given hash functor tuple + /** + The probe set size and threshold are set as default, see CuckooSet() + */ + CuckooMap( + hash_tuple_type const& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity + ) + : base_class( h ) + {} + + /// Constructs a map with given probe set properties and hash functor tuple + /** + If probe set type is cuckoo::vector vector + then \p nProbesetSize should be equal to vector's \p Capacity. + */ + CuckooMap( + size_t nInitialSize ///< Initial map size; if 0 - use default initial size \ref c_nDefaultInitialSize + , unsigned int nProbesetSize ///< probe set size + , unsigned int nProbesetThreshold ///< probe set threshold, nProbesetThreshold < nProbesetSize. 
If 0, nProbesetThreshold = nProbesetSize - 1 + , hash_tuple_type const& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity + ) + : base_class( nInitialSize, nProbesetSize, nProbesetThreshold, h ) + {} + + /// Constructs a map with given hash functor tuple (move semantics) + /** + The probe set size and threshold are set as default, see CuckooSet() + */ + CuckooMap( + hash_tuple_type&& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity + ) + : base_class( std::forward(h)) + {} + + /// Constructs a map with given probe set properties and hash functor tuple (move semantics) + /** + If probe set type is cuckoo::vector vector + then \p nProbesetSize should be equal to vector's \p Capacity. + */ + CuckooMap( + size_t nInitialSize ///< Initial map size; if 0 - use default initial size \ref c_nDefaultInitialSize + , unsigned int nProbesetSize ///< probe set size + , unsigned int nProbesetThreshold ///< probe set threshold, nProbesetThreshold < nProbesetSize. If 0, nProbesetThreshold = nProbesetSize - 1 + , hash_tuple_type&& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity + ) + : base_class( nInitialSize, nProbesetSize, nProbesetThreshold, std::forward(h)) + {} + + /// Destructor clears the map + ~CuckooMap() + { + clear(); + } + + public: + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from a value of type \p K. + In trivial case, \p K is equal to \ref key_type. + - The \ref mapped_type should be default-constructible. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( K const& key ) + { + return insert_with( key, [](value_type&){} ); + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the map. 
+ + Preconditions: + - The \ref key_type should be constructible from \p key of type \p K. + - The \ref value_type should be constructible from \p val of type \p V. + + Returns \p true if \p val is inserted into the set, \p false otherwise. + */ + template + bool insert( K const& key, V const& val ) + { + return insert_with( key, [&val](value_type& item) { item.second = val ; } ); + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the map's item inserted: + - item.first is a const reference to item's key that cannot be changed. + - item.second is a reference to item's value that may be changed. + + The key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p value_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + */ + template + bool insert_with( const K& key, Func func ) + { + scoped_node_ptr pNode( alloc_node( key )); + if ( base_class::insert( *pNode, [&func]( node_type& item ) { func( item.m_val ); } )) { + pNode.release(); + return true; + } + return false; + } + + /// For key \p key inserts data of type \ref value_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool emplace( K&& key, Args&&... args ) + { + scoped_node_ptr pNode( alloc_node( std::forward(key), std::forward(args)... 
)); + if ( base_class::insert( *pNode )) { + pNode.release(); + return true; + } + return false; + } + + /// Updates the node + /** + The operation performs inserting or changing data with lock-free manner. + + If \p key is not found in the map, then \p key is inserted iff \p bAllowInsert is \p true. + Otherwise, the functor \p func is called with item found. + The functor \p func signature is: + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - an item of the map for \p key + + Returns std::pair where \p first is \p true if operation is successful, + i.e. the node has been inserted or updated, + \p second is \p true if new item has been added or \p false if the item with \p key + already exists. + */ + template + std::pair update( K const& key, Func func, bool bAllowInsert = true ) + { + scoped_node_ptr pNode( alloc_node( key )); + std::pair res = base_class::update( *pNode, + [&func](bool bNew, node_type& item, node_type const& ){ func( bNew, item.m_val ); }, + bAllowInsert + ); + if ( res.first && res.second ) + pNode.release(); + return res; + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( K const& key, Func func ) + { + return update( key, func, true ); + } + //@endcond + + /// Delete \p key from the map + /** \anchor cds_nonintrusive_CuckooMap_erase_val + + Return \p true if \p key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + node_type * pNode = base_class::erase(key); + if ( pNode ) { + free_node( pNode ); + return true; + } + return false; + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_CuckooMap_erase_val "erase(Q const&)" + but \p pred is used for key comparing. 
+ If cuckoo map is ordered, then \p Predicate should have the interface and semantics like \p std::less. + If cuckoo map is unordered, then \p Predicate should have the interface and semantics like \p std::equal_to. + \p Predicate must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( K const& key, Predicate pred ) + { + CDS_UNUSED( pred ); + node_type * pNode = base_class::erase_with(key, cds::details::predicate_wrapper()); + if ( pNode ) { + free_node( pNode ); + return true; + } + return false; + } + + /// Delete \p key from the map + /** \anchor cds_nonintrusive_CuckooMap_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type& item) { ... } + }; + \endcode + + Return \p true if key is found and deleted, \p false otherwise + + See also: \ref erase + */ + template + bool erase( K const& key, Func f ) + { + node_type * pNode = base_class::erase( key ); + if ( pNode ) { + f( pNode->m_val ); + free_node( pNode ); + return true; + } + return false; + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_CuckooMap_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + If cuckoo map is ordered, then \p Predicate should have the interface and semantics like \p std::less. + If cuckoo map is unordered, then \p Predicate should have the interface and semantics like \p std::equal_to. + \p Predicate must imply the same element order as the comparator used for building the map. 
+ */ + template + bool erase_with( K const& key, Predicate pred, Func f ) + { + CDS_UNUSED( pred ); + node_type * pNode = base_class::erase_with( key, cds::details::predicate_wrapper()); + if ( pNode ) { + f( pNode->m_val ); + free_node( pNode ); + return true; + } + return false; + } + + /// Find the key \p key + /** \anchor cds_nonintrusive_CuckooMap_find_func + + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + The functor may change \p item.second. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( K const& key, Func f ) + { + return base_class::find( key, [&f](node_type& item, K const& ) { f( item.m_val );}); + } + + /// Find the key \p val using \p pred predicate for comparing + /** + The function is an analog of \ref cds_nonintrusive_CuckooMap_find_func "find(K const&, Func)" + but \p pred is used for key comparison. + If you use ordered cuckoo map, then \p Predicate should have the interface and semantics like \p std::less. + If you use unordered cuckoo map, then \p Predicate should have the interface and semantics like \p std::equal_to. + \p pred must imply the same element order as the comparator used for building the map. + */ + template + bool find_with( K const& key, Predicate pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::find_with( key, cds::details::predicate_wrapper(), + [&f](node_type& item, K const& ) { f( item.m_val );}); + } + + /// Checks whether the map contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. 
+ */ + template + bool contains( K const& key ) + { + return base_class::contains( key ); + } + //@cond + template + CDS_DEPRECATED("the function is deprecated, use contains()") + bool find( K const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the map contains \p key using \p pred predicate for searching + /** + The function is similar to contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool contains( K const& key, Predicate pred ) + { + CDS_UNUSED( pred ); + return base_class::contains( key, cds::details::predicate_wrapper()); + } + //@cond + template + CDS_DEPRECATED("the function is deprecated, use contains()") + bool find_with( K const& key, Predicate pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Clears the map + void clear() + { + base_class::clear_and_dispose( node_disposer()); + } + + /// Checks if the map is empty + /** + Emptiness is checked by item counting: if item count is zero then the map is empty. + */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the map + size_t size() const + { + return base_class::size(); + } + + /// Returns the size of hash table + /** + The hash table size is non-constant and can be increased via resizing. + */ + size_t bucket_count() const + { + return base_class::bucket_count(); + } + + /// Returns lock array size + /** + The lock array size is constant. 
+ */ + size_t lock_count() const + { + return base_class::lock_count(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Returns const reference to mutex policy internal statistics + typename mutex_policy::statistics_type const& mutex_policy_statistics() const + { + return base_class::mutex_policy_statistics(); + } + }; +}} // namespace cds::container + +#endif //#ifndef CDSLIB_CONTAINER_CUCKOO_MAP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/cuckoo_set.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/cuckoo_set.h new file mode 100644 index 0000000..d29bf8d --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/cuckoo_set.h @@ -0,0 +1,850 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_CUCKOO_SET_H +#define CDSLIB_CONTAINER_CUCKOO_SET_H + +#include +#include + +namespace cds { namespace container { + + //@cond + namespace details { + template + struct make_cuckoo_set + { + typedef T value_type; + typedef Traits original_traits; + typedef typename original_traits::probeset_type probeset_type; + static bool const store_hash = original_traits::store_hash; + static unsigned int const store_hash_count = store_hash ? ((unsigned int) std::tuple_size< typename original_traits::hash::hash_tuple_type >::value) : 0; + + struct node_type: public intrusive::cuckoo::node + { + value_type m_val; + + template + node_type( Q const& v ) + : m_val(v) + {} + + template + node_type( Args&&... args ) + : m_val( std::forward(args)...) 
+ {} + }; + + struct value_accessor { + value_type const& operator()( node_type const& node ) const + { + return node.m_val; + } + }; + + template + using predicate_wrapper = cds::details::binary_functor_wrapper< ReturnValue, Pred, node_type, value_accessor >; + + struct intrusive_traits: public original_traits + { + typedef intrusive::cuckoo::base_hook< + cds::intrusive::cuckoo::probeset_type< probeset_type > + ,cds::intrusive::cuckoo::store_hash< store_hash_count > + > hook; + + typedef cds::intrusive::cuckoo::traits::disposer disposer; + + typedef typename std::conditional< + std::is_same< typename original_traits::equal_to, opt::none >::value + , opt::none + , predicate_wrapper< typename original_traits::equal_to, bool > + >::type equal_to; + + typedef typename std::conditional< + std::is_same< typename original_traits::compare, opt::none >::value + , opt::none + , predicate_wrapper< typename original_traits::compare, int > + >::type compare; + + typedef typename std::conditional< + std::is_same< typename original_traits::less, opt::none >::value + ,opt::none + ,predicate_wrapper< typename original_traits::less, bool > + >::type less; + + typedef opt::details::hash_list_wrapper< typename original_traits::hash, node_type, value_accessor > hash; + }; + + typedef intrusive::CuckooSet< node_type, intrusive_traits > type; + }; + } // namespace details + //@endcond + + /// Cuckoo hash set + /** @ingroup cds_nonintrusive_set + + Source + - [2007] M.Herlihy, N.Shavit, M.Tzafrir "Concurrent Cuckoo Hashing. Technical report" + - [2008] Maurice Herlihy, Nir Shavit "The Art of Multiprocessor Programming" + + About Cuckoo hashing + + [From "The Art of Multiprocessor Programming"] + Cuckoo hashing is a hashing algorithm in which a newly added item displaces any earlier item + occupying the same slot. For brevity, a table is a k-entry array of items. 
For a hash set f size + N = 2k we use a two-entry array of tables, and two independent hash functions, + h0, h1: KeyRange -> 0,...,k-1 + mapping the set of possible keys to entries in he array. To test whether a value \p x is in the set, + find(x) tests whether either table[0][h0(x)] or table[1][h1(x)] is + equal to \p x. Similarly, erase(x)checks whether \p x is in either table[0][h0(x)] + or table[1][h1(x)], ad removes it if found. + + The insert(x) successively "kicks out" conflicting items until every key has a slot. + To add \p x, the method swaps \p x with \p y, the current occupant of table[0][h0(x)]. + If the prior value was \p nullptr, it is done. Otherwise, it swaps the newly nest-less value \p y + for the current occupant of table[1][h1(y)] in the same way. As before, if the prior value + was \p nullptr, it is done. Otherwise, the method continues swapping entries (alternating tables) + until it finds an empty slot. We might not find an empty slot, either because the table is full, + or because the sequence of displacement forms a cycle. We therefore need an upper limit on the + number of successive displacements we are willing to undertake. When this limit is exceeded, + we resize the hash table, choose new hash functions and start over. + + For concurrent cuckoo hashing, rather than organizing the set as a two-dimensional table of + items, we use two-dimensional table of probe sets, where a probe set is a constant-sized set + of items with the same hash code. Each probe set holds at most \p PROBE_SIZE items, but the algorithm + tries to ensure that when the set is quiescent (i.e no method call in progress) each probe set + holds no more than THRESHOLD < PROBE_SET items. While method calls are in-flight, a probe + set may temporarily hold more than \p THRESHOLD but never more than \p PROBE_SET items. + + In current implementation, a probe set can be defined either as a (single-linked) list + or as a fixed-sized vector, optionally ordered. 
+ + In description above two-table cuckoo hashing (k = 2) has been considered. + We can generalize this approach for k >= 2 when we have \p k hash functions + h[0], ... h[k-1] and \p k tables table[0], ... table[k-1]. + + The search in probe set is linear, the complexity is O(PROBE_SET) . + The probe set may be ordered or not. Ordered probe set can be a little better since + the average search complexity is O(PROBE_SET/2). + However, the overhead of sorting can eliminate a gain of ordered search. + + The probe set is ordered if \p compare or \p less is specified in \p Traits + template parameter. Otherwise, the probe set is unordered and \p Traits must contain + \p equal_to predicate. + + Template arguments: + - \p T - the type stored in the set. + - \p Traits - type traits. See cuckoo::traits for explanation. + It is possible to declare option-based set with cuckoo::make_traits metafunction result as \p Traits template argument. + + Examples + + Cuckoo-set with list-based unordered probe set and storing hash values + \code + #include + + // Data stored in cuckoo set + struct my_data + { + // key field + std::string strKey; + + // other data + // ... 
+ }; + + // Provide equal_to functor for my_data since we will use unordered probe-set + struct my_data_equal_to { + bool operator()( const my_data& d1, const my_data& d2 ) const + { + return d1.strKey.compare( d2.strKey ) == 0; + } + + bool operator()( const my_data& d, const std::string& s ) const + { + return d.strKey.compare(s) == 0; + } + + bool operator()( const std::string& s, const my_data& d ) const + { + return s.compare( d.strKey ) == 0; + } + }; + + // Provide two hash functor for my_data + struct hash1 { + size_t operator()(std::string const& s) const + { + return cds::opt::v::hash( s ); + } + size_t operator()( my_data const& d ) const + { + return (*this)( d.strKey ); + } + }; + + struct hash2: private hash1 { + size_t operator()(std::string const& s) const + { + size_t h = ~( hash1::operator()(s)); + return ~h + 0x9e3779b9 + (h << 6) + (h >> 2); + } + size_t operator()( my_data const& d ) const + { + return (*this)( d.strKey ); + } + }; + + // Declare type traits + struct my_traits: public cds::container::cuckoo::traits + { + typedef my_data_equal_to equal_to; + typedef std::tuple< hash1, hash2 > hash; + + static bool const store_hash = true; + }; + + // Declare CuckooSet type + typedef cds::container::CuckooSet< my_data, my_traits > my_cuckoo_set; + + // Equal option-based declaration + typedef cds::container::CuckooSet< my_data, + cds::container::cuckoo::make_traits< + cds::opt::hash< std::tuple< hash1, hash2 > > + ,cds::opt::equal_to< my_data_equal_to > + ,cds::container::cuckoo::store_hash< true > + >::type + > opt_cuckoo_set; + \endcode + + If we provide \p compare function instead of \p equal_to for \p my_data + we get as a result a cuckoo set with ordered probe set that may improve + performance. + Example for ordered vector-based probe-set: + + \code + #include + + // Data stored in cuckoo set + struct my_data + { + // key field + std::string strKey; + + // other data + // ... 
+ }; + + // Provide compare functor for my_data since we want to use ordered probe-set + struct my_data_compare { + int operator()( const my_data& d1, const my_data& d2 ) const + { + return d1.strKey.compare( d2.strKey ); + } + + int operator()( const my_data& d, const std::string& s ) const + { + return d.strKey.compare(s); + } + + int operator()( const std::string& s, const my_data& d ) const + { + return s.compare( d.strKey ); + } + }; + + // Provide two hash functor for my_data + struct hash1 { + size_t operator()(std::string const& s) const + { + return cds::opt::v::hash( s ); + } + size_t operator()( my_data const& d ) const + { + return (*this)( d.strKey ); + } + }; + + struct hash2: private hash1 { + size_t operator()(std::string const& s) const + { + size_t h = ~( hash1::operator()(s)); + return ~h + 0x9e3779b9 + (h << 6) + (h >> 2); + } + size_t operator()( my_data const& d ) const + { + return (*this)( d.strKey ); + } + }; + + // Declare type traits + // We use a vector of capacity 4 as probe-set container and store hash values in the node + struct my_traits: public cds::container::cuckoo::traits + { + typedef my_data_compare compare; + typedef std::tuple< hash1, hash2 > hash; + typedef cds::container::cuckoo::vector<4> probeset_type; + + static bool const store_hash = true; + }; + + // Declare CuckooSet type + typedef cds::container::CuckooSet< my_data, my_traits > my_cuckoo_set; + + // Equal option-based declaration + typedef cds::container::CuckooSet< my_data, + cds::container::cuckoo::make_traits< + cds::opt::hash< std::tuple< hash1, hash2 > > + ,cds::opt::compare< my_data_compare > + ,cds::container::cuckoo::probeset_type< cds::container::cuckoo::vector<4> > + ,cds::container::cuckoo::store_hash< true > + >::type + > opt_cuckoo_set; + \endcode + + */ + template + class CuckooSet: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::CuckooSet +#else + protected details::make_cuckoo_set::type +#endif + { + //@cond + typedef details::make_cuckoo_set 
maker; + typedef typename maker::type base_class; + //@endcond + + public: + typedef T value_type ; ///< value type stored in the container + typedef Traits traits ; ///< traits + + typedef typename traits::hash hash; ///< hash functor tuple wrapped for internal use + typedef typename base_class::hash_tuple_type hash_tuple_type; ///< Type of hash tuple + + typedef typename base_class::mutex_policy mutex_policy; ///< Concurrent access policy, see cuckoo::traits::mutex_policy + typedef typename base_class::stat stat; ///< internal statistics type + + + static bool const c_isSorted = base_class::c_isSorted; ///< whether the probe set should be ordered + static size_t const c_nArity = base_class::c_nArity; ///< the arity of cuckoo hashing: the number of hash functors provided; minimum 2. + + typedef typename base_class::key_equal_to key_equal_to; ///< Key equality functor; used only for unordered probe-set + + typedef typename base_class::key_comparator key_comparator; ///< key comparing functor based on \p Traits::compare and \p Traits::less option setter. 
Used only for ordered probe set + + typedef typename base_class::allocator allocator; ///< allocator type used for internal bucket table allocations + + /// Node allocator type + typedef typename std::conditional< + std::is_same< typename traits::node_allocator, opt::none >::value, + allocator, + typename traits::node_allocator + >::type node_allocator; + + /// item counter type + typedef typename traits::item_counter item_counter; + + protected: + //@cond + typedef typename base_class::value_type node_type; + typedef cds::details::Allocator< node_type, node_allocator > cxx_node_allocator; + //@endcond + + public: + static unsigned int const c_nDefaultProbesetSize = base_class::c_nDefaultProbesetSize; ///< default probeset size + static size_t const c_nDefaultInitialSize = base_class::c_nDefaultInitialSize; ///< default initial size + static unsigned int const c_nRelocateLimit = base_class::c_nRelocateLimit; ///< Count of attempts to relocate before giving up + + protected: + //@cond + template + static node_type * alloc_node( Q const& v ) + { + return cxx_node_allocator().New( v ); + } + template + static node_type * alloc_node( Args&&... args ) + { + return cxx_node_allocator().MoveNew( std::forward(args)... 
); + } + + static void free_node( node_type * pNode ) + { + cxx_node_allocator().Delete( pNode ); + } + //@endcond + + protected: + //@cond + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + + //@endcond + + public: + /// Default constructor + /** + Initial size = \ref c_nDefaultInitialSize + + Probe set size: + - \ref c_nDefaultProbesetSize if \p probeset_type is \p cuckoo::list + - \p Capacity if \p probeset_type is cuckoo::vector + + Probe set threshold = probe set size - 1 + */ + CuckooSet() + {} + + /// Constructs the set object with given probe set size and threshold + /** + If probe set type is cuckoo::vector vector + then \p nProbesetSize should be equal to vector's \p Capacity. + */ + CuckooSet( + size_t nInitialSize ///< Initial set size; if 0 - use default initial size \ref c_nDefaultInitialSize + , unsigned int nProbesetSize ///< probe set size + , unsigned int nProbesetThreshold = 0 ///< probe set threshold, nProbesetThreshold < nProbesetSize. If 0, nProbesetThreshold = nProbesetSize - 1 + ) + : base_class( nInitialSize, nProbesetSize, nProbesetThreshold ) + {} + + /// Constructs the set object with given hash functor tuple + /** + The probe set size and threshold are set as default, see CuckooSet() + */ + CuckooSet( + hash_tuple_type const& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity + ) + : base_class( h ) + {} + + /// Constructs the set object with given probe set properties and hash functor tuple + /** + If probe set type is cuckoo::vector vector + then \p nProbesetSize should be equal to vector's \p Capacity. + */ + CuckooSet( + size_t nInitialSize ///< Initial set size; if 0 - use default initial size \ref c_nDefaultInitialSize + , unsigned int nProbesetSize ///< probe set size + , unsigned int nProbesetThreshold ///< probe set threshold, nProbesetThreshold < nProbesetSize. 
If 0, nProbesetThreshold = nProbesetSize - 1 + , hash_tuple_type const& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity + ) + : base_class( nInitialSize, nProbesetSize, nProbesetThreshold, h ) + {} + + /// Constructs the set object with given hash functor tuple (move semantics) + /** + The probe set size and threshold are set as default, see CuckooSet() + */ + CuckooSet( + hash_tuple_type&& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity + ) + : base_class( std::forward(h)) + {} + + /// Constructs the set object with given probe set properties and hash functor tuple (move semantics) + /** + If probe set type is cuckoo::vector vector + then \p nProbesetSize should be equal to vector's \p Capacity. + */ + CuckooSet( + size_t nInitialSize ///< Initial set size; if 0 - use default initial size \ref c_nDefaultInitialSize + , unsigned int nProbesetSize ///< probe set size + , unsigned int nProbesetThreshold ///< probe set threshold, nProbesetThreshold < nProbesetSize. If 0, nProbesetThreshold = nProbesetSize - 1 + , hash_tuple_type&& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity + ) + : base_class( nInitialSize, nProbesetSize, nProbesetThreshold, std::forward(h)) + {} + + /// Destructor clears the set + ~CuckooSet() + { + clear(); + } + + public: + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the set. + + The type \p Q should contain as minimum the complete key for the node. + The object of \ref value_type should be constructible from a value of type \p Q. + In trivial case, \p Q is equal to \ref value_type. + + Returns \p true if \p val is inserted into the set, \p false otherwise. 
+ */ + template + bool insert( Q const& val ) + { + return insert( val, []( value_type& ) {} ); + } + + /// Inserts new node + /** + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-field of new item . + + The functor signature is: + \code + void func( value_type& item ); + \endcode + where \p item is the item inserted. + + The type \p Q can differ from \ref value_type of items storing in the set. + Therefore, the \p value_type should be constructible from type \p Q. + + The user-defined functor is called only if the inserting is success. + */ + template + bool insert( Q const& val, Func f ) + { + scoped_node_ptr pNode( alloc_node( val )); + if ( base_class::insert( *pNode, [&f]( node_type& node ) { f( node.m_val ); } )) { + pNode.release(); + return true; + } + return false; + } + + /// Inserts data of type \ref value_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool emplace( Args&&... args ) + { + scoped_node_ptr pNode( alloc_node( std::forward(args)... )); + if ( base_class::insert( *pNode )) { + pNode.release(); + return true; + } + return false; + } + + /// Updates the node + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val is not found in the set, then \p val is inserted into the set + iff \p bAllowInsert is \p true. + Otherwise, the functor \p func is called with item found. + The functor \p func signature is: + \code + struct my_functor { + void operator()( bool bNew, value_type& item, const Q& val ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p %update() function + If new item has been inserted (i.e. 
\p bNew is \p true) then \p item and \p val arguments + refer to the same thing. + + Returns std::pair where \p first is \p true if operation is successful, + i.e. the node has been inserted or updated, + \p second is \p true if new item has been added or \p false if the item with \p key + already exists. + */ + template + std::pair update( Q const& val, Func func, bool bAllowInsert = true ) + { + scoped_node_ptr pNode( alloc_node( val )); + std::pair res = base_class::update( *pNode, + [&val,&func](bool bNew, node_type& item, node_type const& ){ func( bNew, item.m_val, val ); }, + bAllowInsert + ); + if ( res.first && res.second ) + pNode.release(); + return res; + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( Q const& val, Func func ) + { + return update( val, func, true ); + } + //@endcond + + /// Delete \p key from the set + /** \anchor cds_nonintrusive_CuckooSet_erase + + Since the key of set's item type \ref value_type is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The set item comparator should be able to compare the type \p value_type + and the type \p Q. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key ) + { + node_type * pNode = base_class::erase( key ); + if ( pNode ) { + free_node( pNode ); + return true; + } + return false; + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_CuckooSet_erase "erase(Q const&)" + but \p pred is used for key comparing. + If cuckoo set is ordered, then \p Predicate should have the interface and semantics like \p std::less. + If cuckoo set is unordered, then \p Predicate should have the interface and semantics like \p std::equal_to. + \p Predicate must imply the same element order as the comparator used for building the set. 
+ */ + template + bool erase_with( Q const& key, Predicate pred ) + { + CDS_UNUSED( pred ); + node_type * pNode = base_class::erase_with( key, typename maker::template predicate_wrapper()); + if ( pNode ) { + free_node( pNode ); + return true; + } + return false; + } + + /// Delete \p key from the set + /** \anchor cds_nonintrusive_CuckooSet_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface is: + \code + struct functor { + void operator()(value_type const& val); + }; + \endcode + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key, Func f ) + { + node_type * pNode = base_class::erase( key ); + if ( pNode ) { + f( pNode->m_val ); + free_node( pNode ); + return true; + } + return false; + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_CuckooSet_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + If you use ordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::less. + If you use unordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::equal_to. + \p Predicate must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Predicate pred, Func f ) + { + CDS_UNUSED( pred ); + node_type * pNode = base_class::erase_with( key, typename maker::template predicate_wrapper()); + if ( pNode ) { + f( pNode->m_val ); + free_node( pNode ); + return true; + } + return false; + } + + /// Find the key \p val + /** \anchor cds_nonintrusive_CuckooSet_find_func + + The function searches the item with key equal to \p val and calls the functor \p f for item found. 
+ The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + The functor can change non-key fields of \p item. + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + The type \p Q can differ from \ref value_type of items storing in the container. + Therefore, the \p value_type should be comparable with type \p Q. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) + { + return base_class::find( val, [&f](node_type& item, Q& v) { f( item.m_val, v );}); + } + //@cond + template + bool find( Q const& val, Func f ) + { + return base_class::find( val, [&f](node_type& item, Q const& v) { f( item.m_val, v );}); + } + //@endcond + + /// Find the key \p val using \p pred predicate for comparing + /** + The function is an analog of \ref cds_nonintrusive_CuckooSet_find_func "find(Q&, Func)" + but \p pred is used for key comparison. + If you use ordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::less. + If you use unordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::equal_to. + \p pred must imply the same element order as the comparator used for building the set. 
+ */ + template + bool find_with( Q& val, Predicate pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::find_with( val, typename maker::template predicate_wrapper(), + [&f](node_type& item, Q& v) { f( item.m_val, v );}); + } + //@cond + template + bool find_with( Q const& val, Predicate pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::find_with( val, typename maker::template predicate_wrapper(), + [&f](node_type& item, Q const& v) { f( item.m_val, v );}); + } + //@endcond + + /// Checks whether the set contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + */ + template + bool contains( Q const& key ) + { + return base_class::find( key, [](node_type&, Q const&) {}); + } + //@cond + template + CDS_DEPRECATED("the function is deprecated, use contains()") + bool find( Q const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the set contains \p key using \p pred predicate for searching + /** + The function is similar to contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool contains( Q const& key, Predicate pred ) + { + CDS_UNUSED( pred ); + return base_class::find_with( key, typename maker::template predicate_wrapper(), [](node_type&, Q const&) {}); + } + //@cond + template + CDS_DEPRECATED("the function is deprecated, use contains()") + bool find_with( Q const& key, Predicate pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Clears the set + /** + The function erases all items from the set. + */ + void clear() + { + return base_class::clear_and_dispose( node_disposer()); + } + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. 
+ */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the set + size_t size() const + { + return base_class::size(); + } + + /// Returns the size of hash table + /** + The hash table size is non-constant and can be increased via resizing. + */ + size_t bucket_count() const + { + return base_class::bucket_count(); + } + + /// Returns lock array size + size_t lock_count() const + { + return base_class::lock_count(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Returns const reference to mutex policy internal statistics + typename mutex_policy::statistics_type const& mutex_policy_statistics() const + { + return base_class::mutex_policy_statistics(); + } + }; + +}} // namespace cds::container + +#endif //#ifndef CDSLIB_CONTAINER_CUCKOO_SET_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/base.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/base.h new file mode 100644 index 0000000..18156ed --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/base.h @@ -0,0 +1,103 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_DETAILS_BASE_H +#define CDSLIB_CONTAINER_DETAILS_BASE_H + +#include + +namespace cds { + +/// Standard (non-intrusive) containers +/** + @ingroup cds_nonintrusive_containers + This namespace contains implementations of non-intrusive (std-like) lock-free containers. +*/ +namespace container { + + /// Common options for non-intrusive containers + /** @ingroup cds_nonintrusive_helper + This namespace contains options for non-intrusive containers that is, in general, the same as for the intrusive containers. 
+ It imports all definitions from cds::opt and cds::intrusive::opt namespaces + */ + namespace opt { + using namespace cds::intrusive::opt; + } // namespace opt + + /// @defgroup cds_nonintrusive_containers Non-intrusive containers + /** @defgroup cds_nonintrusive_helper Helper structs for non-intrusive containers + @ingroup cds_nonintrusive_containers + */ + + /** @defgroup cds_nonintrusive_stack Stack + @ingroup cds_nonintrusive_containers + */ + /** @defgroup cds_nonintrusive_queue Queue + @ingroup cds_nonintrusive_containers + */ + /** @defgroup cds_nonintrusive_deque Deque + @ingroup cds_nonintrusive_containers + */ + /** @defgroup cds_nonintrusive_priority_queue Priority queue + @ingroup cds_nonintrusive_containers + */ + /** @defgroup cds_nonintrusive_map Map + @ingroup cds_nonintrusive_containers + */ + /** @defgroup cds_nonintrusive_set Set + @ingroup cds_nonintrusive_containers + */ + /** @defgroup cds_nonintrusive_list List + @ingroup cds_nonintrusive_containers + */ + /** @defgroup cds_nonintrusive_tree Tree + @ingroup cds_nonintrusive_containers + */ + + + // Tag for selecting iterable list implementation + /** @ingroup cds_nonintrusive_helper + This struct is empty and it is used only as a tag for selecting \p IterableList + as ordered list implementation in declaration of some classes. + + See \p split_list::traits::ordered_list as an example. 
+ */ + typedef intrusive::iterable_list_tag iterable_list_tag; + + //@cond + template + struct is_iterable_list: public cds::intrusive::is_iterable_list< List > + {}; + //@endcond + +} // namespace container +} // namespace cds + +#endif // #ifndef CDSLIB_CONTAINER_DETAILS_BASE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/bronson_avltree_base.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/bronson_avltree_base.h new file mode 100644 index 0000000..865c504 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/bronson_avltree_base.h @@ -0,0 +1,528 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_DETAILS_BRONSON_AVLTREE_BASE_H +#define CDSLIB_CONTAINER_DETAILS_BRONSON_AVLTREE_BASE_H + +#include +#include +#include +#include +#include + +namespace cds { namespace container { + + /// BronsonAVLTree related declarations + namespace bronson_avltree { + + template + struct node; + + //@cond + template + struct link_node + { + typedef Node node_type; + typedef T mapped_type; + typedef uint32_t version_type; ///< version type (internal) + + enum + { + shrinking = 1, + unlinked = 2, + version_flags = shrinking | unlinked + // the rest is version counter + }; + + atomics::atomic< int > m_nHeight; ///< Node height + atomics::atomic m_nVersion; ///< Version bits + atomics::atomic m_pParent; ///< Parent node + atomics::atomic m_pLeft; ///< Left child + atomics::atomic m_pRight; ///< Right child + typename SyncMonitor::node_injection m_SyncMonitorInjection; ///< @ref cds_sync_monitor "synchronization monitor" injected data + atomics::atomic m_pValue; ///< Value + + public: + link_node() + : m_nHeight( 0 ) + , m_nVersion( 0 ) + , m_pParent( nullptr ) + , m_pLeft( nullptr ) + , m_pRight( nullptr ) + { + m_pValue.store( nullptr, atomics::memory_order_release ); + } + + link_node( int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight ) + : m_nHeight( nHeight ) + , m_nVersion( version ) + , m_pParent( pParent ) + , m_pLeft( pLeft ) + , m_pRight( pRight ) + { + m_pValue.store( nullptr, 
atomics::memory_order_release ); + } + + node_type * parent( atomics::memory_order order ) const + { + return m_pParent.load( order ); + } + + void parent( node_type * p, atomics::memory_order order ) + { + m_pParent.store( p, order ); + } + + node_type * child( int nDirection, atomics::memory_order order ) const + { + assert( nDirection != 0 ); + return nDirection < 0 ? m_pLeft.load( order ) : m_pRight.load( order ); + } + + void child( node_type * pChild, int nDirection, atomics::memory_order order ) + { + assert( nDirection != 0 ); + if ( nDirection < 0 ) + m_pLeft.store( pChild, order ); + else + m_pRight.store( pChild, order ); + } + + version_type version( atomics::memory_order order ) const + { + return m_nVersion.load( order ); + } + + void version( version_type ver, atomics::memory_order order ) + { + m_nVersion.store( ver, order ); + } + + void exchange_version( version_type ver, atomics::memory_order order ) + { + m_nVersion.exchange( ver, order ); + } + + int height( atomics::memory_order order ) const + { + return m_nHeight.load( order ); + } + + void height( int h, atomics::memory_order order ) + { + m_nHeight.store( h, order ); + } + + template + void wait_until_shrink_completed( atomics::memory_order order ) const + { + BackOff bkoff; + while ( is_shrinking( order )) + bkoff(); + } + + bool is_unlinked( atomics::memory_order order ) const + { + return m_nVersion.load( order ) == unlinked; + } + + bool is_shrinking( atomics::memory_order order ) const + { + return (m_nVersion.load( order ) & shrinking) != 0; + } + + mapped_type * value( atomics::memory_order order ) const + { + return m_pValue.load( order ); + } + + bool is_valued( atomics::memory_order order ) const + { + return value( order ) != nullptr; + } + }; + //@endcond + + /// BronsonAVLTree internal node + template + struct node: public link_node< node, T, SyncMonitor > + { + //@cond + typedef link_node< node, T, SyncMonitor > base_class; + //@endcond + + typedef Key key_type; ///< key type 
+ typedef T mapped_type; ///< value type + //@cond + typedef typename base_class::version_type version_type; + //@endcond + + key_type const m_key; ///< Key + node * m_pNextRemoved; ///< thread-local list of removed node + + public: + //@cond + template + node( Q&& key ) + : base_class() + , m_key( std::forward( key )) + , m_pNextRemoved( nullptr ) + {} + + template + node( Q&& key, int nHeight, version_type version, node * pParent, node * pLeft, node * pRight ) + : base_class( nHeight, version, pParent, pLeft, pRight ) + , m_key( std::forward( key )) + , m_pNextRemoved( nullptr ) + {} + //@endcond + }; + + /// BronsonAVLTreeMap internal statistics + template + struct stat { + typedef Counter event_counter; ///< Event counter type + + event_counter m_nFindSuccess; ///< Count of success \p find() call + event_counter m_nFindFailed; ///< Count of failed \p find() call + event_counter m_nFindRetry; ///< Count of retries during \p find() + event_counter m_nFindWaitShrinking; ///< Count of waiting until shrinking completed during \p find() call + + event_counter m_nInsertSuccess; ///< Count of inserting data node + event_counter m_nInsertFailed; ///< Count of insert failures + event_counter m_nRelaxedInsertFailed; ///< Count of false creating of data nodes (only if @ref bronson_avltree::relaxed_insert "relaxed insertion" is enabled) + event_counter m_nInsertRetry; ///< Count of insert retries via concurrent operations + event_counter m_nUpdateWaitShrinking; ///< Count of waiting until shrinking completed during \p update() call + event_counter m_nUpdateRetry; ///< Count of update retries via concurrent operations + event_counter m_nUpdateRootWaitShrinking; ///< Count of waiting until root shrinking completed during \p update() call + event_counter m_nUpdateSuccess; ///< Count of updating data node + event_counter m_nUpdateUnlinked; ///< Count of attempts to update unlinked node + event_counter m_nDisposedNode; ///< Count of disposed node + event_counter 
m_nDisposedValue; ///< Count of disposed value + event_counter m_nExtractedValue; ///< Count of extracted value + event_counter m_nRemoveSuccess; ///< Count of successfully \p erase() call + event_counter m_nRemoveFailed; ///< Count of failed \p erase() call + event_counter m_nRemoveRetry; ///< Count of erase/extract retries + event_counter m_nExtractSuccess; ///< Count of successfully \p extract() call + event_counter m_nExtractFailed; ///< Count of failed \p extract() call + event_counter m_nRemoveWaitShrinking; ///< Count of waiting until shrinking completed during \p erase() or \p extract() call + event_counter m_nRemoveRootWaitShrinking; ///< Count of waiting until root shrinking completed during \p erase() or \p extract() call + event_counter m_nMakeRoutingNode; ///< How many nodes were converted to routing (valueless) nodes + + event_counter m_nRightRotation; ///< Count of single right rotation + event_counter m_nLeftRotation; ///< Count of single left rotation + event_counter m_nLeftRightRotation; ///< Count of double left-over-right rotation + event_counter m_nRightLeftRotation; ///< Count of double right-over-left rotation + + event_counter m_nRotateAfterRightRotation; ///< Count of rotation required after single right rotation + event_counter m_nRemoveAfterRightRotation; ///< Count of removal required after single right rotation + event_counter m_nDamageAfterRightRotation; ///< Count of damaged node after single right rotation + + event_counter m_nRotateAfterLeftRotation; ///< Count of rotation required after single left rotation + event_counter m_nRemoveAfterLeftRotation; ///< Count of removal required after single left rotation + event_counter m_nDamageAfterLeftRotation; ///< Count of damaged node after single left rotation + + event_counter m_nRotateAfterRLRotation; ///< Count of rotation required after right-over-left rotation + event_counter m_nRemoveAfterRLRotation; ///< Count of removal required after right-over-left rotation + event_counter 
m_nRotateAfterLRRotation; ///< Count of rotation required after left-over-right rotation + event_counter m_nRemoveAfterLRRotation; ///< Count of removal required after left-over-right rotation + + event_counter m_nInsertRebalanceReq; ///< Count of rebalance required after inserting + event_counter m_nRemoveRebalanceReq; ///< Count of rebalance required after removing + + //@cond + void onFindSuccess() { ++m_nFindSuccess ; } + void onFindFailed() { ++m_nFindFailed ; } + void onFindRetry() { ++m_nFindRetry ; } + void onFindWaitShrinking() { ++m_nFindWaitShrinking; } + + void onInsertSuccess() { ++m_nInsertSuccess; } + void onInsertFailed() { ++m_nInsertFailed; } + void onRelaxedInsertFailed() { ++m_nRelaxedInsertFailed; } + void onInsertRetry() { ++m_nInsertRetry ; } + void onUpdateWaitShrinking() { ++m_nUpdateWaitShrinking; } + void onUpdateRetry() { ++m_nUpdateRetry; } + void onUpdateRootWaitShrinking() { ++m_nUpdateRootWaitShrinking; } + void onUpdateSuccess() { ++m_nUpdateSuccess; } + void onUpdateUnlinked() { ++m_nUpdateUnlinked; } + void onDisposeNode() { ++m_nDisposedNode; } + void onDisposeValue() { ++m_nDisposedValue; } + void onExtractValue() { ++m_nExtractedValue; } + void onRemove(bool bSuccess) + { + if ( bSuccess ) + ++m_nRemoveSuccess; + else + ++m_nRemoveFailed; + } + void onExtract( bool bSuccess ) + { + if ( bSuccess ) + ++m_nExtractSuccess; + else + ++m_nExtractFailed; + } + void onRemoveRetry() { ++m_nRemoveRetry; } + void onRemoveWaitShrinking() { ++m_nRemoveWaitShrinking; } + void onRemoveRootWaitShrinking() { ++m_nRemoveRootWaitShrinking; } + void onMakeRoutingNode() { ++m_nMakeRoutingNode; } + + void onRotateRight() { ++m_nRightRotation; } + void onRotateLeft() { ++m_nLeftRotation; } + void onRotateRightOverLeft() { ++m_nRightLeftRotation; } + void onRotateLeftOverRight() { ++m_nLeftRightRotation; } + + void onRotateAfterRightRotation() { ++m_nRotateAfterRightRotation; } + void onRemoveAfterRightRotation() { ++m_nRemoveAfterRightRotation; } + 
void onDamageAfterRightRotation() { ++m_nDamageAfterRightRotation; } + + void onRotateAfterLeftRotation() { ++m_nRotateAfterLeftRotation; } + void onRemoveAfterLeftRotation() { ++m_nRemoveAfterLeftRotation; } + void onDamageAfterLeftRotation() { ++m_nDamageAfterLeftRotation; } + + void onRotateAfterRLRotation() { ++m_nRotateAfterRLRotation; } + void onRemoveAfterRLRotation() { ++m_nRemoveAfterRLRotation; } + void onRotateAfterLRRotation() { ++m_nRotateAfterLRRotation; } + void onRemoveAfterLRRotation() { ++m_nRemoveAfterLRRotation; } + + void onInsertRebalanceRequired() { ++m_nInsertRebalanceReq; } + void onRemoveRebalanceRequired() { ++m_nRemoveRebalanceReq; } + //@endcond + }; + + /// BronsonAVLTreeMap empty statistics + struct empty_stat { + //@cond + void onFindSuccess() const {} + void onFindFailed() const {} + void onFindRetry() const {} + void onFindWaitShrinking() const {} + + void onInsertSuccess() const {} + void onInsertFailed() const {} + void onRelaxedInsertFailed() const {} + void onInsertRetry() const {} + void onUpdateWaitShrinking() const {} + void onUpdateRetry() const {} + void onUpdateRootWaitShrinking() const {} + void onUpdateSuccess() const {} + void onUpdateUnlinked() const {} + void onDisposeNode() const {} + void onDisposeValue() const {} + void onExtractValue() const {} + void onRemove(bool /*bSuccess*/) const {} + void onExtract(bool /*bSuccess*/) const {} + void onRemoveRetry() const {} + void onRemoveWaitShrinking() const {} + void onRemoveRootWaitShrinking() const {} + void onMakeRoutingNode() const {} + + void onRotateRight() const {} + void onRotateLeft() const {} + void onRotateRightOverLeft() const {} + void onRotateLeftOverRight() const {} + + void onRotateAfterRightRotation() const {} + void onRemoveAfterRightRotation() const {} + void onDamageAfterRightRotation() const {} + + void onRotateAfterLeftRotation() const {} + void onRemoveAfterLeftRotation() const {} + void onDamageAfterLeftRotation() const {} + + void 
onRotateAfterRLRotation() const {} + void onRemoveAfterRLRotation() const {} + void onRotateAfterLRRotation() const {} + void onRemoveAfterLRRotation() const {} + + void onInsertRebalanceRequired() const {} + void onRemoveRebalanceRequired() const {} + //@endcond + }; + + /// Option to allow relaxed insert into \ref cds_container_BronsonAVLTreeMap_rcu "Bronson et al AVL-tree" + /** + By default, this option is disabled and the new node is created under its parent lock. + In this case, it is guaranteed the new node will be attached to its parent. + On the other hand, constructing of the new node can be too complex to make it under the lock, + that can lead to lock contention. + + When this option is enabled, the new node is created before locking the parent node. + After that, the parent is locked and checked whether the new node can be attached to the parent. + In this case, false node creating can be performed, but locked section can be significantly small. + */ + template + struct relaxed_insert { + //@cond + template struct pack : public Base + { + enum { relaxed_insert = Enable }; + }; + //@endcond + }; + + /// \p BronsonAVLTreeMap traits + /** + Note that there are two main specialization of Bronson et al AVL-tree: + - \ref cds_container_BronsonAVLTreeMap_rcu_ptr "pointer-oriented" - the tree node stores an user-provided pointer to value + - \ref cds_container_BronsonAVLTreeMap_rcu "data-oriented" - the tree node contains a copy of values + + Depends on tree specialization, different traits member can be used. + */ + struct traits + { + /// Key comparison functor + /** + No default functor is provided. If the option is not specified, the \p less is used. + + See \p cds::opt::compare option description for functor interface. + + You should provide \p compare or \p less functor. + */ + typedef opt::none compare; + + /// Specifies binary predicate used for key compare. + /** + See \p cds::opt::less option description for predicate interface. 
+ + You should provide \p compare or \p less functor. + */ + typedef opt::none less; + + /// Allocator for internal node + typedef CDS_DEFAULT_ALLOCATOR node_allocator; + + /// Allocator for node's value (not used in \p BronsonAVLTreeMap specialisation) + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// Disposer (only for pointer-oriented tree specialization) + /** + The functor used for dispose removed values. + The user-provided disposer is used only for pointer-oriented tree specialization + like \p BronsonAVLTreeMap. When the node becomes the routing node without value, + the disposer will be called to signal that the memory for the value can be safely freed. + Default is \ref cds::intrusive::opt::delete_disposer "cds::container::opt::v::delete_disposer<>" which calls \p delete operator. + */ + typedef opt::v::delete_disposer<> disposer; + + /// @ref cds_sync_monitor "Synchronization monitor" type for node-level locking + typedef cds::sync::injecting_monitor sync_monitor; + + /// Enable relaxed insertion. + /** + About relaxed insertion see \p bronson_avltree::relaxed_insert option. + By default, this option is disabled. + */ + static bool const relaxed_insert = false; + + /// Item counter + /** + The type for item counter, by default it is disabled (\p atomicity::empty_item_counter). + To enable it use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter. + */ + typedef atomicity::empty_item_counter item_counter; + + /// C++ memory ordering model + /** + List of available memory ordering see \p opt::memory_model + */ + typedef opt::v::relaxed_ordering memory_model; + + /// Internal statistics + /** + By default, internal statistics is disabled (\p bronson_avltree::empty_stat). + To enable it use \p bronson_avltree::stat. 
+ */ + typedef empty_stat stat; + + /// Back-off strategy + typedef cds::backoff::empty back_off; + + /// RCU deadlock checking policy + /** + List of available options see \p opt::rcu_check_deadlock + */ + typedef cds::opt::v::rcu_throw_deadlock rcu_check_deadlock; + }; + + /// Metafunction converting option list to BronsonAVLTreeMap traits + /** + Note that there are two main specialization of Bronson et al AVL-tree: + - \ref cds_container_BronsonAVLTreeMap_rcu_ptr "pointer-oriented" - the tree node stores an user-provided pointer to value + - \ref cds_container_BronsonAVLTreeMap_rcu "data-oriented" - the tree node contains a copy of values + + Depends on tree specialization, different options can be specified. + + \p Options are: + - \p opt::compare - key compare functor. No default functor is provided. + If the option is not specified, \p %opt::less is used. + - \p opt::less - specifies binary predicate used for key compare. At least \p %opt::compare or \p %opt::less should be defined. + - \p opt::node_allocator - the allocator for internal nodes. Default is \ref CDS_DEFAULT_ALLOCATOR. + - \p opt::allocator - the allocator for node's value. Default is \ref CDS_DEFAULT_ALLOCATOR. + This option is not used in \p BronsonAVLTreeMap specialisation + - \p cds::intrusive::opt::disposer - the functor used for dispose removed values. + The user-provided disposer is used only for pointer-oriented tree specialization + like \p BronsonAVLTreeMap. When the node becomes the rounting node without value, + the disposer will be called to signal that the memory for the value can be safely freed. + Default is \p cds::intrusive::opt::delete_disposer which calls \p delete operator. + Due the nature of GC schema the disposer may be called asynchronously. 
+ - \p opt::sync_monitor - @ref cds_sync_monitor "synchronization monitor" type for node-level locking, + default is \p cds::sync::injecting_monitor + - \p bronson_avltree::relaxed_insert - enable (\p true) or disable (\p false, the default) + @ref bronson_avltree::relaxed_insert "relaxed insertion" + - \p opt::item_counter - the type of item counting feature, by default it is disabled (\p atomicity::empty_item_counter) + To enable it use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter + - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + - \p opt::stat - internal statistics, by default it is disabled (\p bronson_avltree::empty_stat) + To enable statistics use \p \p bronson_avltree::stat + - \p opt::backoff - back-off strategy, by default no strategy is used (\p cds::backoff::empty) + - \p opt::rcu_check_deadlock - a deadlock checking policy for RCU-based tree, default is \p opt::v::rcu_throw_deadlock + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + ,Options... 
+ >::type type; +# endif + }; + } // namespace bronson_avltree + + // Forwards + template < class GC, typename Key, typename T, class Traits = bronson_avltree::traits > + class BronsonAVLTreeMap; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_DETAILS_BRONSON_AVLTREE_BASE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/cuckoo_base.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/cuckoo_base.h new file mode 100644 index 0000000..d0b5b83 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/cuckoo_base.h @@ -0,0 +1,269 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_DETAILS_CUCKOO_BASE_H +#define CDSLIB_CONTAINER_DETAILS_CUCKOO_BASE_H + +#include + +namespace cds { namespace container { + + /// CuckooSet and CuckooMap related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace cuckoo { +#ifdef CDS_DOXYGEN_INVOKED + /// Lock striping concurrent access policy. This is typedef for intrusive::cuckoo::striping template + class striping + {}; +#else + using intrusive::cuckoo::striping; +#endif + +#ifdef CDS_DOXYGEN_INVOKED + /// Refinable concurrent access policy. This is typedef for intrusive::cuckoo::refinable template + class refinable + {}; +#else + using intrusive::cuckoo::refinable; +#endif + +#ifdef CDS_DOXYGEN_INVOKED + /// Striping internal statistics. This is typedef for intrusive::cuckoo::striping_stat + class striping_stat + {}; +#else + using intrusive::cuckoo::striping_stat; +#endif + +#ifdef CDS_DOXYGEN_INVOKED + /// Empty striping internal statistics. This is typedef for intrusive::cuckoo::empty_striping_stat + class empty_striping_stat + {}; +#else + using intrusive::cuckoo::empty_striping_stat; +#endif + +#ifdef CDS_DOXYGEN_INVOKED + /// Refinable internal statistics. This is typedef for intrusive::cuckoo::refinable_stat + class refinable_stat + {}; +#else + using intrusive::cuckoo::refinable_stat; +#endif + +#ifdef CDS_DOXYGEN_INVOKED + /// Empty refinable internal statistics. 
This is typedef for intrusive::cuckoo::empty_refinable_stat + class empty_refinable_stat + {}; +#else + using intrusive::cuckoo::empty_refinable_stat; +#endif + +#ifdef CDS_DOXYGEN_INVOKED + /// Cuckoo statistics. This is typedef for intrusive::cuckoo::stat + class stat + {}; +#else + using intrusive::cuckoo::stat; +#endif + +#ifdef CDS_DOXYGEN_INVOKED + /// Cuckoo empty statistics.This is typedef for intrusive::cuckoo::empty_stat + class empty_stat + {}; +#else + using intrusive::cuckoo::empty_stat; +#endif + + /// Option specifying whether to store hash values in the node + /** + This option reserves additional space in the hook to store the hash value of the object once it's introduced in the container. + When this option is used, the unordered container will store the calculated hash value in the hook and rehashing operations won't need + to recalculate the hash of the value. This option will improve the performance of unordered containers + when rehashing is frequent or hashing the value is a slow operation + + The \p Enable template parameter toggles the feature: + - the value \p true enables storing the hash values + - the value \p false disables storing the hash values + */ + template + struct store_hash + { + //@cond + template + struct pack: public Base { + static bool const store_hash = Enable; + }; + //@endcond + }; + +#ifdef CDS_DOXYGEN_INVOKED + /// Probe set type option + /** + @copydetails cds::intrusive::cuckoo::probeset_type + */ + template + struct probeset_type + {}; +#else + using intrusive::cuckoo::probeset_type; +#endif + + using intrusive::cuckoo::list; + using intrusive::cuckoo::vector; + + /// Type traits for CuckooSet and CuckooMap classes + struct traits + { + /// Hash functors tuple + /** + This is mandatory type and has no predefined one. + + At least, two hash functors should be provided. All hash functor + should be orthogonal (different): for each i,j: i != j => h[i](x) != h[j](x) . 
+ The hash functors are defined as std::tuple< H1, H2, ... Hn > : + \@code cds::opt::hash< std::tuple< h1, h2 > > \@endcode + The number of hash functors specifies the number \p k - the count of hash tables in cuckoo hashing. + + To specify hash tuple in traits you should use \p cds::opt::hash_tuple: + \code + struct my_traits: public cds::container::cuckoo::traits { + typedef cds::opt::hash_tuple< hash1, hash2 > hash; + }; + \endcode + */ + typedef cds::opt::none hash; + + /// Concurrent access policy + /** + Available opt::mutex_policy types: + - cuckoo::striping - simple, but the lock array is not resizable + - cuckoo::refinable - resizable lock array, but more complex access to set data. + + Default is cuckoo::striping. + */ + typedef cuckoo::striping<> mutex_policy; + + /// Key equality functor + /** + Default is std::equal_to + */ + typedef opt::none equal_to; + + /// Key comparison functor + /** + No default functor is provided. If the option is not specified, the \p less is used. + */ + typedef opt::none compare; + + /// specifies binary predicate used for key comparison. + /** + Default is \p std::less. + */ + typedef opt::none less; + + /// Item counter + /** + The type for item counting feature. + Default is cds::atomicity::item_counter + + Only atomic item counter type is allowed. + */ + typedef cds::intrusive::cuckoo::traits::item_counter item_counter; + + /// Allocator type + /** + The allocator type for allocating bucket tables. + Default is \p CDS_DEFAULT_ALLOCATOR that is \p std::allocator + */ + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// Node allocator type + /** + If this type is not set explicitly, the \ref allocator type is used. + */ + typedef opt::none node_allocator; + + /// Store hash value into items. See cuckoo::store_hash for explanation + static bool const store_hash = false; + + /// Probe-set type. 
See \ref probeset_type option for explanation + typedef cuckoo::list probeset_type; + + /// Internal statistics + typedef empty_stat stat; + }; + + /// Metafunction converting option list to CuckooSet/CuckooMap traits + /** + Template argument list \p Options... are: + - \p opt::hash - hash functor tuple, mandatory option. At least, two hash functors should be provided. All hash functor + should be orthogonal (different): for each i,j: i != j => h[i](x) != h[j](x) . + The hash functors are passed as std::tuple< H1, H2, ... Hn > . The number of hash functors specifies + the number \p k - the count of hash tables in cuckoo hashing. + - \p opt::mutex_policy - concurrent access policy. + Available policies: \p cuckoo::striping, \p cuckoo::refinable. + Default is \p %cuckoo::striping. + - \p opt::equal_to - key equality functor like \p std::equal_to. + If this functor is defined then the probe-set will be unordered. + If \p %opt::compare or \p %opt::less option is specified too, then the probe-set will be ordered + and \p %opt::equal_to will be ignored. + - \p opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the \p %opt::less is used. + If \p %opt::compare or \p %opt::less option is specified, then the probe-set will be ordered. + - \p opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + If \p %opt::compare or \p %opt::less option is specified, then the probe-set will be ordered. + - \p opt::item_counter - the type of item counting feature. Default is \p opt::v::sequential_item_counter. + - \p opt::allocator - the allocator type using for allocating bucket tables. + Default is \ref CDS_DEFAULT_ALLOCATOR + - \p opt::node_allocator - the allocator type using for allocating set's items. If this option + is not specified then the type defined in \p %opt::allocator option is used. 
+ - \p cuckoo::store_hash - this option reserves additional space in the node to store the hash value + of the object once it's introduced in the container. When this option is used, + the unordered container will store the calculated hash value in the node and rehashing operations won't need + to recalculate the hash of the value. This option will improve the performance of unordered containers + when rehashing is frequent or hashing the value is a slow operation. Default value is \p false. + - \ref intrusive::cuckoo::probeset_type "cuckoo::probeset_type" - type of probe set, may be \p cuckoo::list or cuckoo::vector, + Default is \p cuckoo::list. + - \p opt::stat - internal statistics. Possibly types: \p cuckoo::stat, \p cuckoo::empty_stat. + Default is \p %cuckoo::empty_stat + */ + template + struct make_traits { + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< cuckoo::traits, Options... >::type + ,Options... + >::type type ; ///< Result of metafunction + }; + } // namespace cuckoo +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_DETAILS_CUCKOO_BASE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/ellen_bintree_base.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/ellen_bintree_base.h new file mode 100644 index 0000000..5daf245 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/ellen_bintree_base.h @@ -0,0 +1,460 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of 
conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_CONTAINER_DETAILS_ELLEN_BINTREE_BASE_H +#define CDSLIB_CONTAINER_DETAILS_ELLEN_BINTREE_BASE_H + +#include +#include +#include +#include + + +namespace cds { namespace container { + /// EllenBinTree related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace ellen_bintree { +#ifdef CDS_DOXYGEN_INVOKED + /// Typedef for \p cds::intrusive::ellen_bintree::update_desc + typedef cds::intrusive::ellen_bintree::update_desc update_desc; + + /// Typedef for \p cds::intrusive::ellen_bintree::internal_node + typedef cds::intrusive::ellen_bintree::internal_node internal_node; + + /// Typedef for \p cds::intrusive::ellen_bintree::key_extractor + typedef cds::intrusive::ellen_bintree::key_extractor key_extractor; + + /// Typedef for \p cds::intrusive::ellen_bintree::update_desc_allocator + typedef cds::intrusive::ellen_bintree::update_desc_allocator update_desc_allocator; +#else + using cds::intrusive::ellen_bintree::update_desc; + using cds::intrusive::ellen_bintree::internal_node; + using cds::intrusive::ellen_bintree::key_extractor; + using cds::intrusive::ellen_bintree::update_desc_allocator; + using cds::intrusive::ellen_bintree::node_types; +#endif + /// EllenBinTree internal statistics + template ::event_counter > + using stat = cds::intrusive::ellen_bintree::stat< Counter >; + + /// EllenBinTree empty internal statistics + typedef cds::intrusive::ellen_bintree::empty_stat empty_stat; + + /// EllenBinTree leaf node + template + struct node: public cds::intrusive::ellen_bintree::node + { + typedef T value_type ; ///< Value type + + T m_Value ; ///< Value + + /// Default ctor + node() + {} + + /// Initializing ctor + template + node(Q const& v) + : m_Value(v) + {} + + /// Copy constructor + template + node( Args const&... args ) + : m_Value( args... ) + {} + + /// Move constructor + template + node( Args&&... args ) + : m_Value( std::forward( args )... 
) + {} + }; + + /// EllenBinTreeMap leaf node + template + struct map_node: public cds::intrusive::ellen_bintree::node< GC > + { + typedef Key key_type ; ///< key type + typedef T mapped_type ; ///< value type + typedef std::pair value_type ; ///< key-value pair stored in the map + + value_type m_Value ; ///< Key-value pair stored in map leaf node + + /// Initializes key field, value if default-constructed + template + map_node( K const& key ) + : m_Value( std::make_pair( key_type(key), mapped_type())) + {} + + /// Initializes key and value fields + template + map_node( K const& key, Q const& v ) + : m_Value( std::make_pair(key_type(key), mapped_type(v))) + {} + }; + + /// Type traits for \p EllenBinTreeSet and \p EllenBinTreeMap + struct traits + { + /// Key extracting functor (only for \p EllenBinTreeSet) + /** + This is mandatory functor for \p %EllenBinTreeSet. + It has the following prototype: + \code + struct key_extractor { + void operator ()( Key& dest, T const& src ); + }; + \endcode + It should initialize \p dest key from \p src data. + The functor is used to initialize internal nodes of \p %EllenBinTreeSet + */ + typedef opt::none key_extractor; + + /// Key comparison functor + /** + No default functor is provided. If the option is not specified, the \p less is used. + + See \p cds::opt::compare option description for functor interface. + + You should provide \p compare or \p less functor. + See \ref cds_container_EllenBinTreeSet_rcu_less "predicate requirements". + */ + typedef opt::none compare; + + /// Specifies binary predicate used for key compare. + /** + See \p cds::opt::less option description. + + You should provide \p compare or \p less functor. + See \ref cds_container_EllenBinTreeSet_rcu_less "predicate requirements". + */ + typedef opt::none less; + + /// Item counter + /** + The type for item counter, by default it is disabled (\p atomicity::empty_item_counter). 
+ To enable it use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter + */ + typedef atomicity::empty_item_counter item_counter; + + /// C++ memory ordering model + /** + List of available memory ordering see \p opt::memory_model + */ + typedef opt::v::relaxed_ordering memory_model; + + /// Allocator for update descriptors + /** + The allocator type is used for \p ellen_bintree::update_desc. + + Update descriptor is helping data structure with short lifetime and it is good candidate + for pooling. The number of simultaneously existing descriptors is a small number + limited the number of threads working with the tree. + Therefore, a bounded lock-free container like \p cds::container::VyukovMPMCCycleQueue + is good choice for the free-list of update descriptors, + see \p cds::memory::vyukov_queue_pool free-list implementation. + + Also notice that size of update descriptor is not dependent on the type of data + stored in the tree so single free-list object can be used for several \p EllenBinTree object. + */ + typedef CDS_DEFAULT_ALLOCATOR update_desc_allocator; + + /// Allocator for internal nodes + /** + The allocator type is used for \p ellen_bintree::internal_node. + */ + typedef CDS_DEFAULT_ALLOCATOR node_allocator; + + /// Allocator for leaf nodes + /** + Each leaf node contains data stored in the container. + */ + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// Internal statistics + /** + By default, internal statistics is disabled (\p ellen_bintree::empty_stat). + To enable it use \p ellen_bintree::stat. 
+ */ + typedef empty_stat stat; + + /// Back-off strategy + typedef cds::backoff::empty back_off; + + /// RCU deadlock checking policy (only for RCU-based EllenBinTreeXXX classes) + /** + List of available options see \p opt::rcu_check_deadlock + */ + typedef cds::opt::v::rcu_throw_deadlock rcu_check_deadlock; + + /// Key copy policy (for \p EllenBinTreeMap) + /** + The key copy policy defines a functor to copy leaf node's key to internal node. + This policy is used only in \p EllenBinTreeMap. + By default, assignment operator is used. + + The copy functor interface is: + \code + struct copy_functor { + void operator()( Key& dest, Key const& src ); + }; + \endcode + */ + typedef opt::none copy_policy; + }; + + + /// Metafunction converting option list to \p EllenBinTreeSet traits + /** + \p Options are: + - \p ellen_bintree::key_extractor - key extracting functor, mandatory option. The functor has the following prototype: + \code + struct key_extractor { + void operator ()( Key& dest, T const& src ); + }; + \endcode + It should initialize \p dest key from \p src data. The functor is used to initialize internal nodes. + - \p opt::compare - key compare functor. No default functor is provided. + If the option is not specified, \p %opt::less is used. + - \p opt::less - specifies binary predicate used for key compare. At least \p %opt::compare or \p %opt::less should be defined. + - \p opt::item_counter - the type of item counter, default is disabled (\p atomicity::empty_item_counter). + To enable it use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter + - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + - \p opt::allocator - the allocator for \ref ellen_bintree::node "leaf nodes" which contains data. + Default is \ref CDS_DEFAULT_ALLOCATOR. 
+ - \p opt::node_allocator - the allocator for internal nodes. Default is \ref CDS_DEFAULT_ALLOCATOR. + - \p ellen_bintree::update_desc_allocator - an allocator of \ref ellen_bintree::update_desc "update descriptors", + default is \ref CDS_DEFAULT_ALLOCATOR. + Note that update descriptor is helping data structure with short lifetime and it is good candidate for pooling. + The number of simultaneously existing descriptors is a relatively small number limited the number of threads + working with the tree and RCU buffer size. + Therefore, a bounded lock-free container like \p cds::container::VyukovMPMCCycleQueue is good choice for the free-list + of update descriptors, see \p cds::memory::vyukov_queue_pool free-list implementation. + Also notice that size of update descriptor is not dependent on the type of data + stored in the tree so single free-list object can be used for several EllenBinTree-based object. + - \p opt::stat - internal statistics, by default disabled (\p ellen_bintree::empty_stat). To enable + it use \p ellen_bintree::stat. + - \p opt::backoff - back-off strategy, by default no strategy is used (\p cds::backoff::empty) + - \p opt::rcu_check_deadlock - a deadlock checking policy, only for RCU-based tree. + Default is \p opt::v::rcu_throw_deadlock. + */ + template + struct make_set_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + ,Options... + >::type type; +# endif + }; + + /// Metafunction converting option list to \p EllenBinTreeMap traits + /** + \p Options are: + - \p opt::compare - key compare functor. No default functor is provided. + If the option is not specified, \p %opt::less is used. + - \p opt::less - specifies binary predicate used for key compare. At least \p %opt::compare or \p %opt::less should be defined. 
+ - \p opt::item_counter - the type of item counter, default is disabled (\p atomicity::empty_item_counter). + To enable it use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter + - opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + - \p opt::allocator - the allocator used for \ref ellen_bintree::map_node "leaf nodes" which contains data. + Default is \ref CDS_DEFAULT_ALLOCATOR. + - \p opt::node_allocator - the allocator used for \ref ellen_bintree::internal_node "internal nodes". + Default is \ref CDS_DEFAULT_ALLOCATOR. + - \p ellen_bintree::update_desc_allocator - an allocator of \ref ellen_bintree::update_desc "update descriptors", + default is \ref CDS_DEFAULT_ALLOCATOR. + Note that update descriptor is helping data structure with short lifetime and it is good candidate for pooling. + The number of simultaneously existing descriptors is a relatively small number limited the number of threads + working with the tree and RCU buffer size. + Therefore, a bounded lock-free container like \p cds::container::VyukovMPMCCycleQueue is good choice for the free-list + of update descriptors, see \p cds::memory::vyukov_queue_pool free-list implementation. + Also notice that size of update descriptor is not dependent on the type of data + stored in the tree so single free-list object can be used for several EllenBinTree-based object. + - \p opt::stat - internal statistics, by default disabled (\p ellen_bintree::empty_stat). To enable + it use \p ellen_bintree::stat. + - \p opt::backoff - back-off strategy, by default no strategy is used (\p cds::backoff::empty) + - \p opt::rcu_check_deadlock - a deadlock checking policy, only for RCU-based tree. Default is \p opt::v::rcu_throw_deadlock + - opt::copy_policy - key copying policy defines a functor to copy leaf node's key to internal node. 
+ By default, assignment operator is used. + The copy functor interface is: + \code + struct copy_functor { + void operator()( Key& dest, Key const& src ); + }; + \endcode + */ + template + struct make_map_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + ,Options... + >::type type; +# endif + }; + + //@cond + namespace details { + + template < class GC, typename Key, typename T, class Traits> + struct make_ellen_bintree_set + { + typedef GC gc; + typedef Key key_type; + typedef T value_type; + typedef Traits original_traits; + + typedef node< gc, value_type > leaf_node; + + struct intrusive_key_extractor + { + void operator()( key_type& dest, leaf_node const& src ) const + { + typename original_traits::key_extractor()( dest, src.m_Value ); + } + }; + + struct value_accessor + { + value_type const& operator()( leaf_node const& node ) const + { + return node.m_Value; + } + }; + + typedef typename cds::opt::details::make_comparator< value_type, original_traits, false >::type key_comparator; + + typedef cds::details::Allocator< leaf_node, typename original_traits::allocator> cxx_leaf_node_allocator; + struct leaf_deallocator + { + void operator()( leaf_node * p ) const + { + cxx_leaf_node_allocator().Delete( p ); + } + }; + + struct intrusive_traits: public original_traits + { + typedef cds::intrusive::ellen_bintree::base_hook< cds::opt::gc< gc >> hook; + typedef intrusive_key_extractor key_extractor; + typedef leaf_deallocator disposer; + typedef cds::details::compare_wrapper< leaf_node, key_comparator, value_accessor > compare; + }; + + // Metafunction result + typedef cds::intrusive::EllenBinTree< gc, key_type, leaf_node, intrusive_traits > type; + }; + + template < class GC, typename Key, typename T, class Traits> + struct make_ellen_bintree_map + { + typedef GC gc; + typedef Key key_type; + 
typedef T mapped_type; + typedef map_node< gc, key_type, mapped_type > leaf_node; + typedef typename leaf_node::value_type value_type; + + typedef Traits original_traits; + + struct assignment_copy_policy { + void operator()( key_type& dest, key_type const& src ) + { + dest = src; + } + }; + typedef typename std::conditional< + std::is_same< typename original_traits::copy_policy, opt::none >::value, + assignment_copy_policy, + typename original_traits::copy_policy + >::type copy_policy; + + struct intrusive_key_extractor + { + void operator()( key_type& dest, leaf_node const& src ) const + { + copy_policy()( dest, src.m_Value.first ); + } + }; + + struct key_accessor + { + key_type const& operator()( leaf_node const& node ) const + { + return node.m_Value.first; + } + }; + + typedef typename cds::opt::details::make_comparator< key_type, original_traits, false >::type key_comparator; + + typedef cds::details::Allocator< leaf_node, typename original_traits::allocator> cxx_leaf_node_allocator; + struct leaf_deallocator + { + void operator()( leaf_node * p ) const + { + cxx_leaf_node_allocator().Delete( p ); + } + }; + + struct intrusive_traits: public original_traits + { + typedef cds::intrusive::ellen_bintree::base_hook< cds::opt::gc< gc > > hook; + typedef intrusive_key_extractor key_extractor; + typedef leaf_deallocator disposer; + typedef cds::details::compare_wrapper< leaf_node, key_comparator, key_accessor > compare; + }; + + // Metafunction result + typedef cds::intrusive::EllenBinTree< gc, key_type, leaf_node, intrusive_traits > type; + }; + + } // namespace details + //@endcond + } // namespace ellen_bintree + + // Forward declarations + //@cond + template < class GC, typename Key, typename T, class Traits = ellen_bintree::traits > + class EllenBinTreeSet; + + template < class GC, typename Key, typename T, class Traits = ellen_bintree::traits > + class EllenBinTreeMap; + //@endcond + +}} // namespace cds::container + +#endif // #ifndef 
CDSLIB_CONTAINER_DETAILS_ELLEN_BINTREE_BASE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/feldman_hashmap_base.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/feldman_hashmap_base.h new file mode 100644 index 0000000..11abb02 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/feldman_hashmap_base.h @@ -0,0 +1,391 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_CONTAINER_DETAILS_FELDMAN_HASHMAP_BASE_H +#define CDSLIB_CONTAINER_DETAILS_FELDMAN_HASHMAP_BASE_H + +#include +#include +#include + +namespace cds { namespace container { + /// \p FeldmanHashMap related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace feldman_hashmap { + /// \p FeldmanHashMap internal statistics, see cds::intrusive::feldman_hashset::stat + template + using stat = cds::intrusive::feldman_hashset::stat< EventCounter >; + + /// \p FeldmanHashMap empty internal statistics + typedef cds::intrusive::feldman_hashset::empty_stat empty_stat; + + /// Bit-wise memcmp-based comparator for hash value \p T + template + using bitwise_compare = cds::intrusive::feldman_hashset::bitwise_compare< T >; + + /// \p FeldmanHashMap level statistics + typedef cds::intrusive::feldman_hashset::level_statistics level_statistics; + + /// Key size option + /** + @copydetails cds::container::feldman_hashmap::traits::hash_size + */ + template + using hash_size = cds::intrusive::feldman_hashset::hash_size< Size >; + + /// Hash splitter option + /** + @copydetails cds::container::feldman_hashmap::traits::hash_splitter + */ + template + using hash_splitter = cds::intrusive::feldman_hashset::hash_splitter< Splitter >; + + + /// \p FeldmanHashMap traits + struct traits + { + /// Hash functor, default is \p opt::none + /** + \p FeldmanHashMap may use any hash functor converting a key to + fixed-sized bit-string, for example, SHA1, SHA2, + MurmurHash, + CityHash + or its successor FarmHash. + + If you use a fixed-sized key you can use it directly instead of a hash. + In such case \p %traits::hash should be specified as \p opt::none. + However, if you want to use the hash values or if your key type is not fixed-sized + you must specify a proper hash functor in your traits. 
+ For example: + fixed-sized key - IP4 address map + @code + // Key - IP address + struct ip4_address { + uint8_t ip[4]; + }; + + // IP compare + struct ip4_cmp { + int operator()( ip4_address const& lhs, ip4_address const& rhs ) const + { + return memcmp( &lhs, &rhs, sizeof(lhs)); + } + }; + + // Value - statistics for the IP address + struct statistics { + // ... + }; + + // Traits + // Key type (ip4_addr) is fixed-sized so we may use the map without any hash functor + struct ip4_map_traits: public cds::container::multilevl_hashmap::traits + { + typedef ip4_cmp compare; + }; + + // IP4 address - statistics map + typedef cds::container::FeldmanHashMap< cds::gc::HP, ip4_address, statistics, ip4_map_traits > ip4_map; + @endcode + + variable-size key requires a hash functor: URL map + @code + // Value - statistics for the URL + struct statistics { + // ... + }; + + // Traits + // Key type (std::string) is variable-sized so we must provide a hash functor in our traits + // We do not specify any comparing predicate (less or compare) so std::less will be used by default + struct url_map_traits: public cds::container::multilevl_hashmap::traits + { + typedef std::hash hash; + }; + + // URL statistics map + typedef cds::container::FeldmanHashMap< cds::gc::HP, std::string, statistics, url_map_traits > url_map; + @endcode + */ + typedef opt::none hash; + + /// The size of hash value in bytes + /** + By default, the size of hash value is sizeof( hash_type ) + where \p hash_type is type of \p hash() result or sizeof( key ) if you use fixed-sized key. + + Sometimes that size is wrong, for example, for that 6-byte key: + \code + struct key_type { + uint32_t key; + uint16_t subkey; + }; + + static_assert( sizeof( key_type ) == 6, "Key type size mismatch" ); + \endcode + Here sizeof( key_type ) == 8 so \p static_assert will be thrown. + + For that case you can specify \p hash_size explicitly. + + Value \p 0 means auto-calculated sizeof( key_type ). 
+ */ + static constexpr size_t const hash_size = 0; + + /// Hash splitter + /** + @copydetails cds::intrusive::feldman_hashset::traits::hash_splitter + */ + typedef cds::opt::none hash_splitter; + + /// Hash comparing functor + /** + @copydetails cds::intrusive::feldman_hashset::traits::compare + */ + typedef cds::opt::none compare; + + /// Specifies binary predicate used for hash compare. + /** + @copydetails cds::intrusive::feldman_hashset::traits::less + */ + typedef cds::opt::none less; + + /// Item counter + /** + @copydetails cds::intrusive::feldman_hashset::traits::item_counter + */ + typedef cds::atomicity::item_counter item_counter; + + /// Item allocator + /** + Default is \ref CDS_DEFAULT_ALLOCATOR + */ + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// Array node allocator + /** + @copydetails cds::intrusive::feldman_hashset::traits::node_allocator + */ + typedef CDS_DEFAULT_ALLOCATOR node_allocator; + + /// C++ memory ordering model + /** + @copydetails cds::intrusive::feldman_hashset::traits::memory_model + */ + typedef cds::opt::v::relaxed_ordering memory_model; + + /// Back-off strategy + typedef cds::backoff::Default back_off; + + /// Internal statistics + /** + @copydetails cds::intrusive::feldman_hashset::traits::stat + */ + typedef empty_stat stat; + + /// RCU deadlock checking policy (only for \ref cds_container_FeldmanHashMap_rcu "RCU-based FeldmanHashMap") + /** + @copydetails cds::intrusive::feldman_hashset::traits::rcu_check_deadlock + */ + typedef cds::opt::v::rcu_throw_deadlock rcu_check_deadlock; + }; + + /// Metafunction converting option list to \p feldman_hashmap::traits + /** + Supported \p Options are: + - \p opt::hash - a hash functor, default is \p std::hash + @copydetails traits::hash + - \p feldman_hashmap::hash_size - the size of hash value in bytes. + @copydetails traits::hash_size + - \p opt::allocator - item allocator + @copydetails traits::allocator + - \p opt::node_allocator - array node allocator. 
+ @copydetails traits::node_allocator + - \p opt::compare - hash comparison functor. No default functor is provided. + If the option is not specified, the \p opt::less is used. + - \p opt::less - specifies binary predicate used for hash comparison. + @copydetails cds::container::feldman_hashmap::traits::less + - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used. + - \p opt::item_counter - the type of item counting feature. + @copydetails cds::container::feldman_hashmap::traits::item_counter + - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + - \p opt::stat - internal statistics. By default, it is disabled (\p feldman_hashmap::empty_stat). + To enable it use \p feldman_hashmap::stat + - \p opt::rcu_check_deadlock - a deadlock checking policy for \ref cds_intrusive_FeldmanHashSet_rcu "RCU-based FeldmanHashSet" + Default is \p opt::v::rcu_throw_deadlock + */ + template + struct make_traits + { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + ,Options... 
+ >::type type; +# endif + }; + } // namespace feldman_hashmap + + //@cond + // Forward declaration + template < class GC, typename Key, typename T, class Traits = feldman_hashmap::traits > + class FeldmanHashMap; + //@endcond + + //@cond + namespace details { + + template + struct hash_selector + { + typedef Key key_type; + typedef Value mapped_type; + typedef Hash hasher; + + typedef typename std::decay< + typename std::remove_reference< + decltype(hasher()(std::declval())) + >::type + >::type hash_type; + + struct node_type + { + std::pair< key_type const, mapped_type> m_Value; + hash_type const m_hash; + + node_type() = delete; + node_type(node_type const&) = delete; + + template + node_type(hasher& h, Q const& key) + : m_Value( std::move( std::make_pair( key_type( key ), mapped_type()))) + , m_hash( h( m_Value.first )) + {} + + template + node_type(hasher& h, Q const& key, U const& val) + : m_Value( std::move( std::make_pair( key_type( key ), mapped_type(val)))) + , m_hash( h( m_Value.first )) + {} + + template + node_type(hasher& h, Q&& key, Args&&... args) + : m_Value( std::move(std::make_pair( key_type( std::forward(key)), std::move( mapped_type(std::forward(args)...))))) + , m_hash( h( m_Value.first )) + {} + }; + + struct hash_accessor + { + hash_type const& operator()(node_type const& node) const + { + return node.m_hash; + } + }; + }; + + template + struct hash_selector + { + typedef Key key_type; + typedef Value mapped_type; + + struct hasher { + key_type const& operator()(key_type const& k) const + { + return k; + } + }; + typedef key_type hash_type; + + struct node_type + { + std::pair< key_type const, mapped_type> m_Value; + + node_type() = delete; + node_type(node_type const&) = delete; + + template + node_type( hasher /*h*/, Q&& key, Args&&... 
args ) + : m_Value( std::make_pair( key_type( std::forward( key )), mapped_type( std::forward(args)...))) + {} + }; + + struct hash_accessor + { + hash_type const& operator()(node_type const& node) const + { + return node.m_Value.first; + } + }; + }; + + template + struct make_feldman_hashmap + { + typedef GC gc; + typedef Key key_type; + typedef T mapped_type; + typedef Traits original_traits; + + + typedef hash_selector< key_type, mapped_type, typename original_traits::hash > select; + typedef typename select::hasher hasher; + typedef typename select::hash_type hash_type; + typedef typename select::node_type node_type; + + typedef cds::details::Allocator< node_type, typename original_traits::allocator > cxx_node_allocator; + + struct node_disposer + { + void operator()( node_type * p ) const + { + cxx_node_allocator().Delete( p ); + } + }; + + struct intrusive_traits: public original_traits + { + typedef typename select::hash_accessor hash_accessor; + typedef node_disposer disposer; + }; + + // Metafunction result + typedef cds::intrusive::FeldmanHashSet< GC, node_type, intrusive_traits > type; + }; + } // namespace details + //@endcond + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_DETAILS_FELDMAN_HASHMAP_BASE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/feldman_hashset_base.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/feldman_hashset_base.h new file mode 100644 index 0000000..545f3f6 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/feldman_hashset_base.h @@ -0,0 +1,230 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted 
provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_CONTAINER_DETAILS_FELDMAN_HASHSET_BASE_H +#define CDSLIB_CONTAINER_DETAILS_FELDMAN_HASHSET_BASE_H + +#include +#include + +namespace cds { namespace container { + /// \p FeldmanHashSet related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace feldman_hashset { + /// Hash accessor option + /** + @copydetails cds::intrusive::feldman_hashset::traits::hash_accessor + */ + template + using hash_accessor = cds::intrusive::feldman_hashset::hash_accessor< Accessor >; + + /// Hash size option + /** + @copydetails cds::intrusive::feldman_hashset::traits::hash_size + */ + template + using hash_size = cds::intrusive::feldman_hashset::hash_size< Size >; + + /// Hash splitter + /** + @copydetails cds::intrusive::feldman_hashset::traits::hash_splitter + */ + template + using hash_splitter = cds::intrusive::feldman_hashset::hash_splitter< Splitter >; + + /// \p FeldmanHashSet internal statistics, see cds::intrusive::feldman_hashset::stat + template + using stat = cds::intrusive::feldman_hashset::stat< EventCounter >; + + /// \p FeldmanHashSet empty internal statistics + typedef cds::intrusive::feldman_hashset::empty_stat empty_stat; + + /// Bit-wise memcmp-based comparator for hash value \p T + template + using bitwise_compare = cds::intrusive::feldman_hashset::bitwise_compare< T >; + + /// \p FeldmanHashSet level statistics + typedef cds::intrusive::feldman_hashset::level_statistics level_statistics; + + /// \p FeldmanHashSet traits + struct traits + { + /// Mandatory functor to get hash value from data node + /** + @copydetails cds::intrusive::feldman_hashset::traits::hash_accessor + */ + typedef cds::opt::none hash_accessor; + + /// The size of hash value in bytes + /** + @copydetails cds::intrusive::feldman_hashset::traits::hash_size + */ + static constexpr size_t const hash_size = 0; + + /// Hash splitter + /** + @copydetails cds::intrusive::feldman_hashset::traits::hash_splitter + */ + typedef cds::opt::none hash_splitter; + + /// Hash 
comparing functor + /** + @copydetails cds::intrusive::feldman_hashset::traits::compare + */ + typedef cds::opt::none compare; + + /// Specifies binary predicate used for hash compare. + /** + @copydetails cds::intrusive::feldman_hashset::traits::less + */ + typedef cds::opt::none less; + + /// Item counter + /** + @copydetails cds::intrusive::feldman_hashset::traits::item_counter + */ + typedef cds::atomicity::item_counter item_counter; + + /// Item allocator + /** + Default is \ref CDS_DEFAULT_ALLOCATOR + */ + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// Array node allocator + /** + @copydetails cds::intrusive::feldman_hashset::traits::node_allocator + */ + typedef CDS_DEFAULT_ALLOCATOR node_allocator; + + /// C++ memory ordering model + /** + @copydetails cds::intrusive::feldman_hashset::traits::memory_model + */ + typedef cds::opt::v::relaxed_ordering memory_model; + + /// Back-off strategy + typedef cds::backoff::Default back_off; + + /// Internal statistics + /** + @copydetails cds::intrusive::feldman_hashset::traits::stat + */ + typedef empty_stat stat; + + /// RCU deadlock checking policy (only for \ref cds_container_FeldmanHashSet_rcu "RCU-based FeldmanHashSet") + /** + @copydetails cds::intrusive::feldman_hashset::traits::rcu_check_deadlock + */ + typedef cds::opt::v::rcu_throw_deadlock rcu_check_deadlock; + }; + + /// Metafunction converting option list to \p feldman_hashset::traits + /** + Supported \p Options are: + - \p feldman_hashset::hash_accessor - mandatory option, hash accessor functor. + @copydetails traits::hash_accessor + - \p feldman_hashset::hash_size - the size of hash value in bytes. + @copydetails traits::hash_size + - \p feldman_hashset::hash_splitter - a hash splitter algorithm + @copydetails traits::hash_splitter + - \p opt::allocator - item allocator + @copydetails traits::allocator + - \p opt::node_allocator - array node allocator. + @copydetails traits::node_allocator + - \p opt::compare - hash comparison functor. 
No default functor is provided. + If the option is not specified, the \p opt::less is used. + - \p opt::less - specifies binary predicate used for hash comparison. + @copydetails cds::container::feldman_hashset::traits::less + - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used. + - \p opt::item_counter - the type of item counting feature. + @copydetails cds::intrusive::feldman_hashset::traits::item_counter + - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + - \p opt::stat - internal statistics. By default, it is disabled (\p feldman_hashset::empty_stat). + To enable it use \p feldman_hashset::stat + - \p opt::rcu_check_deadlock - a deadlock checking policy for \ref cds_intrusive_FeldmanHashSet_rcu "RCU-based FeldmanHashSet" + Default is \p opt::v::rcu_throw_deadlock + */ + template + struct make_traits + { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + ,Options... 
+ >::type type; +# endif + }; + } // namespace feldman_hashset + + //@cond + // Forward declaration + template < class GC, typename T, class Traits = cds::container::feldman_hashset::traits > + class FeldmanHashSet; + //@endcond + + //@cond + namespace details { + + template + struct make_feldman_hashset + { + typedef GC gc; + typedef T value_type; + typedef Traits original_traits; + + typedef cds::details::Allocator< value_type, typename original_traits::allocator > cxx_node_allocator; + + struct node_disposer + { + void operator()( value_type * p ) const + { + cxx_node_allocator().Delete( p ); + } + }; + + struct intrusive_traits: public original_traits + { + typedef node_disposer disposer; + }; + + // Metafunction result + typedef cds::intrusive::FeldmanHashSet< GC, T, intrusive_traits > type; + }; + } // namespace details + //@endcond + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_DETAILS_FELDMAN_HASHSET_BASE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/guarded_ptr_cast.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/guarded_ptr_cast.h new file mode 100644 index 0000000..c07676d --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/guarded_ptr_cast.h @@ -0,0 +1,58 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_DETAILS_GUARDED_PTR_CAST_H +#define CDSLIB_CONTAINER_DETAILS_GUARDED_PTR_CAST_H +//@cond + +#include + +namespace cds { namespace container { namespace details { + + template + struct guarded_ptr_cast_set { + T * operator()(Node* pNode ) const noexcept + { + return &(pNode->m_Value); + } + }; + + template + struct guarded_ptr_cast_map { + T * operator()(Node* pNode ) const noexcept + { + return &(pNode->m_Data); + } + }; + +}}} // namespace cds::container::details + +//@endcond +#endif // #ifndef CDSLIB_CONTAINER_DETAILS_GUARDED_PTR_CAST_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/iterable_list_base.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/iterable_list_base.h new file mode 100644 index 0000000..6991801 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/iterable_list_base.h @@ -0,0 +1,152 @@ +/* + This file is a part of libcds - Concurrent Data 
Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_CONTAINER_DETAILS_ITERABLE_LIST_BASE_H +#define CDSLIB_CONTAINER_DETAILS_ITERABLE_LIST_BASE_H + +#include +#include +#include + +namespace cds { namespace container { + + /// \p IterableList ordered list related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace iterable_list { + + /// \p IterableList internal statistics, see \p cds::intrusive::iterable_list::stat + template ::event_counter > + using stat = cds::intrusive::iterable_list::stat< EventCounter >; + + /// \p IterableList empty internal statistics, see \p cds::intrusive::iterable_list::empty_stat + typedef cds::intrusive::iterable_list::empty_stat empty_stat; + + //@cond + template ::stat_type > + using wrapped_stat = cds::intrusive::iterable_list::wrapped_stat< Stat >; + //@endif + + /// \p IterableList traits + struct traits + { + /// Allocator used to allocate new data + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// Node allocator + typedef intrusive::iterable_list::traits::node_allocator node_allocator; + + /// Key comparison functor + /** + No default functor is provided. If the option is not specified, the \p less is used. + */ + typedef opt::none compare; + + /// Specifies binary predicate used for key comparison. + /** + Default is \p std::less. + */ + typedef opt::none less; + + /// Back-off strategy + typedef intrusive::iterable_list::traits::back_off back_off; + + /// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter or \p atomicity::cache_friendly_item_counter to enable item counting + typedef intrusive::iterable_list::traits::item_counter item_counter; + + /// Internal statistics + /** + By default, internal statistics is disabled (\p iterable_list::empty_stat). + Use \p iterable_list::stat to enable it. 
+ */ + typedef intrusive::iterable_list::traits::stat stat; + + /// C++ memory ordering model + /** + Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + */ + typedef opt::v::relaxed_ordering memory_model; + + /// RCU deadlock checking policy (only for \ref cds_intrusive_MichaelList_rcu "RCU-based MichaelList") + /** + List of available options see opt::rcu_check_deadlock + */ + typedef opt::v::rcu_throw_deadlock rcu_check_deadlock; + + //@cond + // IterableKVList: supporting for split-ordered list + // key accessor (opt::none = internal key type is equal to user key type) + typedef opt::none key_accessor; + //@endcond + }; + + /// Metafunction converting option list to \p iterable_list::traits + /** + Supported \p Options are: + - \p opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the \p opt::less is used. + - \p opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - \p opt::allocator - an allocator for data, default is \p CDS_DEFAULT_ALLOCATOR + - \p opt::node_allocator - node allocator, default is \p std::allocator. + - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used. + - \p opt::item_counter - the type of item counting feature. Default is disabled (\p atomicity::empty_item_counter). + To enable item counting use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter. + - \p opt::stat - internal statistics. By default, it is disabled (\p iterable_list::empty_stat). + To enable it use \p iterable_list::stat + - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consistent memory model). 
+ */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + ,Options... + >::type type; +#endif + }; + + + } // namespace iterable_list + + // Forward declarations + template + class IterableList; + + template + class IterableKVList; + +}} // namespace cds::container + + +#endif // #ifndef CDSLIB_CONTAINER_DETAILS_ITERABLE_LIST_BASE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/lazy_list_base.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/lazy_list_base.h new file mode 100644 index 0000000..95e6f2f --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/lazy_list_base.h @@ -0,0 +1,189 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_DETAILS_LAZY_LIST_BASE_H +#define CDSLIB_CONTAINER_DETAILS_LAZY_LIST_BASE_H + +#include +#include +#include + +namespace cds { namespace container { + + /// \p LazyList ordered list related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace lazy_list { + + /// \p LazyList internal statistics, see \p cds::intrusive::lazy_list::stat + template ::event_counter> + using stat = cds::intrusive::lazy_list::stat< EventCounter >; + + /// \p LazyList empty internal statistics, see \p cds::intrusive::lazy_list::empty_stat + typedef cds::intrusive::lazy_list::empty_stat empty_stat; + + //@cond + template ::stat_type> + using wrapped_stat = cds::intrusive::lazy_list::wrapped_stat< Stat >; + //@endif + + /// LazyList traits + /** + Either \p compare or \p less or both must be specified. + */ + struct traits + { + /// allocator used to allocate new node + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// Key comparing functor + /** + No default functor is provided. If the option is not specified, the \p less is used. + */ + typedef opt::none compare; + + /// Specifies binary predicate used for key comparing + /** + Default is \p std::less. + */ + typedef opt::none less; + + /// Specifies binary functor used for comparing keys for equality + /** + No default functor is provided. If \p equal_to option is not spcified, \p compare is used, if \p compare is not + specified, \p less is used. 
+ */ + typedef opt::none equal_to; + + /// Specifies list ordering policy. + /** + If \p sort is \p true, than list maintains items in sorted order, otherwise items are unordered. Default is \p true. + Note that if \p sort is \p false then lookup operations scan entire list. + */ + static const bool sort = true; + + /// Lock type used to lock modifying items + /** + Default is cds::sync::spin + */ + typedef cds::sync::spin lock_type; + + /// back-off strategy used + typedef cds::backoff::Default back_off; + + /// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter to enable item counting + typedef atomicity::empty_item_counter item_counter; + + /// Internal statistics + /** + By default, internal statistics is disabled (\p lazy_list::empty_stat). + Use \p lazy_list::stat to enable it. + */ + typedef empty_stat stat; + + /// C++ memory ordering model + /** + Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consistent memory model). + */ + typedef opt::v::relaxed_ordering memory_model; + + /// RCU deadlock checking policy (only for \ref cds_intrusive_LazyList_rcu "RCU-based LazyList") + /** + List of available options see \p opt::rcu_check_deadlock + */ + typedef opt::v::rcu_throw_deadlock rcu_check_deadlock; + + //@cond + // LazyKVList: supporting for split-ordered list + // key accessor (opt::none = internal key type is equal to user key type) + typedef opt::none key_accessor; + + //@endcond + }; + + /// Metafunction converting option list to \p lazy_list::traits + /** + \p Options are: + - \p opt::lock_type - lock type for node-level locking. Default \p is cds::sync::spin. Note that each node + of the list has member of type \p lock_type, therefore, heavy-weighted locking primitive is not + acceptable as candidate for \p lock_type. + - \p opt::compare - key compare functor. No default functor is provided. 
+ If the option is not specified, the \p opt::less is used. + - \p opt::less - specifies binary predicate used for key compare. Default is \p std::less. + - \p opt::equal_to - specifies binary functor for comparing keys for equality. This option is applicable only for unordered list. + No default is provided. If \p equal_to is not specified, \p compare is used, if \p compare is not specified, \p less is used. + - \p opt::sort - specifies ordering policy. Default value is \p true, i.e. the list is ordered. + Note: unordering feature is not fully supported yet. + - \p opt::back_off - back-off strategy used. If the option is not specified, \p cds::backoff::Default is used. + - \p opt::item_counter - the type of item counting feature. Default is disabled (\p atomicity::empty_item_counter). + To enable item counting use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter + - \p opt::stat - internal statistics. By default, it is disabled (\p lazy_list::empty_stat). + To enable it use \p lazy_list::stat + - \p opt::allocator - the allocator used for creating and freeing list's item. Default is \ref CDS_DEFAULT_ALLOCATOR macro. + - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consistent memory model). + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + ,Options... + >::type type; +#endif + }; + + + } // namespace lazy_list + + // Forward declarations + template + class LazyList; + + template + class LazyKVList; + + // Tag for selecting lazy list implementation + /** + This empty struct is used only as a tag for selecting \p LazyList + as ordered list implementation in declaration of some classes. 
+ + See \p split_list::traits::ordered_list as an example. + */ + struct lazy_list_tag + {}; + +}} // namespace cds::container + + +#endif // #ifndef CDSLIB_CONTAINER_DETAILS_LAZY_LIST_BASE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_iterable_kvlist.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_iterable_kvlist.h new file mode 100644 index 0000000..0c76e95 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_iterable_kvlist.h @@ -0,0 +1,103 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_DETAILS_MAKE_ITERABLE_KVLIST_H +#define CDSLIB_CONTAINER_DETAILS_MAKE_ITERABLE_KVLIST_H + +#include +#include + +namespace cds { namespace container { + + //@cond + namespace details { + + template + struct make_iterable_kvlist + { + typedef Traits original_type_traits; + + typedef GC gc; + typedef K key_type; + typedef T mapped_type; + typedef std::pair value_type; + + typedef typename original_type_traits::allocator::template rebind::other data_allocator_type; + typedef cds::details::Allocator< value_type, data_allocator_type > cxx_data_allocator; + + typedef typename original_type_traits::memory_model memory_model; + + struct data_disposer + { + void operator ()( value_type * pData ) + { + cxx_data_allocator().Delete( pData ); + } + }; + + struct key_field_accessor { + key_type const& operator()( value_type const& data ) + { + return data.first; + } + }; + + template + struct less_wrapper + { + template + bool operator()( value_type const& lhs, Q const& rhs ) const + { + return Less()( lhs.first, rhs ); + } + + template + bool operator()( Q const& lhs, value_type const& rhs ) const + { + return Less()( lhs, rhs.first ); + } + }; + + typedef typename opt::details::make_comparator< key_type, original_type_traits >::type key_comparator; + + struct base_traits: public original_type_traits + { + typedef data_disposer disposer; + typedef cds::details::compare_wrapper< value_type, key_comparator, key_field_accessor > 
compare; + }; + + typedef container::IterableList type; + }; + } // namespace details + //@endcond + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_ITERABLE_KVLIST_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_iterable_list.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_iterable_list.h new file mode 100644 index 0000000..3cd54fd --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_iterable_list.h @@ -0,0 +1,82 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_DETAILS_MAKE_ITERABLE_LIST_H +#define CDSLIB_CONTAINER_DETAILS_MAKE_ITERABLE_LIST_H + +#include +#include + +namespace cds { namespace container { + + //@cond + namespace details { + + template + struct make_iterable_list + { + typedef GC gc; + typedef T value_type; + + typedef Traits original_traits; + + typedef typename original_traits::allocator::template rebind::other data_allocator_type; + typedef cds::details::Allocator< value_type, data_allocator_type > cxx_data_allocator; + + typedef typename original_traits::memory_model memory_model; + + struct data_disposer + { + void operator ()( value_type* data ) + { + cxx_data_allocator().Delete( data ); + } + }; + + template + struct less_wrapper { + typedef cds::opt::details::make_comparator_from_less type; + }; + + struct intrusive_traits: public original_traits + { + typedef data_disposer disposer; + }; + + typedef intrusive::IterableList type; + + typedef typename type::key_comparator key_comparator; + }; + } // namespace details + //@endcond + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_ITERABLE_LIST_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_lazy_kvlist.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_lazy_kvlist.h new file mode 100644 index 0000000..788c18b --- /dev/null +++ 
b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_lazy_kvlist.h @@ -0,0 +1,160 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_CONTAINER_DETAILS_MAKE_LAZY_KVLIST_H +#define CDSLIB_CONTAINER_DETAILS_MAKE_LAZY_KVLIST_H + +#include + +namespace cds { namespace container { + + //@cond + namespace details { + + template + struct make_lazy_kvlist + { + typedef Traits original_type_traits; + + typedef GC gc; + typedef K key_type; + typedef T mapped_type; + typedef std::pair value_type; + + struct node_type: public intrusive::lazy_list::node + { + value_type m_Data; + + node_type( key_type const& key ) + : m_Data( key, mapped_type()) + {} + + template + node_type( Q const& key ) + : m_Data( key_type( key ), mapped_type()) + {} + + template + explicit node_type( std::pair const& pair ) + : m_Data( pair ) + {} + + node_type( key_type const& key, mapped_type const& value ) + : m_Data( key, value ) + {} + + template + node_type( key_type const& key, R const& value ) + : m_Data( key, mapped_type( value )) + {} + + template + node_type( Q const& key, mapped_type const& value ) + : m_Data( key_type( key ), value ) + {} + + template + node_type( Q const& key, R const& value ) + : m_Data( key_type( key ), mapped_type( value )) + {} + + template + node_type( Ky&& key, Args&&... args ) + : m_Data( key_type( std::forward( key )), std::move( mapped_type( std::forward( args )... 
))) + {} + }; + + typedef typename original_type_traits::allocator::template rebind::other allocator_type; + typedef cds::details::Allocator< node_type, allocator_type > cxx_allocator; + + struct node_deallocator + { + void operator ()( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + }; + + struct key_field_accessor { + key_type const& operator()( node_type const& pair ) + { + return pair.m_Data.first; + } + }; + + typedef typename std::conditional< original_type_traits::sort, + typename opt::details::make_comparator< value_type, original_type_traits >::type, + typename opt::details::make_equal_to< value_type, original_type_traits >::type + >::type key_comparator; + + + template + struct less_wrapper { + typedef cds::details::compare_wrapper< node_type, cds::opt::details::make_comparator_from_less, key_field_accessor > type; + }; + + template + struct equal_to_wrapper { + typedef cds::details::predicate_wrapper< node_type, Equal, key_field_accessor > type; + }; + + struct intrusive_traits: public original_type_traits + { + typedef intrusive::lazy_list::base_hook< opt::gc, opt::lock_type< typename original_type_traits::lock_type >> hook; + typedef node_deallocator disposer; + + typedef typename std::conditional< std::is_same< typename original_type_traits::equal_to, cds::opt::none >::value, + cds::opt::none, + typename equal_to_wrapper< typename original_type_traits::equal_to >::type + >::type equal_to; + + typedef typename std::conditional< + original_type_traits::sort + || !std::is_same< typename original_type_traits::compare, cds::opt::none >::value + || !std::is_same< typename original_type_traits::less, cds::opt::none >::value, + cds::details::compare_wrapper< + node_type, + typename opt::details::make_comparator< value_type, original_type_traits >::type, + key_field_accessor + >, + cds::opt::none + >::type compare; + + static const opt::link_check_type link_checker = cds::intrusive::lazy_list::traits::link_checker; + }; + + typedef 
intrusive::LazyList type; + }; + } // namespace details + //@endcond + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_LAZY_KVLIST_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_lazy_list.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_lazy_list.h new file mode 100644 index 0000000..375d7b8 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_lazy_list.h @@ -0,0 +1,129 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_DETAILS_MAKE_LAZY_LIST_H +#define CDSLIB_CONTAINER_DETAILS_MAKE_LAZY_LIST_H + +#include + +namespace cds { namespace container { + + //@cond + namespace details { + + template + struct make_lazy_list + { + typedef GC gc; + typedef T value_type; + typedef Traits original_type_traits; + + struct node_type : public intrusive::lazy_list::node + { + value_type m_Value; + + node_type() + {} + + template + node_type( Q const& v ) + : m_Value(v) + {} + template + node_type( Args&&... args ) + : m_Value( std::forward(args)...) 
+ {} + }; + + typedef typename original_type_traits::allocator::template rebind::other allocator_type; + typedef cds::details::Allocator< node_type, allocator_type > cxx_allocator; + + struct node_deallocator + { + void operator ()( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + }; + + typedef typename std::conditional< original_type_traits::sort, + typename opt::details::make_comparator< value_type, original_type_traits >::type, + typename opt::details::make_equal_to< value_type, original_type_traits >::type + >::type key_comparator; + + struct value_accessor { + value_type const & operator()( node_type const & node ) const + { + return node.m_Value; + } + }; + + template + struct less_wrapper { + typedef cds::details::compare_wrapper< node_type, cds::opt::details::make_comparator_from_less, value_accessor > type; + }; + + template + struct equal_to_wrapper { + typedef cds::details::predicate_wrapper< node_type, Equal, value_accessor > type; + }; + + struct intrusive_traits: public original_type_traits + { + typedef intrusive::lazy_list::base_hook< opt::gc, cds::opt::lock_type< typename original_type_traits::lock_type >> hook; + typedef node_deallocator disposer; + static constexpr const opt::link_check_type link_checker = cds::intrusive::lazy_list::traits::link_checker; + + typedef typename std::conditional< std::is_same< typename original_type_traits::equal_to, cds::opt::none >::value, + cds::opt::none, + typename equal_to_wrapper< typename original_type_traits::equal_to >::type + >::type equal_to; + + typedef typename std::conditional< + original_type_traits::sort + || !std::is_same::value + || !std::is_same::value, + cds::details::compare_wrapper< + node_type, + typename opt::details::make_comparator< value_type, original_type_traits >::type, + value_accessor + >, + cds::opt::none + >::type compare; + }; + + typedef intrusive::LazyList type; + }; + } // namespace details + //@endcond + +}} // namespace cds::container + +#endif // #ifndef 
CDSLIB_CONTAINER_DETAILS_MAKE_MICHAEL_LIST_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_michael_kvlist.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_michael_kvlist.h new file mode 100644 index 0000000..cba16f1 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_michael_kvlist.h @@ -0,0 +1,134 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_CONTAINER_DETAILS_MAKE_MICHAEL_KVLIST_H +#define CDSLIB_CONTAINER_DETAILS_MAKE_MICHAEL_KVLIST_H + +#include + +namespace cds { namespace container { + + //@cond + namespace details { + + template + struct make_michael_kvlist + { + typedef Traits original_type_traits; + + typedef GC gc; + typedef K key_type; + typedef T value_type; + typedef std::pair pair_type; + + struct node_type: public intrusive::michael_list::node + { + pair_type m_Data; + + node_type( key_type const& key ) + : m_Data( key, value_type()) + {} + + template + node_type( Q const& key ) + : m_Data( key_type(key), value_type()) + {} + + template + explicit node_type( std::pair const& pair ) + : m_Data( pair ) + {} + + node_type( key_type const& key, value_type const& value ) + : m_Data( key, value ) + {} + + template + node_type( key_type const& key, R const& value ) + : m_Data( key, value_type( value )) + {} + + template + node_type( Q const& key, value_type const& value ) + : m_Data( key_type( key ), value ) + {} + + template + node_type( Q const& key, R const& value ) + : m_Data( key_type( key ), value_type( value )) + {} + + template< typename Ky, typename... Args> + node_type( Ky&& key, Args&&... 
args ) + : m_Data( key_type( std::forward(key)), std::move( value_type( std::forward(args)...))) + {} + }; + + typedef typename original_type_traits::allocator::template rebind::other allocator_type; + typedef cds::details::Allocator< node_type, allocator_type > cxx_allocator; + + struct node_deallocator + { + void operator ()( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + }; + + struct key_field_accessor { + key_type const& operator()( node_type const& pair ) + { + return pair.m_Data.first; + } + }; + + typedef typename opt::details::make_comparator< key_type, original_type_traits >::type key_comparator; + + template + struct less_wrapper { + typedef cds::details::compare_wrapper< node_type, cds::opt::details::make_comparator_from_less, key_field_accessor > type; + }; + + struct intrusive_traits: public original_type_traits + { + typedef intrusive::michael_list::base_hook< opt::gc > hook; + typedef node_deallocator disposer; + typedef cds::details::compare_wrapper< node_type, key_comparator, key_field_accessor > compare; + static const opt::link_check_type link_checker = intrusive::michael_list::traits::link_checker; + }; + + typedef intrusive::MichaelList type; + }; + } // namespace details + //@endcond + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_MICHAEL_KVLIST_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_michael_list.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_michael_list.h new file mode 100644 index 0000000..dd14053 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_michael_list.h @@ -0,0 +1,108 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and 
use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_DETAILS_MAKE_MICHAEL_LIST_H +#define CDSLIB_CONTAINER_DETAILS_MAKE_MICHAEL_LIST_H + +#include + +namespace cds { namespace container { + + //@cond + namespace details { + + template + struct make_michael_list + { + typedef GC gc; + typedef T value_type; + + struct node_type : public intrusive::michael_list::node + { + value_type m_Value; + + node_type() + {} + + template + node_type( Q const& v ) + : m_Value(v) + {} + + template + node_type( Args&&... args ) + : m_Value( std::forward(args)... 
) + {} + }; + + typedef Traits original_traits; + + typedef typename original_traits::allocator::template rebind::other allocator_type; + typedef cds::details::Allocator< node_type, allocator_type > cxx_allocator; + + struct node_deallocator + { + void operator ()( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + }; + + typedef typename opt::details::make_comparator< value_type, original_traits >::type key_comparator; + + struct value_accessor + { + value_type const & operator()( node_type const& node ) const + { + return node.m_Value; + } + }; + + template + struct less_wrapper { + typedef cds::details::compare_wrapper< node_type, cds::opt::details::make_comparator_from_less, value_accessor > type; + }; + + struct intrusive_traits: public original_traits + { + typedef intrusive::michael_list::base_hook< opt::gc > hook; + typedef node_deallocator disposer; + typedef cds::details::compare_wrapper< node_type, key_comparator, value_accessor > compare; + static constexpr const opt::link_check_type link_checker = cds::intrusive::michael_list::traits::link_checker; + }; + + typedef intrusive::MichaelList type; + }; + } // namespace details + //@endcond + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_MICHAEL_LIST_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_skip_list_map.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_skip_list_map.h new file mode 100644 index 0000000..8cb6fab --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_skip_list_map.h @@ -0,0 +1,149 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, 
are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_CONTAINER_DETAILS_MAKE_SKIP_LIST_MAP_H +#define CDSLIB_CONTAINER_DETAILS_MAKE_SKIP_LIST_MAP_H + +#include +#include + +//@cond +namespace cds { namespace container { namespace details { + + template + struct make_skip_list_map + { + typedef GC gc; + typedef K key_type; + typedef T mapped_type; + typedef std::pair< key_type const, mapped_type> value_type; + typedef Traits traits; + + typedef cds::intrusive::skip_list::node< gc > intrusive_node_type; + struct node_type: public intrusive_node_type + { + typedef intrusive_node_type base_class; + typedef typename base_class::atomic_marked_ptr atomic_marked_ptr; + typedef value_type stored_value_type; + + value_type m_Value; + //atomic_marked_ptr m_arrTower[] ; // allocated together with node_type in single memory block + + template + node_type( unsigned int nHeight, atomic_marked_ptr * pTower, Q&& key ) + : m_Value( std::make_pair( std::forward( key ), mapped_type())) + { + init_tower( nHeight, pTower ); + } + + template + node_type( unsigned int nHeight, atomic_marked_ptr * pTower, Q&& key, Args&&... args ) + : m_Value( std::forward(key), mapped_type( std::forward(args)... )) + { + init_tower( nHeight, pTower ); + } + + node_type() = delete; + + private: + void init_tower( unsigned int nHeight, atomic_marked_ptr * pTower ) + { + if ( nHeight > 1 ) { + new (pTower) atomic_marked_ptr[ nHeight - 1 ]; + base_class::make_tower( nHeight, pTower ); + } + } + }; + + class node_allocator : public skip_list::details::node_allocator< node_type, traits> + { + typedef skip_list::details::node_allocator< node_type, traits> base_class; + public: + template + node_type * New( unsigned int nHeight, Q const& key ) + { + return base_class::New( nHeight, key_type( key )); + } + template + node_type * New( unsigned int nHeight, Q const& key, U const& val ) + { + unsigned char * pMem = base_class::alloc_space( nHeight ); + return new( pMem ) + node_type( nHeight, + nHeight > 1 ? 
reinterpret_cast( pMem + base_class::c_nNodeSize ) : nullptr, + key_type( key ), mapped_type( val ) + ); + } + template + node_type * New( unsigned int nHeight, Args&&... args ) + { + unsigned char * pMem = base_class::alloc_space( nHeight ); + return new( pMem ) + node_type( nHeight, + nHeight > 1 ? reinterpret_cast( pMem + base_class::c_nNodeSize ) : nullptr, + std::forward(args)... + ); + } + }; + + struct node_deallocator { + void operator ()( node_type * pNode ) + { + node_allocator().Delete( pNode ); + } + }; + + typedef skip_list::details::dummy_node_builder dummy_node_builder; + + struct key_accessor + { + key_type const & operator()( node_type const& node ) const + { + return node.m_Value.first; + } + }; + typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator; + + class intrusive_type_traits: public cds::intrusive::skip_list::make_traits< + cds::opt::type_traits< traits > + ,cds::intrusive::opt::hook< intrusive::skip_list::base_hook< cds::opt::gc< gc > > > + ,cds::intrusive::opt::disposer< node_deallocator > + ,cds::intrusive::skip_list::internal_node_builder< dummy_node_builder > + ,cds::opt::compare< cds::details::compare_wrapper< node_type, key_comparator, key_accessor > > + >::type + {}; + + typedef cds::intrusive::SkipListSet< gc, node_type, intrusive_type_traits> type; + }; + +}}} // namespace cds::container::details +//@endcond + +#endif // CDSLIB_CONTAINER_DETAILS_MAKE_SKIP_LIST_MAP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_skip_list_set.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_skip_list_set.h new file mode 100644 index 0000000..c88dd81 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_skip_list_set.h @@ -0,0 +1,122 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: 
http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_CONTAINER_DETAILS_MAKE_SKIP_LIST_SET_H +#define CDSLIB_CONTAINER_DETAILS_MAKE_SKIP_LIST_SET_H + +#include +#include + +//@cond +namespace cds { namespace container { namespace details { + + template + struct make_skip_list_set + { + typedef GC gc; + typedef T value_type; + typedef Traits traits; + + typedef cds::intrusive::skip_list::node< gc > intrusive_node_type; + struct node_type: public intrusive_node_type + { + typedef intrusive_node_type base_class; + typedef typename base_class::atomic_marked_ptr atomic_marked_ptr; + typedef value_type stored_value_type; + + value_type m_Value; + //atomic_marked_ptr m_arrTower[] ; // allocated together with node_type in single memory block + + template + node_type( unsigned int nHeight, atomic_marked_ptr * pTower, Q&& v ) + : m_Value( std::forward( v )) + { + init_tower( nHeight, pTower ); + } + + template + node_type( unsigned int nHeight, atomic_marked_ptr * pTower, Q&& q, Args&&... args ) + : m_Value( std::forward(q), std::forward(args)... 
) + { + init_tower( nHeight, pTower ); + } + + node_type() = delete; + + private: + void init_tower( unsigned nHeight, atomic_marked_ptr* pTower ) + { + if ( nHeight > 1 ) { + new ( pTower ) atomic_marked_ptr[nHeight - 1]; + base_class::make_tower( nHeight, pTower ); + } + } + }; + + typedef skip_list::details::node_allocator< node_type, traits> node_allocator; + + struct node_deallocator { + void operator ()( node_type * pNode ) + { + node_allocator().Delete( pNode ); + } + }; + + typedef skip_list::details::dummy_node_builder dummy_node_builder; + + struct value_accessor + { + value_type const& operator()( node_type const& node ) const + { + return node.m_Value; + } + }; + typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; + + template + struct less_wrapper { + typedef cds::details::compare_wrapper< node_type, cds::opt::details::make_comparator_from_less, value_accessor > type; + }; + + class intrusive_traits: public cds::intrusive::skip_list::make_traits< + cds::opt::type_traits< traits > + ,cds::intrusive::opt::hook< intrusive::skip_list::base_hook< cds::opt::gc< gc > > > + ,cds::intrusive::opt::disposer< node_deallocator > + ,cds::intrusive::skip_list::internal_node_builder< dummy_node_builder > + ,cds::opt::compare< cds::details::compare_wrapper< node_type, key_comparator, value_accessor > > + >::type + {}; + + typedef cds::intrusive::SkipListSet< gc, node_type, intrusive_traits> type; + }; +}}} // namespace cds::container::details +//@endcond + +#endif //#ifndef CDSLIB_CONTAINER_DETAILS_MAKE_SKIP_LIST_SET_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_split_list_set.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_split_list_set.h new file mode 100644 index 0000000..c2e03e4 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_split_list_set.h @@ -0,0 +1,61 @@ +/* + This file is a part of libcds - 
Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_CONTAINER_DETAILS_MAKE_SPLIT_LIST_SET_H +#define CDSLIB_CONTAINER_DETAILS_MAKE_SPLIT_LIST_SET_H + +#include +#include +#include + +//@cond +namespace cds { namespace container { + + // Forward declaration + struct michael_list_tag; + struct lazy_list_tag; + +}} // namespace cds::container +//@endcond + + +#ifdef CDSLIB_CONTAINER_DETAILS_MICHAEL_LIST_BASE_H +# include +#endif + +#ifdef CDSLIB_CONTAINER_DETAILS_LAZY_LIST_BASE_H +# include +#endif + +#ifdef CDSLIB_CONTAINER_DETAILS_ITERABLE_LIST_BASE_H +# include +#endif + +#endif // #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_SPLIT_LIST_SET_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_split_list_set_iterable_list.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_split_list_set_iterable_list.h new file mode 100644 index 0000000..4a505bb --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_split_list_set_iterable_list.h @@ -0,0 +1,138 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_DETAILS_MAKE_SPLIT_LIST_SET_ITERABLE_LIST_H +#define CDSLIB_CONTAINER_DETAILS_MAKE_SPLIT_LIST_SET_ITERABLE_LIST_H + +//@cond +namespace cds { namespace container { namespace details { + + template + struct make_split_list_set< GC, T, iterable_list_tag, Traits > + { + typedef GC gc; + typedef T value_type; + typedef Traits original_traits; + + typedef typename cds::opt::select_default< + typename original_traits::ordered_list_traits, + cds::container::iterable_list::traits + >::type original_ordered_list_traits; + + struct node_type: public cds::intrusive::split_list::node< void > + { + value_type m_Value; + + template + explicit node_type( Q&& v ) + : m_Value( std::forward( v )) + {} + + template + explicit node_type( Q&& q, Args&&... args ) + : m_Value( std::forward(q), std::forward(args)... 
) + {} + + node_type() = delete; + }; + + typedef typename cds::opt::select_default< + typename original_traits::ordered_list_traits, + typename original_traits::allocator, + typename cds::opt::select_default< + typename original_traits::ordered_list_traits::allocator, + typename original_traits::allocator + >::type + >::type node_allocator_; + + typedef typename node_allocator_::template rebind::other node_allocator_type; + + typedef cds::details::Allocator< node_type, node_allocator_type > cxx_node_allocator; + struct node_deallocator + { + void operator ()( node_type * pNode ) + { + cxx_node_allocator().Delete( pNode ); + } + }; + + typedef typename opt::details::make_comparator< value_type, original_ordered_list_traits >::type key_comparator; + + typedef typename original_traits::key_accessor key_accessor; + + struct value_accessor + { + typename key_accessor::key_type const& operator()( node_type const& node ) const + { + return key_accessor()(node.m_Value); + } + }; + + template + struct predicate_wrapper { + typedef cds::details::predicate_wrapper< node_type, Predicate, value_accessor > type; + }; + + struct ordered_list_traits: public original_ordered_list_traits + { + typedef cds::atomicity::empty_item_counter item_counter; + typedef node_deallocator disposer; + typedef cds::details::compare_wrapper< node_type, key_comparator, value_accessor > compare; + }; + + struct traits: public original_traits + { + struct hash: public original_traits::hash + { + typedef typename original_traits::hash base_class; + + size_t operator()(node_type const& v ) const + { + return base_class::operator()( key_accessor()( v.m_Value )); + } + + template + size_t operator()( Q const& k ) const + { + return base_class::operator()( k ); + } + }; + }; + + class ordered_list: public cds::intrusive::IterableList< gc, node_type, ordered_list_traits > + {}; + + typedef cds::intrusive::SplitListSet< gc, ordered_list, traits > type; + }; + +}}} // namespace cds::container::details 
+//@endcond + +#endif // #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_SPLIT_LIST_SET_ITERABLE_LIST_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_split_list_set_lazy_list.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_split_list_set_lazy_list.h new file mode 100644 index 0000000..d633f10 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_split_list_set_lazy_list.h @@ -0,0 +1,147 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_DETAILS_MAKE_SPLIT_LIST_SET_LAZY_LIST_H +#define CDSLIB_CONTAINER_DETAILS_MAKE_SPLIT_LIST_SET_LAZY_LIST_H + +//@cond +namespace cds { namespace container { namespace details { + + template + struct make_split_list_set< GC, T, lazy_list_tag, Traits > + { + typedef GC gc; + typedef T value_type; + typedef Traits original_traits; + + typedef typename cds::opt::select_default< + typename original_traits::ordered_list_traits, + cds::container::lazy_list::traits + >::type original_ordered_list_traits; + + typedef typename cds::opt::select_default< + typename original_ordered_list_traits::lock_type, + typename cds::container::lazy_list::traits::lock_type + >::type lock_type; + + typedef cds::intrusive::split_list::node< cds::intrusive::lazy_list::node > primary_node_type; + struct node_type: public primary_node_type + { + value_type m_Value; + + template + explicit node_type( Q&& v ) + : m_Value( std::forward( v )) + {} + + template + explicit node_type( Q&& q, Args&&... args ) + : m_Value( std::forward(q), std::forward(args)... 
) + {} + + node_type() = delete; + }; + + typedef typename cds::opt::select_default< + typename original_traits::ordered_list_traits, + typename original_traits::allocator, + typename cds::opt::select_default< + typename original_traits::ordered_list_traits::allocator, + typename original_traits::allocator + >::type + >::type node_allocator_; + + typedef typename node_allocator_::template rebind::other node_allocator_type; + + typedef cds::details::Allocator< node_type, node_allocator_type > cxx_node_allocator; + struct node_deallocator + { + void operator ()( node_type * pNode ) + { + cxx_node_allocator().Delete( pNode ); + } + }; + + typedef typename opt::details::make_comparator< value_type, original_ordered_list_traits >::type key_comparator; + + typedef typename original_traits::key_accessor key_accessor; + + struct value_accessor + { + typename key_accessor::key_type const & operator()( node_type const & node ) const + { + return key_accessor()(node.m_Value); + } + }; + + template + struct predicate_wrapper { + typedef cds::details::predicate_wrapper< node_type, Predicate, value_accessor > type; + }; + + struct ordered_list_traits: public original_ordered_list_traits + { + typedef cds::intrusive::lazy_list::base_hook< + opt::gc + ,opt::lock_type< lock_type > + > hook; + typedef cds::atomicity::empty_item_counter item_counter; + typedef node_deallocator disposer; + typedef cds::details::compare_wrapper< node_type, key_comparator, value_accessor > compare; + static constexpr const opt::link_check_type link_checker = cds::intrusive::lazy_list::traits::link_checker; + }; + + struct traits: public original_traits + { + struct hash: public original_traits::hash + { + typedef typename original_traits::hash base_class; + + size_t operator()(node_type const& v ) const + { + return base_class::operator()( key_accessor()( v.m_Value )); + } + template + size_t operator()( Q const& k ) const + { + return base_class::operator()( k ); + } + }; + }; + + class ordered_list: 
public cds::intrusive::LazyList< gc, node_type, ordered_list_traits > + {}; + + typedef cds::intrusive::SplitListSet< gc, ordered_list, traits > type; + }; +}}} // namespace cds::container::details +//@endcond + +#endif // #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_SPLIT_LIST_SET_LAZY_LIST_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_split_list_set_michael_list.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_split_list_set_michael_list.h new file mode 100644 index 0000000..a7be789 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/make_split_list_set_michael_list.h @@ -0,0 +1,141 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_DETAILS_MAKE_SPLIT_LIST_SET_MICHAEL_LIST_H +#define CDSLIB_CONTAINER_DETAILS_MAKE_SPLIT_LIST_SET_MICHAEL_LIST_H + +//@cond +namespace cds { namespace container { namespace details { + + template + struct make_split_list_set< GC, T, michael_list_tag, Traits > + { + typedef GC gc; + typedef T value_type; + typedef Traits original_traits; + + typedef typename cds::opt::select_default< + typename original_traits::ordered_list_traits, + cds::container::michael_list::traits + >::type original_ordered_list_traits; + + typedef cds::intrusive::split_list::node< cds::intrusive::michael_list::node > primary_node_type; + struct node_type: public primary_node_type + { + value_type m_Value; + + template + explicit node_type( Q&& v ) + : m_Value( std::forward( v )) + {} + template + explicit node_type( Q&& q, Args&&... args ) + : m_Value( std::forward(q), std::forward(args)... 
) + {} + + node_type() = delete; + }; + + typedef typename cds::opt::select_default< + typename original_traits::ordered_list_traits, + typename original_traits::allocator, + typename cds::opt::select_default< + typename original_traits::ordered_list_traits::allocator, + typename original_traits::allocator + >::type + >::type node_allocator_; + + typedef typename node_allocator_::template rebind::other node_allocator_type; + + typedef cds::details::Allocator< node_type, node_allocator_type > cxx_node_allocator; + struct node_deallocator + { + void operator ()( node_type * pNode ) + { + cxx_node_allocator().Delete( pNode ); + } + }; + + typedef typename opt::details::make_comparator< value_type, original_ordered_list_traits >::type key_comparator; + + typedef typename original_traits::key_accessor key_accessor; + + struct value_accessor + { + typename key_accessor::key_type const& operator()( node_type const& node ) const + { + return key_accessor()(node.m_Value); + } + }; + + template + struct predicate_wrapper { + typedef cds::details::predicate_wrapper< node_type, Predicate, value_accessor > type; + }; + + struct ordered_list_traits: public original_ordered_list_traits + { + typedef cds::intrusive::michael_list::base_hook< + opt::gc + > hook; + typedef cds::atomicity::empty_item_counter item_counter; + typedef node_deallocator disposer; + typedef cds::details::compare_wrapper< node_type, key_comparator, value_accessor > compare; + static constexpr const opt::link_check_type link_checker = cds::intrusive::michael_list::traits::link_checker; + }; + + struct traits: public original_traits + { + struct hash: public original_traits::hash + { + typedef typename original_traits::hash base_class; + + size_t operator()(node_type const& v ) const + { + return base_class::operator()( key_accessor()( v.m_Value )); + } + template + size_t operator()( Q const& k ) const + { + return base_class::operator()( k ); + } + }; + }; + + class ordered_list: public 
cds::intrusive::MichaelList< gc, node_type, ordered_list_traits > + {}; + + typedef cds::intrusive::SplitListSet< gc, ordered_list, traits > type; + }; + +}}} // namespace cds::container::details +//@endcond + +#endif // #ifndef CDSLIB_CONTAINER_DETAILS_MAKE_SPLIT_LIST_SET_MICHAEL_LIST_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/michael_list_base.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/michael_list_base.h new file mode 100644 index 0000000..866f4a8 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/michael_list_base.h @@ -0,0 +1,159 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_DETAILS_MICHAEL_LIST_BASE_H +#define CDSLIB_CONTAINER_DETAILS_MICHAEL_LIST_BASE_H + +#include +#include +#include + +namespace cds { namespace container { + + /// MichaelList ordered list related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace michael_list { + + /// \p MichaelList internal statistics, see \p cds::intrusive::michael_list::stat + template ::event_counter > + using stat = cds::intrusive::michael_list::stat< EventCounter >; + + /// \p MichaelList empty internal statistics, see \p cds::intrusive::michael_list::empty_stat + typedef cds::intrusive::michael_list::empty_stat empty_stat; + + //@cond + template ::stat_type> + using wrapped_stat = cds::intrusive::michael_list::wrapped_stat< Stat >; + //@endif + + /// MichaelList traits + struct traits + { + typedef CDS_DEFAULT_ALLOCATOR allocator; ///< allocator used to allocate new node + + /// Key comparison functor + /** + No default functor is provided. If the option is not specified, the \p less is used. + */ + typedef opt::none compare; + + /// Specifies binary predicate used for key comparison. + /** + Default is \p std::less. + */ + typedef opt::none less; + + /// Back-off strategy + typedef cds::backoff::empty back_off; + + /// Item counting feature; by default, disabled. 
Use \p cds::atomicity::item_counter or \p atomicity::cache_friendly_item_counter to enable item counting + typedef atomicity::empty_item_counter item_counter; + + /// Internal statistics + /** + By default, internal statistics is disabled (\p michael_list::empty_stat). + Use \p michael_list::stat to enable it. + */ + typedef empty_stat stat; + + /// C++ memory ordering model + /** + Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + */ + typedef opt::v::relaxed_ordering memory_model; + + /// RCU deadlock checking policy (only for \ref cds_intrusive_MichaelList_rcu "RCU-based MichaelList") + /** + List of available options see opt::rcu_check_deadlock + */ + typedef opt::v::rcu_throw_deadlock rcu_check_deadlock; + + //@cond + // MichaelKVList: supporting for split-ordered list + // key accessor (opt::none = internal key type is equal to user key type) + typedef opt::none key_accessor; + //@endcond + }; + + /// Metafunction converting option list to \p michael_list::traits + /** + Supported \p Options are: + - \p opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the \p opt::less is used. + - \p opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - \p opt::allocator - an allocator, default is \p CDS_DEFAULT_ALLOCATOR + - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used. + - \p opt::item_counter - the type of item counting feature. Default is disabled (\p atomicity::empty_item_counter). + To enable item counting use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter + - \p opt::stat - internal statistics. By default, it is disabled (\p michael_list::empty_stat). + To enable it use \p michael_list::stat + - \p opt::memory_model - C++ memory ordering model. 
Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consistent memory model). + - \p opt::rcu_check_deadlock - a deadlock checking policy for \ref cds_intrusive_MichaelList_rcu "RCU-based MichaelList" + Default is \p opt::v::rcu_throw_deadlock + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + ,Options... + >::type type; +#endif + }; + + + } // namespace michael_list + + // Forward declarations + template + class MichaelList; + + template + class MichaelKVList; + + // Tag for selecting Michael's list implementation + /** + This struct is empty and it is used only as a tag for selecting \p MichaelList + as ordered list implementation in declaration of some classes. + + See \p split_list::traits::ordered_list as an example. 
+ */ + struct michael_list_tag + {}; + +}} // namespace cds::container + + +#endif // #ifndef CDSLIB_CONTAINER_DETAILS_MICHAEL_LIST_BASE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/michael_map_base.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/michael_map_base.h new file mode 100644 index 0000000..c1181d1 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/michael_map_base.h @@ -0,0 +1,66 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_DETAILS_MICHAEL_MAP_BASE_H +#define CDSLIB_CONTAINER_DETAILS_MICHAEL_MAP_BASE_H + +#include + +namespace cds { namespace container { + + /// MichaelHashMap related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace michael_map { + /// \p MichaelHashMap traits + typedef container::michael_set::traits traits; + + /// Metafunction converting option list to \p michael_map::traits + template + using make_traits = cds::intrusive::michael_set::make_traits< Options... 
>; + + //@cond + namespace details { + using michael_set::details::init_hash_bitmask; + } + //@endcond + + } // namespace michael_map + + //@cond + // Forward declarations + template + class MichaelHashMap; + //@endcond + +}} // namespace cds::container + + +#endif // ifndef CDSLIB_CONTAINER_DETAILS_MICHAEL_MAP_BASE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/michael_set_base.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/michael_set_base.h new file mode 100644 index 0000000..30ed9e8 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/michael_set_base.h @@ -0,0 +1,66 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_DETAILS_MICHAEL_SET_BASE_H +#define CDSLIB_CONTAINER_DETAILS_MICHAEL_SET_BASE_H + +#include + +namespace cds { namespace container { + + /// MichaelHashSet related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace michael_set { + /// MichaelHashSet traits + typedef cds::intrusive::michael_set::traits traits; + + /// Metafunction converting option list to \p michael_set::traits + template + using make_traits = cds::intrusive::michael_set::make_traits< Options... 
>; + + //@cond + namespace details { + using cds::intrusive::michael_set::details::init_hash_bitmask; + using cds::intrusive::michael_set::details::list_iterator_selector; + using cds::intrusive::michael_set::details::iterator; + } + //@endcond + } + + //@cond + // Forward declarations + template + class MichaelHashSet; + //@endcond + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_DETAILS_MICHAEL_SET_BASE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/skip_list_base.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/skip_list_base.h new file mode 100644 index 0000000..2480e4f --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/skip_list_base.h @@ -0,0 +1,356 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_DETAILS_SKIP_LIST_BASE_H +#define CDSLIB_CONTAINER_DETAILS_SKIP_LIST_BASE_H + +#include +#include + +namespace cds { namespace container { + + /// SkipListSet related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace skip_list { + /// Option specifying random level generator + template + using random_level_generator = cds::intrusive::skip_list::random_level_generator; + + /// Xor-shift random level generator + template + using xor_shift = cds::intrusive::skip_list::xor_shift; + + /// Xor-shift random level generator, max height 32 + typedef cds::intrusive::skip_list::xorshift32 xorshift32; + + /// Xor-shift random level generator, max height 24 + typedef cds::intrusive::skip_list::xorshift24 xorshift24; + + /// Xor-shift random level generator, max height 16 + typedef cds::intrusive::skip_list::xorshift16 xorshift16; + + //@cond + // for backward compatibility + using cds::intrusive::skip_list::xorshift; + //@endcond + + /// Turbo-pascal random level generator + template + using turbo = cds::intrusive::skip_list::turbo; + + /// Turbo-pascal random level generator, max height 32 + typedef cds::intrusive::skip_list::turbo32 turbo32; + + /// Turbo-pascal random level generator, max height 24 + typedef cds::intrusive::skip_list::turbo24 turbo24; + + /// Turbo-pascal random level generator, max height 16 + typedef cds::intrusive::skip_list::turbo16 turbo16; + + //@cond + // for backward compatibility + 
using cds::intrusive::skip_list::turbo_pascal; + //@endcond + + /// Skip list internal statistics + template + using stat = cds::intrusive::skip_list::stat < EventCounter >; + + /// Skip list empty internal statistics + typedef cds::intrusive::skip_list::empty_stat empty_stat; + + /// SkipListSet traits + struct traits + { + /// Key comparison functor + /** + No default functor is provided. If the option is not specified, the \p less is used. + */ + typedef opt::none compare; + + /// specifies binary predicate used for key compare. + /** + Default is \p std::less. + */ + typedef opt::none less; + + /// Item counter + /** + The type for item counting feature, + by defaulr disabled (\p atomicity::empty_item_counter) + */ + typedef atomicity::empty_item_counter item_counter; + + /// C++ memory ordering model + /** + List of available memory ordering see \p opt::memory_model + */ + typedef opt::v::relaxed_ordering memory_model; + + /// Random level generator + /** + The random level generator is an important part of skip-list algorithm. + The node height in the skip-list have a probabilistic distribution + where half of the nodes that have level \p i also have level i+1 + (i = 0..30). The height of a node is in range [0..31]. + + See \p skip_list::random_level_generator option setter. + */ + typedef turbo32 random_level_generator; + + /// Allocator for skip-list nodes, \p std::allocator interface + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// back-off strategy, default is \p cds::backoff::Default + typedef cds::backoff::Default back_off; + + /// Internal statistics, by default disabled. 
To enable, use \p split_list::stat + typedef empty_stat stat; + + /// RCU deadlock checking policy (for \ref cds_nonintrusive_SkipListSet_rcu "RCU-based SkipListSet") + /** + List of available options see opt::rcu_check_deadlock + */ + typedef opt::v::rcu_throw_deadlock rcu_check_deadlock; + + //@cond + // For internal use only + typedef opt::none key_accessor; + //@endcond + }; + + /// Metafunction converting option list to SkipListSet traits + /** + \p Options are: + - \p opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the \p opt::less is used. + - \p opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - \p opt::item_counter - the type of item counting feature. Default is \p atomicity::empty_item_counter that is no item counting. + - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + - \p skip_list::random_level_generator - random level generator. Can be \p skip_list::xor_shift, \p skip_list::turbo or + user-provided one. Default is \p %skip_list::turbo32. + - \p opt::allocator - allocator for skip-list node. Default is \ref CDS_DEFAULT_ALLOCATOR. + - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used. + - \p opt::stat - internal statistics. Available types: \p skip_list::stat, \p skip_list::empty_stat (the default) + - \p opt::rcu_check_deadlock - a deadlock checking policy for RCU-based skip-list. + Default is \p opt::v::rcu_throw_deadlock + + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + ,Options... 
+ >::type type; +# endif + }; + + //@cond + namespace details { + + template + class node_allocator + { + protected: + typedef Node node_type; + typedef Traits traits; + + typedef typename node_type::tower_item_type node_tower_item; + typedef typename traits::allocator::template rebind::other tower_allocator_type; + typedef typename traits::allocator::template rebind::other node_allocator_type; + + static size_t const c_nTowerItemSize = sizeof(node_tower_item); + static size_t const c_nNodePadding = sizeof(node_type) % c_nTowerItemSize; + static size_t const c_nNodeSize = sizeof(node_type) + (c_nNodePadding ? (c_nTowerItemSize - c_nNodePadding) : 0); + + static constexpr size_t node_size( unsigned int nHeight ) noexcept + { + return c_nNodeSize + (nHeight - 1) * c_nTowerItemSize; + } + + static unsigned char * alloc_space( unsigned int nHeight ) + { + unsigned char * pMem; + size_t const sz = node_size( nHeight ); + + if ( nHeight > 1 ) { + pMem = tower_allocator_type().allocate( sz ); + + // check proper alignments + assert( (((uintptr_t) pMem) & (alignof(node_type) - 1)) == 0 ); + assert( (((uintptr_t) (pMem + c_nNodeSize)) & (alignof(node_tower_item) - 1)) == 0 ); + return pMem; + } + else + pMem = reinterpret_cast( node_allocator_type().allocate( 1 )); + + return pMem; + } + + static void free_space( unsigned char * p, unsigned int nHeight ) + { + assert( p != nullptr ); + + if ( nHeight == 1 ) + node_allocator_type().deallocate( reinterpret_cast(p), 1 ); + else + tower_allocator_type().deallocate( p, node_size(nHeight)); + } + + public: + template + node_type * New( unsigned int nHeight, Q const& v ) + { + unsigned char * pMem = alloc_space( nHeight ); + node_type * p = new( pMem ) + node_type( nHeight, nHeight > 1 ? reinterpret_cast(pMem + c_nNodeSize) : nullptr, v ); + return p; + } + + template + node_type * New( unsigned int nHeight, Args&&... 
args ) + { + unsigned char * pMem = alloc_space( nHeight ); + node_type * p = new( pMem ) + node_type( nHeight, nHeight > 1 ? reinterpret_cast(pMem + c_nNodeSize) : nullptr, + std::forward(args)... ); + return p; + } + + void Delete( node_type * p ) + { + assert( p != nullptr ); + + unsigned int nHeight = p->height(); + node_allocator_type().destroy( p ); + free_space( reinterpret_cast(p), nHeight ); + } + }; + + template + struct dummy_node_builder { + typedef IntrusiveNode intrusive_node_type; + + template + static intrusive_node_type * make_tower( intrusive_node_type * pNode, RandomGen& /*gen*/ ) { return pNode ; } + static intrusive_node_type * make_tower( intrusive_node_type * pNode, unsigned int /*nHeight*/ ) { return pNode ; } + static void dispose_tower( intrusive_node_type * pNode ) + { + pNode->release_tower(); + } + + struct node_disposer { + void operator()( intrusive_node_type * /*pNode*/ ) const {} + }; + }; + + template + class iterator + { + typedef ForwardIterator intrusive_iterator; + typedef typename intrusive_iterator::value_type node_type; + typedef typename node_type::stored_value_type value_type; + static bool const c_isConst = intrusive_iterator::c_isConst; + + typedef typename std::conditional< c_isConst, value_type const&, value_type&>::type value_ref; + template friend class iterator; + + intrusive_iterator m_It; + + public: // for internal use only!!! 
+ iterator( intrusive_iterator const& it ) + : m_It( it ) + {} + + public: + iterator() + : m_It() + {} + + iterator( iterator const& s) + : m_It( s.m_It ) + {} + + value_type * operator ->() const + { + return &( m_It.operator->()->m_Value ); + } + + value_ref operator *() const + { + return m_It.operator*().m_Value; + } + + /// Pre-increment + iterator& operator ++() + { + ++m_It; + return *this; + } + + iterator& operator = (iterator const& src) + { + m_It = src.m_It; + return *this; + } + + template + bool operator ==(iterator const& i ) const + { + return m_It == i.m_It; + } + template + bool operator !=(iterator const& i ) const + { + return !( *this == i ); + } + }; + + } // namespace details + //@endcond + + } // namespace skip_list + + // Forward declaration + template + class SkipListSet; + + // Forward declaration + template + class SkipListMap; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_DETAILS_SKIP_LIST_BASE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/split_list_base.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/split_list_base.h new file mode 100644 index 0000000..852b158 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/details/split_list_base.h @@ -0,0 +1,217 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_DETAILS_SPLIT_LIST_BASE_H +#define CDSLIB_CONTAINER_DETAILS_SPLIT_LIST_BASE_H + +#include + +namespace cds { namespace container { + + // forward declaration + struct michael_list_tag; + + /// SplitListSet related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace split_list { + /// Internal statistics, see \p cds::intrusive::split_list::stat + template ::counter_type > + using stat = cds::intrusive::split_list::stat; + + /// Disabled internal statistics, see \p cds::intrusive::split_list::empty_stat + typedef cds::intrusive::split_list::empty_stat empty_stat; + + /// Selector of bucket table implementation = typedef for \p intrusive::split_list::dynamic_bucket_table + template + using dynamic_bucket_table = cds::intrusive::split_list::dynamic_bucket_table; + + /// @copydoc cds::intrusive::split_list::bit_reversal + template + using bit_reversal = cds::intrusive::split_list::bit_reversal; + + using 
cds::intrusive::split_list::static_bucket_table; + using cds::intrusive::split_list::expandable_bucket_table; + + //@cond + namespace details { + + template + struct wrap_map_traits_helper { + typedef Opt key_accessor; + }; + + template + struct wrap_map_traits_helper + { + struct key_accessor + { + typedef Key key_type; + key_type const & operator()( std::pair const & val ) const + { + return val.first; + } + }; + }; + + template + struct wrap_map_traits: public Traits + { + typedef typename wrap_map_traits_helper::key_accessor key_accessor; + }; + + template + struct wrap_set_traits_helper { + typedef Opt key_accessor; + }; + + template + struct wrap_set_traits_helper + { + struct key_accessor + { + typedef Value key_type; + key_type const& operator()( Value const& val ) const + { + return val; + } + }; + }; + + template + struct wrap_set_traits: public Traits + { + typedef typename wrap_set_traits_helper::key_accessor key_accessor; + }; + } // namespace details + //@endcond + + + /// \p SplitListSet traits + struct traits: public intrusive::split_list::traits + { + // Ordered list implementation + /** + Selects appropriate ordered-list implementation for split-list. + Supported types are: + - \p michael_list_tag - for \p MichaelList + - \p lazy_list_tag - for \p LazyList + - \p iterable_list_tag - for \p IterableList + */ + typedef michael_list_tag ordered_list; + + // Ordered list traits + /** + Specifyes traits for selected ordered list type, default type: + - for \p michael_list_tag: \p container::michael_list::traits. + - for \p lazy_list_tag: \p container::lazy_list::traits. + - for \p iterable_list_tag: \p container::iterable_list::traits. + + If this type is \p opt::none, the ordered list traits is combined with default + ordered list traits and split-list traits. 
+ */ + typedef opt::none ordered_list_traits; + + //@cond + typedef opt::none key_accessor; + //@endcond + }; + + /// Option to select ordered list class for split-list + /** + This option selects appropriate ordered list class for containers based on split-list. + Template parameter \p Type may be \p michael_list_tag or \p lazy_list_tag. + */ + template + struct ordered_list + { + //@cond + template struct pack: public Base + { + typedef Type ordered_list; + }; + //@endcond + }; + + /// Option to specify ordered list type traits + /** + The \p Type template parameter specifies ordered list type traits. + It depends on type of ordered list selected. + */ + template + struct ordered_list_traits + { + //@cond + template struct pack: public Base + { + typedef Type ordered_list_traits; + }; + //@endcond + }; + + /// Metafunction converting option list to traits struct + /** + Available \p Options: + - \p split_list::ordered_list - a tag for ordered list implementation. + - \p split_list::ordered_list_traits - type traits for ordered list implementation. + For \p MichaelList use \p container::michael_list::traits or derivatives, + for \p LazyList use \p container::lazy_list::traits or derivatives. 
+ - plus any option from \p intrusive::split_list::make_traits + */ + template + struct make_traits { + typedef typename cds::opt::make_options< traits, Options...>::type type ; ///< Result of metafunction + }; + } // namespace split_list + + //@cond + // Forward declarations + template + class SplitListSet; + + template + class SplitListMap; + //@endcond + + //@cond + // Forward declaration + namespace details { + template + struct make_split_list_set; + + template + struct make_split_list_map; + } + //@endcond + +}} // namespace cds::container + + +#endif // #ifndef CDSLIB_CONTAINER_DETAILS_SPLIT_LIST_BASE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/ellen_bintree_map_dhp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/ellen_bintree_map_dhp.h new file mode 100644 index 0000000..5018191 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/ellen_bintree_map_dhp.h @@ -0,0 +1,37 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_ELLEN_BINTREE_MAP_DHP_H +#define CDSLIB_CONTAINER_ELLEN_BINTREE_MAP_DHP_H + +#include +#include + +#endif // #ifndef CDSLIB_CONTAINER_ELLEN_BINTREE_MAP_DHP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/ellen_bintree_map_hp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/ellen_bintree_map_hp.h new file mode 100644 index 0000000..6c7fdc8 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/ellen_bintree_map_hp.h @@ -0,0 +1,37 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_ELLEN_BINTREE_MAP_HP_H +#define CDSLIB_CONTAINER_ELLEN_BINTREE_MAP_HP_H + +#include +#include + +#endif // #ifndef CDSLIB_CONTAINER_ELLEN_BINTREE_MAP_HP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/ellen_bintree_map_rcu.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/ellen_bintree_map_rcu.h new file mode 100644 index 0000000..d238d07 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/ellen_bintree_map_rcu.h @@ -0,0 +1,603 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_ELLEN_BINTREE_MAP_RCU_H +#define CDSLIB_CONTAINER_ELLEN_BINTREE_MAP_RCU_H + +#include +#include + +namespace cds { namespace container { + + /// Map based on Ellen's et al binary search tree (RCU specialization) + /** @ingroup cds_nonintrusive_map + @ingroup cds_nonintrusive_tree + @anchor cds_container_EllenBinTreeMap_rcu + + Source: + - [2010] F.Ellen, P.Fatourou, E.Ruppert, F.van Breugel "Non-blocking Binary Search Tree" + + %EllenBinTreeMap is an unbalanced leaf-oriented binary search tree that implements the map + abstract data type. Nodes maintains child pointers but not parent pointers. + Every internal node has exactly two children, and all data of type std::pair + currently in the tree are stored in the leaves. Internal nodes of the tree are used to direct \p find + operation along the path to the correct leaf. The keys (of \p Key type) stored in internal nodes + may or may not be in the map. 
+ Unlike \ref cds_container_EllenBinTreeSet_rcu "EllenBinTreeSet" keys are not a part of \p T type. + The map can be represented as a set containing std::pair< Key const, T> values. + + Due to \p extract_min and \p extract_max member functions the \p %EllenBinTreeMap can act as + a priority queue. In this case you should provide unique compound key, for example, + the priority value plus some uniformly distributed random value. + + @warning Recall the tree is unbalanced. The complexity of operations is O(log N) + for uniformly distributed random keys, but in the worst case the complexity is O(N). + + @note In the current implementation we do not use helping technique described in original paper. + So, the current implementation is near to fine-grained lock-based tree. + Helping will be implemented in future release + + Template arguments : + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p Key - key type + - \p T - value type to be stored in tree's leaf nodes. + - \p Traits - map traits, default is \p ellen_bintree::traits. + It is possible to declare option-based tree with \p ellen_bintree::make_map_traits metafunction + instead of \p Traits template argument. + + @note Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. 
+ */ + template < + class RCU, + typename Key, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = ellen_bintree::traits +#else + class Traits +#endif + > + class EllenBinTreeMap< cds::urcu::gc, Key, T, Traits > +#ifdef CDS_DOXYGEN_INVOKED + : public cds::intrusive::EllenBinTree< cds::urcu::gc, Key, T, Traits > +#else + : public ellen_bintree::details::make_ellen_bintree_map< cds::urcu::gc, Key, T, Traits >::type +#endif + { + //@cond + typedef ellen_bintree::details::make_ellen_bintree_map< cds::urcu::gc, Key, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + public: + typedef cds::urcu::gc gc; ///< RCU Garbage collector + typedef Key key_type; ///< type of a key stored in the map + typedef T mapped_type; ///< type of value stored in the map + typedef std::pair< key_type const, mapped_type > value_type; ///< Key-value pair stored in leaf node of the mp + typedef Traits traits; ///< Traits template parameter + + static_assert( std::is_default_constructible::value, "Key should be default constructible type" ); + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator ; ///< key compare functor based on \p Traits::compare and \p Traits::less +# else + typedef typename maker::intrusive_traits::compare key_comparator; +# endif + typedef typename base_class::item_counter item_counter; ///< Item counting policy + typedef typename base_class::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option + typedef typename base_class::node_allocator node_allocator_type; ///< allocator for maintaining internal node + typedef typename base_class::stat stat; ///< internal statistics + typedef typename base_class::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy + typedef typename traits::copy_policy copy_policy; ///< key copy policy + typedef typename traits::back_off back_off; ///< Back-off strategy + + typedef typename traits::allocator allocator_type; ///< Allocator for leaf 
nodes + typedef typename base_class::node_allocator node_allocator; ///< Internal node allocator + typedef typename base_class::update_desc_allocator update_desc_allocator; ///< Update descriptor allocator + + static constexpr const bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; ///< Group of \p extract_xxx functions do not require external locking + + protected: + //@cond + typedef typename base_class::value_type leaf_node; + typedef typename base_class::internal_node internal_node; + typedef typename base_class::update_desc update_desc; + + typedef typename maker::cxx_leaf_node_allocator cxx_leaf_node_allocator; + + typedef std::unique_ptr< leaf_node, typename maker::leaf_deallocator > scoped_node_ptr; + //@endcond + + public: + typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock + + /// pointer to extracted node + using exempt_ptr = cds::urcu::exempt_ptr < gc, leaf_node, value_type, typename maker::intrusive_traits::disposer, + cds::urcu::details::conventional_exempt_member_cast < leaf_node, value_type > + >; + + public: + /// Default constructor + EllenBinTreeMap() + : base_class() + {} + + /// Clears the map + ~EllenBinTreeMap() + {} + + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the map. + + Preconditions: + - The \p key_type should be constructible from a value of type \p K. + - The \p mapped_type should be default-constructible. + + RCU \p synchronize() can be called. RCU should not be locked. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( K const& key ) + { + return insert_with( key, [](value_type&){} ); + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the map. + + Preconditions: + - The \p key_type should be constructible from \p key of type \p K. 
+ - The \p value_type should be constructible from \p val of type \p V. + + RCU \p synchronize() method can be called. RCU should not be locked. + + Returns \p true if \p val is inserted into the map, \p false otherwise. + */ + template + bool insert( K const& key, V const& val ) + { + scoped_node_ptr pNode( cxx_leaf_node_allocator().New( key, val )); + if ( base_class::insert( *pNode )) + { + pNode.release(); + return true; + } + return false; + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the map's item inserted: + - item.first is a const reference to item's key that cannot be changed. + - item.second is a reference to item's value that may be changed. + + The key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p value_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + RCU \p synchronize() method can be called. RCU should not be locked. + */ + template + bool insert_with( K const& key, Func func ) + { + scoped_node_ptr pNode( cxx_leaf_node_allocator().New( key )); + if ( base_class::insert( *pNode, [&func]( leaf_node& item ) { func( item.m_Value ); } )) { + pNode.release(); + return true; + } + return false; + } + + /// For key \p key inserts data of type \p value_type created in-place from \p args + /** + Returns \p true if inserting successful, \p false otherwise. 
+ + RCU \p synchronize() method can be called. RCU should not be locked. + */ + template + bool emplace( K&& key, Args&&... args ) + { + scoped_node_ptr pNode( cxx_leaf_node_allocator().MoveNew( key_type( std::forward(key)), mapped_type( std::forward(args)... ))); + if ( base_class::insert( *pNode )) { + pNode.release(); + return true; + } + return false; + } + + /// Updates the node + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val is not found in the map, then \p val is inserted iff \p bAllowInsert is \p true. + Otherwise, the functor \p func is called with item found. + The functor \p func signature is: + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the map + + The functor may change any fields of the \p item.second that is \p mapped_type; + however, \p func must guarantee that during changing no any other modifications + could be made on this item by concurrent threads. + + RCU \p synchronize() method can be called. RCU should not be locked. + + Returns std::pair where \p first is \p true if operation is successful, + i.e. the node has been inserted or updated, + \p second is \p true if new item has been added or \p false if the item with \p key + already exists. 
+ + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + std::pair update( K const& key, Func func, bool bAllowInsert = true ) + { + scoped_node_ptr pNode( cxx_leaf_node_allocator().New( key )); + std::pair res = base_class::update( *pNode, + [&func](bool bNew, leaf_node& item, leaf_node const& ){ func( bNew, item.m_Value ); }, + bAllowInsert + ); + if ( res.first && res.second ) + pNode.release(); + return res; + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( K const& key, Func func ) + { + return update( key, func, true ); + } + //@endcond + + /// Delete \p key from the map + /**\anchor cds_nonintrusive_EllenBinTreeMap_rcu_erase_val + + RCU \p synchronize() method can be called. RCU should not be locked. + + Return \p true if \p key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + return base_class::erase(key); + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_rcu_erase_val "erase(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::erase_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >()); + } + + /// Delete \p key from the map + /** \anchor cds_nonintrusive_EllenBinTreeMap_rcu_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type& item) { ... } + }; + \endcode + + RCU \p synchronize method can be called. RCU should not be locked. 
+ + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key, Func f ) + { + return base_class::erase( key, [&f]( leaf_node& node) { f( node.m_Value ); } ); + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_rcu_erase_func "erase(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( K const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::erase_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >(), + [&f]( leaf_node& node) { f( node.m_Value ); } ); + } + + /// Extracts an item with minimal key from the map + /** + Returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the leftmost item. + If the set is empty, returns empty \p exempt_ptr. + + @note Due the concurrent nature of the map, the function extracts nearly minimum key. + It means that the function gets leftmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. + So, the function returns the item with minimum key at the moment of tree traversing. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not free the item. + The deallocator will be implicitly invoked when the returned object is destroyed or when + its \p release() member function is called. + */ + exempt_ptr extract_min() + { + return exempt_ptr( base_class::extract_min_()); + } + + /// Extracts an item with maximal key from the map + /** + Returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the rightmost item. + If the set is empty, returns empty \p exempt_ptr. 
+ + @note Due the concurrent nature of the map, the function extracts nearly maximal key. + It means that the function gets rightmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key great than leftmost item's key. + So, the function returns the item with maximum key at the moment of tree traversing. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not free the item. + The deallocator will be implicitly invoked when the returned object is destroyed or when + its \p release() is called. + @note Before reusing \p result object you should call its \p release() method. + */ + exempt_ptr extract_max() + { + return exempt_ptr( base_class::extract_max_()); + } + + /// Extracts an item from the map + /** \anchor cds_nonintrusive_EllenBinTreeMap_rcu_extract + The function searches an item with key equal to \p key in the tree, + unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to an item found. + If \p key is not found the function returns an empty \p exempt_ptr. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not destroy the item found. + The dealloctor will be implicitly invoked when the returned object is destroyed or when + its \p release() member function is called. + */ + template + exempt_ptr extract( Q const& key ) + { + return exempt_ptr( base_class::extract_( key, typename base_class::node_compare())); + } + + /// Extracts an item from the map using \p pred for searching + /** + The function is an analog of \p extract(Q const&) + but \p pred is used for key compare. + \p Less has the interface like \p std::less and should meet \ref cds_container_EllenBinTreeSet_rcu_less + "predicate requirements". + \p pred must imply the same element order as the comparator used for building the map. 
+ */ + template + exempt_ptr extract_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return exempt_ptr( base_class::extract_with_( key, + cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >())); + } + + /// Find the key \p key + /** \anchor cds_nonintrusive_EllenBinTreeMap_rcu_find_cfunc + + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + The functor may change \p item.second. + + The function applies RCU lock internally. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( K const& key, Func f ) + { + return base_class::find( key, [&f](leaf_node& item, K const& ) { f( item.m_Value );}); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_rcu_find_cfunc "find(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool find_with( K const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::find_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >(), + [&f](leaf_node& item, K const& ) { f( item.m_Value );}); + } + + /// Checks whether the map contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + The function applies RCU lock internally. 
+ */ + template + bool contains( K const& key ) + { + return base_class::contains( key ); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( K const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the map contains \p key using \p pred predicate for searching + /** + The function is similar to contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_rcu_less + "Predicate requirements". + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool contains( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::contains( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( K const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Finds \p key and return the item found + /** \anchor cds_nonintrusive_EllenBinTreeMap_rcu_get + The function searches the item with key equal to \p key and returns the pointer to item found. + If \p key is not found it returns \p nullptr. + + RCU should be locked before call the function. + Returned pointer is valid while RCU is locked. + */ + template + value_type * get( Q const& key ) const + { + leaf_node * pNode = base_class::get( key ); + return pNode ? &pNode->m_Value : nullptr; + } + + /// Finds \p key with \p pred predicate and return the item found + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_rcu_get "get(Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \p key_type + and \p Q in any order. + \p pred must imply the same element order as the comparator used for building the map. 
+ */ + template + value_type * get_with( Q const& key, Less pred ) const + { + CDS_UNUSED( pred ); + leaf_node * pNode = base_class::get_with( key, + cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >()); + return pNode ? &pNode->m_Value : nullptr; + } + + /// Clears the map + void clear() + { + base_class::clear(); + } + + /// Checks if the map is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the map + /** + Only leaf nodes containing user data are counted. + + The value returned depends on item counter type provided by \p Traits template parameter. + If it is \p atomicity::empty_item_counter this function always returns 0. + + The function is not suitable for checking the tree emptiness, use \p empty() + member function for this purpose. + */ + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Checks internal consistency (not atomic, not thread-safe) + /** + The debugging function to check internal consistency of the tree. 
+ */ + bool check_consistency() const + { + return base_class::check_consistency(); + } + }; +}} // namespace cds::container + +#endif //#ifndef CDSLIB_CONTAINER_ELLEN_BINTREE_MAP_RCU_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/ellen_bintree_set_dhp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/ellen_bintree_set_dhp.h new file mode 100644 index 0000000..da73786 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/ellen_bintree_set_dhp.h @@ -0,0 +1,37 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_ELLEN_BINTREE_SET_DHP_H +#define CDSLIB_CONTAINER_ELLEN_BINTREE_SET_DHP_H + +#include +#include + +#endif // #ifndef CDSLIB_CONTAINER_ELLEN_BINTREE_SET_DHP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/ellen_bintree_set_hp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/ellen_bintree_set_hp.h new file mode 100644 index 0000000..1d8ec8c --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/ellen_bintree_set_hp.h @@ -0,0 +1,37 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_ELLEN_BINTREE_SET_HP_H +#define CDSLIB_CONTAINER_ELLEN_BINTREE_SET_HP_H + +#include +#include + +#endif // #ifndef CDSLIB_CONTAINER_ELLEN_BINTREE_SET_HP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/ellen_bintree_set_rcu.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/ellen_bintree_set_rcu.h new file mode 100644 index 0000000..da12401 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/ellen_bintree_set_rcu.h @@ -0,0 +1,653 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_ELLEN_BINTREE_SET_RCU_H +#define CDSLIB_CONTAINER_ELLEN_BINTREE_SET_RCU_H + +#include +#include + +namespace cds { namespace container { + + /// Set based on Ellen's et al binary search tree (RCU specialization) + /** @ingroup cds_nonintrusive_set + @ingroup cds_nonintrusive_tree + @anchor cds_container_EllenBinTreeSet_rcu + + Source: + - [2010] F.Ellen, P.Fatourou, E.Ruppert, F.van Breugel "Non-blocking Binary Search Tree" + + %EllenBinTreeSet is an unbalanced leaf-oriented binary search tree that implements the set + abstract data type. Nodes maintains child pointers but not parent pointers. + Every internal node has exactly two children, and all data of type \p T currently in + the tree are stored in the leaves. Internal nodes of the tree are used to direct \p find + operation along the path to the correct leaf. The keys (of \p Key type) stored in internal nodes + may or may not be in the set. \p Key type is a subset of \p T type. 
+ There should be exactly defined a key extracting functor for converting object of type \p T to + object of type \p Key. + + Due to \p extract_min and \p extract_max member functions the \p %EllenBinTreeSet can act as + a priority queue. In this case you should provide unique compound key, for example, + the priority value plus some uniformly distributed random value. + + @warning Recall the tree is unbalanced. The complexity of operations is O(log N) + for uniformly distributed random keys, but in the worst case the complexity is O(N). + + @note In the current implementation we do not use helping technique described in original paper. + So, the current implementation is near to fine-grained lock-based tree. + Helping will be implemented in future release + + Template arguments : + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p Key - key type, a subset of \p T + - \p T - type to be stored in tree's leaf nodes. + - \p Traits - set traits, default is \p ellen_bintree::traits. + It is possible to declare option-based tree with \p ellen_bintree::make_set_traits metafunction + instead of \p Traits template argument. + + @note Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. + + @anchor cds_container_EllenBinTreeSet_rcu_less + Predicate requirements + + opt::less, opt::compare and other predicates using with member fuctions should accept at least parameters + of type \p T and \p Key in any combination. + For example, for \p Foo struct with \p std::string key field the appropiate \p less functor is: + \code + struct Foo + { + std::string m_strKey; + ... 
+ }; + + struct less { + bool operator()( Foo const& v1, Foo const& v2 ) const + { return v1.m_strKey < v2.m_strKey ; } + + bool operator()( Foo const& v, std::string const& s ) const + { return v.m_strKey < s ; } + + bool operator()( std::string const& s, Foo const& v ) const + { return s < v.m_strKey ; } + + // Support comparing std::string and char const * + bool operator()( std::string const& s, char const * p ) const + { return s.compare(p) < 0 ; } + + bool operator()( Foo const& v, char const * p ) const + { return v.m_strKey.compare(p) < 0 ; } + + bool operator()( char const * p, std::string const& s ) const + { return s.compare(p) > 0; } + + bool operator()( char const * p, Foo const& v ) const + { return v.m_strKey.compare(p) > 0; } + }; + \endcode + + */ + template < + class RCU, + typename Key, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = ellen_bintree::traits +#else + class Traits +#endif + > + class EllenBinTreeSet< cds::urcu::gc, Key, T, Traits > +#ifdef CDS_DOXYGEN_INVOKED + : public cds::intrusive::EllenBinTree< cds::urcu::gc, Key, T, Traits > +#else + : public ellen_bintree::details::make_ellen_bintree_set< cds::urcu::gc, Key, T, Traits >::type +#endif + { + //@cond + typedef ellen_bintree::details::make_ellen_bintree_set< cds::urcu::gc, Key, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + typedef cds::urcu::gc gc; ///< RCU Garbage collector + typedef Key key_type; ///< type of a key stored in internal nodes; key is a part of \p value_type + typedef T value_type; ///< type of value stored in the binary tree + typedef Traits traits; ///< Traits template parameter + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less +# else + typedef typename maker::intrusive_traits::compare key_comparator; +# endif + typedef typename base_class::item_counter item_counter; ///< Item counting policy + typedef 
typename base_class::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model + typedef typename base_class::stat stat; ///< internal statistics type + typedef typename base_class::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy + typedef typename traits::key_extractor key_extractor; ///< key extracting functor + typedef typename traits::back_off back_off; ///< Back-off strategy + + + typedef typename traits::allocator allocator_type; ///< Allocator for leaf nodes + typedef typename base_class::node_allocator node_allocator; ///< Internal node allocator + typedef typename base_class::update_desc_allocator update_desc_allocator; ///< Update descriptor allocator + + static constexpr const bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; ///< Group of \p extract_xxx functions do not require external locking + + protected: + //@cond + typedef typename maker::cxx_leaf_node_allocator cxx_leaf_node_allocator; + typedef typename base_class::value_type leaf_node; + typedef typename base_class::internal_node internal_node; + typedef std::unique_ptr< leaf_node, typename maker::intrusive_traits::disposer > scoped_node_ptr; + //@endcond + + public: + typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock + + /// pointer to extracted node + using exempt_ptr = cds::urcu::exempt_ptr < gc, leaf_node, value_type, typename maker::intrusive_traits::disposer, + cds::urcu::details::conventional_exempt_member_cast < leaf_node, value_type > + >; + + public: + /// Default constructor + EllenBinTreeSet() + : base_class() + {} + + /// Clears the set + ~EllenBinTreeSet() + {} + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the set. + + The type \p Q should contain at least the complete key for the node. + The object of \ref value_type should be constructible from a value of type \p Q. + In trivial case, \p Q is equal to \ref value_type. 
+ + RCU \p synchronize() method can be called. RCU should not be locked. + + Returns \p true if \p val is inserted into the set, \p false otherwise. + */ + template + bool insert( Q const& val ) + { + scoped_node_ptr sp( cxx_leaf_node_allocator().New( val )); + if ( base_class::insert( *sp.get())) { + sp.release(); + return true; + } + return false; + } + + /// Inserts new node + /** + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-fields of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this set's item by concurrent threads. + The user-defined functor is called only if the inserting is success. + + RCU \p synchronize() can be called. RCU should not be locked. + */ + template + bool insert( Q const& val, Func f ) + { + scoped_node_ptr sp( cxx_leaf_node_allocator().New( val )); + if ( base_class::insert( *sp.get(), [&f]( leaf_node& v ) { f( v.m_Value ); } )) { + sp.release(); + return true; + } + return false; + } + + /// Updates the node + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val is not found in the set, then \p val is inserted into the set + iff \p bAllowInsert is \p true. + Otherwise, the functor \p func is called with item found. 
+ The functor \p func signature is: + \code + void func( bool bNew, value_type& item, value_type& val ); + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p %update() function + + The functor can change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + RCU \p synchronize method can be called. RCU should not be locked. + + Returns std::pair where \p first is \p true if operation is successful, + i.e. the node has been inserted or updated, + \p second is \p true if new item has been added or \p false if the item with \p key + already exists. + + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + std::pair update( Q const& val, Func func, bool bAllowInsert = true ) + { + scoped_node_ptr sp( cxx_leaf_node_allocator().New( val )); + std::pair bRes = base_class::update( *sp, + [&func, &val](bool bNew, leaf_node& node, leaf_node&){ func( bNew, node.m_Value, val ); }, + bAllowInsert ); + if ( bRes.first && bRes.second ) + sp.release(); + return bRes; + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( const Q& val, Func func ) + { + return update( val, func, true ); + } + //@endcond + + /// Inserts data of type \p value_type created in-place from \p args + /** + Returns \p true if inserting successful, \p false otherwise. + + RCU \p synchronize method can be called. RCU should not be locked. + */ + template + bool emplace( Args&&... args ) + { + scoped_node_ptr sp( cxx_leaf_node_allocator().MoveNew( std::forward(args)... 
)); + if ( base_class::insert( *sp.get())) { + sp.release(); + return true; + } + return false; + } + + /// Delete \p key from the set + /** \anchor cds_nonintrusive_EllenBinTreeSet_rcu_erase_val + + The item comparator should be able to compare the type \p value_type + and the type \p Q. + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key ) + { + return base_class::erase( key ); + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_rcu_erase_val "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::erase_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >()); + } + + /// Delete \p key from the set + /** \anchor cds_nonintrusive_EllenBinTreeSet_rcu_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type const& val); + }; + \endcode + + Since the key of MichaelHashSet's \p value_type is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The list item comparator should be able to compare the type \p T of list item + and the type \p Q. + + RCU \p synchronize method can be called. RCU should not be locked. 
+ + Return \p true if key is found and deleted, \p false otherwise + + See also: \ref erase + */ + template + bool erase( Q const& key, Func f ) + { + return base_class::erase( key, [&f]( leaf_node const& node) { f( node.m_Value ); } ); + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_rcu_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::erase_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >(), + [&f]( leaf_node const& node) { f( node.m_Value ); } ); + } + + /// Extracts an item with minimal key from the set + /** + Returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the leftmost item. + If the set is empty, returns empty \p exempt_ptr. + + @note Due the concurrent nature of the set, the function extracts nearly minimum key. + It means that the function gets leftmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. + So, the function returns the item with minimum key at the moment of tree traversing. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not free the item. + The deallocator will be implicitly invoked when the returned object is destroyed or when + its \p release() member function is called. + */ + exempt_ptr extract_min() + { + return exempt_ptr( base_class::extract_min_()); + } + + /// Extracts an item with maximal key from the set + /** + Returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the rightmost item. + If the set is empty, returns empty \p exempt_ptr. 
+ + @note Due the concurrent nature of the set, the function extracts nearly maximal key. + It means that the function gets rightmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key great than leftmost item's key. + So, the function returns the item with maximum key at the moment of tree traversing. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not free the item. + The deallocator will be implicitly invoked when the returned object is destroyed or when + its \p release() member function is called. + */ + exempt_ptr extract_max() + { + return exempt_ptr( base_class::extract_max_()); + } + + /// Extracts an item from the set + /** \anchor cds_nonintrusive_EllenBinTreeSet_rcu_extract + The function searches an item with key equal to \p key in the tree, + unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to an item found. + If \p key is not found the function returns an empty \p exempt_ptr. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not destroy the item found. + The dealloctor will be implicitly invoked when the returned object is destroyed or when + its release() member function is called. + */ + template + exempt_ptr extract( Q const& key ) + { + return exempt_ptr( base_class::extract_( key, typename base_class::node_compare())); + } + + /// Extracts an item from the set using \p pred for searching + /** + The function is an analog of \p extract(Q const&) but \p pred is used for key compare. + \p Less has the interface like \p std::less and should meet \ref cds_container_EllenBinTreeSet_rcu_less + "predicate requirements". + \p pred must imply the same element order as the comparator used for building the set. 
+ */ + template + exempt_ptr extract_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return exempt_ptr( base_class::extract_with_( key, + cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >())); + } + + /// Find the key \p key + /** + @anchor cds_nonintrusive_EllenBinTreeSet_rcu_find_func + + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. + + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that may be not the same as \p value_type. + + The function applies RCU lock internally. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( Q& key, Func f ) const + { + return base_class::find( key, [&f]( leaf_node& node, Q& v ) { f( node.m_Value, v ); }); + } + //@cond + template + bool find( Q const& key, Func f ) const + { + return base_class::find( key, [&f]( leaf_node& node, Q const& v ) { f( node.m_Value, v ); } ); + } + //@endcond + + /// Finds the key \p key using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_rcu_find_func "find(Q&, Func)" + but \p pred is used for key comparing. 
+ \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q& key, Less pred, Func f ) const + { + CDS_UNUSED( pred ); + return base_class::find_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >(), + [&f]( leaf_node& node, Q& v ) { f( node.m_Value, v ); } ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) const + { + CDS_UNUSED( pred ); + return base_class::find_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >(), + [&f]( leaf_node& node, Q const& v ) { f( node.m_Value, v ); } ); + } + //@endcond + + /// Checks whether the set contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + The function applies RCU lock internally. + */ + template + bool contains( Q const& key ) const + { + return base_class::contains( key ); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( Q const& key ) const + { + return contains( key ); + } + //@endcond + + /// Checks whether the set contains \p key using \p pred predicate for searching + /** + The function is similar to contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_rcu_less + "Predicate requirements". + \p Less must imply the same element order as the comparator used for building the set. + \p pred should accept arguments of type \p Q, \p key_type, \p value_type in any combination. 
+ */ + template + bool contains( Q const& key, Less pred ) const + { + CDS_UNUSED( pred ); + return base_class::contains( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( Q const& key, Less pred ) const + { + return contains( key, pred ); + } + //@endcond + + /// Finds \p key and return the item found + /** \anchor cds_nonintrusive_EllenBinTreeSet_rcu_get + The function searches the item with key equal to \p key and returns the pointer to item found. + If \p key is not found it returns \p nullptr. + + RCU should be locked before call the function. + Returned pointer is valid while RCU is locked. + */ + template + value_type * get( Q const& key ) const + { + leaf_node * pNode = base_class::get( key ); + return pNode ? &pNode->m_Value : nullptr; + } + + /// Finds \p key with \p pred predicate and return the item found + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_rcu_get "get(Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type + and \p Q in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + value_type * get_with( Q const& key, Less pred ) const + { + CDS_UNUSED( pred ); + leaf_node * pNode = base_class::get_with( key, + cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >()); + return pNode ? &pNode->m_Value : nullptr; + } + + /// Clears the set (non-atomic) + /** + The function unlink all items from the tree. + The function is not atomic, thus, in multi-threaded environment with parallel insertions + this sequence + \code + set.clear(); + assert( set.empty()); + \endcode + the assertion could be raised. + + For each leaf the \ref disposer will be called after unlinking. 
+ + RCU \p synchronize method can be called. RCU should not be locked. + */ + void clear() + { + base_class::clear(); + } + + /// Checks if the set is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the set + /** + Only leaf nodes containing user data are counted. + + The value returned depends on item counter type provided by \p Traits template parameter. + If it is \p atomicity::empty_item_counter \p %size() always returns 0. + Therefore, the function is not suitable for checking the tree emptiness, use \p empty() + member function for this purpose. + */ + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Checks internal consistency (not atomic, not thread-safe) + /** + The debugging function to check internal consistency of the tree. + */ + bool check_consistency() const + { + return base_class::check_consistency(); + } + }; +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_ELLEN_BINTREE_SET_RCU_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/fcdeque.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/fcdeque.h new file mode 100644 index 0000000..698908c --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/fcdeque.h @@ -0,0 +1,607 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_FCDEQUE_H +#define CDSLIB_CONTAINER_FCDEQUE_H + +#include +#include +#include + +namespace cds { namespace container { + + /// FCDeque related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace fcdeque { + + /// FCDeque internal statistics + template + struct stat: public cds::algo::flat_combining::stat + { + typedef cds::algo::flat_combining::stat flat_combining_stat; ///< Flat-combining statistics + typedef typename flat_combining_stat::counter_type counter_type; ///< Counter type + + counter_type m_nPushFront ; ///< Count of push_front operations + counter_type m_nPushFrontMove ; ///< Count of push_front operations with move semantics + counter_type m_nPushBack ; ///< Count of push_back operations + counter_type m_nPushBackMove ; ///< Count of push_back operations with move semantics + counter_type m_nPopFront ; ///< Count of success pop_front operations + counter_type m_nFailedPopFront; ///< Count of failed pop_front operations (pop from 
empty deque) + counter_type m_nPopBack ; ///< Count of success pop_back operations + counter_type m_nFailedPopBack ; ///< Count of failed pop_back operations (pop from empty deque) + counter_type m_nCollided ; ///< How many pairs of push/pop were collided, if elimination is enabled + + //@cond + void onPushFront() { ++m_nPushFront; } + void onPushFrontMove() { ++m_nPushFrontMove; } + void onPushBack() { ++m_nPushBack; } + void onPushBackMove() { ++m_nPushBackMove; } + void onPopFront( bool bFailed ) { if ( bFailed ) ++m_nFailedPopFront; else ++m_nPopFront; } + void onPopBack( bool bFailed ) { if ( bFailed ) ++m_nFailedPopBack; else ++m_nPopBack; } + void onCollide() { ++m_nCollided; } + //@endcond + }; + + /// FCDeque dummy statistics, no overhead + struct empty_stat: public cds::algo::flat_combining::empty_stat + { + //@cond + void onPushFront() {} + void onPushFrontMove() {} + void onPushBack() {} + void onPushBackMove() {} + void onPopFront(bool) {} + void onPopBack(bool) {} + void onCollide() {} + //@endcond + }; + + /// FCDeque type traits + struct traits: public cds::algo::flat_combining::traits + { + typedef empty_stat stat; ///< Internal statistics + static constexpr const bool enable_elimination = false; ///< Enable \ref cds_elimination_description "elimination" + }; + + /// Metafunction converting option list to traits + /** + \p Options are: + - any \p cds::algo::flat_combining::make_traits options + - \p opt::stat - internal statistics, possible type: \ref stat, \ref empty_stat (the default) + - \p opt::enable_elimination - enable/disable operation \ref cds_elimination_description "elimination" + By default, the elimination is disabled. For queue, the elimination is possible if the queue + is empty. + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... 
>::type + ,Options... + >::type type; +# endif + }; + + } // namespace fcqueue + + /// Flat-combining deque + /** + @ingroup cds_nonintrusive_deque + @ingroup cds_flat_combining_container + + \ref cds_flat_combining_description "Flat combining" sequential deque. + The class can be considered as a concurrent FC-based wrapper for \p std::deque. + + Template parameters: + - \p T - a value type stored in the deque + - \p Deque - sequential deque implementation, for example, \p std::deque (the default) + or \p boost::container::deque + - \p Trats - type traits of flat combining, default is \p fcdeque::traits. + \p fcdeque::make_traits metafunction can be used to construct specialized \p %fcdeque::traits + */ + template , + typename Traits = fcdeque::traits + > + class FCDeque +#ifndef CDS_DOXYGEN_INVOKED + : public cds::algo::flat_combining::container +#endif + { + public: + typedef T value_type; ///< Value type + typedef Deque deque_type; ///< Sequential deque class + typedef Traits traits; ///< Deque type traits + + typedef typename traits::stat stat; ///< Internal statistics type + static constexpr const bool c_bEliminationEnabled = traits::enable_elimination; ///< \p true if elimination is enabled + + protected: + //@cond + /// Deque operation IDs + enum fc_operation { + op_push_front = cds::algo::flat_combining::req_Operation, ///< Push front + op_push_front_move, ///< Push front (move semantics) + op_push_back, ///< Push back + op_push_back_move, ///< Push back (move semantics) + op_pop_front, ///< Pop front + op_pop_back, ///< Pop back + op_clear ///< Clear + }; + + /// Flat combining publication list record + struct fc_record: public cds::algo::flat_combining::publication_record + { + union { + value_type const * pValPush; ///< Value to push + value_type * pValPop; ///< Pop destination + }; + bool bEmpty; ///< \p true if the deque is empty + }; + //@endcond + + /// Flat combining kernel + typedef cds::algo::flat_combining::kernel< fc_record, traits > fc_kernel; 
+ + protected: + //@cond + mutable fc_kernel m_FlatCombining; + deque_type m_Deque; + //@endcond + + public: + /// Initializes empty deque object + FCDeque() + {} + + /// Initializes empty deque object and gives flat combining parameters + FCDeque( + unsigned int nCompactFactor ///< Flat combining: publication list compacting factor + ,unsigned int nCombinePassCount ///< Flat combining: number of combining passes for combiner thread + ) + : m_FlatCombining( nCompactFactor, nCombinePassCount ) + {} + + /// Inserts a new element at the beginning of the deque container + /** + The function always returns \p true + */ + bool push_front( + value_type const& val ///< Value to be copied to inserted element + ) + { + auto pRec = m_FlatCombining.acquire_record(); + pRec->pValPush = &val; + + constexpr_if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_push_front, pRec, *this ); + else + m_FlatCombining.combine( op_push_front, pRec, *this ); + + assert( pRec->is_done()); + m_FlatCombining.release_record( pRec ); + m_FlatCombining.internal_statistics().onPushFront(); + return true; + } + + /// Inserts a new element at the beginning of the deque container (move semantics) + /** + The function always returns \p true + */ + bool push_front( + value_type&& val ///< Value to be moved to inserted element + ) + { + auto pRec = m_FlatCombining.acquire_record(); + pRec->pValPush = &val; + + constexpr_if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_push_front_move, pRec, *this ); + else + m_FlatCombining.combine( op_push_front_move, pRec, *this ); + + assert( pRec->is_done()); + m_FlatCombining.release_record( pRec ); + m_FlatCombining.internal_statistics().onPushFrontMove(); + return true; + } + + /// Inserts a new element at the end of the deque container + /** + The function always returns \p true + */ + bool push_back( + value_type const& val ///< Value to be copied to inserted element + ) + { + auto pRec = m_FlatCombining.acquire_record(); + 
pRec->pValPush = &val; + + constexpr_if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_push_back, pRec, *this ); + else + m_FlatCombining.combine( op_push_back, pRec, *this ); + + assert( pRec->is_done()); + m_FlatCombining.release_record( pRec ); + m_FlatCombining.internal_statistics().onPushBack(); + return true; + } + + /// Inserts a new element at the end of the deque container (move semantics) + /** + The function always returns \p true + */ + bool push_back( + value_type&& val ///< Value to be moved to inserted element + ) + { + auto pRec = m_FlatCombining.acquire_record(); + pRec->pValPush = &val; + + constexpr_if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_push_back_move, pRec, *this ); + else + m_FlatCombining.combine( op_push_back_move, pRec, *this ); + + assert( pRec->is_done()); + m_FlatCombining.release_record( pRec ); + m_FlatCombining.internal_statistics().onPushBackMove(); + return true; + } + + /// Removes the first element in the deque container + /** + The function returns \p false if the deque is empty, \p true otherwise. + If the deque is empty \p val is not changed. + */ + bool pop_front( + value_type& val ///< Target to be received the copy of removed element + ) + { + auto pRec = m_FlatCombining.acquire_record(); + pRec->pValPop = &val; + + constexpr_if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_pop_front, pRec, *this ); + else + m_FlatCombining.combine( op_pop_front, pRec, *this ); + + assert( pRec->is_done()); + m_FlatCombining.release_record( pRec ); + m_FlatCombining.internal_statistics().onPopFront( pRec->bEmpty ); + return !pRec->bEmpty; + } + + /// Removes the last element in the deque container + /** + The function returns \p false if the deque is empty, \p true otherwise. + If the deque is empty \p val is not changed. 
+ */ + bool pop_back( + value_type& val ///< Target to be received the copy of removed element + ) + { + auto pRec = m_FlatCombining.acquire_record(); + pRec->pValPop = &val; + + constexpr_if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_pop_back, pRec, *this ); + else + m_FlatCombining.combine( op_pop_back, pRec, *this ); + + assert( pRec->is_done()); + m_FlatCombining.release_record( pRec ); + m_FlatCombining.internal_statistics().onPopBack( pRec->bEmpty ); + return !pRec->bEmpty; + } + + /// Clears the deque + void clear() + { + auto pRec = m_FlatCombining.acquire_record(); + + constexpr_if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_clear, pRec, *this ); + else + m_FlatCombining.combine( op_clear, pRec, *this ); + + assert( pRec->is_done()); + m_FlatCombining.release_record( pRec ); + } + + /// Exclusive access to underlying deque object + /** + The functor \p f can do any operation with underlying \p deque_type in exclusive mode. + For example, you can iterate over the deque. + \p Func signature is: + \code + void f( deque_type& deque ); + \endcode + */ + template + void apply( Func f ) + { + auto& deque = m_Deque; + m_FlatCombining.invoke_exclusive( [&deque, &f]() { f( deque ); } ); + } + + /// Exclusive access to underlying deque object + /** + The functor \p f can do any operation with underlying \p deque_type in exclusive mode. + For example, you can iterate over the deque. + \p Func signature is: + \code + void f( deque_type const& deque ); + \endcode + */ + template + void apply( Func f ) const + { + auto const& deque = m_Deque; + m_FlatCombining.invoke_exclusive( [&deque, &f]() { f( deque ); } ); + } + + /// Returns the number of elements in the deque. + /** + Note that size() == 0 is not mean that the deque is empty because + combining record can be in process. + To check emptiness use \ref empty function. 
+ */ + size_t size() const + { + return m_Deque.size(); + } + + /// Checks if the deque is empty + /** + If the combining is in process the function waits while combining done. + */ + bool empty() const + { + bool bRet = false; + auto const& deq = m_Deque; + m_FlatCombining.invoke_exclusive( [&deq, &bRet]() { bRet = deq.empty(); } ); + return bRet; + } + + /// Internal statistics + stat const& statistics() const + { + return m_FlatCombining.statistics(); + } + + public: // flat combining cooperation, not for direct use! + //@cond + /// Flat combining supporting function. Do not call it directly! + /** + The function is called by \ref cds::algo::flat_combining::kernel "flat combining kernel" + object if the current thread becomes a combiner. Invocation of the function means that + the deque should perform an action recorded in \p pRec. + */ + void fc_apply( fc_record * pRec ) + { + assert( pRec ); + + switch ( pRec->op()) { + case op_push_front: + assert( pRec->pValPush ); + m_Deque.push_front( *(pRec->pValPush)); + break; + case op_push_front_move: + assert( pRec->pValPush ); + m_Deque.push_front( std::move( *(pRec->pValPush ))); + break; + case op_push_back: + assert( pRec->pValPush ); + m_Deque.push_back( *(pRec->pValPush)); + break; + case op_push_back_move: + assert( pRec->pValPush ); + m_Deque.push_back( std::move( *(pRec->pValPush ))); + break; + case op_pop_front: + assert( pRec->pValPop ); + pRec->bEmpty = m_Deque.empty(); + if ( !pRec->bEmpty ) { + *(pRec->pValPop) = std::move( m_Deque.front()); + m_Deque.pop_front(); + } + break; + case op_pop_back: + assert( pRec->pValPop ); + pRec->bEmpty = m_Deque.empty(); + if ( !pRec->bEmpty ) { + *(pRec->pValPop) = std::move( m_Deque.back()); + m_Deque.pop_back(); + } + break; + case op_clear: + while ( !m_Deque.empty()) + m_Deque.pop_front(); + break; + default: + assert(false); + break; + } + } + + /// Batch-processing flat combining + void fc_process( typename fc_kernel::iterator itBegin, typename 
fc_kernel::iterator itEnd ) + { + typedef typename fc_kernel::iterator fc_iterator; + + for ( fc_iterator it = itBegin, itPrev = itEnd; it != itEnd; ++it ) { + switch ( it->op( atomics::memory_order_acquire )) { + case op_push_front: + if ( itPrev != itEnd + && (itPrev->op() == op_pop_front || (m_Deque.empty() && itPrev->op() == op_pop_back))) + { + collide( *it, *itPrev ); + itPrev = itEnd; + } + else + itPrev = it; + break; + case op_push_front_move: + if ( itPrev != itEnd + && (itPrev->op() == op_pop_front || ( m_Deque.empty() && itPrev->op() == op_pop_back ))) + { + collide_move( *it, *itPrev ); + itPrev = itEnd; + } + else + itPrev = it; + break; + case op_push_back: + if ( itPrev != itEnd + && (itPrev->op() == op_pop_back || (m_Deque.empty() && itPrev->op() == op_pop_front))) + { + collide( *it, *itPrev ); + itPrev = itEnd; + } + else + itPrev = it; + break; + case op_push_back_move: + if ( itPrev != itEnd + && (itPrev->op() == op_pop_back || ( m_Deque.empty() && itPrev->op() == op_pop_front ))) + { + collide_move( *it, *itPrev ); + itPrev = itEnd; + } + else + itPrev = it; + break; + case op_pop_front: + if ( itPrev != itEnd ) { + if ( m_Deque.empty()) { + switch ( itPrev->op()) { + case op_push_back: + collide( *itPrev, *it ); + itPrev = itEnd; + break; + case op_push_back_move: + collide_move( *itPrev, *it ); + itPrev = itEnd; + break; + default: + itPrev = it; + break; + } + } + else { + switch ( itPrev->op()) { + case op_push_front: + collide( *itPrev, *it ); + itPrev = itEnd; + break; + case op_push_front_move: + collide_move( *itPrev, *it ); + itPrev = itEnd; + break; + default: + itPrev = it; + break; + } + } + } + else + itPrev = it; + break; + case op_pop_back: + if ( itPrev != itEnd ) { + if ( m_Deque.empty()) { + switch ( itPrev->op()) { + case op_push_front: + collide( *itPrev, *it ); + itPrev = itEnd; + break; + case op_push_front_move: + collide_move( *itPrev, *it ); + itPrev = itEnd; + break; + default: + itPrev = it; + break; + } + } + else { 
+ switch ( itPrev->op()) { + case op_push_back: + collide( *itPrev, *it ); + itPrev = itEnd; + break; + case op_push_back_move: + collide_move( *itPrev, *it ); + itPrev = itEnd; + break; + default: + itPrev = it; + break; + } + } + } + else + itPrev = it; + break; + } + } + } + //@endcond + + private: + //@cond + void collide( fc_record& recPush, fc_record& recPop ) + { + *(recPop.pValPop) = *(recPush.pValPush); + recPop.bEmpty = false; + m_FlatCombining.operation_done( recPush ); + m_FlatCombining.operation_done( recPop ); + m_FlatCombining.internal_statistics().onCollide(); + } + + void collide_move( fc_record& recPush, fc_record& recPop ) + { + *(recPop.pValPop) = std::move( *(recPush.pValPush)); + recPop.bEmpty = false; + m_FlatCombining.operation_done( recPush ); + m_FlatCombining.operation_done( recPop ); + m_FlatCombining.internal_statistics().onCollide(); + } + //@endcond + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_FCDEQUE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/fcpriority_queue.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/fcpriority_queue.h new file mode 100644 index 0000000..ded206d --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/fcpriority_queue.h @@ -0,0 +1,350 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_FCPRIORITY_QUEUE_H +#define CDSLIB_CONTAINER_FCPRIORITY_QUEUE_H + +#include +#include +#include + +namespace cds { namespace container { + + /// FCPriorityQueue related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace fcpqueue { + + /// FCPriorityQueue internal statistics + template + struct stat: public cds::algo::flat_combining::stat + { + typedef cds::algo::flat_combining::stat flat_combining_stat; ///< Flat-combining statistics + typedef typename flat_combining_stat::counter_type counter_type; ///< Counter type + + counter_type m_nPush ; ///< Count of push operations + counter_type m_nPushMove ; ///< Count of push operations with move semantics + counter_type m_nPop ; ///< Count of success pop operations + counter_type m_nFailedPop; ///< Count of failed pop operations (pop from empty queue) + + //@cond + void onPush() { ++m_nPush; } + void onPushMove() { ++m_nPushMove; } + void onPop( bool bFailed ) { if ( bFailed ) ++m_nFailedPop; else 
++m_nPop; } + //@endcond + }; + + /// FCPriorityQueue dummy statistics, no overhead + struct empty_stat: public cds::algo::flat_combining::empty_stat + { + //@cond + void onPush() {} + void onPushMove() {} + void onPop(bool) {} + //@endcond + }; + + /// FCPriorityQueue traits + struct traits: public cds::algo::flat_combining::traits + { + typedef empty_stat stat; ///< Internal statistics + }; + + /// Metafunction converting option list to traits + /** + \p Options are: + - any \p cds::algo::flat_combining::make_traits options + - \p opt::stat - internal statistics, possible type: \p fcpqueue::stat, \p fcpqueue::empty_stat (the default) + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + ,Options... + >::type type; +# endif + }; + + } // namespace fcpqueue + + /// Flat-combining priority queue + /** + @ingroup cds_nonintrusive_priority_queue + @ingroup cds_flat_combining_container + + \ref cds_flat_combining_description "Flat combining" sequential priority queue. + The class can be considered as a concurrent FC-based wrapper for \p std::priority_queue. + + Template parameters: + - \p T - a value type stored in the queue + - \p PriorityQueue - sequential priority queue implementation, default is \p std::priority_queue + - \p Traits - type traits of flat combining, default is \p fcpqueue::traits. 
+ \p fcpqueue::make_traits metafunction can be used to construct specialized \p %fcpqueue::traits + */ + template , + typename Traits = fcpqueue::traits + > + class FCPriorityQueue +#ifndef CDS_DOXYGEN_INVOKED + : public cds::algo::flat_combining::container +#endif + { + public: + typedef T value_type; ///< Value type + typedef PriorityQueue priority_queue_type; ///< Sequential priority queue class + typedef Traits traits; ///< Priority queue type traits + + typedef typename traits::stat stat; ///< Internal statistics type + + protected: + //@cond + // Priority queue operation IDs + enum fc_operation { + op_push = cds::algo::flat_combining::req_Operation, + op_push_move, + op_pop, + op_clear + }; + + // Flat combining publication list record + struct fc_record: public cds::algo::flat_combining::publication_record + { + union { + value_type const * pValPush; // Value to push + value_type * pValPop; // Pop destination + }; + bool bEmpty; // true if the queue is empty + }; + //@endcond + + /// Flat combining kernel + typedef cds::algo::flat_combining::kernel< fc_record, traits > fc_kernel; + + protected: + //@cond + mutable fc_kernel m_FlatCombining; + priority_queue_type m_PQueue; + //@endcond + + public: + /// Initializes empty priority queue object + FCPriorityQueue() + {} + + /// Initializes empty priority queue object and gives flat combining parameters + FCPriorityQueue( + unsigned int nCompactFactor ///< Flat combining: publication list compacting factor + ,unsigned int nCombinePassCount ///< Flat combining: number of combining passes for combiner thread + ) + : m_FlatCombining( nCompactFactor, nCombinePassCount ) + {} + + /// Inserts a new element in the priority queue + /** + The function always returns \p true + */ + bool push( + value_type const& val ///< Value to be copied to inserted element + ) + { + auto pRec = m_FlatCombining.acquire_record(); + pRec->pValPush = &val; + + m_FlatCombining.combine( op_push, pRec, *this ); + + assert( pRec->is_done()); + 
m_FlatCombining.release_record( pRec ); + m_FlatCombining.internal_statistics().onPush(); + return true; + } + + /// Inserts a new element in the priority queue (move semantics) + /** + The function always returns \p true + */ + bool push( + value_type&& val ///< Value to be moved to inserted element + ) + { + auto pRec = m_FlatCombining.acquire_record(); + pRec->pValPush = &val; + + m_FlatCombining.combine( op_push_move, pRec, *this ); + + assert( pRec->is_done()); + m_FlatCombining.release_record( pRec ); + m_FlatCombining.internal_statistics().onPushMove(); + return true; + } + + /// Removes the top element from priority queue + /** + The function returns \p false if the queue is empty, \p true otherwise. + If the queue is empty \p val is not changed. + */ + bool pop( + value_type& val ///< Target to be received the copy of top element + ) + { + auto pRec = m_FlatCombining.acquire_record(); + pRec->pValPop = &val; + + m_FlatCombining.combine( op_pop, pRec, *this ); + + assert( pRec->is_done()); + m_FlatCombining.release_record( pRec ); + m_FlatCombining.internal_statistics().onPop( pRec->bEmpty ); + return !pRec->bEmpty; + } + + /// Exclusive access to underlying priority queue object + /** + The functor \p f can do any operation with underlying \p priority_queue_type in exclusive mode. + For example, you can iterate over the queue. + \p Func signature is: + \code + void f( priority_queue_type& deque ); + \endcode + */ + template + void apply( Func f ) + { + auto& pqueue = m_PQueue; + m_FlatCombining.invoke_exclusive( [&pqueue, &f]() { f( pqueue ); } ); + } + + /// Exclusive access to underlying priority queue object + /** + The functor \p f can do any operation with underlying \p proiprity_queue_type in exclusive mode. + For example, you can iterate over the queue. 
+ \p Func signature is: + \code + void f( priority_queue_type const& queue ); + \endcode + */ + template + void apply( Func f ) const + { + auto const& pqueue = m_PQueue; + m_FlatCombining.invoke_exclusive( [&pqueue, &f]() { f( pqueue ); } ); + } + + /// Clears the priority queue + void clear() + { + auto pRec = m_FlatCombining.acquire_record(); + + m_FlatCombining.combine( op_clear, pRec, *this ); + + assert( pRec->is_done()); + m_FlatCombining.release_record( pRec ); + } + + /// Returns the number of elements in the priority queue. + /** + Note that size() == 0 does not mean that the queue is empty because + combining record can be in process. + To check emptiness use \ref empty function. + */ + size_t size() const + { + return m_PQueue.size(); + } + + /// Checks if the priority queue is empty + /** + If the combining is in process the function waits while combining done. + */ + bool empty() + { + bool bRet = false; + auto const& pq = m_PQueue; + m_FlatCombining.invoke_exclusive( [&pq, &bRet]() { bRet = pq.empty(); } ); + return bRet; + } + + /// Internal statistics + stat const& statistics() const + { + return m_FlatCombining.statistics(); + } + + public: // flat combining cooperation, not for direct use! + //@cond + /* + The function is called by \ref cds::algo::flat_combining::kernel "flat combining kernel" + object if the current thread becomes a combiner. Invocation of the function means that + the priority queue should perform an action recorded in \p pRec. 
+ */ + void fc_apply( fc_record * pRec ) + { + assert( pRec ); + + // this function is called under FC mutex, so switch TSan off + //CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN; + + switch ( pRec->op()) { + case op_push: + assert( pRec->pValPush ); + m_PQueue.push( *(pRec->pValPush)); + break; + case op_push_move: + assert( pRec->pValPush ); + m_PQueue.push( std::move( *(pRec->pValPush ))); + break; + case op_pop: + assert( pRec->pValPop ); + pRec->bEmpty = m_PQueue.empty(); + if ( !pRec->bEmpty ) { + *(pRec->pValPop) = std::move( m_PQueue.top()); + m_PQueue.pop(); + } + break; + case op_clear: + while ( !m_PQueue.empty()) + m_PQueue.pop(); + break; + default: + assert(false); + break; + } + + //CDS_TSAN_ANNOTATE_IGNORE_RW_END; + } + //@endcond + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_FCPRIORITY_QUEUE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/fcqueue.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/fcqueue.h new file mode 100644 index 0000000..f9c3cf4 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/fcqueue.h @@ -0,0 +1,442 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_FCQUEUE_H +#define CDSLIB_CONTAINER_FCQUEUE_H + +#include +#include +#include + +namespace cds { namespace container { + + /// FCQueue related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace fcqueue { + + /// FCQueue internal statistics + template + struct stat: public cds::algo::flat_combining::stat + { + typedef cds::algo::flat_combining::stat flat_combining_stat; ///< Flat-combining statistics + typedef typename flat_combining_stat::counter_type counter_type; ///< Counter type + + counter_type m_nEnqueue ; ///< Count of enqueue operations + counter_type m_nEnqMove ; ///< Count of enqueue operations with move semantics + counter_type m_nDequeue ; ///< Count of success dequeue operations + counter_type m_nFailedDeq ; ///< Count of failed dequeue operations (pop from empty queue) + counter_type m_nCollided ; ///< How many pairs of enqueue/dequeue were collided, if elimination is enabled + + //@cond + void onEnqueue() { ++m_nEnqueue; } + void onEnqMove() { ++m_nEnqMove; } + void onDequeue( bool bFailed ) { if ( bFailed ) ++m_nFailedDeq; else ++m_nDequeue; } + void onCollide() { ++m_nCollided; } + //@endcond + }; + + /// FCQueue dummy statistics, no 
overhead + struct empty_stat: public cds::algo::flat_combining::empty_stat + { + //@cond + void onEnqueue() {} + void onEnqMove() {} + void onDequeue(bool) {} + void onCollide() {} + //@endcond + }; + + /// FCQueue type traits + struct traits: public cds::algo::flat_combining::traits + { + typedef empty_stat stat; ///< Internal statistics + static constexpr const bool enable_elimination = false; ///< Enable \ref cds_elimination_description "elimination" + }; + + /// Metafunction converting option list to traits + /** + \p Options are: + - any \p cds::algo::flat_combining::make_traits options + - \p opt::stat - internal statistics, possible type: \p fcqueue::stat, \p fcqueue::empty_stat (the default) + - \p opt::enable_elimination - enable/disable operation \ref cds_elimination_description "elimination" + By default, the elimination is disabled. For queue, the elimination is possible if the queue + is empty. + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + ,Options... + >::type type; +# endif + }; + + } // namespace fcqueue + + /// Flat-combining queue + /** + @ingroup cds_nonintrusive_queue + @ingroup cds_flat_combining_container + + \ref cds_flat_combining_description "Flat combining" sequential queue. + The class can be considered as a concurrent FC-based wrapper for \p std::queue. + + Template parameters: + - \p T - a value type stored in the queue + - \p Queue - sequential queue implementation, default is \p std::queue + - \p Trats - type traits of flat combining, default is \p fcqueue::traits. + \p fcqueue::make_traits metafunction can be used to construct \p %fcqueue::traits specialization. 
+ */ + template , + typename Traits = fcqueue::traits + > + class FCQueue +#ifndef CDS_DOXYGEN_INVOKED + : public cds::algo::flat_combining::container +#endif + { + public: + typedef T value_type; ///< Value type + typedef Queue queue_type; ///< Sequential queue class + typedef Traits traits; ///< Queue type traits + + typedef typename traits::stat stat; ///< Internal statistics type + static constexpr const bool c_bEliminationEnabled = traits::enable_elimination; ///< \p true if elimination is enabled + + protected: + //@cond + /// Queue operation IDs + enum fc_operation { + op_enq = cds::algo::flat_combining::req_Operation, ///< Enqueue + op_enq_move, ///< Enqueue (move semantics) + op_deq, ///< Dequeue + op_clear ///< Clear + }; + + /// Flat combining publication list record + struct fc_record: public cds::algo::flat_combining::publication_record + { + union { + value_type const * pValEnq; ///< Value to enqueue + value_type * pValDeq; ///< Dequeue destination + }; + bool bEmpty; ///< \p true if the queue is empty + }; + //@endcond + + /// Flat combining kernel + typedef cds::algo::flat_combining::kernel< fc_record, traits > fc_kernel; + + protected: + //@cond + mutable fc_kernel m_FlatCombining; + queue_type m_Queue; + //@endcond + + public: + /// Initializes empty queue object + FCQueue() + {} + + /// Initializes empty queue object and gives flat combining parameters + FCQueue( + unsigned int nCompactFactor ///< Flat combining: publication list compacting factor + ,unsigned int nCombinePassCount ///< Flat combining: number of combining passes for combiner thread + ) + : m_FlatCombining( nCompactFactor, nCombinePassCount ) + {} + + /// Inserts a new element at the end of the queue + /** + The content of the new element initialized to a copy of \p val. 
+ + The function always returns \p true + */ + bool enqueue( value_type const& val ) + { + auto pRec = m_FlatCombining.acquire_record(); + pRec->pValEnq = &val; + + constexpr_if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_enq, pRec, *this ); + else + m_FlatCombining.combine( op_enq, pRec, *this ); + + assert( pRec->is_done()); + m_FlatCombining.release_record( pRec ); + m_FlatCombining.internal_statistics().onEnqueue(); + return true; + } + + /// Inserts a new element at the end of the queue (a synonym for \ref enqueue) + bool push( value_type const& val ) + { + return enqueue( val ); + } + + /// Inserts a new element at the end of the queue (move semantics) + /** + \p val is moved to inserted element + */ + bool enqueue( value_type&& val ) + { + auto pRec = m_FlatCombining.acquire_record(); + pRec->pValEnq = &val; + + constexpr_if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_enq_move, pRec, *this ); + else + m_FlatCombining.combine( op_enq_move, pRec, *this ); + + assert( pRec->is_done()); + m_FlatCombining.release_record( pRec ); + + m_FlatCombining.internal_statistics().onEnqMove(); + return true; + } + + /// Inserts a new element at the end of the queue (move semantics, synonym for \p enqueue) + bool push( value_type&& val ) + { + return enqueue( val ); + } + + /// Removes the next element from the queue + /** + \p val takes a copy of the element + */ + bool dequeue( value_type& val ) + { + auto pRec = m_FlatCombining.acquire_record(); + pRec->pValDeq = &val; + + constexpr_if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_deq, pRec, *this ); + else + m_FlatCombining.combine( op_deq, pRec, *this ); + + assert( pRec->is_done()); + m_FlatCombining.release_record( pRec ); + + m_FlatCombining.internal_statistics().onDequeue( pRec->bEmpty ); + return !pRec->bEmpty; + } + + /// Removes the next element from the queue (a synonym for \ref dequeue) + bool pop( value_type& val ) + { + return dequeue( val ); + } + + /// 
Clears the queue + void clear() + { + auto pRec = m_FlatCombining.acquire_record(); + + constexpr_if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_clear, pRec, *this ); + else + m_FlatCombining.combine( op_clear, pRec, *this ); + + assert( pRec->is_done()); + m_FlatCombining.release_record( pRec ); + } + + /// Exclusive access to underlying queue object + /** + The functor \p f can do any operation with underlying \p queue_type in exclusive mode. + For example, you can iterate over the queue. + \p Func signature is: + \code + void f( queue_type& queue ); + \endcode + */ + template + void apply( Func f ) + { + auto& queue = m_Queue; + m_FlatCombining.invoke_exclusive( [&queue, &f]() { f( queue ); } ); + } + + /// Exclusive access to underlying queue object + /** + The functor \p f can do any operation with underlying \p queue_type in exclusive mode. + For example, you can iterate over the queue. + \p Func signature is: + \code + void f( queue_type const& queue ); + \endcode + */ + template + void apply( Func f ) const + { + auto const& queue = m_Queue; + m_FlatCombining.invoke_exclusive( [&queue, &f]() { f( queue ); } ); + } + + /// Returns the number of elements in the queue. + /** + Note that size() == 0 is not mean that the queue is empty because + combining record can be in process. + To check emptiness use \ref empty function. + */ + size_t size() const + { + return m_Queue.size(); + } + + /// Checks if the queue is empty + /** + If the combining is in process the function waits while combining done. + */ + bool empty() const + { + bool bRet = false; + auto const& queue = m_Queue; + m_FlatCombining.invoke_exclusive( [&queue, &bRet]() { bRet = queue.empty(); } ); + return bRet; + } + + /// Internal statistics + stat const& statistics() const + { + return m_FlatCombining.statistics(); + } + + public: // flat combining cooperation, not for direct use! + //@cond + /// Flat combining supporting function. Do not call it directly! 
+ /** + The function is called by \ref cds::algo::flat_combining::kernel "flat combining kernel" + object if the current thread becomes a combiner. Invocation of the function means that + the queue should perform an action recorded in \p pRec. + */ + void fc_apply( fc_record * pRec ) + { + assert( pRec ); + + switch ( pRec->op()) { + case op_enq: + assert( pRec->pValEnq ); + m_Queue.push( *(pRec->pValEnq )); + break; + case op_enq_move: + assert( pRec->pValEnq ); + m_Queue.push( std::move( *(pRec->pValEnq ))); + break; + case op_deq: + assert( pRec->pValDeq ); + pRec->bEmpty = m_Queue.empty(); + if ( !pRec->bEmpty ) { + *(pRec->pValDeq) = std::move( m_Queue.front()); + m_Queue.pop(); + } + break; + case op_clear: + while ( !m_Queue.empty()) + m_Queue.pop(); + break; + default: + assert(false); + break; + } + } + + /// Batch-processing flat combining + void fc_process( typename fc_kernel::iterator itBegin, typename fc_kernel::iterator itEnd ) + { + typedef typename fc_kernel::iterator fc_iterator; + + for ( fc_iterator it = itBegin, itPrev = itEnd; it != itEnd; ++it ) { + switch ( it->op( atomics::memory_order_acquire )) { + case op_enq: + case op_enq_move: + case op_deq: + if ( m_Queue.empty()) { + if ( itPrev != itEnd && collide( *itPrev, *it )) + itPrev = itEnd; + else + itPrev = it; + } + break; + } + } + } + //@endcond + + private: + //@cond + bool collide( fc_record& rec1, fc_record& rec2 ) + { + switch ( rec1.op()) { + case op_enq: + if ( rec2.op() == op_deq ) { + assert(rec1.pValEnq); + assert(rec2.pValDeq); + *rec2.pValDeq = *rec1.pValEnq; + rec2.bEmpty = false; + goto collided; + } + break; + case op_enq_move: + if ( rec2.op() == op_deq ) { + assert(rec1.pValEnq); + assert(rec2.pValDeq); + *rec2.pValDeq = std::move( *rec1.pValEnq ); + rec2.bEmpty = false; + goto collided; + } + break; + case op_deq: + switch ( rec2.op()) { + case op_enq: + case op_enq_move: + return collide( rec2, rec1 ); + } + } + return false; + + collided: + 
m_FlatCombining.operation_done( rec1 ); + m_FlatCombining.operation_done( rec2 ); + m_FlatCombining.internal_statistics().onCollide(); + return true; + } + //@endcond + + }; +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_FCQUEUE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/fcstack.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/fcstack.h new file mode 100644 index 0000000..9799e13 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/fcstack.h @@ -0,0 +1,427 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_FCSTACK_H +#define CDSLIB_CONTAINER_FCSTACK_H + +#include +#include +#include + +namespace cds { namespace container { + + /// FCStack related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace fcstack { + + /// FCStack internal statistics + template + struct stat: public cds::algo::flat_combining::stat + { + typedef cds::algo::flat_combining::stat flat_combining_stat; ///< Flat-combining statistics + typedef typename flat_combining_stat::counter_type counter_type; ///< Counter type + + counter_type m_nPush ; ///< Count of push operations + counter_type m_nPushMove ; ///< Count of push operations with move semantics + counter_type m_nPop ; ///< Count of success pop operations + counter_type m_nFailedPop; ///< Count of failed pop operations (pop from empty stack) + counter_type m_nCollided ; ///< How many pairs of push/pop were collided, if elimination is enabled + + //@cond + void onPush() { ++m_nPush; } + void onPushMove() { ++m_nPushMove; } + void onPop( bool bFailed ) { if ( bFailed ) ++m_nFailedPop; else ++m_nPop; } + void onCollide() { ++m_nCollided; } + //@endcond + }; + + /// FCStack dummy statistics, no overhead + struct empty_stat: public cds::algo::flat_combining::empty_stat + { + //@cond + void onPush() {} + void onPushMove() {} + void onPop(bool) {} + void onCollide() {} + //@endcond + }; + + /// FCStack type traits + struct traits: public cds::algo::flat_combining::traits + { + typedef 
empty_stat stat; ///< Internal statistics + static constexpr const bool enable_elimination = false; ///< Enable \ref cds_elimination_description "elimination" + }; + + /// Metafunction converting option list to traits + /** + \p Options are: + - any \p cds::algo::flat_combining::make_traits options + - \p opt::stat - internal statistics, possible type: \p fcstack::stat, \p fcstack::empty_stat (the default) + - \p opt::enable_elimination - enable/disable operation \ref cds_elimination_description "elimination" + By default, the elimination is disabled. + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + ,Options... + >::type type; +# endif + }; + + } // namespace fcstack + + /// Flat-combining stack + /** + @ingroup cds_nonintrusive_stack + @ingroup cds_flat_combining_container + + \ref cds_flat_combining_description "Flat combining" sequential stack. 
+ + Template parameters: + - \p T - a value type stored in the stack + - \p Stack - sequential stack implementation, default is \p std::stack + - \p Trats - type traits of flat combining, default is \p fcstack::traits + \p fcstack::make_traits metafunction can be used to construct specialized \p %fcstack::traits + */ + template , + typename Traits = fcstack::traits + > + class FCStack +#ifndef CDS_DOXYGEN_INVOKED + : public cds::algo::flat_combining::container +#endif + { + public: + typedef T value_type; ///< Value type + typedef Stack stack_type; ///< Sequential stack class + typedef Traits traits; ///< Stack traits + + typedef typename traits::stat stat; ///< Internal statistics type + static constexpr const bool c_bEliminationEnabled = traits::enable_elimination; ///< \p true if elimination is enabled + + protected: + //@cond + /// Stack operation IDs + enum fc_operation { + op_push = cds::algo::flat_combining::req_Operation, ///< Push + op_push_move, ///< Push (move semantics) + op_pop, ///< Pop + op_clear, ///< Clear + op_empty ///< Empty + }; + + /// Flat combining publication list record + struct fc_record: public cds::algo::flat_combining::publication_record + { + union { + value_type const * pValPush; ///< Value to push + value_type * pValPop; ///< Pop destination + }; + bool bEmpty; ///< \p true if the stack is empty + }; + //@endcond + + /// Flat combining kernel + typedef cds::algo::flat_combining::kernel< fc_record, traits > fc_kernel; + + protected: + //@cond + mutable fc_kernel m_FlatCombining; + stack_type m_Stack; + //@endcond + + public: + /// Initializes empty stack object + FCStack() + {} + + /// Initializes empty stack object and gives flat combining parameters + FCStack( + unsigned int nCompactFactor ///< Flat combining: publication list compacting factor + ,unsigned int nCombinePassCount ///< Flat combining: number of combining passes for combiner thread + ) + : m_FlatCombining( nCompactFactor, nCombinePassCount ) + {} + + /// Inserts a new 
element at the top of stack + /** + The content of the new element initialized to a copy of \p val. + */ + bool push( value_type const& val ) + { + auto pRec = m_FlatCombining.acquire_record(); + pRec->pValPush = &val; + + constexpr_if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_push, pRec, *this ); + else + m_FlatCombining.combine( op_push, pRec, *this ); + + assert( pRec->is_done()); + m_FlatCombining.release_record( pRec ); + m_FlatCombining.internal_statistics().onPush(); + return true; + } + + /// Inserts a new element at the top of stack (move semantics) + /** + The content of the new element initialized to a copy of \p val. + */ + bool push( value_type&& val ) + { + auto pRec = m_FlatCombining.acquire_record(); + pRec->pValPush = &val; + + constexpr_if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_push_move, pRec, *this ); + else + m_FlatCombining.combine( op_push_move, pRec, *this ); + + assert( pRec->is_done()); + m_FlatCombining.release_record( pRec ); + + m_FlatCombining.internal_statistics().onPushMove(); + return true; + } + + /// Removes the element on top of the stack + /** + \p val takes a copy of top element + */ + bool pop( value_type& val ) + { + auto pRec = m_FlatCombining.acquire_record(); + pRec->pValPop = &val; + + constexpr_if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_pop, pRec, *this ); + else + m_FlatCombining.combine( op_pop, pRec, *this ); + + assert( pRec->is_done()); + m_FlatCombining.release_record( pRec ); + + m_FlatCombining.internal_statistics().onPop( pRec->bEmpty ); + return !pRec->bEmpty; + } + + /// Clears the stack + void clear() + { + auto pRec = m_FlatCombining.acquire_record(); + + constexpr_if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_clear, pRec, *this ); + else + m_FlatCombining.combine( op_clear, pRec, *this ); + + assert( pRec->is_done()); + m_FlatCombining.release_record( pRec ); + } + + /// Exclusive access to underlying stack object + /** + 
The functor \p f can do any operation with underlying \p stack_type in exclusive mode. + For example, you can iterate over the stack. + \p Func signature is: + \code + void f( stack_type& stack ); + \endcode + */ + template + void apply( Func f ) + { + auto& stack = m_Stack; + m_FlatCombining.invoke_exclusive( [&stack, &f]() { f( stack ); } ); + } + + /// Exclusive access to underlying stack object + /** + The functor \p f can do any operation with underlying \p stack_type in exclusive mode. + For example, you can iterate over the stack. + \p Func signature is: + \code + void f( stack_type const& stack ); + \endcode + */ + template + void apply( Func f ) const + { + auto const& stack = m_Stack; + m_FlatCombining.invoke_exclusive( [&stack, &f]() { f( stack ); } ); + } + + /// Returns the number of elements in the stack. + /** + Note that size() == 0 is not mean that the stack is empty because + combining record can be in process. + To check emptiness use \ref empty() function. + */ + size_t size() const + { + return m_Stack.size(); + } + + /// Checks if the stack is empty + /** + If the combining is in process the function waits while combining done. + */ + bool empty() + { + auto pRec = m_FlatCombining.acquire_record(); + + constexpr_if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_empty, pRec, *this ); + else + m_FlatCombining.combine( op_empty, pRec, *this ); + + assert( pRec->is_done()); + m_FlatCombining.release_record( pRec ); + return pRec->bEmpty; + } + + /// Internal statistics + stat const& statistics() const + { + return m_FlatCombining.statistics(); + } + + + public: // flat combining cooperation, not for direct use! + //@cond + /// Flat combining supporting function. Do not call it directly! + /** + The function is called by \ref cds::algo::flat_combining::kernel "flat combining kernel" + object if the current thread becomes a combiner. Invocation of the function means that + the stack should perform an action recorded in \p pRec. 
+ */ + void fc_apply( fc_record * pRec ) + { + assert( pRec ); + + switch ( pRec->op()) { + case op_push: + assert( pRec->pValPush ); + m_Stack.push( *(pRec->pValPush )); + break; + case op_push_move: + assert( pRec->pValPush ); + m_Stack.push( std::move( *(pRec->pValPush ))); + break; + case op_pop: + assert( pRec->pValPop ); + pRec->bEmpty = m_Stack.empty(); + if ( !pRec->bEmpty ) { + *(pRec->pValPop) = std::move( m_Stack.top()); + m_Stack.pop(); + } + break; + case op_clear: + while ( !m_Stack.empty()) + m_Stack.pop(); + break; + case op_empty: + pRec->bEmpty = m_Stack.empty(); + break; + default: + assert(false); + break; + } + } + + /// Batch-processing flat combining + void fc_process( typename fc_kernel::iterator itBegin, typename fc_kernel::iterator itEnd ) + { + typedef typename fc_kernel::iterator fc_iterator; + for ( fc_iterator it = itBegin, itPrev = itEnd; it != itEnd; ++it ) { + switch ( it->op( atomics::memory_order_acquire )) { + case op_push: + case op_push_move: + case op_pop: + if ( itPrev != itEnd && collide( *itPrev, *it )) + itPrev = itEnd; + else + itPrev = it; + break; + } + } + } + //@endcond + + private: + //@cond + bool collide( fc_record& rec1, fc_record& rec2 ) + { + switch ( rec1.op()) { + case op_push: + if ( rec2.op() == op_pop ) { + assert(rec1.pValPush); + assert(rec2.pValPop); + *rec2.pValPop = *rec1.pValPush; + rec2.bEmpty = false; + goto collided; + } + break; + case op_push_move: + if ( rec2.op() == op_pop ) { + assert(rec1.pValPush); + assert(rec2.pValPop); + *rec2.pValPop = std::move( *rec1.pValPush ); + rec2.bEmpty = false; + goto collided; + } + break; + case op_pop: + switch ( rec2.op()) { + case op_push: + case op_push_move: + return collide( rec2, rec1 ); + } + } + return false; + + collided: + m_FlatCombining.operation_done( rec1 ); + m_FlatCombining.operation_done( rec2 ); + m_FlatCombining.internal_statistics().onCollide(); + return true; + } + //@endcond + }; +}} // namespace cds::container + +#endif // #ifndef 
CDSLIB_CONTAINER_FCSTACK_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/feldman_hashmap_dhp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/feldman_hashmap_dhp.h new file mode 100644 index 0000000..595e199 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/feldman_hashmap_dhp.h @@ -0,0 +1,37 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_CONTAINER_FELDMAN_HASHMAP_DHP_H +#define CDSLIB_CONTAINER_FELDMAN_HASHMAP_DHP_H + +#include +#include + +#endif // #ifndef CDSLIB_CONTAINER_FELDMAN_HASHMAP_DHP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/feldman_hashmap_hp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/feldman_hashmap_hp.h new file mode 100644 index 0000000..806ce95 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/feldman_hashmap_hp.h @@ -0,0 +1,37 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_FELDMAN_HASHMAP_HP_H +#define CDSLIB_CONTAINER_FELDMAN_HASHMAP_HP_H + +#include +#include + +#endif // #ifndef CDSLIB_CONTAINER_FELDMAN_HASHMAP_HP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/feldman_hashmap_rcu.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/feldman_hashmap_rcu.h new file mode 100644 index 0000000..f8656de --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/feldman_hashmap_rcu.h @@ -0,0 +1,825 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_FELDMAN_HASHMAP_RCU_H +#define CDSLIB_CONTAINER_FELDMAN_HASHMAP_RCU_H + +#include +#include + +namespace cds { namespace container { + + /// Hash map based on multi-level array + /** @ingroup cds_nonintrusive_map + @anchor cds_container_FeldmanHashMap_rcu + + Source: + - [2013] Steven Feldman, Pierre LaBorde, Damian Dechev "Concurrent Multi-level Arrays: + Wait-free Extensible Hash Maps" + + See algorithm short description @ref cds_container_FeldmanHashMap_hp "here" + + @note Two important things you should keep in mind when you're using \p %FeldmanHashMap: + - all keys is converted to fixed-size bit-string by hash functor provided. + You can use variable-length keys, for example, \p std::string as a key for \p %FeldmanHashMap, + but real key in the map will be fixed-size hash values of your keys. + For the strings you may use well-known hashing algorithms like SHA1, SHA2, + MurmurHash, CityHash + or its successor FarmHash and so on, which + converts variable-length strings to fixed-length bit-strings, and such hash values will be the keys in \p %FeldmanHashMap. 
+ If your key is fixed-sized the hash functor is optional, see \p feldman_hashmap::traits::hash for explanation and examples. + - \p %FeldmanHashMap uses a perfect hashing. It means that if two different keys, for example, of type \p std::string, + have identical hash then you cannot insert both that keys in the map. \p %FeldmanHashMap does not maintain the key, + it maintains its fixed-size hash value. + + The map supports @ref cds_container_FeldmanHashMap_rcu_iterators "bidirectional thread-safe iterators". + + Template parameters: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p Key - a key type to be stored in the map + - \p T - a value type to be stored in the map + - \p Traits - type traits, the structure based on \p feldman_hashmap::traits or result of \p feldman_hashmap::make_traits metafunction. + + @note Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. + */ + template < + class RCU + ,typename Key + ,typename T +#ifdef CDS_DOXYGEN_INVOKED + ,class Traits = feldman_hashmap::traits +#else + ,class Traits +#endif + > + class FeldmanHashMap< cds::urcu::gc< RCU >, Key, T, Traits > +#ifdef CDS_DOXYGEN_INVOKED + : protected cds::intrusive::FeldmanHashSet< cds::urcu::gc< RCU >, std::pair, Traits > +#else + : protected cds::container::details::make_feldman_hashmap< cds::urcu::gc< RCU >, Key, T, Traits >::type +#endif + { + //@cond + typedef cds::container::details::make_feldman_hashmap< cds::urcu::gc< RCU >, Key, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + public: + typedef cds::urcu::gc< RCU > gc; ///< RCU garbage collector + typedef Key key_type; ///< Key type + typedef T mapped_type; ///< Mapped type + typedef std::pair< key_type const, mapped_type> value_type; ///< Key-value pair to be stored in the map + typedef Traits traits; ///< Map traits +#ifdef CDS_DOXYGEN_INVOKED + typedef typename traits::hash hasher; 
///< Hash functor, see \p feldman_hashmap::traits::hash +#else + typedef typename maker::hasher hasher; +#endif + + typedef typename maker::hash_type hash_type; ///< Hash type deduced from \p hasher return type + typedef typename base_class::hash_comparator hash_comparator; ///< hash compare functor based on \p Traits::compare and \p Traits::less + typedef typename traits::item_counter item_counter; ///< Item counter type + typedef typename traits::allocator allocator; ///< Element allocator + typedef typename traits::node_allocator node_allocator; ///< Array node allocator + typedef typename traits::memory_model memory_model; ///< Memory model + typedef typename traits::back_off back_off; ///< Back-off strategy + typedef typename traits::stat stat; ///< Internal statistics type + typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy + typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock + static constexpr const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions does not require external locking + + /// Level statistics + typedef feldman_hashmap::level_statistics level_statistics; + + protected: + //@cond + typedef typename maker::node_type node_type; + typedef typename maker::cxx_node_allocator cxx_node_allocator; + typedef std::unique_ptr< node_type, typename maker::node_disposer > scoped_node_ptr; + typedef typename base_class::check_deadlock_policy check_deadlock_policy; + + struct node_cast + { + value_type * operator()(node_type * p) const + { + return p ? 
&p->m_Value : nullptr; + } + }; + + public: + /// pointer to extracted node + using exempt_ptr = cds::urcu::exempt_ptr< gc, node_type, value_type, typename base_class::disposer, node_cast >; + + protected: + template + class bidirectional_iterator: public base_class::iterator_base + { + friend class FeldmanHashMap; + typedef typename base_class::iterator_base iterator_base; + + protected: + static constexpr bool const c_bConstantIterator = IsConst; + + public: + typedef typename std::conditional< IsConst, value_type const*, value_type*>::type value_ptr; ///< Value pointer + typedef typename std::conditional< IsConst, value_type const&, value_type&>::type value_ref; ///< Value reference + + public: + bidirectional_iterator() noexcept + {} + + bidirectional_iterator( bidirectional_iterator const& rhs ) noexcept + : iterator_base( rhs ) + {} + + bidirectional_iterator& operator=(bidirectional_iterator const& rhs) noexcept + { + iterator_base::operator=( rhs ); + return *this; + } + + bidirectional_iterator& operator++() + { + iterator_base::operator++(); + return *this; + } + + bidirectional_iterator& operator--() + { + iterator_base::operator--(); + return *this; + } + + value_ptr operator ->() const noexcept + { + node_type * p = iterator_base::pointer(); + return p ? &p->m_Value : nullptr; + } + + value_ref operator *() const noexcept + { + node_type * p = iterator_base::pointer(); + assert( p ); + return p->m_Value; + } + + void release() + { + iterator_base::release(); + } + + template + bool operator ==(bidirectional_iterator const& rhs) const noexcept + { + return iterator_base::operator==( rhs ); + } + + template + bool operator !=(bidirectional_iterator const& rhs) const noexcept + { + return !( *this == rhs ); + } + + public: // for internal use only! 
+ bidirectional_iterator( base_class const& set, typename base_class::array_node * pNode, size_t idx, bool ) + : iterator_base( set, pNode, idx, false ) + {} + + bidirectional_iterator( base_class const& set, typename base_class::array_node * pNode, size_t idx ) + : iterator_base( set, pNode, idx ) + {} + }; + + /// Reverse bidirectional iterator + template + class reverse_bidirectional_iterator : public base_class::iterator_base + { + friend class FeldmanHashMap; + typedef typename base_class::iterator_base iterator_base; + + public: + typedef typename std::conditional< IsConst, value_type const*, value_type*>::type value_ptr; ///< Value pointer + typedef typename std::conditional< IsConst, value_type const&, value_type&>::type value_ref; ///< Value reference + + public: + reverse_bidirectional_iterator() noexcept + : iterator_base() + {} + + reverse_bidirectional_iterator( reverse_bidirectional_iterator const& rhs ) noexcept + : iterator_base( rhs ) + {} + + reverse_bidirectional_iterator& operator=( reverse_bidirectional_iterator const& rhs) noexcept + { + iterator_base::operator=( rhs ); + return *this; + } + + reverse_bidirectional_iterator& operator++() + { + iterator_base::operator--(); + return *this; + } + + reverse_bidirectional_iterator& operator--() + { + iterator_base::operator++(); + return *this; + } + + value_ptr operator ->() const noexcept + { + node_type * p = iterator_base::pointer(); + return p ? &p->m_Value : nullptr; + } + + value_ref operator *() const noexcept + { + node_type * p = iterator_base::pointer(); + assert( p ); + return p->m_Value; + } + + void release() + { + iterator_base::release(); + } + + template + bool operator ==(reverse_bidirectional_iterator const& rhs) const + { + return iterator_base::operator==( rhs ); + } + + template + bool operator !=(reverse_bidirectional_iterator const& rhs) + { + return !( *this == rhs ); + } + + public: // for internal use only! 
+ reverse_bidirectional_iterator( base_class const& set, typename base_class::array_node * pNode, size_t idx, bool ) + : iterator_base( set, pNode, idx, false ) + {} + + reverse_bidirectional_iterator( base_class const& set, typename base_class::array_node * pNode, size_t idx ) + : iterator_base( set, pNode, idx, false ) + { + iterator_base::backward(); + } + }; + //@endcond + + public: +#ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined iterator; ///< @ref cds_container_FeldmanHashMap_rcu_iterators "bidirectional iterator" type + typedef implementation_defined const_iterator; ///< @ref cds_container_FeldmanHashMap_rcu_iterators "bidirectional const iterator" type + typedef implementation_defined reverse_iterator; ///< @ref cds_container_FeldmanHashMap_rcu_iterators "bidirectional reverse iterator" type + typedef implementation_defined const_reverse_iterator; ///< @ref cds_container_FeldmanHashMap_rcu_iterators "bidirectional reverse const iterator" type +#else + typedef bidirectional_iterator iterator; + typedef bidirectional_iterator const_iterator; + typedef reverse_bidirectional_iterator reverse_iterator; + typedef reverse_bidirectional_iterator const_reverse_iterator; +#endif + + protected: + //@cond + hasher m_Hasher; + //@endcond + + public: + /// Creates empty map + /** + @param head_bits - 2head_bits specifies the size of head array, minimum is 4. + @param array_bits - 2array_bits specifies the size of array node, minimum is 2. + + Equation for \p head_bits and \p array_bits: + \code + sizeof(hash_type) * 8 == head_bits + N * array_bits + \endcode + where \p N is multi-level array depth. + */ + FeldmanHashMap( size_t head_bits = 8, size_t array_bits = 4 ) + : base_class( head_bits, array_bits ) + {} + + /// Destructs the map and frees all data + ~FeldmanHashMap() + {} + + /// Inserts new element with key and default value + /** + The function creates an element with \p key and default value, and then inserts the node created into the map. 
+ + Preconditions: + - The \p key_type should be constructible from a value of type \p K. + In trivial case, \p K is equal to \p key_type. + - The \p mapped_type should be default-constructible. + + Returns \p true if inserting successful, \p false otherwise. + + The function locks RCU internally. + */ + template + bool insert( K&& key ) + { + scoped_node_ptr sp( cxx_node_allocator().MoveNew( m_Hasher, std::forward(key))); + if ( base_class::insert( *sp )) { + sp.release(); + return true; + } + return false; + } + + /// Inserts new element + /** + The function creates a node with copy of \p val value + and then inserts the node created into the map. + + Preconditions: + - The \p key_type should be constructible from \p key of type \p K. + - The \p value_type should be constructible from \p val of type \p V. + + Returns \p true if \p val is inserted into the map, \p false otherwise. + + The function locks RCU internally. + */ + template + bool insert( K&& key, V&& val ) + { + scoped_node_ptr sp( cxx_node_allocator().MoveNew( m_Hasher, std::forward(key), std::forward(val))); + if ( base_class::insert( *sp )) { + sp.release(); + return true; + } + return false; + } + + /// Inserts new element and initialize it by a functor + /** + This function inserts new element with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the map's item inserted: + - item.first is a const reference to item's key that cannot be changed. + - item.second is a reference to item's value that may be changed. + + \p key_type should be constructible from value of type \p K. 
+ + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p value_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + The function locks RCU internally. + */ + template + bool insert_with( K&& key, Func func ) + { + scoped_node_ptr sp( cxx_node_allocator().MoveNew( m_Hasher, std::forward(key))); + if ( base_class::insert( *sp, [&func]( node_type& item ) { func( item.m_Value ); } )) { + sp.release(); + return true; + } + return false; + } + + /// For key \p key inserts data of type \p value_type created in-place from std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + + The function locks RCU internally. + */ + template + bool emplace( K&& key, Args&&... args ) + { + scoped_node_ptr sp( cxx_node_allocator().MoveNew( m_Hasher, std::forward(key), std::forward(args)... )); + if ( base_class::insert( *sp )) { + sp.release(); + return true; + } + return false; + } + + /// Updates data by \p key + /** + The operation performs inserting or replacing the element with lock-free manner. + + If the \p key not found in the map, then the new item created from \p key + will be inserted into the map iff \p bInsert is \p true + (note that in this case the \ref key_type should be constructible from type \p K). + Otherwise, if \p key is found, it is replaced with a new item created from + \p key. + The functor \p Func signature: + \code + struct my_functor { + void operator()( value_type& item, value_type * old ); + }; + \endcode + where: + - \p item - item of the map + - \p old - old item of the map, if \p nullptr - the new item was inserted + + The functor may change any fields of the \p item.second. 
+ + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if new item has been added or \p false if \p key already exists. + + The function locks RCU internally. + + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + std::pair update( K&& key, Func func, bool bInsert = true ) + { + scoped_node_ptr sp( cxx_node_allocator().MoveNew( m_Hasher, std::forward(key))); + std::pair result = base_class::do_update( *sp, + [&func]( node_type& node, node_type * old ) { func( node.m_Value, old ? &old->m_Value : nullptr );}, + bInsert ); + if ( result.first ) + sp.release(); + return result; + } + + /// Delete \p key from the map + /** + \p key_type must be constructible from value of type \p K. + The function deletes the element with hash value equal to hash( key_type( key )) + + Return \p true if \p key is found and deleted, \p false otherwise. + + RCU should not be locked. The function locks RCU internally. + */ + template + bool erase( K const& key ) + { + return base_class::erase(m_Hasher(key_type(key))); + } + + /// Delete \p key from the map + /** + The function searches an item with hash value equal to hash( key_type( key )), + calls \p f functor and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type& item) { ... } + }; + \endcode + where \p item is the element found. + + \p key_type must be constructible from value of type \p K. + + Return \p true if key is found and deleted, \p false otherwise + + RCU should not be locked. The function locks RCU internally. 
+ */ + template + bool erase( K const& key, Func f ) + { + return base_class::erase(m_Hasher(key_type(key)), [&f]( node_type& node) { f( node.m_Value ); }); + } + + /// Extracts the item from the map with specified \p key + /** + The function searches an item with key equal to hash( key_type( key )) in the map, + unlinks it from the map, and returns a guarded pointer to the item found. + If \p key is not found the function returns an empty guarded pointer. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not call the disposer for the item found. + The disposer will be implicitly invoked when the returned object is destroyed or when + its \p release() member function is called. + Example: + \code + typedef cds::container::FeldmanHashMap< cds::urcu::gc< cds::urcu::general_buffered<>>, int, foo, my_traits > map_type; + map_type theMap; + // ... + + typename map_type::exempt_ptr ep( theMap.extract( 5 )); + if ( ep ) { + // Deal with ep + //... + + // Dispose returned item. + ep.release(); + } + \endcode + */ + template + exempt_ptr extract( K const& key ) + { + check_deadlock_policy::check(); + + node_type * p; + { + rcu_lock rcuLock; + p = base_class::do_erase( m_Hasher( key_type(key)), [](node_type const&) -> bool {return true;}); + } + return exempt_ptr(p); + } + + /// Checks whether the map contains \p key + /** + The function searches the item by its hash that is equal to hash( key_type( key )) + and returns \p true if it is found, or \p false otherwise. + */ + template + bool contains( K const& key ) + { + return base_class::contains( m_Hasher( key_type( key ))); + } + + /// Find the key \p key + /** + + The function searches the item by its hash that is equal to hash( key_type( key )) + and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. 
+ + The functor may change \p item.second. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( K const& key, Func f ) + { + return base_class::find( m_Hasher( key_type( key )), [&f](node_type& node) { f( node.m_Value );}); + } + + /// Finds the key \p key and return the item found + /** + The function searches the item by its \p hash + and returns the pointer to the item found. + If \p hash is not found the function returns \p nullptr. + + RCU should be locked before the function invocation. + Returned pointer is valid only while RCU is locked. + + Usage: + \code + typedef cds::container::FeldmanHashMap< your_template_params > my_map; + my_map theMap; + // ... + { + // lock RCU + my_map::rcu_lock; + + foo * p = theMap.get( 5 ); + if ( p ) { + // Deal with p + //... + } + } + \endcode + */ + template + value_type * get( K const& key ) + { + node_type * p = base_class::get( m_Hasher( key_type( key ))); + return p ? &p->m_Value : nullptr; + } + + /// Clears the map (non-atomic) + /** + The function unlink all data node from the map. + The function is not atomic but is thread-safe. + After \p %clear() the map may not be empty because another threads may insert items. + */ + void clear() + { + base_class::clear(); + } + + /// Checks if the map is empty + /** + Emptiness is checked by item counting: if item count is zero then the map is empty. + Thus, the correct item counting feature is an important part of the map implementation. 
+ */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the map + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Returns the size of head node + size_t head_size() const + { + return base_class::head_size(); + } + + /// Returns the size of the array node + size_t array_node_size() const + { + return base_class::array_node_size(); + } + + /// Collects tree level statistics into \p stat + /** + The function traverses the set and collects statistics for each level of the tree + into \p feldman_hashset::level_statistics struct. The element of \p stat[i] + represents statistics for level \p i, level 0 is head array. + The function is thread-safe and may be called in multi-threaded environment. + + Result can be useful for estimating efficiency of hash functor you use. + */ + void get_level_statistics(std::vector< feldman_hashmap::level_statistics>& stat) const + { + base_class::get_level_statistics(stat); + } + + public: + ///@name Thread-safe iterators + /** @anchor cds_container_FeldmanHashMap_rcu_iterators + The map supports thread-safe iterators: you may iterate over the map in multi-threaded environment + under explicit RCU lock. + RCU lock requirement means that inserting or searching is allowed but you must not erase the items from the map + since erasing under RCU lock can lead to a deadlock. However, another thread can call \p erase() safely + while your thread is iterating. + + A typical example is: + \code + struct foo { + // ... other fields + uint32_t payload; // only for example + }; + typedef cds::urcu::gc< cds::urcu::general_buffered<>> rcu; + typedef cds::container::FeldmanHashMap< rcu, std::string, foo> map_type; + + map_type m; + + // ... + + // iterate over the map + { + // lock the RCU. 
+ typename set_type::rcu_lock l; // scoped RCU lock + + // traverse the map + for ( auto i = m.begin(); i != s.end(); ++i ) { + // deal with i. Remember, erasing is prohibited here! + i->second.payload++; + } + } // at this point RCU lock is released + \endcode + + Each iterator object supports the common interface: + - dereference operators: + @code + value_type [const] * operator ->() noexcept + value_type [const] & operator *() noexcept + @endcode + - pre-increment and pre-decrement. Post-operators is not supported + - equality operators == and !=. + Iterators are equal iff they point to the same cell of the same array node. + Note that for two iterators \p it1 and \p it2 the condition it1 == it2 + does not entail &(*it1) == &(*it2) : welcome to concurrent containers + + @note It is possible the item can be iterated more that once, for example, if an iterator points to the item + in an array node that is being splitted. + */ + ///@{ + /// Returns an iterator to the beginning of the map + iterator begin() + { + return base_class::template init_begin(); + } + + /// Returns an const iterator to the beginning of the map + const_iterator begin() const + { + return base_class::template init_begin(); + } + + /// Returns an const iterator to the beginning of the map + const_iterator cbegin() + { + return base_class::template init_begin(); + } + + /// Returns an iterator to the element following the last element of the map. This element acts as a placeholder; attempting to access it results in undefined behavior. + iterator end() + { + return base_class::template init_end(); + } + + /// Returns a const iterator to the element following the last element of the map. This element acts as a placeholder; attempting to access it results in undefined behavior. + const_iterator end() const + { + return base_class::template init_end(); + } + + /// Returns a const iterator to the element following the last element of the map. 
This element acts as a placeholder; attempting to access it results in undefined behavior. + const_iterator cend() + { + return base_class::template init_end(); + } + + /// Returns a reverse iterator to the first element of the reversed map + reverse_iterator rbegin() + { + return base_class::template init_rbegin(); + } + + /// Returns a const reverse iterator to the first element of the reversed map + const_reverse_iterator rbegin() const + { + return base_class::template init_rbegin(); + } + + /// Returns a const reverse iterator to the first element of the reversed map + const_reverse_iterator crbegin() + { + return base_class::template init_rbegin(); + } + + /// Returns a reverse iterator to the element following the last element of the reversed map + /** + It corresponds to the element preceding the first element of the non-reversed container. + This element acts as a placeholder, attempting to access it results in undefined behavior. + */ + reverse_iterator rend() + { + return base_class::template init_rend(); + } + + /// Returns a const reverse iterator to the element following the last element of the reversed map + /** + It corresponds to the element preceding the first element of the non-reversed container. + This element acts as a placeholder, attempting to access it results in undefined behavior. + */ + const_reverse_iterator rend() const + { + return base_class::template init_rend(); + } + + /// Returns a const reverse iterator to the element following the last element of the reversed map + /** + It corresponds to the element preceding the first element of the non-reversed container. + This element acts as a placeholder, attempting to access it results in undefined behavior. 
+ */ + const_reverse_iterator crend() + { + return base_class::template init_rend(); + } + ///@} + }; +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_FELDMAN_HASHMAP_RCU_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/feldman_hashset_dhp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/feldman_hashset_dhp.h new file mode 100644 index 0000000..df5a4df --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/feldman_hashset_dhp.h @@ -0,0 +1,37 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_FELDMAN_HASHSET_DHP_H +#define CDSLIB_CONTAINER_FELDMAN_HASHSET_DHP_H + +#include +#include + +#endif // #ifndef CDSLIB_CONTAINER_FELDMAN_HASHSET_DHP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/feldman_hashset_hp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/feldman_hashset_hp.h new file mode 100644 index 0000000..cf9bab9 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/feldman_hashset_hp.h @@ -0,0 +1,37 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_FELDMAN_HASHSET_HP_H +#define CDSLIB_CONTAINER_FELDMAN_HASHSET_HP_H + +#include +#include + +#endif // #ifndef CDSLIB_CONTAINER_FELDMAN_HASHSET_HP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/feldman_hashset_rcu.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/feldman_hashset_rcu.h new file mode 100644 index 0000000..53a150f --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/feldman_hashset_rcu.h @@ -0,0 +1,598 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_FELDMAN_HASHSET_RCU_H +#define CDSLIB_CONTAINER_FELDMAN_HASHSET_RCU_H + +#include +#include + +namespace cds { namespace container { + + /// Hash set based on multi-level array, \ref cds_urcu_desc "RCU" specialization + /** @ingroup cds_nonintrusive_set + @anchor cds_container_FeldmanHashSet_rcu + + Source: + - [2013] Steven Feldman, Pierre LaBorde, Damian Dechev "Concurrent Multi-level Arrays: + Wait-free Extensible Hash Maps" + + See algorithm short description @ref cds_intrusive_FeldmanHashSet_hp "here" + + @note Two important things you should keep in mind when you're using \p %FeldmanHashSet: + - all keys must be fixed-size. It means that you cannot use \p std::string as a key for \p %FeldmanHashSet. 
+ Instead, for the strings you should use well-known hashing algorithms like SHA1, SHA2, + MurmurHash, CityHash + or its successor FarmHash and so on, which + converts variable-length strings to fixed-length bit-strings, and use that hash as a key in \p %FeldmanHashSet. + - \p %FeldmanHashSet uses a perfect hashing. It means that if two different keys, for example, of type \p std::string, + have identical hash then you cannot insert both that keys in the set. \p %FeldmanHashSet does not maintain the key, + it maintains its fixed-size hash value. + + The set supports @ref cds_container_FeldmanHashSet_iterators "bidirectional thread-safe iterators". + + Template parameters: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p T - a value type to be stored in the set + - \p Traits - type traits, the structure based on \p feldman_hashset::traits or result of \p feldman_hashset::make_traits metafunction. + \p Traits is the mandatory argument because it has one mandatory type - an @ref feldman_hashset::traits::hash_accessor "accessor" + to hash value of \p T. The set algorithm does not calculate that hash value. + + @note Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. + + The set supports @ref cds_container_FeldmanHashSet_rcu_iterators "bidirectional thread-safe iterators" + with some restrictions. 
+ */ + template < + class RCU + , typename T +#ifdef CDS_DOXYGEN_INVOKED + , class Traits = feldman_hashset::traits +#else + , class Traits +#endif + > + class FeldmanHashSet< cds::urcu::gc< RCU >, T, Traits > +#ifdef CDS_DOXYGEN_INVOKED + : protected cds::intrusive::FeldmanHashSet< cds::urcu::gc< RCU >, T, Traits > +#else + : protected cds::container::details::make_feldman_hashset< cds::urcu::gc< RCU >, T, Traits >::type +#endif + { + //@cond + typedef cds::container::details::make_feldman_hashset< cds::urcu::gc< RCU >, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + typedef cds::urcu::gc< RCU > gc; ///< RCU garbage collector + typedef T value_type; ///< type of value stored in the set + typedef Traits traits; ///< Traits template parameter, see \p feldman_hashset::traits + + typedef typename base_class::hash_accessor hash_accessor; ///< Hash accessor functor + typedef typename base_class::hash_type hash_type; ///< Hash type deduced from \p hash_accessor return type + typedef typename base_class::hash_comparator hash_comparator; ///< hash compare functor based on \p opt::compare and \p opt::less option setter + + typedef typename traits::item_counter item_counter; ///< Item counter type + typedef typename traits::allocator allocator; ///< Element allocator + typedef typename traits::node_allocator node_allocator; ///< Array node allocator + typedef typename traits::memory_model memory_model; ///< Memory model + typedef typename traits::back_off back_off; ///< Backoff strategy + typedef typename traits::stat stat; ///< Internal statistics type + typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy + typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock + static constexpr const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions does not require external locking + typedef typename base_class::exempt_ptr exempt_ptr; ///< pointer to extracted node + + 
/// The size of hash_type in bytes, see \p feldman_hashset::traits::hash_size for explanation + static constexpr size_t const c_hash_size = base_class::c_hash_size; + + /// Level statistics + typedef feldman_hashset::level_statistics level_statistics; + + protected: + //@cond + typedef typename maker::cxx_node_allocator cxx_node_allocator; + typedef std::unique_ptr< value_type, typename maker::node_disposer > scoped_node_ptr; + //@endcond + + public: + /// Creates empty set + /** + @param head_bits - 2head_bits specifies the size of head array, minimum is 4. + @param array_bits - 2array_bits specifies the size of array node, minimum is 2. + + Equation for \p head_bits and \p array_bits: + \code + sizeof(hash_type) * 8 == head_bits + N * array_bits + \endcode + where \p N is multi-level array depth. + */ + FeldmanHashSet( size_t head_bits = 8, size_t array_bits = 4 ) + : base_class( head_bits, array_bits ) + {} + + /// Destructs the set and frees all data + ~FeldmanHashSet() + {} + + /// Inserts new element + /** + The function creates an element with copy of \p val value and then inserts it into the set. + + The type \p Q should contain as minimum the complete hash for the element. + The object of \ref value_type should be constructible from a value of type \p Q. + In trivial case, \p Q is equal to \ref value_type. + + Returns \p true if \p val is inserted into the set, \p false otherwise. + + The function locks RCU internally. + */ + template + bool insert( Q const& val ) + { + scoped_node_ptr sp( cxx_node_allocator().New( val )); + if ( base_class::insert( *sp )) { + sp.release(); + return true; + } + return false; + } + + /// Inserts new element + /** + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-fields of \p val. 
+ + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this set's item by concurrent threads. + The user-defined functor is called only if the inserting is success. + + The function locks RCU internally. + */ + template + bool insert( Q const& val, Func f ) + { + scoped_node_ptr sp( cxx_node_allocator().New( val )); + if ( base_class::insert( *sp, f )) { + sp.release(); + return true; + } + return false; + } + + /// Updates the element + /** + The operation performs inserting or replacing with lock-free manner. + + If the \p val key not found in the set, then the new item created from \p val + will be inserted into the set iff \p bInsert is \p true. + Otherwise, if \p val is found, it is replaced with new item created from \p val + and previous item is disposed. + In both cases \p func functor is called. + + The functor \p Func signature: + \code + struct my_functor { + void operator()( value_type& cur, value_type * prev ); + }; + \endcode + where: + - \p cur - current element + - \p prev - pointer to previous element with such hash. \p prev is \p nullptr + if \p cur was just inserted. + + The functor may change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + Returns std::pair where \p first is \p true if operation is successful, + i.e. the item has been inserted or updated, + \p second is \p true if the new item has been added or \p false if the item with key equal to \p val + already exists. 
+ */ + template + std::pair update( Q const& val, Func func, bool bInsert = true ) + { + scoped_node_ptr sp( cxx_node_allocator().New( val )); + std::pair bRes = base_class::do_update( *sp, func, bInsert ); + if ( bRes.first ) + sp.release(); + return bRes; + } + + /// Inserts data of type \p value_type created in-place from std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool emplace( Args&&... args ) + { + scoped_node_ptr sp( cxx_node_allocator().MoveNew( std::forward(args)... )); + if ( base_class::insert( *sp )) { + sp.release(); + return true; + } + return false; + } + + /// Deletes the item from the set + /** + The function searches \p hash in the set, + deletes the item found, and returns \p true. + If that item is not found the function returns \p false. + + RCU should not be locked. The function locks RCU internally. + */ + bool erase( hash_type const& hash ) + { + return base_class::erase( hash ); + } + + /// Deletes the item from the set + /** + The function searches \p hash in the set, + call \p f functor with item found, and deltes the element from the set. + + The \p Func interface is + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + If \p hash is not found the function returns \p false. + + RCU should not be locked. The function locks RCU internally. + */ + template + bool erase( hash_type const& hash, Func f ) + { + return base_class::erase( hash, f ); + } + + /// Extracts the item with specified \p hash + /** + The function searches \p hash in the set, + unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. + If the item with key equal to \p key is not found the function returns an empty \p exempt_ptr. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not call the disposer for the item found. 
+ The disposer will be implicitly invoked when the returned object is destroyed or when + its \p release() member function is called. + Example: + \code + typedef cds::container::FeldmanHashSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > set_type; + set_type theSet; + // ... + + typename set_type::exempt_ptr ep( theSet.extract( 5 )); + if ( ep ) { + // Deal with ep + //... + + // Dispose returned item. + ep.release(); + } + \endcode + */ + exempt_ptr extract( hash_type const& hash ) + { + return base_class::extract( hash ); + } + + /// Finds an item by it's \p hash + /** + The function searches the item by \p hash and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during the functor is executing. + The functor does not serialize simultaneous access to the set's \p item. If such access is + possible you must provide your own synchronization schema on item level to prevent unsafe item modifications. + + The function returns \p true if \p hash is found, \p false otherwise. + */ + template + bool find( hash_type const& hash, Func f ) + { + return base_class::find( hash, f ); + } + + /// Checks whether the set contains \p hash + /** + The function searches the item by its \p hash + and returns \p true if it is found, or \p false otherwise. + */ + bool contains( hash_type const& hash ) + { + return base_class::contains( hash ); + } + + /// Finds an item by it's \p hash and returns the item found + /** + The function searches the item by its \p hash + and returns the pointer to the item found. + If \p hash is not found the function returns \p nullptr. + + RCU should be locked before the function invocation. + Returned pointer is valid only while RCU is locked. 
+ + Usage: + \code + typedef cds::container::FeldmanHashSet< your_template_params > my_set; + my_set theSet; + // ... + { + // lock RCU + my_set::rcu_lock lock; + + foo * p = theSet.get( 5 ); + if ( p ) { + // Deal with p + //... + } + } + \endcode + */ + value_type * get( hash_type const& hash ) + { + return base_class::get( hash ); + } + + /// Clears the set (non-atomic) + /** + The function unlink all data node from the set. + The function is not atomic but is thread-safe. + After \p %clear() the set may not be empty because another threads may insert items. + */ + void clear() + { + base_class::clear(); + } + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. + Thus, the correct item counting feature is an important part of the set implementation. + */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the set + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Returns the size of head node + size_t head_size() const + { + return base_class::head_size(); + } + + /// Returns the size of the array node + size_t array_node_size() const + { + return base_class::array_node_size(); + } + + /// Collects tree level statistics into \p stat + /** + The function traverses the set and collects statistics for each level of the tree + into \p feldman_hashset::level_statistics struct. The element of \p stat[i] + represents statistics for level \p i, level 0 is head array. + The function is thread-safe and may be called in multi-threaded environment. + + Result can be useful for estimating efficiency of hash functor you use. 
+ */ + void get_level_statistics(std::vector< feldman_hashset::level_statistics>& stat) const + { + base_class::get_level_statistics(stat); + } + + public: + ///@name Thread-safe iterators + ///@{ + /// Bidirectional iterator + /** @anchor cds_container_FeldmanHashSet_rcu_iterators + The set supports thread-safe iterators: you may iterate over the set in multi-threaded environment + under explicit RCU lock. + RCU lock requirement means that inserting or searching is allowed but you must not erase the items from the set + since erasing under RCU lock can lead to a deadlock. However, another thread can call \p erase() safely + while your thread is iterating. + + A typical example is: + \code + struct foo { + uint32_t hash; + // ... other fields + uint32_t payload; // only for example + }; + struct set_traits: cds::container::feldman_hashset::traits + { + struct hash_accessor { + uint32_t operator()( foo const& src ) const + { + retur src.hash; + } + }; + }; + + typedef cds::urcu::gc< cds::urcu::general_buffered<>> rcu; + typedef cds::container::FeldmanHashSet< rcu, foo, set_traits > set_type; + + set_type s; + + // ... + + // iterate over the set + { + // lock the RCU. + typename set_type::rcu_lock l; // scoped RCU lock + + // traverse the set + for ( auto i = s.begin(); i != s.end(); ++i ) { + // deal with i. Remember, erasing is prohibited here! + i->payload++; + } + } // at this point RCU lock is released + \endcode + + Each iterator object supports the common interface: + - dereference operators: + @code + value_type [const] * operator ->() noexcept + value_type [const] & operator *() noexcept + @endcode + - pre-increment and pre-decrement. Post-operators is not supported + - equality operators == and !=. + Iterators are equal iff they point to the same cell of the same array node. 
+ Note that for two iterators \p it1 and \p it2 the condition it1 == it2 + does not entail &(*it1) == &(*it2) : welcome to concurrent containers + + @note It is possible the item can be iterated more that once, for example, if an iterator points to the item + in an array node that is being splitted. + */ + typedef typename base_class::iterator iterator; + typedef typename base_class::const_iterator const_iterator; ///< @ref cds_container_FeldmanHashSet_rcu_iterators "bidirectional const iterator" type + typedef typename base_class::reverse_iterator reverse_iterator; ///< @ref cds_container_FeldmanHashSet_rcu_iterators "bidirectional reverse iterator" type + typedef typename base_class::const_reverse_iterator const_reverse_iterator; ///< @ref cds_container_FeldmanHashSet_rcu_iterators "bidirectional reverse const iterator" type + + /// Returns an iterator to the beginning of the set + iterator begin() + { + return base_class::begin(); + } + + /// Returns an const iterator to the beginning of the set + const_iterator begin() const + { + return base_class::begin(); + } + + /// Returns an const iterator to the beginning of the set + const_iterator cbegin() + { + return base_class::cbegin(); + } + + /// Returns an iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior. + iterator end() + { + return base_class::end(); + } + + /// Returns a const iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior. + const_iterator end() const + { + return base_class::end(); + } + + /// Returns a const iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior. 
+ const_iterator cend() + { + return base_class::cend(); + } + + /// Returns a reverse iterator to the first element of the reversed set + reverse_iterator rbegin() + { + return base_class::rbegin(); + } + + /// Returns a const reverse iterator to the first element of the reversed set + const_reverse_iterator rbegin() const + { + return base_class::rbegin(); + } + + /// Returns a const reverse iterator to the first element of the reversed set + const_reverse_iterator crbegin() + { + return base_class::crbegin(); + } + + /// Returns a reverse iterator to the element following the last element of the reversed set + /** + It corresponds to the element preceding the first element of the non-reversed container. + This element acts as a placeholder, attempting to access it results in undefined behavior. + */ + reverse_iterator rend() + { + return base_class::rend(); + } + + /// Returns a const reverse iterator to the element following the last element of the reversed set + /** + It corresponds to the element preceding the first element of the non-reversed container. + This element acts as a placeholder, attempting to access it results in undefined behavior. + */ + const_reverse_iterator rend() const + { + return base_class::rend(); + } + + /// Returns a const reverse iterator to the element following the last element of the reversed set + /** + It corresponds to the element preceding the first element of the non-reversed container. + This element acts as a placeholder, attempting to access it results in undefined behavior. 
+ */ + const_reverse_iterator crend() + { + return base_class::crend(); + } + ///@} + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_FELDMAN_HASHSET_RCU_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/bronson_avltree_map_rcu.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/bronson_avltree_map_rcu.h new file mode 100644 index 0000000..f7bb9fd --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/bronson_avltree_map_rcu.h @@ -0,0 +1,2248 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H +#define CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H + +#include // is_base_of +#include +#include +#include + +namespace cds { namespace container { + + /// Bronson et al AVL-tree (RCU specialization for pointers) + /** @ingroup cds_nonintrusive_map + @ingroup cds_nonintrusive_tree + @headerfile cds/container/bronson_avltree_map_rcu.h + @anchor cds_container_BronsonAVLTreeMap_rcu_ptr + + This is the specialization of \ref cds_container_BronsonAVLTreeMap_rcu "RCU-based Bronson et al AVL-tree" + for "key -> value pointer" map. This specialization stores the pointer to user-allocated values instead of the copy + of the value. When a tree node is removed, the algorithm does not free the value pointer directly, instead, it call + the disposer functor provided by \p Traits template parameter. + + Template arguments: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p Key - key type + - \p T - value type to be stored in tree's nodes. Note, the specialization stores the pointer to user-allocated + value, not the copy. + - \p Traits - tree traits, default is \p bronson_avltree::traits + It is possible to declare option-based tree with \p bronson_avltree::make_traits metafunction + instead of \p Traits template argument. 
+ + @note Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. + */ + template < + typename RCU, + typename Key, + typename T, +# ifdef CDS_DOXYGEN_INVOKED + typename Traits = bronson_avltree::traits +#else + typename Traits +#endif + > + class BronsonAVLTreeMap< cds::urcu::gc, Key, T*, Traits > + { + public: + typedef cds::urcu::gc gc; ///< RCU Garbage collector + typedef Key key_type; ///< type of a key stored in the map + typedef T * mapped_type; ///< type of value stored in the map + typedef Traits traits; ///< Traits template parameter + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less +# else + typedef typename opt::details::make_comparator< key_type, traits >::type key_comparator; +#endif + typedef typename traits::item_counter item_counter; ///< Item counting policy + typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option + typedef typename traits::node_allocator node_allocator_type; ///< allocator for maintaining internal nodes + typedef typename traits::stat stat; ///< internal statistics + typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy + typedef typename traits::back_off back_off; ///< Back-off strategy + typedef typename traits::disposer disposer; ///< Value disposer + typedef typename traits::sync_monitor sync_monitor; ///< @ref cds_sync_monitor "Synchronization monitor" type for node-level locking + + /// Enabled or disabled @ref bronson_avltree::relaxed_insert "relaxed insertion" + static constexpr bool const c_bRelaxedInsert = traits::relaxed_insert; + + /// Group of \p extract_xxx functions does not require external locking + static constexpr const bool c_bExtractLockExternal = false; + +# ifdef CDS_DOXYGEN_INVOKED + /// Returned pointer to 
\p mapped_type of extracted node + typedef cds::urcu::exempt_ptr< gc, T, T, disposer, void > exempt_ptr; +# else + typedef cds::urcu::exempt_ptr< gc, + typename std::remove_pointer::type, + typename std::remove_pointer::type, + disposer, + void + > exempt_ptr; +# endif + + typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock + + protected: + //@cond + typedef bronson_avltree::node< key_type, mapped_type, sync_monitor > node_type; + typedef typename node_type::version_type version_type; + + typedef cds::details::Allocator< node_type, node_allocator_type > cxx_allocator; + typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy; + + enum class find_result + { + not_found, + found, + retry + }; + + struct update_flags + { + enum { + allow_insert = 1, + allow_update = 2, + //allow_remove = 4, + + retry = 1024, + + failed = 0, + result_inserted = allow_insert, + result_updated = allow_update, + result_removed = 4 + }; + }; + + enum node_condition + { + nothing_required = -3, + rebalance_required = -2, + unlink_required = -1 + }; + + enum direction { + left_child = -1, + right_child = 1 + }; + + typedef typename sync_monitor::template scoped_lock node_scoped_lock; + //@endcond + + protected: + //@cond + template + static node_type * alloc_node( K&& key, int nHeight, version_type version, node_type * pParent, node_type * pLeft, node_type * pRight ) + { + return cxx_allocator().New( std::forward( key ), nHeight, version, pParent, pLeft, pRight ); + } + + static void free_node( node_type * pNode ) + { + // Free node without disposer + assert( !pNode->is_valued( memory_model::memory_order_relaxed )); + assert( pNode->m_SyncMonitorInjection.check_free()); + cxx_allocator().Delete( pNode ); + } + + static void free_value( mapped_type pVal ) + { + disposer()(pVal); + } + + static node_type * child( node_type * pNode, int nDir, atomics::memory_order order ) + { + return pNode->child( nDir, order ); + } + + static node_type * 
parent( node_type * pNode, atomics::memory_order order ) + { + return pNode->parent( order ); + } + + // RCU safe disposer + class rcu_disposer + { + node_type * m_pRetiredList; ///< head of retired node list + mapped_type m_pRetiredValue; ///< value retired + + public: + rcu_disposer() + : m_pRetiredList( nullptr ) + , m_pRetiredValue( nullptr ) + {} + + ~rcu_disposer() + { + clean(); + } + + void dispose( node_type * pNode ) + { + assert( !pNode->is_valued( memory_model::memory_order_relaxed )); + pNode->m_pNextRemoved = m_pRetiredList; + m_pRetiredList = pNode; + } + + void dispose_value( mapped_type pVal ) + { + assert( m_pRetiredValue == nullptr ); + m_pRetiredValue = pVal; + } + + private: + struct internal_disposer + { + void operator()( node_type * p ) const + { + free_node( p ); + } + }; + + void clean() + { + assert( !gc::is_locked()); + + // TODO: use RCU::batch_retire + + // Dispose nodes + for ( node_type * p = m_pRetiredList; p; ) { + node_type * pNext = static_cast( p->m_pNextRemoved ); + // Value already disposed + gc::template retire_ptr( p ); + p = pNext; + } + + // Dispose value + if ( m_pRetiredValue ) + gc::template retire_ptr( m_pRetiredValue ); + } + }; + + //@endcond + + protected: + //@cond + typename node_type::base_class m_Root; + node_type * m_pRoot; + item_counter m_ItemCounter; + mutable sync_monitor m_Monitor; + mutable stat m_stat; + //@endcond + + public: + /// Creates empty map + BronsonAVLTreeMap() + : m_pRoot( static_cast( &m_Root )) + {} + + /// Destroys the map + ~BronsonAVLTreeMap() + { + unsafe_clear(); + } + + /// Inserts new node + /** + The \p key_type should be constructible from a value of type \p K. + + RCU \p synchronize() can be called. RCU should not be locked. + + Returns \p true if inserting successful, \p false otherwise. 
+ */ + template + bool insert( K const& key, mapped_type pVal ) + { + return do_update(key, key_comparator(), + [pVal]( node_type * pNode ) -> mapped_type + { + assert( pNode->m_pValue.load( memory_model::memory_order_relaxed ) == nullptr ); + CDS_UNUSED( pNode ); + return pVal; + }, + update_flags::allow_insert + ) == update_flags::result_inserted; + } + + /// Updates the value for \p key + /** + The operation performs inserting or updating the value for \p key with lock-free manner. + If \p bInsert is \p false, only updating of existing node is possible. + + If \p key is not found and inserting is allowed (i.e. \p bInsert is \p true), + then the new node created from \p key will be inserted into the map; note that in this case the \ref key_type should be + constructible from type \p K. + Otherwise, the value for \p key will be changed to \p pVal. + + RCU \p synchronize() method can be called. RCU should not be locked. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if new node has been added or \p false if the node with \p key + already exists. + */ + template + std::pair update( K const& key, mapped_type pVal, bool bInsert = true ) + { + int result = do_update( key, key_comparator(), + [pVal]( node_type * ) -> mapped_type + { + return pVal; + }, + update_flags::allow_update | (bInsert ? update_flags::allow_insert : 0) + ); + return std::make_pair( result != 0, (result & update_flags::result_inserted) != 0 ); + } + + //@endcond + + /// Delete \p key from the map + /** + RCU \p synchronize() method can be called. RCU should not be locked. 
+ + Return \p true if \p key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + return do_remove( + key, + key_comparator(), + []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; } + ); + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \p erase(K const&) + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return do_remove( + key, + cds::opt::details::make_comparator_from_less(), + []( key_type const&, mapped_type pVal, rcu_disposer& disp ) -> bool { disp.dispose_value( pVal ); return true; } + ); + } + + /// Delete \p key from the map + /** + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct functor { + void operator()( key_type const& key, std::remove_pointer::type& val) { ... } + }; + \endcode + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key, Func f ) + { + return do_remove( + key, + key_comparator(), + [&f]( key_type const& k, mapped_type pVal, rcu_disposer& disp ) -> bool { + assert( pVal ); + f( k, *pVal ); + disp.dispose_value(pVal); + return true; + } + ); + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \p erase(K const&, Func) + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. 
+ */ + template + bool erase_with( K const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return do_remove( + key, + cds::opt::details::make_comparator_from_less(), + [&f]( key_type const& k, mapped_type pVal, rcu_disposer& disp ) -> bool { + assert( pVal ); + f( k, *pVal ); + disp.dispose_value(pVal); + return true; + } + ); + } + + /// Extracts a value with minimal key from the map + /** + Returns \p exempt_ptr to the leftmost item. + If the tree is empty, returns empty \p exempt_ptr. + + Note that the function returns only the value for minimal key. + To retrieve its key use \p extract_min( Func ) member function. + + @note Due the concurrent nature of the map, the function extracts nearly minimum key. + It means that the function gets leftmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. + So, the function returns the item with minimum key at the moment of tree traversing. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not free the item. + The deallocator will be implicitly invoked when the returned object is destroyed or when + its \p release() member function is called. + */ + exempt_ptr extract_min() + { + return exempt_ptr(do_extract_min( []( key_type const& ) {})); + } + + /// Extracts minimal key and corresponding value + /** + Returns \p exempt_ptr to the leftmost item. + If the tree is empty, returns empty \p exempt_ptr. + + \p Func functor is used to store minimal key. + \p Func has the following signature: + \code + struct functor { + void operator()( key_type const& key ); + }; + \endcode + If the tree is empty, \p f is not called. + Otherwise, it is called with minimal key, the pointer to corresponding value is returned + as \p exempt_ptr. + + @note Due the concurrent nature of the map, the function extracts nearly minimum key. 
+ It means that the function gets leftmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. + So, the function returns the item with minimum key at the moment of tree traversing. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not free the item. + The deallocator will be implicitly invoked when the returned object is destroyed or when + its \p release() member function is called. + */ + template + exempt_ptr extract_min( Func f ) + { + return exempt_ptr(do_extract_min( [&f]( key_type const& key ) { f(key); })); + } + + /// Extracts minimal key and corresponding value + /** + This function is a shortcut for the following call: + \code + key_type key; + exempt_ptr xp = theTree.extract_min( [&key]( key_type const& k ) { key = k; } ); + \endcode + \p key_type should be copy-assignable. The copy of minimal key + is returned in \p min_key argument. + */ + typename std::enable_if< std::is_copy_assignable::value, exempt_ptr >::type + extract_min_key( key_type& min_key ) + { + return exempt_ptr(do_extract_min( [&min_key]( key_type const& key ) { min_key = key; })); + } + + /// Extracts a value with maximal key from the tree + /** + Returns \p exempt_ptr pointer to the rightmost item. + If the set is empty, returns empty \p exempt_ptr. + + Note that the function returns only the value for maximal key. + To retrieve its key use \p extract_max( Func ) member function. + + @note Due the concurrent nature of the map, the function extracts nearly maximal key. + It means that the function gets rightmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key greater than rightmost item's key. + So, the function returns the item with maximum key at the moment of tree traversing. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not free the item. 
+ The deallocator will be implicitly invoked when the returned object is destroyed or when + its \p release() is called. + */ + exempt_ptr extract_max() + { + return exempt_ptr(do_extract_max( []( key_type const& ) {})); + } + + /// Extracts the maximal key and corresponding value + /** + Returns \p exempt_ptr pointer to the rightmost item. + If the set is empty, returns empty \p exempt_ptr. + + \p Func functor is used to store maximal key. + \p Func has the following signature: + \code + struct functor { + void operator()( key_type const& key ); + }; + \endcode + If the tree is empty, \p f is not called. + Otherwise, it is called with maximal key, the pointer to corresponding value is returned + as \p exempt_ptr. + + @note Due the concurrent nature of the map, the function extracts nearly maximal key. + It means that the function gets rightmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key greater than rightmost item's key. + So, the function returns the item with maximum key at the moment of tree traversing. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not free the item. + The deallocator will be implicitly invoked when the returned object is destroyed or when + its \p release() is called. + */ + template + exempt_ptr extract_max( Func f ) + { + return exempt_ptr(do_extract_max( [&f]( key_type const& key ) { f(key); })); + } + + /// Extracts the maximal key and corresponding value + /** + This function is a shortcut for the following call: + \code + key_type key; + exempt_ptr xp = theTree.extract_max( [&key]( key_type const& k ) { key = k; } ); + \endcode + \p key_type should be copy-assignable. The copy of maximal key + is returned in \p max_key argument. 
+ */ + typename std::enable_if< std::is_copy_assignable::value, exempt_ptr >::type + extract_max_key( key_type& max_key ) + { + return exempt_ptr(do_extract_max( [&max_key]( key_type const& key ) { max_key = key; })); + } + + /// Extracts an item from the map + /** + The function searches an item with key equal to \p key in the tree, + unlinks it, and returns \p exempt_ptr pointer to a value found. + If \p key is not found the function returns an empty \p exempt_ptr. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not destroy the value found. + The disposer will be implicitly invoked when the returned object is destroyed or when + its \p release() member function is called. + */ + template + exempt_ptr extract( Q const& key ) + { + return exempt_ptr(do_extract( key )); + } + + + /// Extracts an item from the map using \p pred for searching + /** + The function is an analog of \p extract(Q const&) + but \p pred is used for key compare. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the tree. + */ + template + exempt_ptr extract_with( Q const& key, Less pred ) + { + return exempt_ptr(do_extract_with( key, pred )); + } + + /// Find the key \p key + /** + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( key_type const& key, std::remove_pointer< mapped_type )::type& item ); + }; + \endcode + where \p item is the item found. + The functor is called under node-level lock. + + The function applies RCU lock internally. + + The function returns \p true if \p key is found, \p false otherwise. 
+ */ + template + bool find( K const& key, Func f ) + { + return do_find( key, key_comparator(), + [&f]( node_type * pNode ) -> bool { + assert( pNode != nullptr ); + mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed ); + if ( pVal ) { + f( pNode->m_key, *pVal ); + return true; + } + return false; + } + ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \p find(K const&, Func) + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool find_with( K const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return do_find( key, cds::opt::details::make_comparator_from_less(), + [&f]( node_type * pNode ) -> bool { + assert( pNode != nullptr ); + mapped_type pVal = pNode->m_pValue.load( memory_model::memory_order_relaxed ); + if ( pVal ) { + f( pNode->m_key, *pVal ); + return true; + } + return false; + } + ); + } + + /// Checks whether the map contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + The function applies RCU lock internally. + */ + template + bool contains( K const& key ) + { + return do_find( key, key_comparator(), []( node_type * ) -> bool { return true; }); + } + + /// Checks whether the map contains \p key using \p pred predicate for searching + /** + The function is similar to contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. 
+ */ + template + bool contains( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return do_find( key, cds::opt::details::make_comparator_from_less(), []( node_type * ) -> bool { return true; } ); + } + + /// Clears the tree (thread safe, not atomic) + /** + The function unlink all items from the tree. + The function is thread safe but not atomic: in multi-threaded environment with parallel insertions + this sequence + \code + set.clear(); + assert( set.empty()); + \endcode + the assertion could be raised. + + For each node the \ref disposer will be called after unlinking. + + RCU \p synchronize method can be called. RCU should not be locked. + */ + void clear() + { + while ( extract_min()); + } + + /// Clears the tree (not thread safe) + /** + This function is not thread safe and may be called only when no other thread deals with the tree. + The function is used in the tree destructor. + */ + void unsafe_clear() + { + clear(); // temp solution + //TODO + } + + /// Checks if the map is empty + bool empty() const + { + return m_Root.m_pRight.load( memory_model::memory_order_relaxed ) == nullptr; + } + + /// Returns item count in the map + /** + Only leaf nodes containing user data are counted. + + The value returned depends on item counter type provided by \p Traits template parameter. + If it is \p atomicity::empty_item_counter this function always returns 0. + + The function is not suitable for checking the tree emptiness, use \p empty() + member function for this purpose. 
+ */ + size_t size() const + { + return m_ItemCounter; + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_stat; + } + + /// Returns reference to \p sync_monitor object + sync_monitor& monitor() + { + return m_Monitor; + } + //@cond + sync_monitor const& monitor() const + { + return m_Monitor; + } + //@endcond + + /// Checks internal consistency (not atomic, not thread-safe) + /** + The debugging function to check internal consistency of the tree. + */ + bool check_consistency() const + { + return check_consistency([]( size_t /*nLevel*/, size_t /*hLeft*/, size_t /*hRight*/ ){} ); + } + + /// Checks internal consistency (not atomic, not thread-safe) + /** + The debugging function to check internal consistency of the tree. + The functor \p Func is called if a violation of internal tree structure + is found: + \code + struct functor { + void operator()( size_t nLevel, size_t hLeft, size_t hRight ); + }; + \endcode + where + - \p nLevel - the level where the violation is found + - \p hLeft - the height of left subtree + - \p hRight - the height of right subtree + + The functor is called for each violation found. 
+ */ + template + bool check_consistency( Func f ) const + { + node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire ); + if ( pChild ) { + size_t nErrors = 0; + do_check_consistency( pChild, 1, f, nErrors ); + return nErrors == 0; + } + return true; + } + + protected: + //@cond + template + size_t do_check_consistency( node_type * pNode, size_t nLevel, Func f, size_t& nErrors ) const + { + if ( pNode ) { + key_comparator cmp; + node_type * pLeft = child( pNode, left_child, memory_model::memory_order_acquire ); + node_type * pRight = child( pNode, right_child, memory_model::memory_order_acquire ); + if ( pLeft && cmp( pLeft->m_key, pNode->m_key ) > 0 ) + ++nErrors; + if ( pRight && cmp( pNode->m_key, pRight->m_key ) > 0 ) + ++nErrors; + + size_t hLeft = do_check_consistency( pLeft, nLevel + 1, f, nErrors ); + size_t hRight = do_check_consistency( pRight, nLevel + 1, f, nErrors ); + + if ( hLeft >= hRight ) { + if ( hLeft - hRight > 1 ) { + f( nLevel, hLeft, hRight ); + ++nErrors; + } + return hLeft; + } + else { + if ( hRight - hLeft > 1 ) { + f( nLevel, hLeft, hRight ); + ++nErrors; + } + return hRight; + } + } + return 0; + } + + template + bool do_find( Q& key, Compare cmp, Func f ) const + { + find_result result; + { + rcu_lock l; + result = try_find( key, cmp, f, m_pRoot, right_child, 0 ); + } + assert( result != find_result::retry ); + return result == find_result::found; + } + + template + int do_update( K const& key, Compare cmp, Func funcUpdate, int nFlags ) + { + check_deadlock_policy::check(); + + rcu_disposer removed_list; + { + rcu_lock l; + return try_update_root( key, cmp, nFlags, funcUpdate, removed_list ); + } + } + + template + bool do_remove( K const& key, Compare cmp, Func func ) + { + // Func must return true if the value was disposed + // or false if the value was extracted + + check_deadlock_policy::check(); + + rcu_disposer removed_list; + { + rcu_lock l; + return try_remove_root( key, cmp, func, removed_list 
); + } + } + + template + mapped_type do_extract_min( Func f ) + { + mapped_type pExtracted = nullptr; + do_extract_minmax( + left_child, + [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; } + ); + return pExtracted; + } + + template + mapped_type do_extract_max( Func f ) + { + mapped_type pExtracted = nullptr; + do_extract_minmax( + right_child, + [&pExtracted, &f]( key_type const& key, mapped_type pVal, rcu_disposer& ) -> bool { f( key ); pExtracted = pVal; return false; } + ); + return pExtracted; + } + + template + void do_extract_minmax( int nDir, Func func ) + { + check_deadlock_policy::check(); + + rcu_disposer removed_list; + { + rcu_lock l; + + while ( true ) { + int result; + + // get right child of root + node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire ); + if ( pChild ) { + version_type nChildVersion = pChild->version( memory_model::memory_order_acquire ); + if ( nChildVersion & node_type::shrinking ) { + m_stat.onRemoveRootWaitShrinking(); + pChild->template wait_until_shrink_completed( memory_model::memory_order_acquire ); + result = update_flags::retry; + } + else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) { + result = try_extract_minmax( nDir, func, m_pRoot, pChild, nChildVersion, removed_list ); + } + else + result = update_flags::retry; + } + else + return; + + if ( result == update_flags::retry ) + m_stat.onRemoveRetry(); + else { + m_stat.onExtract( result == update_flags::result_removed ); + return; + } + } + } + } + + template + mapped_type do_extract( Q const& key ) + { + mapped_type pExtracted = nullptr; + do_remove( + key, + key_comparator(), + [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; } + ); + m_stat.onExtract( pExtracted != nullptr ); + return pExtracted; + } + + template + mapped_type do_extract_with( Q const& key, Less 
pred ) + { + CDS_UNUSED( pred ); + mapped_type pExtracted = nullptr; + do_remove( + key, + cds::opt::details::make_comparator_from_less(), + [&pExtracted]( key_type const&, mapped_type pVal, rcu_disposer& ) -> bool { pExtracted = pVal; return false; } + ); + m_stat.onExtract( pExtracted != nullptr ); + return pExtracted; + } + //@endcond + + private: + //@cond + static int height( node_type * pNode, atomics::memory_order order ) + { + assert( pNode ); + return pNode->m_nHeight.load( order ); + } + static void set_height( node_type * pNode, int h, atomics::memory_order order ) + { + assert( pNode ); + pNode->m_nHeight.store( h, order ); + } + static int height_null( node_type * pNode, atomics::memory_order order ) + { + return pNode ? height( pNode, order ) : 0; + } + + static constexpr int const c_stackSize = 64; + + template + find_result try_find( Q const& key, Compare cmp, Func f, node_type * pNode, int nDir, version_type nVersion ) const + { + assert( gc::is_locked()); + assert( pNode ); + + struct stack_record + { + node_type * pNode; + version_type nVersion; + int nDir; + }; + + stack_record stack[c_stackSize]; + int pos = 0; + stack[0].pNode = pNode; + stack[0].nVersion = nVersion; + stack[0].nDir = nDir; + + while ( pos >= 0 ) { + pNode = stack[pos].pNode; + nVersion = stack[pos].nVersion; + nDir = stack[pos].nDir; + + while ( true ) { + node_type * pChild = child( pNode, nDir, memory_model::memory_order_acquire ); + if ( !pChild ) { + if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) { + --pos; + m_stat.onFindRetry(); + break; // retry + } + m_stat.onFindFailed(); + return find_result::not_found; + } + + int nCmp = cmp( key, pChild->m_key ); + if ( nCmp == 0 ) { + if ( pChild->is_valued( memory_model::memory_order_acquire )) { + // key found + node_scoped_lock l( m_Monitor, *pChild ); + if ( child(pNode, nDir, memory_model::memory_order_acquire) == pChild ) { + if ( pChild->is_valued( memory_model::memory_order_relaxed )) { + if ( f( 
pChild )) { + m_stat.onFindSuccess(); + return find_result::found; + } + } + } + else { + m_stat.onFindRetry(); + continue; + } + } + m_stat.onFindFailed(); + return find_result::not_found; + } + else { + version_type nChildVersion = pChild->version( memory_model::memory_order_acquire ); + if ( nChildVersion & node_type::shrinking ) { + m_stat.onFindWaitShrinking(); + pChild->template wait_until_shrink_completed( memory_model::memory_order_acquire ); + + if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) { + --pos; + m_stat.onFindRetry(); + break; // retry + } + } + else if ( nChildVersion != node_type::unlinked && child( pNode, nDir, memory_model::memory_order_acquire ) == pChild ) + { + if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) { + --pos; + m_stat.onFindRetry(); + break; // retry + } + + ++pos; + assert(pos < c_stackSize); + stack[pos].pNode = pChild; + stack[pos].nVersion = nChildVersion; + stack[pos].nDir = nCmp; + break; // child iteration + } + + if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) { + --pos; + m_stat.onFindRetry(); + break; // retry + } + } + m_stat.onFindRetry(); + } + } + return find_result::retry; + } + + template + int try_update_root( K const& key, Compare cmp, int nFlags, Func funcUpdate, rcu_disposer& disp ) + { + assert( gc::is_locked()); + + while ( true ) { + int result; + + // get right child of root + node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire ); + if ( pChild ) { + version_type nChildVersion = pChild->version( memory_model::memory_order_acquire ); + if ( nChildVersion & node_type::shrinking ) { + m_stat.onUpdateRootWaitShrinking(); + pChild->template wait_until_shrink_completed( memory_model::memory_order_acquire ); + result = update_flags::retry; + } + else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) + result = try_update( key, cmp, nFlags, funcUpdate, pChild, nChildVersion, disp ); + 
else + result = update_flags::retry; + } + else { + // the tree is empty + if ( nFlags & update_flags::allow_insert ) { + // insert into tree as right child of the root + { + node_scoped_lock l( m_Monitor, *m_pRoot ); + if ( child( m_pRoot, right_child, memory_model::memory_order_acquire ) != nullptr ) { + result = update_flags::retry; + continue; + } + + node_type * pNew = alloc_node( key, 1, 0, m_pRoot, nullptr, nullptr ); + mapped_type pVal = funcUpdate( pNew ); + assert( pVal != nullptr ); + pNew->m_pValue.store( pVal, memory_model::memory_order_release ); + + m_pRoot->child( pNew, right_child, memory_model::memory_order_release); + set_height( m_pRoot, 2, memory_model::memory_order_release ); + } + + ++m_ItemCounter; + m_stat.onInsertSuccess(); + return update_flags::result_inserted; + } + + return update_flags::failed; + } + + if ( result != update_flags::retry ) + return result; + } + } + + template + bool try_remove_root( K const& key, Compare cmp, Func func, rcu_disposer& disp ) + { + assert( gc::is_locked()); + + while ( true ) { + int result; + + // get right child of root + node_type * pChild = child( m_pRoot, right_child, memory_model::memory_order_acquire ); + if ( pChild ) { + version_type nChildVersion = pChild->version( memory_model::memory_order_acquire ); + if ( nChildVersion & node_type::shrinking ) { + m_stat.onRemoveRootWaitShrinking(); + pChild->template wait_until_shrink_completed( memory_model::memory_order_acquire ); + result = update_flags::retry; + } + else if ( pChild == child( m_pRoot, right_child, memory_model::memory_order_acquire )) { + result = try_remove( key, cmp, func, m_pRoot, pChild, nChildVersion, disp ); + } + else + result = update_flags::retry; + } + else + return false; + + if ( result == update_flags::retry ) + m_stat.onRemoveRetry(); + else { + m_stat.onRemove( result == update_flags::result_removed ); + return result == update_flags::result_removed; + } + } + } + + template + int try_update( K const& key, Compare cmp, 
int nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp ) + { + assert( gc::is_locked()); + assert( nVersion != node_type::unlinked ); + + struct stack_record + { + node_type * pNode; + version_type nVersion; + }; + + stack_record stack[c_stackSize]; + int pos = 0; + stack[0].pNode = pNode; + stack[0].nVersion = nVersion; + + while ( pos >= 0 ) { + pNode = stack[pos].pNode; + nVersion = stack[pos].nVersion; + + int nCmp = cmp( key, pNode->m_key ); + if ( nCmp == 0 ) { + int result = try_update_node( nFlags, funcUpdate, pNode, nVersion, disp ); + if ( result != update_flags::retry ) + return result; + --pos; + m_stat.onUpdateRetry(); + continue; + } + + while ( true ) { + node_type * pChild = child( pNode, nCmp, memory_model::memory_order_acquire ); + if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) { + --pos; + m_stat.onUpdateRetry(); + break; + } + + if ( pChild == nullptr ) { + // insert new node + if ( nFlags & update_flags::allow_insert ) { + int result = try_insert_node( key, funcUpdate, pNode, nCmp, nVersion, disp ); + if ( result != update_flags::retry ) + return result; + --pos; + m_stat.onUpdateRetry(); + break; + } + else + return update_flags::failed; + } + else { + // update child + version_type nChildVersion = pChild->version( memory_model::memory_order_acquire ); + if ( nChildVersion & node_type::shrinking ) { + m_stat.onUpdateWaitShrinking(); + pChild->template wait_until_shrink_completed( memory_model::memory_order_acquire ); + // retry + } + else if ( pChild == child( pNode, nCmp, memory_model::memory_order_acquire )) { + // this second read is important, because it is protected by nChildVersion + + // validate the read that our caller took to get to node + if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) { + --pos; + m_stat.onUpdateRetry(); + break; // retry + } + // At this point we know that the traversal our parent took to get to node is still valid. 
+ // The recursive implementation will validate the traversal from node to + // child, so just prior to the node nVersion validation both traversals were definitely okay. + // This means that we are no longer vulnerable to node shrinks, and we don't need + // to validate node version any more. + ++pos; + assert( pos < c_stackSize ); + stack[pos].pNode = pChild; + stack[pos].nVersion = nChildVersion; + assert( nChildVersion != node_type::unlinked ); + break; // child iteration + } + m_stat.onUpdateRetry(); + } + } + } + return update_flags::retry; + } + + template + int try_remove( K const& key, Compare cmp, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp ) + { + assert( gc::is_locked()); + assert( nVersion != node_type::unlinked ); + + struct stack_record + { + node_type * pParent; + node_type * pNode; + version_type nVersion; + }; + + stack_record stack[c_stackSize]; + int pos = 0; + stack[0].pParent = pParent; + stack[0].pNode = pNode; + stack[0].nVersion = nVersion; + + while ( pos >= 0 ) { + pParent = stack[pos].pParent; + pNode = stack[pos].pNode; + nVersion = stack[pos].nVersion; + + int nCmp = cmp( key, pNode->m_key ); + if ( nCmp == 0 ) { + int result = try_remove_node( pParent, pNode, nVersion, func, disp ); + if ( result != update_flags::retry ) + return result; + --pos; + m_stat.onRemoveRetry(); + continue; + } + + while ( true ) { + node_type * pChild = child( pNode, nCmp, memory_model::memory_order_acquire ); + if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) { + --pos; + m_stat.onRemoveRetry(); + break; + } + + if ( pChild == nullptr ) + return update_flags::failed; + + // update child + version_type nChildVersion = pChild->version( memory_model::memory_order_acquire ); + if ( nChildVersion & node_type::shrinking ) { + m_stat.onRemoveWaitShrinking(); + pChild->template wait_until_shrink_completed( memory_model::memory_order_acquire ); + // retry + } + else if ( pChild == child( pNode, 
nCmp, memory_model::memory_order_acquire )) { + // this second read is important, because it is protected by nChildVersion + + // validate the read that our caller took to get to node + if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) { + --pos; + m_stat.onRemoveRetry(); + break; + } + + // At this point we know that the traversal our parent took to get to node is still valid. + // The recursive implementation will validate the traversal from node to + // child, so just prior to the node nVersion validation both traversals were definitely okay. + // This means that we are no longer vulnerable to node shrinks, and we don't need + // to validate node version any more. + ++pos; + assert( pos < c_stackSize ); + stack[pos].pParent = pNode; + stack[pos].pNode = pChild; + stack[pos].nVersion = nChildVersion; + break; // child iteration + } + m_stat.onRemoveRetry(); + } + } + return update_flags::retry; + } + + template + int try_extract_minmax( int nDir, Func func, node_type * pParent, node_type * pNode, version_type nVersion, rcu_disposer& disp ) + { + assert( gc::is_locked()); + assert( nVersion != node_type::unlinked ); + + struct stack_record + { + node_type * pParent; + node_type * pNode; + version_type nVersion; + }; + + stack_record stack[c_stackSize]; + int pos = 0; + stack[0].pParent = pParent; + stack[0].pNode = pNode; + stack[0].nVersion = nVersion; + + while ( pos >= 0 ) { + pParent = stack[pos].pParent; + pNode = stack[pos].pNode; + nVersion = stack[pos].nVersion; + + while ( true ) { + int iterDir = nDir; + node_type * pChild = child( pNode, iterDir, memory_model::memory_order_acquire ); + if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) { + --pos; + m_stat.onRemoveRetry(); + break; + } + + if ( !pChild ) { + // Found min/max + if ( pNode->is_valued( memory_model::memory_order_acquire )) { + int result = try_remove_node( pParent, pNode, nVersion, func, disp ); + + if ( result == update_flags::result_removed ) + 
return result; + + --pos; + m_stat.onRemoveRetry(); + break; + } + else { + // check right (for min) or left (for max) child node + iterDir = -iterDir; + pChild = child( pNode, iterDir, memory_model::memory_order_acquire ); + if ( !pChild ) { + --pos; + m_stat.onRemoveRetry(); + break; + } + } + } + + version_type nChildVersion = pChild->version( memory_model::memory_order_acquire ); + if ( nChildVersion & node_type::shrinking ) { + m_stat.onRemoveWaitShrinking(); + pChild->template wait_until_shrink_completed( memory_model::memory_order_acquire ); + // retry + } + else if ( pChild == child( pNode, iterDir, memory_model::memory_order_acquire )) { + // this second read is important, because it is protected by nChildVersion + + // validate the read that our caller took to get to node + if ( pNode->version( memory_model::memory_order_acquire ) != nVersion ) { + --pos; + m_stat.onRemoveRetry(); + break; + } + + // At this point we know that the traversal our parent took to get to node is still valid. + // The recursive implementation will validate the traversal from node to + // child, so just prior to the node nVersion validation both traversals were definitely okay. + // This means that we are no longer vulnerable to node shrinks, and we don't need + // to validate node version any more. 
+ ++pos; + assert( pos < c_stackSize ); + stack[pos].pParent = pNode; + stack[pos].pNode = pChild; + stack[pos].nVersion = nChildVersion; + break; // child iteration + } + m_stat.onRemoveRetry(); + } + } + return update_flags::retry; + } + + template + int try_insert_node( K const& key, Func funcUpdate, node_type * pNode, int nDir, version_type nVersion, rcu_disposer& disp ) + { + node_type * pNew; + + auto fnCreateNode = [&funcUpdate]( node_type * node ) { + mapped_type pVal = funcUpdate( node ); + assert( pVal != nullptr ); + node->m_pValue.store( pVal, memory_model::memory_order_release ); + }; + + constexpr_if ( c_bRelaxedInsert ) { + if ( pNode->version( memory_model::memory_order_acquire ) != nVersion + || child( pNode, nDir, memory_model::memory_order_acquire ) != nullptr ) + { + m_stat.onInsertRetry(); + return update_flags::retry; + } + + fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr )); + } + + node_type * pDamaged; + { + assert( pNode != nullptr ); + node_scoped_lock l( m_Monitor, *pNode ); + + if ( pNode->version( memory_model::memory_order_acquire ) != nVersion + || child( pNode, nDir, memory_model::memory_order_acquire ) != nullptr ) + { + constexpr_if ( c_bRelaxedInsert ) { + mapped_type pVal = pNew->m_pValue.load( memory_model::memory_order_relaxed ); + pNew->m_pValue.store( nullptr, memory_model::memory_order_relaxed ); + free_value( pVal ); + free_node( pNew ); + m_stat.onRelaxedInsertFailed(); + } + + m_stat.onInsertRetry(); + return update_flags::retry; + } + + constexpr_if ( !c_bRelaxedInsert ) + fnCreateNode( pNew = alloc_node( key, 1, 0, pNode, nullptr, nullptr )); + + pNode->child( pNew, nDir, memory_model::memory_order_release ); + pDamaged = fix_height_locked( pNode ); + } + + ++m_ItemCounter; + m_stat.onInsertSuccess(); + + if ( pDamaged ) { + fix_height_and_rebalance( pDamaged, disp ); + m_stat.onInsertRebalanceRequired(); + } + + return update_flags::result_inserted; + } + + template + int try_update_node( int 
nFlags, Func funcUpdate, node_type * pNode, version_type nVersion, rcu_disposer& disp ) + { + mapped_type pOld; + bool bInserted; + assert( pNode != nullptr ); + { + node_scoped_lock l( m_Monitor, *pNode ); + + if ( pNode->version(memory_model::memory_order_acquire) != nVersion ) + return update_flags::retry; + + if ( pNode->is_unlinked( memory_model::memory_order_acquire )) { + m_stat.onUpdateUnlinked(); + return update_flags::retry; + } + + if ( pNode->is_valued( memory_model::memory_order_relaxed ) && !(nFlags & update_flags::allow_update)) { + m_stat.onInsertFailed(); + return update_flags::failed; + } + + pOld = pNode->value( memory_model::memory_order_relaxed ); + bInserted = pOld == nullptr; + mapped_type pVal = funcUpdate( pNode ); + if ( pVal == pOld ) + pOld = nullptr; + else { + assert( pVal != nullptr ); + pNode->m_pValue.store( pVal, memory_model::memory_order_release ); + } + } + + if ( pOld ) { + disp.dispose_value(pOld); + m_stat.onDisposeValue(); + } + + if ( bInserted ) { + ++m_ItemCounter; + m_stat.onInsertSuccess(); + return update_flags::result_inserted; + } + + m_stat.onUpdateSuccess(); + return update_flags::result_updated; + } + + template + int try_remove_node( node_type * pParent, node_type * pNode, version_type nVersion, Func func, rcu_disposer& disp ) + { + assert( pParent != nullptr ); + assert( pNode != nullptr ); + + if ( !pNode->is_valued( memory_model::memory_order_acquire )) + return update_flags::failed; + + if ( child( pNode, left_child, memory_model::memory_order_acquire ) == nullptr + || child( pNode, right_child, memory_model::memory_order_acquire ) == nullptr ) + { + // pNode can be replaced with its child + + node_type * pDamaged; + mapped_type pOld; + { + node_scoped_lock lp( m_Monitor, *pParent ); + if ( pParent->is_unlinked( memory_model::memory_order_acquire ) || parent( pNode, memory_model::memory_order_acquire ) != pParent ) + return update_flags::retry; + + { + node_scoped_lock ln( m_Monitor, *pNode ); + if ( 
pNode->version( memory_model::memory_order_acquire ) != nVersion ) + return update_flags::retry; + + pOld = pNode->value( memory_model::memory_order_relaxed ); + if ( !pOld ) + return update_flags::failed; + + if ( !try_unlink_locked( pParent, pNode, disp )) + return update_flags::retry; + } + pDamaged = fix_height_locked( pParent ); + } + + --m_ItemCounter; + if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside + m_stat.onDisposeValue(); + else + m_stat.onExtractValue(); + + if ( pDamaged ) { + fix_height_and_rebalance( pDamaged, disp ); + m_stat.onRemoveRebalanceRequired(); + } + } + else { + // pNode is an internal with two children + + mapped_type pOld; + { + node_scoped_lock ln( m_Monitor, *pNode ); + pOld = pNode->value( memory_model::memory_order_relaxed ); + if ( pNode->version( memory_model::memory_order_relaxed ) != nVersion ) + return update_flags::retry; + if ( !pOld ) + return update_flags::failed; + + pNode->m_pValue.store( nullptr, memory_model::memory_order_release ); + m_stat.onMakeRoutingNode(); + } + + --m_ItemCounter; + if ( func( pNode->m_key, pOld, disp )) // calls pOld disposer inside + m_stat.onDisposeValue(); + else + m_stat.onExtractValue(); + } + return update_flags::result_removed; + } + + bool try_unlink_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp ) + { + // pParent and pNode must be locked + assert( !pParent->is_unlinked(memory_model::memory_order_relaxed)); + + node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed ); + node_type * pParentRight = child( pParent, right_child, memory_model::memory_order_relaxed ); + if ( pNode != pParentLeft && pNode != pParentRight ) { + // node is no longer a child of parent + return false; + } + + assert( !pNode->is_unlinked( memory_model::memory_order_relaxed )); + assert( pParent == parent( pNode, memory_model::memory_order_relaxed )); + + node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed ); + 
node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed ); + if ( pLeft != nullptr && pRight != nullptr ) { + // splicing is no longer possible + return false; + } + node_type * pSplice = pLeft ? pLeft : pRight; + + if ( pParentLeft == pNode ) + pParent->m_pLeft.store( pSplice, memory_model::memory_order_release ); + else + pParent->m_pRight.store( pSplice, memory_model::memory_order_release ); + + if ( pSplice ) + pSplice->parent( pParent, memory_model::memory_order_release ); + + // Mark the node as unlinked + pNode->version( node_type::unlinked, memory_model::memory_order_release ); + + // The value will be disposed by calling function + pNode->m_pValue.store( nullptr, memory_model::memory_order_release ); + + disp.dispose( pNode ); + m_stat.onDisposeNode(); + + return true; + } + + //@endcond + + private: // rotations + //@cond + int check_node_ordering( node_type* pParent, node_type* pChild ) + { + return key_comparator()( pParent->m_key, pChild->m_key ); + } + + int estimate_node_condition( node_type * pNode ) + { + node_type * pLeft = child( pNode, left_child, memory_model::memory_order_acquire ); + node_type * pRight = child( pNode, right_child, memory_model::memory_order_acquire ); + + if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_acquire )) + return unlink_required; + + int h = height( pNode, memory_model::memory_order_acquire ); + int hL = height_null( pLeft, memory_model::memory_order_acquire ); + int hR = height_null( pRight, memory_model::memory_order_acquire ); + + int hNew = 1 + std::max( hL, hR ); + int nBalance = hL - hR; + + if ( nBalance < -1 || nBalance > 1 ) + return rebalance_required; + + return h != hNew ? 
hNew : nothing_required; + } + + node_type * fix_height( node_type * pNode ) + { + assert( pNode != nullptr ); + node_scoped_lock l( m_Monitor, *pNode ); + return fix_height_locked( pNode ); + } + + node_type * fix_height_locked( node_type * pNode ) + { + // pNode must be locked!!! + int h = estimate_node_condition( pNode ); + switch ( h ) { + case rebalance_required: + case unlink_required: + return pNode; + case nothing_required: + return nullptr; + default: + set_height( pNode, h, memory_model::memory_order_release ); + return parent( pNode, memory_model::memory_order_relaxed ); + } + } + + void fix_height_and_rebalance( node_type * pNode, rcu_disposer& disp ) + { + while ( pNode && parent( pNode, memory_model::memory_order_acquire )) { + int nCond = estimate_node_condition( pNode ); + if ( nCond == nothing_required || pNode->is_unlinked( memory_model::memory_order_acquire )) + return; + + if ( nCond != unlink_required && nCond != rebalance_required ) + pNode = fix_height( pNode ); + else { + node_type * pParent = parent( pNode, memory_model::memory_order_acquire ); + assert( pParent != nullptr ); + { + node_scoped_lock lp( m_Monitor, *pParent ); + if ( !pParent->is_unlinked( memory_model::memory_order_relaxed ) && parent( pNode, memory_model::memory_order_acquire ) == pParent ) { + node_scoped_lock ln( m_Monitor, *pNode ); + pNode = rebalance_locked( pParent, pNode, disp ); + } + } + } + } + } + + node_type * rebalance_locked( node_type * pParent, node_type * pNode, rcu_disposer& disp ) + { + // pParent and pNode should be locked. 
+ // Returns a damaged node, or nullptr if no more rebalancing is necessary + assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent ); + + node_type * pLeft = child( pNode, left_child, memory_model::memory_order_relaxed ); + node_type * pRight = child( pNode, right_child, memory_model::memory_order_relaxed ); + + if ( (pLeft == nullptr || pRight == nullptr) && !pNode->is_valued( memory_model::memory_order_relaxed )) { + if ( try_unlink_locked( pParent, pNode, disp )) + return fix_height_locked( pParent ); + else { + // retry needed for pNode + return pNode; + } + } + + assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode + || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode ); + + int h = height( pNode, memory_model::memory_order_acquire ); + int hL = height_null( pLeft, memory_model::memory_order_acquire ); + int hR = height_null( pRight, memory_model::memory_order_acquire ); + int hNew = 1 + std::max( hL, hR ); + int balance = hL - hR; + + if ( balance > 1 ) + return rebalance_to_right_locked( pParent, pNode, pLeft, hR ); + else if ( balance < -1 ) + return rebalance_to_left_locked( pParent, pNode, pRight, hL ); + else if ( hNew != h ) { + set_height( pNode, hNew, memory_model::memory_order_release ); + + // pParent is already locked + return fix_height_locked( pParent ); + } + else + return nullptr; + } + + node_type * rebalance_to_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR ) + { + assert( parent( pNode, memory_model::memory_order_relaxed ) == pParent ); + assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode + || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode ); + + // pParent and pNode is locked yet + // pNode->pLeft is too large, we will rotate-right. + // If pLeft->pRight is taller than pLeft->pLeft, then we will first rotate-left pLeft. 
+ + assert( pLeft != nullptr ); + node_scoped_lock l( m_Monitor, *pLeft ); + if ( pNode->m_pLeft.load( memory_model::memory_order_relaxed ) != pLeft ) + return pNode; // retry for pNode + + assert( check_node_ordering( pNode, pLeft ) > 0 ); + + int hL = height( pLeft, memory_model::memory_order_acquire ); + if ( hL - hR <= 1 ) + return pNode; // retry + + node_type * pLRight = child( pLeft, right_child, memory_model::memory_order_relaxed ); + int hLR = height_null( pLRight, memory_model::memory_order_acquire ); + node_type * pLLeft = child( pLeft, left_child, memory_model::memory_order_relaxed ); + int hLL = height_null( pLLeft, memory_model::memory_order_acquire ); + + if ( pLRight ) { + { + node_scoped_lock lr( m_Monitor, *pLRight ); + if ( pLeft->m_pRight.load( memory_model::memory_order_acquire ) != pLRight ) + return pNode; // retry + + assert( check_node_ordering( pLeft, pLRight ) < 0 ); + + hLR = height( pLRight, memory_model::memory_order_acquire ); + if ( hLL > hLR ) + return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR ); + + int hLRL = height_null( child( pLRight, left_child, memory_model::memory_order_relaxed ), memory_model::memory_order_acquire ); + int balance = hLL - hLRL; + if ( balance >= -1 && balance <= 1 && !( ( hLL == 0 || hLRL == 0 ) && !pLeft->is_valued( memory_model::memory_order_relaxed ))) { + // nParent.child.left won't be damaged after a double rotation + return rotate_right_over_left_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLRL ); + } + } + + // focus on pLeft, if necessary pNode will be balanced later + return rebalance_to_left_locked( pNode, pLeft, pLRight, hLL ); + } + else if ( hLL > hLR ) { + // rotate right + return rotate_right_locked( pParent, pNode, pLeft, hR, hLL, pLRight, hLR ); + } + + return pNode; // retry + } + + node_type * rebalance_to_left_locked( node_type * pParent, node_type * pNode, node_type * pRight, int hL ) + { + assert( parent( pNode, memory_model::memory_order_relaxed ) == 
pParent ); + assert( child( pParent, left_child, memory_model::memory_order_relaxed ) == pNode + || child( pParent, right_child, memory_model::memory_order_relaxed ) == pNode ); + + // pParent and pNode is locked yet + assert( pRight != nullptr ); + node_scoped_lock l( m_Monitor, *pRight ); + if ( pNode->m_pRight.load( memory_model::memory_order_relaxed ) != pRight ) + return pNode; // retry for pNode + + assert( check_node_ordering( pNode, pRight ) < 0 ); + + int hR = height( pRight, memory_model::memory_order_acquire ); + if ( hL - hR >= -1 ) + return pNode; // retry + + node_type * pRLeft = child( pRight, left_child, memory_model::memory_order_relaxed ); + int hRL = height_null( pRLeft, memory_model::memory_order_acquire ); + node_type * pRRight = child( pRight, right_child, memory_model::memory_order_relaxed ); + int hRR = height_null( pRRight, memory_model::memory_order_acquire ); + + if ( pRLeft ) { + { + node_scoped_lock lrl( m_Monitor, *pRLeft ); + if ( pRight->m_pLeft.load( memory_model::memory_order_acquire ) != pRLeft ) + return pNode; // retry + + assert( check_node_ordering( pRight, pRLeft ) > 0 ); + + hRL = height( pRLeft, memory_model::memory_order_acquire ); + if ( hRR >= hRL ) + return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR ); + + node_type * pRLRight = child( pRLeft, right_child, memory_model::memory_order_relaxed ); + int hRLR = height_null( pRLRight, memory_model::memory_order_acquire ); + int balance = hRR - hRLR; + if ( balance >= -1 && balance <= 1 && !( ( hRR == 0 || hRLR == 0 ) && !pRight->is_valued( memory_model::memory_order_relaxed ))) + return rotate_left_over_right_locked( pParent, pNode, hL, pRight, pRLeft, hRR, hRLR ); + } + + return rebalance_to_right_locked( pNode, pRight, pRLeft, hRR ); + } + else if ( hRR > hRL ) + return rotate_left_locked( pParent, pNode, hL, pRight, pRLeft, hRL, hRR ); + + return pNode; // retry + } + + static void begin_change( node_type * pNode, version_type version ) + { + 
assert(pNode->version(memory_model::memory_order_acquire) == version ); + assert( (version & node_type::shrinking) == 0 ); + pNode->exchange_version( version | node_type::shrinking, memory_model::memory_order_acquire ); + } + static void end_change( node_type * pNode, version_type version ) + { + // Clear shrinking and unlinked flags and increment version + pNode->version( (version | node_type::version_flags) + 1, memory_model::memory_order_release ); + } + + node_type * rotate_right_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLR ) + { + version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed ); + node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed ); + + begin_change( pNode, nodeVersion ); + + pNode->m_pLeft.store( pLRight, memory_model::memory_order_release ); + + if ( pLRight != nullptr ) { + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRight->m_pParent ); + pLRight->parent( pNode, memory_model::memory_order_relaxed ); + assert( check_node_ordering( pNode, pLRight ) > 0 ); + } + + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLeft->m_pRight ); + pLeft->m_pRight.store( pNode, memory_model::memory_order_relaxed ); + + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pNode->m_pParent ); + pNode->parent( pLeft, memory_model::memory_order_relaxed ); + assert( check_node_ordering( pLeft, pNode ) < 0 ); + + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + if ( pParentLeft == pNode ) { + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pLeft ); + pParent->m_pLeft.store( pLeft, memory_model::memory_order_relaxed ); + } + else { + assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode ); + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( 
&pParent->m_pRight ); + pParent->m_pRight.store( pLeft, memory_model::memory_order_relaxed ); + } + + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLeft->m_pParent ); + pLeft->parent( pParent, memory_model::memory_order_relaxed ); + + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + + // fix up heights links + int hNode = 1 + std::max( hLR, hR ); + set_height( pNode, hNode, memory_model::memory_order_release ); + set_height( pLeft, 1 + std::max( hLL, hNode ), memory_model::memory_order_release); + + end_change( pNode, nodeVersion ); + m_stat.onRotateRight(); + + // We have damaged pParent, pNode (now parent.child.right), and pLeft (now + // parent.child). pNode is the deepest. Perform as many fixes as we can + // with the locks we've got. + + // We've already fixed the height for pNode, but it might still be outside + // our allowable balance range. In that case a simple fix_height_locked() + // won't help. + int nodeBalance = hLR - hR; + if ( nodeBalance < -1 || nodeBalance > 1 ) { + // we need another rotation at pNode + m_stat.onRotateAfterRightRotation(); + return pNode; + } + + // we've fixed balance and height damage for pNode, now handle + // extra-routing node damage + if ( (pLRight == nullptr || hR == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) { + // we need to remove pNode and then repair + m_stat.onRemoveAfterRightRotation(); + return pNode; + } + + // we've already fixed the height at pLeft, do we need a rotation here? 
+ int leftBalance = hLL - hNode; + if ( leftBalance < -1 || leftBalance > 1 ) { + m_stat.onRotateAfterRightRotation(); + return pLeft; + } + + // pLeft might also have routing node damage (if pLeft.left was null) + if ( hLL == 0 && !pLeft->is_valued(memory_model::memory_order_acquire)) { + m_stat.onDamageAfterRightRotation(); + return pLeft; + } + + // try to fix the parent height while we've still got the lock + return fix_height_locked( pParent ); + } + + node_type * rotate_left_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRL, int hRR ) + { + version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed ); + node_type * pParentLeft = child( pParent, left_child, memory_model::memory_order_relaxed ); + + begin_change( pNode, nodeVersion ); + + // fix up pNode links, careful to be compatible with concurrent traversal for all but pNode + pNode->m_pRight.store( pRLeft, memory_model::memory_order_release ); + if ( pRLeft != nullptr ) { + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLeft->m_pParent ); + pRLeft->parent( pNode, memory_model::memory_order_relaxed ); + } + + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRight->m_pLeft ); + pRight->m_pLeft.store( pNode, memory_model::memory_order_relaxed ); + + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pNode->m_pParent ); + pNode->parent( pRight, memory_model::memory_order_relaxed ); + assert( check_node_ordering( pRight, pNode ) > 0 ); + + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + if ( pParentLeft == pNode ) { + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pLeft ); + pParent->m_pLeft.store( pRight, memory_model::memory_order_relaxed ); + } + else { + assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode ); + 
CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pRight ); + pParent->m_pRight.store( pRight, memory_model::memory_order_relaxed ); + } + + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRight->m_pParent ); + pRight->parent( pParent, memory_model::memory_order_relaxed ); + + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + + // fix up heights + int hNode = 1 + std::max( hL, hRL ); + set_height( pNode, hNode, memory_model::memory_order_release ); + set_height( pRight, 1 + std::max( hNode, hRR ), memory_model::memory_order_release); + + end_change( pNode, nodeVersion ); + m_stat.onRotateLeft(); + + int nodeBalance = hRL - hL; + if ( nodeBalance < -1 || nodeBalance > 1 ) { + m_stat.onRotateAfterLeftRotation(); + return pNode; + } + + if ( (pRLeft == nullptr || hL == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) { + m_stat.onRemoveAfterLeftRotation(); + return pNode; + } + + int rightBalance = hRR - hNode; + if ( rightBalance < -1 || rightBalance > 1 ) { + m_stat.onRotateAfterLeftRotation(); + return pRight; + } + + if ( hRR == 0 && !pRight->is_valued(memory_model::memory_order_acquire)) { + m_stat.onDamageAfterLeftRotation(); + return pRight; + } + + return fix_height_locked( pParent ); + } + + node_type * rotate_right_over_left_locked( node_type * pParent, node_type * pNode, node_type * pLeft, int hR, int hLL, node_type * pLRight, int hLRL ) + { + version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed ); + version_type leftVersion = pLeft->version( memory_model::memory_order_acquire ); + + node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed ); + node_type * pLRL = child( pLRight, left_child, memory_model::memory_order_acquire ); + node_type * pLRR = child( pLRight, right_child, memory_model::memory_order_acquire ); + int hLRR = height_null( pLRR, memory_model::memory_order_acquire ); + + begin_change( pNode, nodeVersion ); + 
begin_change( pLeft, leftVersion ); + + // fix up pNode links, careful about the order! + pNode->m_pLeft.store( pLRR, memory_model::memory_order_release ); + if ( pLRR != nullptr ) { + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRR->m_pParent ); + pLRR->parent( pNode, memory_model::memory_order_relaxed ); + } + + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLeft->m_pRight ); + pLeft->m_pRight.store( pLRL, memory_model::memory_order_relaxed ); + + if ( pLRL != nullptr ) { + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRL->m_pParent ); + pLRL->parent( pLeft, memory_model::memory_order_relaxed ); + } + + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRight->m_pLeft ); + pLRight->m_pLeft.store( pLeft, memory_model::memory_order_relaxed ); + + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLeft->m_pParent ); + pLeft->parent( pLRight, memory_model::memory_order_relaxed ); + assert( check_node_ordering( pLRight, pLeft ) > 0 ); + + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRight->m_pRight ); + pLRight->m_pRight.store( pNode, memory_model::memory_order_relaxed ); + + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pNode->m_pParent ); + pNode->parent( pLRight, memory_model::memory_order_relaxed ); + assert( check_node_ordering( pLRight, pNode ) < 0 ); + + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + if ( pPL == pNode ) { + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pLeft ); + pParent->m_pLeft.store( pLRight, memory_model::memory_order_relaxed ); + } + else { + assert( child( pParent, right_child, 
memory_model::memory_order_relaxed ) == pNode ); + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pRight ); + pParent->m_pRight.store( pLRight, memory_model::memory_order_relaxed ); + } + + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pLRight->m_pParent ); + pLRight->parent( pParent, memory_model::memory_order_relaxed ); + + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + + // fix up heights + int hNode = 1 + std::max( hLRR, hR ); + set_height( pNode, hNode, memory_model::memory_order_release ); + int hLeft = 1 + std::max( hLL, hLRL ); + set_height( pLeft, hLeft, memory_model::memory_order_release ); + set_height( pLRight, 1 + std::max( hLeft, hNode ), memory_model::memory_order_release); + + end_change( pNode, nodeVersion ); + end_change( pLeft, leftVersion ); + m_stat.onRotateRightOverLeft(); + + // caller should have performed only a single rotation if pLeft was going + // to end up damaged + assert( hLL - hLRL <= 1 && hLRL - hLL <= 1 ); + assert( !((hLL == 0 || pLRL == nullptr) && !pLeft->is_valued( memory_model::memory_order_acquire ))); + + // We have damaged pParent, pLR (now parent.child), and pNode (now + // parent.child.right). pNode is the deepest. Perform as many fixes as we + // can with the locks we've got. + + // We've already fixed the height for pNode, but it might still be outside + // our allowable balance range. In that case a simple fix_height_locked() + // won't help. 
+ int nodeBalance = hLRR - hR; + if ( nodeBalance < -1 || nodeBalance > 1 ) { + // we need another rotation at pNode + m_stat.onRotateAfterRLRotation(); + return pNode; + } + + // pNode might also be damaged by being an unnecessary routing node + if ( (pLRR == nullptr || hR == 0) && !pNode->is_valued( memory_model::memory_order_relaxed )) { + // repair involves splicing out pNode and maybe more rotations + m_stat.onRemoveAfterRLRotation(); + return pNode; + } + + // we've already fixed the height at pLRight, do we need a rotation here? + int balanceLR = hLeft - hNode; + if ( balanceLR < -1 || balanceLR > 1 ) { + m_stat.onRotateAfterRLRotation(); + return pLRight; + } + + // try to fix the parent height while we've still got the lock + return fix_height_locked( pParent ); + } + + node_type * rotate_left_over_right_locked( node_type * pParent, node_type * pNode, int hL, node_type * pRight, node_type * pRLeft, int hRR, int hRLR ) + { + version_type nodeVersion = pNode->version( memory_model::memory_order_relaxed ); + version_type rightVersion = pRight->version( memory_model::memory_order_acquire ); + + node_type * pPL = child( pParent, left_child, memory_model::memory_order_relaxed ); + node_type * pRLL = child( pRLeft, left_child, memory_model::memory_order_acquire ); + node_type * pRLR = child( pRLeft, right_child, memory_model::memory_order_acquire ); + int hRLL = height_null( pRLL, memory_model::memory_order_acquire ); + + begin_change( pNode, nodeVersion ); + begin_change( pRight, rightVersion ); + + // fix up pNode links, careful about the order! 
+ pNode->m_pRight.store( pRLL, memory_model::memory_order_release ); + if ( pRLL != nullptr ) { + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLL->m_pParent ); + pRLL->parent( pNode, memory_model::memory_order_relaxed ); + } + + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRight->m_pLeft ); + pRight->m_pLeft.store( pRLR, memory_model::memory_order_relaxed ); + + if ( pRLR != nullptr ) { + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLR->m_pParent ); + pRLR->parent( pRight, memory_model::memory_order_relaxed ); + } + + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLeft->m_pRight ); + pRLeft->m_pRight.store( pRight, memory_model::memory_order_relaxed ); + + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRight->m_pParent ); + pRight->parent( pRLeft, memory_model::memory_order_relaxed ); + assert( check_node_ordering( pRLeft, pRight ) < 0 ); + + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLeft->m_pLeft ); + pRLeft->m_pLeft.store( pNode, memory_model::memory_order_relaxed ); + + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pNode->m_pParent ); + pNode->parent( pRLeft, memory_model::memory_order_relaxed ); + assert( check_node_ordering( pRLeft, pNode ) > 0 ); + + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + if ( pPL == pNode ) { + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pLeft ); + pParent->m_pLeft.store( pRLeft, memory_model::memory_order_relaxed ); + } + else { + assert( pParent->m_pRight.load( memory_model::memory_order_relaxed ) == pNode ); + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pParent->m_pRight ); + 
pParent->m_pRight.store( pRLeft, memory_model::memory_order_relaxed ); + } + + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pRLeft->m_pParent ); + pRLeft->parent( pParent, memory_model::memory_order_relaxed ); + + atomics::atomic_thread_fence( memory_model::memory_order_acq_rel ); + + // fix up heights + int hNode = 1 + std::max( hL, hRLL ); + set_height( pNode, hNode, memory_model::memory_order_release ); + int hRight = 1 + std::max( hRLR, hRR ); + set_height( pRight, hRight, memory_model::memory_order_release ); + set_height( pRLeft, 1 + std::max( hNode, hRight ), memory_model::memory_order_release); + + end_change( pNode, nodeVersion ); + end_change( pRight, rightVersion ); + m_stat.onRotateLeftOverRight(); + + assert( hRR - hRLR <= 1 && hRLR - hRR <= 1 ); + + int nodeBalance = hRLL - hL; + if ( nodeBalance < -1 || nodeBalance > 1 ) { + m_stat.onRotateAfterLRRotation(); + return pNode; + } + + if ( (pRLL == nullptr || hL == 0) && !pNode->is_valued(memory_model::memory_order_relaxed)) { + m_stat.onRemoveAfterLRRotation(); + return pNode; + } + + int balRL = hRight - hNode; + if ( balRL < -1 || balRL > 1 ) { + m_stat.onRotateAfterLRRotation(); + return pRLeft; + } + + return fix_height_locked( pParent ); + } + + //@endcond + }; +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_IMPL_BRONSON_AVLTREE_MAP_RCU_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/ellen_bintree_map.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/ellen_bintree_map.h new file mode 100644 index 0000000..e9dd938 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/ellen_bintree_map.h @@ -0,0 +1,581 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: 
http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_IMPL_ELLEN_BINTREE_MAP_H +#define CDSLIB_CONTAINER_IMPL_ELLEN_BINTREE_MAP_H + +#include +#include +#include +#include + +namespace cds { namespace container { + + /// Map based on Ellen's et al binary search tree + /** @ingroup cds_nonintrusive_map + @ingroup cds_nonintrusive_tree + @anchor cds_container_EllenBinTreeMap + + Source: + - [2010] F.Ellen, P.Fatourou, E.Ruppert, F.van Breugel "Non-blocking Binary Search Tree" + + %EllenBinTreeMap is an unbalanced leaf-oriented binary search tree that implements the map + abstract data type. Nodes maintains child pointers but not parent pointers. 
+ Every internal node has exactly two children, and all data of type std::pair + currently in the tree are stored in the leaves. Internal nodes of the tree are used to direct \p find + operation along the path to the correct leaf. The keys (of \p Key type) stored in internal nodes + may or may not be in the map. + Unlike \ref cds_container_EllenBinTreeSet "EllenBinTreeSet" keys are not a part of \p T type. + The map can be represented as a set containing std::pair< Key const, T> values. + + Due to \p extract_min and \p extract_max member functions the \p %EllenBinTreeMap can act as + a priority queue. In this case you should provide unique compound key, for example, + the priority value plus some uniformly distributed random value. + + @warning Recall the tree is unbalanced. The complexity of operations is O(log N) + for uniformly distributed random keys, but in the worst case the complexity is O(N). + + @note In the current implementation we do not use helping technique described in the original paper. + In Hazard Pointer schema helping is too complicated and does not give any observable benefits. + Instead of helping, when a thread encounters a concurrent operation it just spins waiting for + the operation done. Such solution allows greatly simplify implementation of the tree. + + Template arguments : + - \p GC - safe memory reclamation (i.e. light-weight garbage collector) type, like \p cds::gc::HP, \p cds::gc::DHP + - \p Key - key type. Should be default-constructible + - \p T - value type to be stored in tree's leaf nodes. + - \p Traits - map traits, default is \p ellen_bintree::traits + It is possible to declare option-based tree with \p ellen_bintree::make_map_traits metafunction + instead of \p Traits template argument. + + @note Do not include header file directly. 
+ There are header file for each GC type: + - - for Hazard Pointer GC cds::gc::HP + - - for Dynamic Hazard Pointer GC cds::gc::DHP + - - for RCU GC + (see \ref cds_container_EllenBinTreeMap_rcu "RCU-based EllenBinTreeMap") + */ + template < + class GC, + typename Key, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = ellen_bintree::traits +#else + class Traits +#endif + > + class EllenBinTreeMap +#ifdef CDS_DOXYGEN_INVOKED + : public cds::intrusive::EllenBinTree< GC, Key, T, Traits > +#else + : public ellen_bintree::details::make_ellen_bintree_map< GC, Key, T, Traits >::type +#endif + { + //@cond + typedef ellen_bintree::details::make_ellen_bintree_map< GC, Key, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + public: + typedef GC gc; ///< Garbage collector + typedef Key key_type; ///< type of a key stored in the map + typedef T mapped_type; ///< type of value stored in the map + typedef std::pair< key_type const, mapped_type > value_type ; ///< Key-value pair stored in leaf node of the mp + typedef Traits traits; ///< Map traits + + static_assert( std::is_default_constructible::value, "Key should be default constructible type"); + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less +# else + typedef typename maker::intrusive_traits::compare key_comparator; +# endif + typedef typename base_class::item_counter item_counter; ///< Item counting policy + typedef typename base_class::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model + typedef typename base_class::node_allocator node_allocator_type; ///< allocator for maintaining internal node + typedef typename base_class::stat stat; ///< internal statistics type + typedef typename traits::copy_policy copy_policy; ///< key copy policy + typedef typename traits::back_off back_off; ///< Back-off strategy + + typedef typename traits::allocator allocator_type; 
///< Allocator for leaf nodes + typedef typename base_class::node_allocator node_allocator; ///< Internal node allocator + typedef typename base_class::update_desc_allocator update_desc_allocator; ///< Update descriptor allocator + + protected: + //@cond + typedef typename base_class::value_type leaf_node; + typedef typename base_class::internal_node internal_node; + typedef typename base_class::update_desc update_desc; + + typedef typename maker::cxx_leaf_node_allocator cxx_leaf_node_allocator; + + typedef std::unique_ptr< leaf_node, typename maker::leaf_deallocator > scoped_node_ptr; + //@endcond + + public: + /// Guarded pointer + typedef typename gc::template guarded_ptr< leaf_node, value_type, details::guarded_ptr_cast_set > guarded_ptr; + + public: + /// Default constructor + EllenBinTreeMap() + : base_class() + {} + + /// Clears the map + ~EllenBinTreeMap() + {} + + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from a value of type \p K. + In trivial case, \p K is equal to \ref key_type. + - The \ref mapped_type should be default-constructible. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( K const& key ) + { + return insert_with( key, [](value_type&){} ); + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the map. + + Preconditions: + - The \p key_type should be constructible from \p key of type \p K. + - The \p value_type should be constructible from \p val of type \p V. + + Returns \p true if \p val is inserted into the map, \p false otherwise. 
+ */ + template + bool insert( K const& key, V const& val ) + { + scoped_node_ptr pNode( cxx_leaf_node_allocator().New( key, val )); + if ( base_class::insert( *pNode )) + { + pNode.release(); + return true; + } + return false; + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the map's item inserted: + - item.first is a const reference to item's key that cannot be changed. + - item.second is a reference to item's value that may be changed. + + The key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p value_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + */ + template + bool insert_with( const K& key, Func func ) + { + scoped_node_ptr pNode( cxx_leaf_node_allocator().New( key )); + if ( base_class::insert( *pNode, [&func]( leaf_node& item ) { func( item.m_Value ); } )) { + pNode.release(); + return true; + } + return false; + } + + /// For key \p key inserts data of type \p value_type created in-place from \p args + /** + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool emplace( K&& key, Args&&... args ) + { + scoped_node_ptr pNode( cxx_leaf_node_allocator().MoveNew( key_type( std::forward(key)), mapped_type( std::forward(args)... 
))); + if ( base_class::insert( *pNode )) { + pNode.release(); + return true; + } + return false; + } + + /// Updates the node + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val is not found in the map, then \p val is inserted iff \p bAllowInsert is \p true. + Otherwise, the functor \p func is called with item found. + The functor \p func signature is: + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the map + + The functor may change any fields of the \p item.second that is \ref mapped_type; + however, \p func must guarantee that during changing no any other modifications + could be made on this item by concurrent threads. + + Returns std::pair where \p first is \p true if operation is successful, + i.e. the node has been inserted or updated, + \p second is \p true if new item has been added or \p false if the item with \p key + already exists. 
+ + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + std::pair update( K const& key, Func func, bool bAllowInsert = true ) + { + scoped_node_ptr pNode( cxx_leaf_node_allocator().New( key )); + std::pair res = base_class::update( *pNode, + [&func](bool bNew, leaf_node& item, leaf_node const& ){ func( bNew, item.m_Value ); }, + bAllowInsert + ); + if ( res.first && res.second ) + pNode.release(); + return res; + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( K const& key, Func func ) + { + return update( key, func, true ); + } + //@endcond + + /// Delete \p key from the map + /**\anchor cds_nonintrusive_EllenBinTreeMap_erase_val + + Return \p true if \p key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + return base_class::erase(key); + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_erase_val "erase(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::erase_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >()); + } + + /// Delete \p key from the map + /** \anchor cds_nonintrusive_EllenBinTreeMap_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type& item) { ... 
} + }; + \endcode + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key, Func f ) + { + return base_class::erase( key, [&f]( leaf_node& node) { f( node.m_Value ); } ); + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_erase_func "erase(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( K const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::erase_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >(), + [&f]( leaf_node& node) { f( node.m_Value ); } ); + } + + /// Extracts an item with minimal key from the map + /** + If the map is not empty, the function returns an guarded pointer to minimum value. + If the map is empty, the function returns an empty \p guarded_ptr. + + @note Due the concurrent nature of the map, the function extracts nearly minimum key. + It means that the function gets leftmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. + So, the function returns the item with minimum key at the moment of tree traversing. + + The guarded pointer prevents deallocation of returned item, + see \p cds::gc::guarded_ptr for explanation. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + */ + guarded_ptr extract_min() + { + return guarded_ptr( base_class::extract_min_()); + } + + /// Extracts an item with maximal key from the map + /** + If the map is not empty, the function returns a guarded pointer to maximal value. + If the map is empty, the function returns an empty \p guarded_ptr. 
+ + @note Due the concurrent nature of the map, the function extracts nearly maximal key. + It means that the function gets rightmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key great than leftmost item's key. + So, the function returns the item with maximum key at the moment of tree traversing. + + The guarded pointer prevents deallocation of returned item, + see \p cds::gc::guarded_ptr for explanation. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + */ + guarded_ptr extract_max() + { + return guarded_ptr( base_class::extract_max_()); + } + + /// Extracts an item from the tree + /** \anchor cds_nonintrusive_EllenBinTreeMap_extract + The function searches an item with key equal to \p key in the tree, + unlinks it, and returns a guarded pointer to an item found. + If the item is not found the function returns an empty \p guarded_ptr. + + The guarded pointer prevents deallocation of returned item, + see \p cds::gc::guarded_ptr for explanation. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + */ + template + guarded_ptr extract( Q const& key ) + { + return guarded_ptr( base_class::extract_( key )); + } + + /// Extracts an item from the map using \p pred for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_extract "extract(Q const&)" + but \p pred is used for key compare. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the map. 
+ */ + template + guarded_ptr extract_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return guarded_ptr( base_class::extract_with_( key, + cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >())); + } + + /// Find the key \p key + /** \anchor cds_nonintrusive_EllenBinTreeMap_find_cfunc + + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + The functor may change \p item.second. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( K const& key, Func f ) + { + return base_class::find( key, [&f](leaf_node& item, K const& ) { f( item.m_Value );}); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_find_cfunc "find(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool find_with( K const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::find_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >(), + [&f](leaf_node& item, K const& ) { f( item.m_Value );}); + } + + /// Checks whether the map contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. 
+ */ + template + bool contains( K const& key ) + { + return base_class::contains( key ); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( K const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the map contains \p key using \p pred predicate for searching + /** + The function is similar to contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool contains( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::contains( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( K const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Finds \p key and returns the item found + /** @anchor cds_nonintrusive_EllenBinTreeMap_get + The function searches the item with key equal to \p key and returns the item found as a guarded pointer. + If \p key is not foudn the function returns an empty \p guarded_ptr. + + The guarded pointer prevents deallocation of returned item, + see \p cds::gc::guarded_ptr for explanation. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + */ + template + guarded_ptr get( Q const& key ) + { + return guarded_ptr( base_class::get_( key )); + } + + /// Finds \p key with predicate \p pred and returns the item found + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeMap_get "get(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the map. 
+ */ + template + guarded_ptr get_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return guarded_ptr( base_class::get_with_( key, + cds::details::predicate_wrapper< leaf_node, Less, typename maker::key_accessor >())); + } + + /// Clears the map (not atomic) + void clear() + { + base_class::clear(); + } + + /// Checks if the map is empty + /** + Emptiness is checked by item counting: if item count is zero then the map is empty. + */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the set + /** + Only leaf nodes containing user data are counted. + + The value returned depends on item counter type provided by \p Traits template parameter. + If it is \p atomicity::empty_item_counter this function always returns 0. + + The function is not suitable for checking the tree emptiness, use \p empty() + member function for this purpose. + */ + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Checks internal consistency (not atomic, not thread-safe) + /** + The debugging function to check internal consistency of the tree. 
+ */ + bool check_consistency() const + { + return base_class::check_consistency(); + } + + }; +}} // namespace cds::container + +#endif //#ifndef CDSLIB_CONTAINER_IMPL_ELLEN_BINTREE_MAP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/ellen_bintree_set.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/ellen_bintree_set.h new file mode 100644 index 0000000..5f93f46 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/ellen_bintree_set.h @@ -0,0 +1,629 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_IMPL_ELLEN_BINTREE_SET_H +#define CDSLIB_CONTAINER_IMPL_ELLEN_BINTREE_SET_H + +#include +#include +#include +#include + +namespace cds { namespace container { + + /// Set based on Ellen's et al binary search tree + /** @ingroup cds_nonintrusive_set + @ingroup cds_nonintrusive_tree + @anchor cds_container_EllenBinTreeSet + + Source: + - [2010] F.Ellen, P.Fatourou, E.Ruppert, F.van Breugel "Non-blocking Binary Search Tree" + + %EllenBinTreeSet is an unbalanced leaf-oriented binary search tree that implements the set + abstract data type. Nodes maintains child pointers but not parent pointers. + Every internal node has exactly two children, and all data of type \p T currently in + the tree are stored in the leaves. Internal nodes of the tree are used to direct \p find + operation along the path to the correct leaf. The keys (of \p Key type) stored in internal nodes + may or may not be in the set. \p Key type is a subset of \p T type. + There should be exactly defined a key extracting functor for converting object of type \p T to + object of type \p Key. + + Due to \p extract_min and \p extract_max member functions the \p %EllenBinTreeSet can act as + a priority queue. In this case you should provide unique compound key, for example, + the priority value plus some uniformly distributed random value. + + @warning Recall the tree is unbalanced. 
The complexity of operations is O(log N) + for uniformly distributed random keys, but in the worst case the complexity is O(N). + + @note In the current implementation we do not use helping technique described in the original paper. + In Hazard Pointer schema helping is too complicated and does not give any observable benefits. + Instead of helping, when a thread encounters a concurrent operation it just spins waiting for + the operation done. Such solution allows greatly simplify the implementation of tree. + + Template arguments : + - \p GC - safe memory reclamation (i.e. light-weight garbage collector) type, like \p cds::gc::HP, cds::gc::DHP + - \p Key - key type, a subset of \p T + - \p T - type to be stored in tree's leaf nodes. + - \p Traits - set traits, default is \p ellen_bintree::traits + It is possible to declare option-based tree with \p ellen_bintree::make_set_traits metafunction + instead of \p Traits template argument. + + @note Do not include header file directly. + There are header file for each GC type: + - - for \p cds::gc::HP + - - for \p cds::gc::DHP + - - for RCU GC + (see \ref cds_container_EllenBinTreeSet_rcu "RCU-based EllenBinTreeSet") + + @anchor cds_container_EllenBinTreeSet_less + Predicate requirements + + \p Traits::less, \p Traits::compare and other predicates using with member fuctions should accept at least parameters + of type \p T and \p Key in any combination. + For example, for \p Foo struct with \p std::string key field the appropiate \p less functor is: + \code + struct Foo + { + std::string m_strKey; + ... 
+ }; + + struct less { + bool operator()( Foo const& v1, Foo const& v2 ) const + { return v1.m_strKey < v2.m_strKey ; } + + bool operator()( Foo const& v, std::string const& s ) const + { return v.m_strKey < s ; } + + bool operator()( std::string const& s, Foo const& v ) const + { return s < v.m_strKey ; } + + // Support comparing std::string and char const * + bool operator()( std::string const& s, char const * p ) const + { return s.compare(p) < 0 ; } + + bool operator()( Foo const& v, char const * p ) const + { return v.m_strKey.compare(p) < 0 ; } + + bool operator()( char const * p, std::string const& s ) const + { return s.compare(p) > 0; } + + bool operator()( char const * p, Foo const& v ) const + { return v.m_strKey.compare(p) > 0; } + }; + \endcode + */ + template < + class GC, + typename Key, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = ellen_bintree::traits +#else + class Traits +#endif + > + class EllenBinTreeSet +#ifdef CDS_DOXYGEN_INVOKED + : public cds::intrusive::EllenBinTree< GC, Key, T, Traits > +#else + : public ellen_bintree::details::make_ellen_bintree_set< GC, Key, T, Traits >::type +#endif + { + //@cond + typedef ellen_bintree::details::make_ellen_bintree_set< GC, Key, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + typedef GC gc; ///< Garbage collector + typedef Key key_type; ///< type of a key to be stored in internal nodes; key is a part of \p value_type + typedef T value_type; ///< type of value to be stored in the binary tree + typedef Traits traits; ///< Traits template parameter + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator ; ///< key compare functor based on opt::compare and opt::less option setter. +# else + typedef typename maker::intrusive_traits::compare key_comparator; +# endif + typedef typename base_class::item_counter item_counter; ///< Item counting policy used + typedef typename base_class::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename base_class::stat stat; ///< internal statistics type + typedef typename traits::key_extractor key_extractor; ///< key extracting functor + typedef typename traits::back_off back_off; ///< Back-off strategy + + typedef typename traits::allocator allocator_type; ///< Allocator for leaf nodes + typedef typename base_class::node_allocator node_allocator; ///< Internal node allocator + typedef typename base_class::update_desc_allocator update_desc_allocator; ///< Update descriptor allocator + + protected: + //@cond + typedef typename maker::cxx_leaf_node_allocator cxx_leaf_node_allocator; + typedef typename base_class::value_type leaf_node; + typedef typename base_class::internal_node internal_node; + + typedef std::unique_ptr< leaf_node, typename maker::leaf_deallocator > scoped_node_ptr; + //@endcond + + public: + /// Guarded pointer + typedef typename gc::template guarded_ptr< leaf_node, value_type, details::guarded_ptr_cast_set > guarded_ptr; + + public: + /// Default constructor + EllenBinTreeSet() + : base_class() + {} + + /// Clears the set + ~EllenBinTreeSet() + {} + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the set. + + The type \p Q should contain at least the complete key for the node. + The object of \ref value_type should be constructible from a value of type \p Q. + In trivial case, \p Q is equal to \ref value_type. + + Returns \p true if \p val is inserted into the set, \p false otherwise. 
+ */ + template + bool insert( Q const& val ) + { + scoped_node_ptr sp( cxx_leaf_node_allocator().New( val )); + if ( base_class::insert( *sp.get())) { + sp.release(); + return true; + } + return false; + } + + /// Inserts new node + /** + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-fields of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this set's item by concurrent threads. + The user-defined functor is called only if the inserting is success. + */ + template + bool insert( Q const& val, Func f ) + { + scoped_node_ptr sp( cxx_leaf_node_allocator().New( val )); + if ( base_class::insert( *sp.get(), [&f]( leaf_node& v ) { f( v.m_Value ); } )) { + sp.release(); + return true; + } + return false; + } + + /// Updates the node + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val is not found in the set, then \p val is inserted into the set + iff \p bAllowInsert is \p true. + Otherwise, the functor \p func is called with item found. + The functor \p func signature is: + \code + struct my_functor { + void operator()( bool bNew, value_type& item, const Q& val ); + }; + \endcode + with arguments: + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p key passed into the \p %update() function + + The functor can change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + Returns std::pair where \p first is \p true if operation is successful, + i.e. 
the node has been inserted or updated, + \p second is \p true if new item has been added or \p false if the item with \p key + already exists. + + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + std::pair update( const Q& val, Func func, bool bAllowInsert = true ) + { + scoped_node_ptr sp( cxx_leaf_node_allocator().New( val )); + std::pair bRes = base_class::update( *sp, + [&func, &val](bool bNew, leaf_node& node, leaf_node&){ func( bNew, node.m_Value, val ); }, + bAllowInsert ); + if ( bRes.first && bRes.second ) + sp.release(); + return bRes; + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( const Q& val, Func func ) + { + return update( val, func, true ); + } + //@endcond + + /// Inserts data of type \p value_type created in-place from \p args + /** + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool emplace( Args&&... args ) + { + scoped_node_ptr sp( cxx_leaf_node_allocator().MoveNew( std::forward(args)... )); + if ( base_class::insert( *sp.get())) { + sp.release(); + return true; + } + return false; + } + + /// Delete \p key from the set + /** \anchor cds_nonintrusive_EllenBinTreeSet_erase_val + + The item comparator should be able to compare the type \p value_type + and the type \p Q. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key ) + { + return base_class::erase( key ); + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_erase_val "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. 
+ */ + template + bool erase_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::erase_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >()); + } + + /// Delete \p key from the set + /** \anchor cds_nonintrusive_EllenBinTreeSet_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type const& val); + }; + \endcode + + Since the key of MichaelHashSet's \p value_type is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The list item comparator should be able to compare the type \p T of list item + and the type \p Q. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key, Func f ) + { + return base_class::erase( key, [&f]( leaf_node const& node) { f( node.m_Value ); } ); + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::erase_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >(), + [&f]( leaf_node const& node) { f( node.m_Value ); } ); + } + + /// Extracts an item with minimal key from the set + /** + If the set is not empty, the function returns a guarded pointer to minimum value. + If the set is empty, the function returns an empty \p guarded_ptr. + + @note Due the concurrent nature of the set, the function extracts nearly minimum key. 
+ It means that the function gets leftmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. + So, the function returns the item with minimum key at the moment of tree traversing. + + The guarded pointer prevents deallocation of returned item, + see \p cds::gc::guarded_ptr for explanation. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + */ + guarded_ptr extract_min() + { + return guarded_ptr( base_class::extract_min_()); + } + + /// Extracts an item with maximal key from the set + /** + If the set is not empty, the function returns a guarded pointer to maximal value. + If the set is empty, the function returns an empty \p guarded_ptr. + + @note Due the concurrent nature of the set, the function extracts nearly maximal key. + It means that the function gets rightmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key great than leftmost item's key. + So, the function returns the item with maximum key at the moment of tree traversing. + + The guarded pointer prevents deallocation of returned item, + see \p cds::gc::guarded_ptr for explanation. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + */ + guarded_ptr extract_max() + { + return guarded_ptr( base_class::extract_max_()); + } + + /// Extracts an item from the tree + /** \anchor cds_nonintrusive_EllenBinTreeSet_extract + The function searches an item with key equal to \p key in the tree, + unlinks it, and returns an guarded pointer to it. + If the item is not found the function returns an empty \p guarded_ptr. + + The guarded pointer prevents deallocation of returned item, + see \p cds::gc::guarded_ptr for explanation. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. 
+ */ + template + guarded_ptr extract( Q const& key ) + { + return base_class::extract_( key ); + } + + /// Extracts an item from the set using \p pred for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_extract "extract(Q const&)" + but \p pred is used for key compare. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + guarded_ptr extract_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::extract_with_( key, + cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >()); + } + + /// Find the key \p key + /** + @anchor cds_nonintrusive_EllenBinTreeSet_find_func + + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. + + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that may be not the same as \p value_type. + + The function returns \p true if \p key is found, \p false otherwise. 
+ */ + template + bool find( Q& key, Func f ) + { + return base_class::find( key, [&f]( leaf_node& node, Q& v ) { f( node.m_Value, v ); }); + } + //@cond + template + bool find( Q const& key, Func f ) + { + return base_class::find( key, [&f]( leaf_node& node, Q const& v ) { f( node.m_Value, v ); } ); + } + //@endcond + + /// Finds the key \p key using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::find_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >(), + [&f]( leaf_node& node, Q& v ) { f( node.m_Value, v ); } ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::find_with( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >(), + [&f]( leaf_node& node, Q const& v ) { f( node.m_Value, v ); } ); + } + //@endcond + + /// Checks whether the set contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + */ + template + bool contains( Q const & key ) + { + return base_class::contains( key ); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( Q const & key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the set contains \p key using \p pred predicate for searching + /** + The function is similar to contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. 
+ */ + template + bool contains( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::contains( key, cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Finds \p key and returns the item found + /** @anchor cds_nonintrusive_EllenBinTreeSet_get + The function searches the item with key equal to \p key and returns the item found as an guarded pointer. + The function returns \p true if \p key is found, \p false otherwise. + + The guarded pointer prevents deallocation of returned item, + see \p cds::gc::guarded_ptr for explanation. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + */ + template + guarded_ptr get( Q const& key ) + { + return base_class::get_( key ); + } + + /// Finds \p key with predicate \p pred and returns the item found + /** + The function is an analog of \ref cds_nonintrusive_EllenBinTreeSet_get "get(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + guarded_ptr get_with( Q const& key, Less pred ) + { + CDS_UNUSED(pred); + return base_class::get_with_( key, + cds::details::predicate_wrapper< leaf_node, Less, typename maker::value_accessor >()); + } + + /// Clears the set (not atomic) + /** + The function unlink all items from the tree. + The function is not atomic, thus, in multi-threaded environment with parallel insertions + this sequence + \code + set.clear(); + assert( set.empty()); + \endcode + the assertion could be raised. + + For each leaf the \ref disposer will be called after unlinking. 
+ */ + void clear() + { + base_class::clear(); + } + + /// Checks if the set is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the set + /** + Only leaf nodes containing user data are counted. + + The value returned depends on item counter type provided by \p Traits template parameter. + If it is \p atomicity::empty_item_counter this function always returns 0. + + The function is not suitable for checking the tree emptiness, use \p empty() + member function for this purpose. + */ + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Checks internal consistency (not atomic, not thread-safe) + /** + The debugging function to check internal consistency of the tree. + */ + bool check_consistency() const + { + return base_class::check_consistency(); + } + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_IMPL_ELLEN_BINTREE_SET_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/feldman_hashmap.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/feldman_hashmap.h new file mode 100644 index 0000000..c521528 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/feldman_hashmap.h @@ -0,0 +1,847 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_IMPL_FELDMAN_HASHMAP_H +#define CDSLIB_CONTAINER_IMPL_FELDMAN_HASHMAP_H + +#include +#include +#include + +namespace cds { namespace container { + + /// Hash map based on multi-level array + /** @ingroup cds_nonintrusive_map + @anchor cds_container_FeldmanHashMap_hp + + Source: + - [2013] Steven Feldman, Pierre LaBorde, Damian Dechev "Concurrent Multi-level Arrays: + Wait-free Extensible Hash Maps" + + [From the paper] The hardest problem encountered while developing a parallel hash map is how to perform + a global resize, the process of redistributing the elements in a hash map that occurs when adding new + buckets. The negative impact of blocking synchronization is multiplied during a global resize, because all + threads will be forced to wait on the thread that is performing the involved process of resizing the hash map + and redistributing the elements. \p %FeldmanHashSet implementation avoids global resizes through new array + allocation. 
By allowing concurrent expansion this structure is free from the overhead of an explicit resize, + which facilitates concurrent operations. + + The presented design includes dynamic hashing, the use of sub-arrays within the hash map data structure; + which, in combination with perfect hashing, means that each element has a unique final, as well as current, position. + It is important to note that the perfect hash function required by our hash map is trivial to realize as + any hash function that permutes the bits of the key is suitable. This is possible because of our approach + to the hash function; we require that it produces hash values that are equal in size to that of the key. + We know that if we expand the hash map a fixed number of times there can be no collision as duplicate keys + are not provided for in the standard semantics of a hash map. + + \p %FeldmanHashMap is a multi-level array which has an internal structure similar to a tree: + @image html feldman_hashset.png + The multi-level array differs from a tree in that each position on the tree could hold an array of nodes or a single node. + A position that holds a single node is a \p dataNode which holds the hash value of a key and the value that is associated + with that key; it is a simple struct holding two variables. A \p dataNode in the multi-level array could be marked. + A \p markedDataNode refers to a pointer to a \p dataNode that has been bitmarked at the least significant bit (LSB) + of the pointer to the node. This signifies that this \p dataNode is contended. An expansion must occur at this node; + any thread that sees this \p markedDataNode will try to replace it with an \p arrayNode; which is a position that holds + an array of nodes. The pointer to an \p arrayNode is differentiated from that of a pointer to a \p dataNode by a bitmark + on the second-least significant bit. 
+ + \p %FeldmanHashMap multi-level array is similar to a tree in that we keep a pointer to the root, which is a memory array + called \p head. The length of the \p head memory array is unique, whereas every other \p arrayNode has a uniform length; + a normal \p arrayNode has a fixed power-of-two length equal to the binary logarithm of a variable called \p arrayLength. + The maximum depth of the tree, \p maxDepth, is the maximum number of pointers that must be followed to reach any node. + We define \p currentDepth as the number of memory arrays that we need to traverse to reach the \p arrayNode on which + we need to operate; this is initially one, because of \p head. + + That approach to the structure of the hash map uses an extensible hashing scheme; the hash value is treated as a bit + string and rehash incrementally. + + @note Two important things you should keep in mind when you're using \p %FeldmanHashMap: + - all keys is converted to fixed-size bit-string by hash functor provided. + You can use variable-length keys, for example, \p std::string as a key for \p %FeldmanHashMap, + but real key in the map will be fixed-size hash values of your keys. + For the strings you may use well-known hashing algorithms like SHA1, SHA2, + MurmurHash, CityHash + or its successor FarmHash and so on, which + converts variable-length strings to fixed-length bit-strings, and such hash values will be the keys in \p %FeldmanHashMap. + If your key is fixed-sized the hash functor is optional, see \p feldman_hashmap::traits::hash for explanation and examples. + - \p %FeldmanHashMap uses a perfect hashing. It means that if two different keys, for example, of type \p std::string, + have identical hash then you cannot insert both that keys in the map. \p %FeldmanHashMap does not maintain the key, + it maintains its fixed-size hash value. + + The map supports @ref cds_container_FeldmanHashMap_iterators "bidirectional thread-safe iterators". 
+ + Template parameters: + - \p GC - safe memory reclamation schema. Can be \p gc::HP, \p gc::DHP or one of \ref cds_urcu_type "RCU type" + - \p Key - a key type to be stored in the map + - \p T - a value type to be stored in the map + - \p Traits - type traits, the structure based on \p feldman_hashmap::traits or result of \p feldman_hashmap::make_traits metafunction. + + There are several specializations of \p %FeldmanHashMap for each \p GC. You should include: + - for \p gc::HP garbage collector + - for \p gc::DHP garbage collector + - for \ref cds_container_FeldmanHashMap_rcu "RCU type". RCU specialization + has a slightly different interface. + */ + template < + class GC + ,typename Key + ,typename T +#ifdef CDS_DOXYGEN_INVOKED + ,class Traits = feldman_hashmap::traits +#else + ,class Traits +#endif + > + class FeldmanHashMap +#ifdef CDS_DOXYGEN_INVOKED + : protected cds::intrusive::FeldmanHashSet< GC, std::pair, Traits > +#else + : protected cds::container::details::make_feldman_hashmap< GC, Key, T, Traits >::type +#endif + { + //@cond + typedef cds::container::details::make_feldman_hashmap< GC, Key, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + public: + typedef GC gc; ///< Garbage collector + typedef Key key_type; ///< Key type + typedef T mapped_type; ///< Mapped type + typedef std::pair< key_type const, mapped_type> value_type; ///< Key-value pair to be stored in the map + typedef Traits traits; ///< Map traits +#ifdef CDS_DOXYGEN_INVOKED + typedef typename traits::hash hasher; ///< Hash functor, see \p feldman_hashmap::traits::hash +#else + typedef typename maker::hasher hasher; +#endif + + typedef typename maker::hash_type hash_type; ///< Hash type deduced from \p hasher return type + typedef typename base_class::hash_comparator hash_comparator; ///< hash compare functor based on \p Traits::compare and \p Traits::less + + typedef typename traits::item_counter item_counter; ///< Item counter type + typedef typename 
traits::allocator allocator; ///< Element allocator + typedef typename traits::node_allocator node_allocator; ///< Array node allocator + typedef typename traits::memory_model memory_model; ///< Memory model + typedef typename traits::back_off back_off; ///< Backoff strategy + typedef typename traits::stat stat; ///< Internal statistics type + + /// Count of hazard pointers required + static constexpr size_t const c_nHazardPtrCount = base_class::c_nHazardPtrCount; + + /// The size of \p hash_type in bytes, see \p feldman_hashmap::traits::hash_size for explanation + static constexpr size_t const c_hash_size = base_class::c_hash_size; + + /// Level statistics + typedef feldman_hashmap::level_statistics level_statistics; + + protected: + //@cond + typedef typename maker::node_type node_type; + typedef typename maker::cxx_node_allocator cxx_node_allocator; + typedef std::unique_ptr< node_type, typename maker::node_disposer > scoped_node_ptr; + + template + class bidirectional_iterator: public base_class::iterator_base + { + friend class FeldmanHashMap; + typedef typename base_class::iterator_base iterator_base; + + protected: + static constexpr bool const c_bConstantIterator = IsConst; + + public: + typedef typename std::conditional< IsConst, value_type const*, value_type*>::type value_ptr; ///< Value pointer + typedef typename std::conditional< IsConst, value_type const&, value_type&>::type value_ref; ///< Value reference + + public: + bidirectional_iterator() noexcept + {} + + bidirectional_iterator( bidirectional_iterator const& rhs ) noexcept + : iterator_base( rhs ) + {} + + bidirectional_iterator& operator=( bidirectional_iterator const& rhs ) noexcept + { + iterator_base::operator=( rhs ); + return *this; + } + + bidirectional_iterator& operator++() + { + iterator_base::operator++(); + return *this; + } + + bidirectional_iterator& operator--() + { + iterator_base::operator--(); + return *this; + } + + value_ptr operator ->() const noexcept + { + node_type * p = 
iterator_base::pointer(); + return p ? &p->m_Value : nullptr; + } + + value_ref operator *() const noexcept + { + node_type * p = iterator_base::pointer(); + assert( p ); + return p->m_Value; + } + + void release() + { + iterator_base::release(); + } + + template + bool operator ==( bidirectional_iterator const& rhs ) const noexcept + { + return iterator_base::operator==( rhs ); + } + + template + bool operator !=( bidirectional_iterator const& rhs ) const noexcept + { + return !( *this == rhs ); + } + + public: // for internal use only! + bidirectional_iterator( base_class const& set, typename base_class::array_node * pNode, size_t idx, bool ) + : iterator_base( set, pNode, idx, false ) + {} + + bidirectional_iterator( base_class const& set, typename base_class::array_node * pNode, size_t idx ) + : iterator_base( set, pNode, idx ) + {} + }; + + /// Reverse bidirectional iterator + template + class reverse_bidirectional_iterator : public base_class::iterator_base + { + friend class FeldmanHashMap; + typedef typename base_class::iterator_base iterator_base; + + public: + typedef typename std::conditional< IsConst, value_type const*, value_type*>::type value_ptr; ///< Value pointer + typedef typename std::conditional< IsConst, value_type const&, value_type&>::type value_ref; ///< Value reference + + public: + reverse_bidirectional_iterator() noexcept + : iterator_base() + {} + + reverse_bidirectional_iterator( reverse_bidirectional_iterator const& rhs ) noexcept + : iterator_base( rhs ) + {} + + reverse_bidirectional_iterator& operator=( reverse_bidirectional_iterator const& rhs) noexcept + { + iterator_base::operator=( rhs ); + return *this; + } + + reverse_bidirectional_iterator& operator++() + { + iterator_base::operator--(); + return *this; + } + + reverse_bidirectional_iterator& operator--() + { + iterator_base::operator++(); + return *this; + } + + value_ptr operator ->() const noexcept + { + node_type * p = iterator_base::pointer(); + return p ? 
&p->m_Value : nullptr; + } + + value_ref operator *() const noexcept + { + node_type * p = iterator_base::pointer(); + assert( p ); + return p->m_Value; + } + + void release() + { + iterator_base::release(); + } + + template + bool operator ==( reverse_bidirectional_iterator const& rhs ) const + { + return iterator_base::operator==( rhs ); + } + + template + bool operator !=( reverse_bidirectional_iterator const& rhs ) + { + return !( *this == rhs ); + } + + public: // for internal use only! + reverse_bidirectional_iterator( base_class const& set, typename base_class::array_node * pNode, size_t idx, bool ) + : iterator_base( set, pNode, idx, false ) + {} + + reverse_bidirectional_iterator( base_class const& set, typename base_class::array_node * pNode, size_t idx ) + : iterator_base( set, pNode, idx, false ) + { + iterator_base::backward(); + } + }; + + //@endcond + + public: +#ifdef CDS_DOXYGEN_INVOKED + /// Guarded pointer + typedef typename gc::template guarded_ptr< value_type > guarded_ptr; +#else + typedef typename gc::template guarded_ptr< node_type, value_type, cds::container::details::guarded_ptr_cast_set > guarded_ptr; +#endif + +#ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined iterator; ///< @ref cds_container_FeldmanHashMap_iterators "bidirectional iterator" type + typedef implementation_defined const_iterator; ///< @ref cds_container_FeldmanHashMap_iterators "bidirectional const iterator" type + typedef implementation_defined reverse_iterator; ///< @ref cds_container_FeldmanHashMap_iterators "bidirectional reverse iterator" type + typedef implementation_defined const_reverse_iterator; ///< @ref cds_container_FeldmanHashMap_iterators "bidirectional reverse const iterator" type +#else + typedef bidirectional_iterator iterator; + typedef bidirectional_iterator const_iterator; + typedef reverse_bidirectional_iterator reverse_iterator; + typedef reverse_bidirectional_iterator const_reverse_iterator; +#endif + + protected: + //@cond + hasher 
m_Hasher; + //@endcond + + public: + /// Creates empty map + /** + @param head_bits - 2head_bits specifies the size of head array, minimum is 4. + @param array_bits - 2array_bits specifies the size of array node, minimum is 2. + + Equation for \p head_bits and \p array_bits: + \code + c_hash_size * 8 == head_bits + N * array_bits + \endcode + where \p N is multi-level array depth. + */ + FeldmanHashMap( size_t head_bits = 8, size_t array_bits = 4 ) + : base_class( head_bits, array_bits ) + {} + + /// Destructs the map and frees all data + ~FeldmanHashMap() + {} + + /// Inserts new element with key and default value + /** + The function creates an element with \p key and default value, and then inserts the node created into the map. + + Preconditions: + - The \p key_type should be constructible from a value of type \p K. + In trivial case, \p K is equal to \p key_type. + - The \p mapped_type should be default-constructible. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( K&& key ) + { + scoped_node_ptr sp( cxx_node_allocator().MoveNew( m_Hasher, std::forward( key ))); + if ( base_class::insert( *sp )) { + sp.release(); + return true; + } + return false; + } + + /// Inserts new element + /** + The function creates a node with copy of \p val value + and then inserts the node created into the map. + + Preconditions: + - The \p key_type should be constructible from \p key of type \p K. + - The \p value_type should be constructible from \p val of type \p V. + + Returns \p true if \p val is inserted into the map, \p false otherwise. 
+ */ + template + bool insert( K&& key, V&& val ) + { + scoped_node_ptr sp( cxx_node_allocator().MoveNew( m_Hasher, std::forward( key ), std::forward( val ))); + if ( base_class::insert( *sp )) { + sp.release(); + return true; + } + return false; + } + + /// Inserts new element and initialize it by a functor + /** + This function inserts new element with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the map's item inserted: + - item.first is a const reference to item's key that cannot be changed. + - item.second is a reference to item's value that may be changed. + + \p key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p value_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + */ + template + bool insert_with( K&& key, Func func ) + { + scoped_node_ptr sp( cxx_node_allocator().MoveNew( m_Hasher, std::forward( key ))); + if ( base_class::insert( *sp, [&func]( node_type& item ) { func( item.m_Value ); } )) { + sp.release(); + return true; + } + return false; + } + + /// For key \p key inserts data of type \p value_type created in-place from std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool emplace( K&& key, Args&&... args ) + { + scoped_node_ptr sp( cxx_node_allocator().MoveNew( m_Hasher, std::forward( key ), std::forward( args )... 
)); + if ( base_class::insert( *sp )) { + sp.release(); + return true; + } + return false; + } + + /// Updates data by \p key + /** + The operation performs inserting or replacing the element with lock-free manner. + + If the \p key not found in the map, then the new item created from \p key + will be inserted into the map iff \p bInsert is \p true + (note that in this case the \ref key_type should be constructible from type \p K). + Otherwise, if \p key is found, it is replaced with a new item created from + \p key. + The functor \p Func signature: + \code + struct my_functor { + void operator()( value_type& item, value_type * old ); + }; + \endcode + where: + - \p item - item of the map + - \p old - old item of the map, if \p nullptr - the new item was inserted + + The functor may change any fields of the \p item.second. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if new item has been added or \p false if \p key already exists. + + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + std::pair update( K&& key, Func func, bool bInsert = true ) + { + scoped_node_ptr sp( cxx_node_allocator().MoveNew( m_Hasher, std::forward( key ))); + std::pair result = base_class::do_update( *sp, + [&func]( node_type& node, node_type * old ) { func( node.m_Value, old ? &old->m_Value : nullptr );}, + bInsert ); + if ( result.first ) + sp.release(); + return result; + } + + /// Delete \p key from the map + /** + \p key_type must be constructible from value of type \p K. + The function deletes the element with hash value equal to hash( key_type( key )) + + Return \p true if \p key is found and deleted, \p false otherwise. 
+ */ + template + bool erase( K const& key ) + { + return base_class::erase( m_Hasher( key_type( key ))); + } + + /// Delete \p key from the map + /** + The function searches an item with hash value equal to hash( key_type( key )), + calls \p f functor and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()( value_type& item ) { ... } + }; + \endcode + where \p item is the element found. + + \p key_type must be constructible from value of type \p K. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key, Func f ) + { + return base_class::erase( m_Hasher( key_type( key )), [&f]( node_type& node) { f( node.m_Value ); } ); + } + + /// Deletes the element pointed by iterator \p iter + /** + Returns \p true if the operation is successful, \p false otherwise. + + The function does not invalidate the iterator, it remains valid and can be used for further traversing. + */ + bool erase_at( iterator const& iter ) + { + return base_class::do_erase_at( iter ); + } + //@cond + bool erase_at( reverse_iterator const& iter ) + { + return base_class::do_erase_at( iter ); + } + bool erase_at( const_iterator const& iter ) + { + return base_class::do_erase_at( iter ); + } + bool erase_at( const_reverse_iterator const& iter ) + { + return base_class::do_erase_at( iter ); + } + //@endcond + + /// Extracts the item from the map with specified \p key + /** + The function searches an item with key equal to hash( key_type( key )) in the map, + unlinks it from the map, and returns a guarded pointer to the item found. + If \p key is not found the function returns an empty guarded pointer. + + The item extracted is freed automatically by garbage collector \p GC + when returned \p guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. 
+ + Usage: + \code + typedef cds::container::FeldmanHashMap< cds::gc::HP, int, foo, my_traits > map_type; + map_type theMap; + // ... + { + map_type::guarded_ptr gp( theMap.extract( 5 )); + if ( gp ) { + // Deal with gp + // ... + } + // Destructor of gp releases internal HP guard and frees the pointer + } + \endcode + */ + template + guarded_ptr extract( K const& key ) + { + return base_class::extract( m_Hasher( key_type( key ))); + } + + /// Checks whether the map contains \p key + /** + The function searches the item by its hash that is equal to hash( key_type( key )) + and returns \p true if it is found, or \p false otherwise. + */ + template + bool contains( K const& key ) + { + return base_class::contains( m_Hasher( key_type( key ))); + } + + /// Find the key \p key + /** + + The function searches the item by its hash that is equal to hash( key_type( key )) + and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + The functor may change \p item.second. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( K const& key, Func f ) + { + return base_class::find( m_Hasher( key_type( key )), [&f]( node_type& node ) { f( node.m_Value );}); + } + + /// Finds the key \p key and return the item found + /** + The function searches the item with a hash equal to hash( key_type( key )) + and returns a guarded pointer to the item found. + If \p key is not found the function returns an empty guarded pointer. + + It is safe when a concurrent thread erases the item returned as \p guarded_ptr. + In this case the item will be freed later by garbage collector \p GC automatically + when \p guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. 
+ + Usage: + \code + typedef cds::container::FeldmanHashMap< cds::gc::HP, int, foo, my_traits > map_type; + map_type theMap; + // ... + { + map_type::guarded_ptr gp( theMap.get( 5 )); + if ( gp ) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + */ + template + guarded_ptr get( K const& key ) + { + return base_class::get( m_Hasher( key_type( key ))); + } + + /// Clears the map (non-atomic) + /** + The function unlink all data node from the map. + The function is not atomic but is thread-safe. + After \p %clear() the map may not be empty because another threads may insert items. + */ + void clear() + { + base_class::clear(); + } + + /// Checks if the map is empty + /** + Emptiness is checked by item counting: if item count is zero then the map is empty. + Thus, the correct item counting feature is an important part of the map implementation. + */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the map + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Returns the size of head node + size_t head_size() const + { + return base_class::head_size(); + } + + /// Returns the size of the array node + size_t array_node_size() const + { + return base_class::array_node_size(); + } + + /// Collects tree level statistics into \p stat + /** + The function traverses the set and collects statistics for each level of the tree + into \p feldman_hashset::level_statistics struct. The element of \p stat[i] + represents statistics for level \p i, level 0 is head array. + The function is thread-safe and may be called in multi-threaded environment. + + Result can be useful for estimating efficiency of hash functor you use. 
+ */ + void get_level_statistics( std::vector< feldman_hashmap::level_statistics>& stat) const + { + base_class::get_level_statistics( stat ); + } + + public: + ///@name Thread-safe iterators + /** @anchor cds_container_FeldmanHashMap_iterators + The map supports thread-safe iterators: you may iterate over the map in multi-threaded environment. + It is guaranteed that the iterators will remain valid even if another thread deletes the node the iterator points to: + Hazard Pointer embedded into the iterator object protects the node from physical reclamation. + + @note Since the iterator object contains hazard pointer that is a thread-local resource, + the iterator should not be passed to another thread. + + Each iterator object supports the common interface: + - dereference operators: + @code + value_type [const] * operator ->() noexcept + value_type [const] & operator *() noexcept + @endcode + - pre-increment and pre-decrement. Post-operators is not supported + - equality operators == and !=. + Iterators are equal iff they point to the same cell of the same array node. + Note that for two iterators \p it1 and \p it2, the conditon it1 == it2 + does not entail &(*it1) == &(*it2) + - helper member function \p release() that clears internal hazard pointer. + After \p release() the iterator points to \p nullptr but it still remain valid: further iterating is possible. + + During iteration you may safely erase any item from the set; + @ref erase_at() function call doesn't invalidate any iterator. + If some iterator points to the item to be erased, that item is not deleted immediately + but only after that iterator will be advanced forward or backward. + + @note It is possible the item can be iterated more that once, for example, if an iterator points to the item + in array node that is being splitted. 
+ */ + ///@{ + /// Returns an iterator to the beginning of the map + iterator begin() + { + return base_class::template init_begin(); + } + + /// Returns an const iterator to the beginning of the map + const_iterator begin() const + { + return base_class::template init_begin(); + } + + /// Returns an const iterator to the beginning of the map + const_iterator cbegin() + { + return base_class::template init_begin(); + } + + /// Returns an iterator to the element following the last element of the map. This element acts as a placeholder; attempting to access it results in undefined behavior. + iterator end() + { + return base_class::template init_end(); + } + + /// Returns a const iterator to the element following the last element of the map. This element acts as a placeholder; attempting to access it results in undefined behavior. + const_iterator end() const + { + return base_class::template init_end(); + } + + /// Returns a const iterator to the element following the last element of the map. This element acts as a placeholder; attempting to access it results in undefined behavior. + const_iterator cend() + { + return base_class::template init_end(); + } + + /// Returns a reverse iterator to the first element of the reversed map + reverse_iterator rbegin() + { + return base_class::template init_rbegin(); + } + + /// Returns a const reverse iterator to the first element of the reversed map + const_reverse_iterator rbegin() const + { + return base_class::template init_rbegin(); + } + + /// Returns a const reverse iterator to the first element of the reversed map + const_reverse_iterator crbegin() + { + return base_class::template init_rbegin(); + } + + /// Returns a reverse iterator to the element following the last element of the reversed map + /** + It corresponds to the element preceding the first element of the non-reversed container. + This element acts as a placeholder, attempting to access it results in undefined behavior. 
+ */ + reverse_iterator rend() + { + return base_class::template init_rend(); + } + + /// Returns a const reverse iterator to the element following the last element of the reversed map + /** + It corresponds to the element preceding the first element of the non-reversed container. + This element acts as a placeholder, attempting to access it results in undefined behavior. + */ + const_reverse_iterator rend() const + { + return base_class::template init_rend(); + } + + /// Returns a const reverse iterator to the element following the last element of the reversed map + /** + It corresponds to the element preceding the first element of the non-reversed container. + This element acts as a placeholder, attempting to access it results in undefined behavior. + */ + const_reverse_iterator crend() + { + return base_class::template init_rend(); + } + ///@} + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_IMPL_FELDMAN_HASHMAP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/feldman_hashset.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/feldman_hashset.h new file mode 100644 index 0000000..b0c96f9 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/feldman_hashset.h @@ -0,0 +1,610 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_IMPL_FELDMAN_HASHSET_H +#define CDSLIB_CONTAINER_IMPL_FELDMAN_HASHSET_H + +#include +#include + +namespace cds { namespace container { + + /// Hash set based on multi-level array + /** @ingroup cds_nonintrusive_set + @anchor cds_container_FeldmanHashSet_hp + + Source: + - [2013] Steven Feldman, Pierre LaBorde, Damian Dechev "Concurrent Multi-level Arrays: + Wait-free Extensible Hash Maps" + + [From the paper] The hardest problem encountered while developing a parallel hash map is how to perform + a global resize, the process of redistributing the elements in a hash map that occurs when adding new + buckets. The negative impact of blocking synchronization is multiplied during a global resize, because all + threads will be forced to wait on the thread that is performing the involved process of resizing the hash map + and redistributing the elements. \p %FeldmanHashSet implementation avoids global resizes through new array + allocation. 
By allowing concurrent expansion this structure is free from the overhead of an explicit resize, + which facilitates concurrent operations. + + The presented design includes dynamic hashing, the use of sub-arrays within the hash map data structure; + which, in combination with perfect hashing, means that each element has a unique final, as well as current, position. + It is important to note that the perfect hash function required by our hash map is trivial to realize as + any hash function that permutes the bits of the key is suitable. This is possible because of our approach + to the hash function; we require that it produces hash values that are equal in size to that of the key. + We know that if we expand the hash map a fixed number of times there can be no collision as duplicate keys + are not provided for in the standard semantics of a hash map. + + \p %FeldmanHashSet is a multi-level array which has an internal structure similar to a tree: + @image html feldman_hashset.png + The multi-level array differs from a tree in that each position on the tree could hold an array of nodes or a single node. + A position that holds a single node is a \p dataNode which holds the hash value of a key and the value that is associated + with that key; it is a simple struct holding two variables. A \p dataNode in the multi-level array could be marked. + A \p markedDataNode refers to a pointer to a \p dataNode that has been bitmarked at the least significant bit (LSB) + of the pointer to the node. This signifies that this \p dataNode is contended. An expansion must occur at this node; + any thread that sees this \p markedDataNode will try to replace it with an \p arrayNode; which is a position that holds + an array of nodes. The pointer to an \p arrayNode is differentiated from that of a pointer to a \p dataNode by a bitmark + on the second-least significant bit. 
+ + \p %FeldmanHashSet multi-level array is similar to a tree in that we keep a pointer to the root, which is a memory array + called \p head. The length of the \p head memory array is unique, whereas every other \p arrayNode has a uniform length; + a normal \p arrayNode has a fixed power-of-two length equal to the binary logarithm of a variable called \p arrayLength. + The maximum depth of the tree, \p maxDepth, is the maximum number of pointers that must be followed to reach any node. + We define \p currentDepth as the number of memory arrays that we need to traverse to reach the \p arrayNode on which + we need to operate; this is initially one, because of \p head. + + That approach to the structure of the hash set uses an extensible hashing scheme; the hash value is treated as a bit + string and rehash incrementally. + + @note Two important things you should keep in mind when you're using \p %FeldmanHashSet: + - all keys must be fixed-size. It means that you cannot use \p std::string as a key for \p %FeldmanHashSet. + Instead, for the strings you should use well-known hashing algorithms like SHA1, SHA2, + MurmurHash, CityHash + or its successor FarmHash and so on, which + converts variable-length strings to fixed-length bit-strings, and use that hash as a key in \p %FeldmanHashSet. + - \p %FeldmanHashSet uses a perfect hashing. It means that if two different keys, for example, of type \p std::string, + have identical hash then you cannot insert both that keys in the set. \p %FeldmanHashSet does not maintain the key, + it maintains its fixed-size hash value. + + The set supports @ref cds_container_FeldmanHashSet_iterators "bidirectional thread-safe iterators". + + Template parameters: + - \p GC - safe memory reclamation schema. 
Can be \p gc::HP, \p gc::DHP or one of \ref cds_urcu_type "RCU type" + - \p T - a value type to be stored in the set + - \p Traits - type traits, the structure based on \p feldman_hashset::traits or result of \p feldman_hashset::make_traits metafunction. + \p Traits is the mandatory argument because it has one mandatory type - an @ref feldman_hashset::traits::hash_accessor "accessor" + to hash value of \p T. The set algorithm does not calculate that hash value. + + There are several specializations of \p %FeldmanHashSet for each \p GC. You should include: + - for \p gc::HP garbage collector + - for \p gc::DHP garbage collector + - for \ref cds_intrusive_FeldmanHashSet_rcu "RCU type". RCU specialization + has a slightly different interface. + */ + template < + class GC + , typename T +#ifdef CDS_DOXYGEN_INVOKED + , class Traits = feldman_hashset::traits +#else + , class Traits +#endif + > + class FeldmanHashSet +#ifdef CDS_DOXYGEN_INVOKED + : protected cds::intrusive::FeldmanHashSet< GC, T, Traits > +#else + : protected cds::container::details::make_feldman_hashset< GC, T, Traits >::type +#endif + { + //@cond + typedef cds::container::details::make_feldman_hashset< GC, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + typedef GC gc; ///< Garbage collector + typedef T value_type; ///< type of value stored in the set + typedef Traits traits; ///< Traits template parameter, see \p feldman_hashset::traits + + typedef typename base_class::hash_accessor hash_accessor; ///< Hash accessor functor + typedef typename base_class::hash_type hash_type; ///< Hash type deduced from \p hash_accessor return type + typedef typename base_class::hash_comparator hash_comparator; ///< hash compare functor based on \p opt::compare and \p opt::less option setter + + typedef typename traits::item_counter item_counter; ///< Item counter type + typedef typename traits::allocator allocator; ///< Element allocator + typedef typename traits::node_allocator 
node_allocator; ///< Array node allocator + typedef typename traits::memory_model memory_model; ///< Memory model + typedef typename traits::back_off back_off; ///< Backoff strategy + typedef typename traits::stat stat; ///< Internal statistics type + + typedef typename gc::template guarded_ptr< value_type > guarded_ptr; ///< Guarded pointer + + /// Count of hazard pointers required + static constexpr size_t const c_nHazardPtrCount = base_class::c_nHazardPtrCount; + + /// The size of \p hash_type in bytes, see \p feldman_hashset::traits::hash_size for explanation + static constexpr size_t const c_hash_size = base_class::c_hash_size; + + /// Level statistics + typedef feldman_hashset::level_statistics level_statistics; + + protected: + //@cond + typedef typename maker::cxx_node_allocator cxx_node_allocator; + typedef std::unique_ptr< value_type, typename maker::node_disposer > scoped_node_ptr; + //@endcond + + public: + ///@name Thread-safe iterators + ///@{ + /// Bidirectional iterator + /** @anchor cds_container_FeldmanHashSet_iterators + The set supports thread-safe iterators: you may iterate over the set in multi-threaded environment. + It is guaranteed that the iterators will remain valid even if another thread deletes the node the iterator points to: + Hazard Pointer embedded into the iterator object protects the node from physical reclamation. + + @note Since the iterator object contains hazard pointer that is a thread-local resource, + the iterator should not be passed to another thread. + + Each iterator object supports the following interface: + - dereference operators: + @code + value_type [const] * operator ->() noexcept + value_type [const] & operator *() noexcept + @endcode + - pre-increment and pre-decrement. Post-operators is not supported + - equality operators == and !=. + Iterators are equal iff they point to the same cell of the same array node. 
+ Note that for two iterators \p it1 and \p it2, the conditon it1 == it2 + does not entail &(*it1) == &(*it2) + - helper member function \p release() that clears internal hazard pointer. + After \p release() the iterator points to \p nullptr but it still remain valid: further iterating is possible. + + During iteration you may safely erase any item from the set; + @ref erase_at() function call doesn't invalidate any iterator. + If some iterator points to the item to be erased, that item is not deleted immediately + but only after that iterator will be advanced forward or backward. + + @note It is possible the item can be iterated more that once, for example, if an iterator points to the item + in array node that is being splitted. + */ + typedef typename base_class::iterator iterator; + typedef typename base_class::const_iterator const_iterator; ///< @ref cds_container_FeldmanHashSet_iterators "bidirectional const iterator" type + typedef typename base_class::reverse_iterator reverse_iterator; ///< @ref cds_container_FeldmanHashSet_iterators "bidirectional reverse iterator" type + typedef typename base_class::const_reverse_iterator const_reverse_iterator; ///< @ref cds_container_FeldmanHashSet_iterators "bidirectional reverse const iterator" type + + /// Returns an iterator to the beginning of the set + iterator begin() + { + return base_class::begin(); + } + + /// Returns an const iterator to the beginning of the set + const_iterator begin() const + { + return base_class::begin(); + } + + /// Returns an const iterator to the beginning of the set + const_iterator cbegin() + { + return base_class::cbegin(); + } + + /// Returns an iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior. + iterator end() + { + return base_class::end(); + } + + /// Returns a const iterator to the element following the last element of the set. 
This element acts as a placeholder; attempting to access it results in undefined behavior. + const_iterator end() const + { + return base_class::end(); + } + + /// Returns a const iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior. + const_iterator cend() + { + return base_class::cend(); + } + + /// Returns a reverse iterator to the first element of the reversed set + reverse_iterator rbegin() + { + return base_class::rbegin(); + } + + /// Returns a const reverse iterator to the first element of the reversed set + const_reverse_iterator rbegin() const + { + return base_class::rbegin(); + } + + /// Returns a const reverse iterator to the first element of the reversed set + const_reverse_iterator crbegin() + { + return base_class::crbegin(); + } + + /// Returns a reverse iterator to the element following the last element of the reversed set + /** + It corresponds to the element preceding the first element of the non-reversed container. + This element acts as a placeholder, attempting to access it results in undefined behavior. + */ + reverse_iterator rend() + { + return base_class::rend(); + } + + /// Returns a const reverse iterator to the element following the last element of the reversed set + /** + It corresponds to the element preceding the first element of the non-reversed container. + This element acts as a placeholder, attempting to access it results in undefined behavior. + */ + const_reverse_iterator rend() const + { + return base_class::rend(); + } + + /// Returns a const reverse iterator to the element following the last element of the reversed set + /** + It corresponds to the element preceding the first element of the non-reversed container. + This element acts as a placeholder, attempting to access it results in undefined behavior. 
+ */ + const_reverse_iterator crend() + { + return base_class::crend(); + } + ///@} + + public: + /// Creates empty set + /** + @param head_bits - 2head_bits specifies the size of head array, minimum is 4. + @param array_bits - 2array_bits specifies the size of array node, minimum is 2. + + Equation for \p head_bits and \p array_bits: + \code + sizeof(hash_type) * 8 == head_bits + N * array_bits + \endcode + where \p N is multi-level array depth. + */ + FeldmanHashSet( size_t head_bits = 8, size_t array_bits = 4 ) + : base_class( head_bits, array_bits ) + {} + + /// Destructs the set and frees all data + ~FeldmanHashSet() + {} + + /// Inserts new element + /** + The function creates an element with copy of \p val value and then inserts it into the set. + + The type \p Q should contain as minimum the complete hash for the element. + The object of \ref value_type should be constructible from a value of type \p Q. + In trivial case, \p Q is equal to \ref value_type. + + Returns \p true if \p val is inserted into the set, \p false otherwise. + */ + template + bool insert( Q const& val ) + { + scoped_node_ptr sp( cxx_node_allocator().New( val )); + if ( base_class::insert( *sp )) { + sp.release(); + return true; + } + return false; + } + + /// Inserts new element + /** + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-fields of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this set's item by concurrent threads. + The user-defined functor is called only if the inserting is success. 
+ */ + template + bool insert( Q const& val, Func f ) + { + scoped_node_ptr sp( cxx_node_allocator().New( val )); + if ( base_class::insert( *sp, f )) { + sp.release(); + return true; + } + return false; + } + + /// Updates the element + /** + The operation performs inserting or replacing with lock-free manner. + + If the \p val key not found in the set, then the new item created from \p val + will be inserted into the set iff \p bInsert is \p true. + Otherwise, if \p val is found, it is replaced with new item created from \p val + and previous item is disposed. + In both cases \p func functor is called. + + The functor \p Func signature: + \code + struct my_functor { + void operator()( value_type& cur, value_type * prev ); + }; + \endcode + where: + - \p cur - current element + - \p prev - pointer to previous element with such hash. \p prev is \p nullptr + if \p cur was just inserted. + + The functor may change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + Returns std::pair where \p first is \p true if operation is successful, + i.e. the item has been inserted or updated, + \p second is \p true if the new item has been added or \p false if the item with key equal to \p val + already exists. + */ + template + std::pair update( Q const& val, Func func, bool bInsert = true ) + { + scoped_node_ptr sp( cxx_node_allocator().New( val )); + std::pair bRes = base_class::do_update( *sp, func, bInsert ); + if ( bRes.first ) + sp.release(); + return bRes; + } + + /// Inserts data of type \p value_type created in-place from std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool emplace( Args&&... args ) + { + scoped_node_ptr sp( cxx_node_allocator().MoveNew( std::forward(args)... 
)); + if ( base_class::insert( *sp )) { + sp.release(); + return true; + } + return false; + } + + /// Deletes the item from the set + /** + The function searches \p hash in the set, + deletes the item found, and returns \p true. + If that item is not found the function returns \p false. + */ + bool erase( hash_type const& hash ) + { + return base_class::erase( hash ); + } + + /// Deletes the item from the set + /** + The function searches \p hash in the set, + call \p f functor with item found, and deltes the element from the set. + + The \p Func interface is + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + If \p hash is not found the function returns \p false. + */ + template + bool erase( hash_type const& hash, Func f ) + { + return base_class::erase( hash, f ); + } + + /// Deletes the item pointed by iterator \p iter + /** + Returns \p true if the operation is successful, \p false otherwise. + + The function does not invalidate the iterator, it remains valid and can be used for further traversing. + */ + bool erase_at( iterator const& iter ) + { + return base_class::erase_at( iter ); + } + //@cond + bool erase_at( reverse_iterator const& iter ) + { + return base_class::erase_at( iter ); + } + //@endcond + + /// Extracts the item with specified \p hash + /** + The function searches \p hash in the set, + unlinks it from the set, and returns a guarded pointer to the item extracted. + If \p hash is not found the function returns an empty guarded pointer. + + The item returned is reclaimed by garbage collector \p GC + when returned \ref guarded_ptr object to be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::container::FeldmanHashSet< your_template_args > my_set; + my_set theSet; + // ... + { + my_set::guarded_ptr gp( theSet.extract( 5 )); + if ( gp ) { + // Deal with gp + // ... 
+ } + // Destructor of gp releases internal HP guard + } + \endcode + */ + guarded_ptr extract( hash_type const& hash ) + { + return base_class::extract( hash ); + } + + /// Finds an item by it's \p hash + /** + The function searches the item by \p hash and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during the functor is executing. + The functor does not serialize simultaneous access to the set's \p item. If such access is + possible you must provide your own synchronization schema on item level to prevent unsafe item modifications. + + The function returns \p true if \p hash is found, \p false otherwise. + */ + template + bool find( hash_type const& hash, Func f ) + { + return base_class::find( hash, f ); + } + + /// Checks whether the set contains \p hash + /** + The function searches the item by its \p hash + and returns \p true if it is found, or \p false otherwise. + */ + bool contains( hash_type const& hash ) + { + return base_class::contains( hash ); + } + + /// Finds an item by it's \p hash and returns the item found + /** + The function searches the item by its \p hash + and returns the guarded pointer to the item found. + If \p hash is not found the function returns an empty \p guarded_ptr. + + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::container::FeldmanHashSet< your_template_params > my_set; + my_set theSet; + // ... + { + my_set::guarded_ptr gp( theSet.get( 5 )); + if ( theSet.get( 5 )) { + // Deal with gp + //... 
+ } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + */ + guarded_ptr get( hash_type const& hash ) + { + return base_class::get( hash ); + } + + /// Clears the set (non-atomic) + /** + The function unlink all data node from the set. + The function is not atomic but is thread-safe. + After \p %clear() the set may not be empty because another threads may insert items. + */ + void clear() + { + base_class::clear(); + } + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. + Thus, the correct item counting feature is an important part of the set implementation. + */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the set + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Returns the size of head node + size_t head_size() const + { + return base_class::head_size(); + } + + /// Returns the size of the array node + size_t array_node_size() const + { + return base_class::array_node_size(); + } + + /// Collects tree level statistics into \p stat + /** + The function traverses the set and collects statistics for each level of the tree + into \p feldman_hashset::level_statistics struct. The element of \p stat[i] + represents statistics for level \p i, level 0 is head array. + The function is thread-safe and may be called in multi-threaded environment. + + Result can be useful for estimating efficiency of hash functor you use. 
+ */ + void get_level_statistics(std::vector< feldman_hashset::level_statistics>& stat) const + { + base_class::get_level_statistics(stat); + } + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_IMPL_FELDMAN_HASHSET_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/iterable_kvlist.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/iterable_kvlist.h new file mode 100644 index 0000000..8d12bf8 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/iterable_kvlist.h @@ -0,0 +1,751 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_IMPL_ITERABLE_KVLIST_H +#define CDSLIB_CONTAINER_IMPL_ITERABLE_KVLIST_H + +#include +#include + +namespace cds { namespace container { + + /// Iterable ordered list for key-value pair + /** @ingroup cds_nonintrusive_list + \anchor cds_nonintrusive_IterableKVList_gc + + This is key-value variation of non-intrusive \p IterableList. + Like standard container, this implementation split a value stored into two part - + constant key and alterable value. + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + Iterable list is suitable for almost append-only hash table because the list doesn't delete + its internal node when erasing a key but it is marked them as empty to be reused in the future. + However, plenty of empty nodes degrades performance. + + The complexity of searching is O(N). + + Template arguments: + - \p GC - garbage collector used + - \p Key - key type of an item stored in the list. It should be copy-constructible + - \p Value - value type stored in a list + - \p Traits - type traits, default is \p iterable_list::traits + + It is possible to declare option-based list with \p cds::container::iterable_list::make_traits metafunction instead of \p Traits template + argument. 
For example, the following traits-based declaration of \p gc::HP iterable list + \code + #include + // Declare comparator for the item + struct my_compare { + int operator ()( int i1, int i2 ) + { + return i1 - i2; + } + }; + + // Declare traits + struct my_traits: public cds::container::iterable_list::traits + { + typedef my_compare compare; + }; + + // Declare traits-based list + typedef cds::container::IterableKVList< cds::gc::HP, int, int, my_traits > traits_based_list; + \endcode + is equivalent for the following option-based list + \code + #include + + // my_compare is the same + + // Declare option-based list + typedef cds::container::IterableKVList< cds::gc::HP, int, int, + typename cds::container::iterable_list::make_traits< + cds::container::opt::compare< my_compare > // item comparator option + >::type + > option_based_list; + \endcode + + \par Usage + There are different specializations of this template for each garbage collecting schema used. + You should include appropriate .h-file depending on GC you are using: + - for gc::HP: \code #include \endcode + - for gc::DHP: \code #include \endcode + - for \ref cds_urcu_desc "RCU": \code #include \endcode + */ + template < + typename GC, + typename Key, + typename Value, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = iterable_list::traits +#else + typename Traits +#endif + > + class IterableKVList: +#ifdef CDS_DOXYGEN_INVOKED + protected container::IterableList< GC, std::pair, Traits > +#else + protected details::make_iterable_kvlist< GC, Key, Value, Traits >::type +#endif + { + //@cond + typedef details::make_iterable_kvlist< GC, Key, Value, Traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: +#ifdef CDS_DOXYGEN_INVOKED + typedef Key key_type; ///< Key type + typedef Value mapped_type; ///< Type of value stored in the list + typedef std::pair value_type; ///< key/value pair stored in the list +#else + typedef typename maker::key_type key_type; + typedef typename 
maker::mapped_type mapped_type; + typedef typename maker::value_type value_type; +#endif + typedef Traits traits; ///< List traits + typedef typename base_class::gc gc; ///< Garbage collector used + typedef typename base_class::back_off back_off; ///< Back-off strategy used + typedef typename maker::data_allocator_type allocator_type; ///< Allocator type used for allocate/deallocate data + typedef typename base_class::item_counter item_counter; ///< Item counting policy used + typedef typename maker::key_comparator key_comparator; ///< key comparison functor + typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option + typedef typename base_class::stat stat; ///< Internal statistics + + static constexpr const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the algorithm + + /// Guarded pointer + typedef typename base_class::guarded_ptr guarded_ptr; + + //@cond + // Rebind traits (split-list support) + template + struct rebind_traits { + typedef IterableKVList< + gc + , key_type, mapped_type + , typename cds::opt::make_options< traits, Options...>::type + > type; + }; + + // Stat selector + template + using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >; + //@endcond + + protected: + //@cond + typedef typename base_class::head_type head_type; + typedef typename maker::cxx_data_allocator cxx_data_allocator; + + template + using less_wrapper = typename maker::template less_wrapper< Less >; + + template + using iterator_type = typename base_class::template iterator_type; + //@endcond + + public: + /// Forward iterator + /** + The forward iterator for iterable list has some features: + - it has no post-increment operator + - to protect the value, the iterator contains a GC-specific guard. 
+ For some GC (like as \p gc::HP), a guard is a limited resource per thread, so an exception (or assertion) "no free guard" + may be thrown if the limit of guard count per thread is exceeded. + - The iterator cannot be moved across thread boundary since it contains thread-private GC's guard. + - Iterator is thread-safe: even if an element the iterator points to is removed, the iterator stays valid because + it contains the guard keeping the value from to be recycled. + + The iterator interface: + \code + class iterator { + public: + // Default constructor + iterator(); + + // Copy constructor + iterator( iterator const& src ); + + // Dereference operator + value_type * operator ->() const; + + // Dereference operator + value_type& operator *() const; + + // Preincrement operator + iterator& operator ++(); + + // Assignment operator + iterator& operator = (iterator const& src); + + // Equality operators + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + + @note For two iterators pointed to the same element the value can be different; + this code + \code + if ( it1 == it2 ) + assert( &(*it1) == &(*it2)); + \endcode + can throw assertion. The point is that the iterator stores the value of element which can be modified later by other thread. + The guard inside the iterator prevents recycling that value so the iterator's value remains valid even after such changing. + Other iterator can observe modified value of the element. 
+ */ + using typename base_class::iterator; + using typename base_class::const_iterator; + using base_class::begin; + using base_class::end; + using base_class::cbegin; + using base_class::cend; + + public: + /// Default constructor + /** + Initializes empty list + */ + IterableKVList() + {} + + //@cond + template >::value >> + explicit IterableKVList( Stat& st ) + : base_class( st ) + {} + //@endcond + + /// List destructor + /** + Clears the list + */ + ~IterableKVList() + {} + + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the list. + + Preconditions: + - The \p key_type should be constructible from value of type \p K. In trivial case, \p K is equal to \p key_type. + - The \p mapped_type should be default-constructible. + + Returns \p true if inserting successful, \p false otherwise. + + @note The function is supported only if \ref mapped_type is default constructible + */ + template + bool insert( K&& key ) + { + return base_class::emplace( key_type( std::forward( key )), mapped_type()); + } + + /// Inserts new node with a key and a value + /** + The function creates a node with \p key and value \p val, and then inserts the node created into the list. + + Preconditions: + - The \p key_type should be constructible from \p key of type \p K. + - The \p mapped_type should be constructible from \p val of type \p V. + + Returns \p true if inserting successful, \p false otherwise. 
+ */ + template + bool insert( K&& key, V&& val ) + { + return base_class::emplace( key_type( std::forward( key )), mapped_type( std::forward( val ))); + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the item inserted. item.second is a reference to item's value that may be changed. + User-defined functor \p func should guarantee that during changing item's value no any other changes + could be made on this list's item by concurrent threads. + The user-defined functor is called only if inserting is successful. + + The \p key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create a new item from \p key; + - insert the new item into the list; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + + @note The function is supported only if \ref mapped_type is default constructible + */ + template + bool insert_with( K&& key, Func func ) + { + return base_class::insert( value_type( key_type( std::forward( key )), mapped_type()), func ); + } + + /// Updates data by \p key + /** + The operation performs inserting or replacing the element with lock-free manner. + + If the \p key not found in the list, then the new item created from \p key + will be inserted iff \p bAllowInsert is \p true. + (note that in this case the \ref key_type should be constructible from type \p K). 
+ Otherwise, if \p key is found, the functor \p func is called with item found. + + The functor \p func is called after inserting or replacing, it signature is: + \code + void func( value_type& val, value_type* old ); + \endcode + where + - \p val - a new data constructed from \p key + - \p old - old value that will be retired. If new item has been inserted then \p old is \p nullptr. + + The functor may change non-key fields of \p val; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + @return std::pair where \p first is true if operation is successful, + \p second is true if new item has been added or \p false if the item with such \p key + already exists. + + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + + @note The function is supported only if \ref mapped_type is default constructible + */ + template + std::pair update( K&& key, Func f, bool bAllowInsert = true ) + { + return base_class::update( value_type( key_type( std::forward( key )), mapped_type()), f, bAllowInsert ); + } + + /// Insert or update + /** + The operation performs inserting or updating data with lock-free manner. + + If the item \p key is not found in the list, then \p key is inserted + iff \p bInsert is \p true. + Otherwise, the current element is changed to value_type( key, val ), + the old element will be retired later. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if \p key has been added or \p false if the item with that key + already in the list. + */ + template + std::pair upsert( Q&& key, V&& val, bool bInsert = true ) + { + return base_class::upsert( value_type( key_type( std::forward( key )), mapped_type( std::forward( val ))), bInsert ); + } + + /// Inserts a new node using move semantics + /** + \p key_type field of new item is constructed from \p key argument, + \p mapped_type field is done from \p args. 
+ + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool emplace( K&& key, Args&&... args ) + { + return base_class::emplace( key_type( std::forward( key )), mapped_type( std::forward( args )... )); + } + + /// Deletes \p key from the list + /** + + Returns \p true if \p key is found and has been deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + return base_class::erase( key ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \p erase(K const&) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::erase_with( key, less_wrapper()); + } + + /// Deletes \p key from the list + /** + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type& val) { ... } + }; + \endcode + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key, Func f ) + { + return base_class::erase( key, f ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \p erase(K const&, Func) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( K const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::erase_with( key, less_wrapper(), f ); + } + + /// Deletes the item pointed by iterator \p iter + /** + Returns \p true if the operation is successful, \p false otherwise. 
+ The function can return \p false if the node the iterator points to has already been deleted + by other thread. + + The function does not invalidate the iterator, it remains valid and can be used for further traversing. + */ + bool erase_at( iterator const& iter ) + { + return base_class::erase_at( iter ); + } + + /// Extracts the item from the list with specified \p key + /** + The function searches an item with key equal to \p key, + unlinks it from the list, and returns it as \p guarded_ptr. + If \p key is not found the function returns an empty guarded pointer. + + Note the compare functor should accept a parameter of type \p K that can be not the same as \p key_type. + + The \p disposer specified in \p Traits class template parameter is called automatically + by garbage collector \p GC specified in class' template parameters when returned \p guarded_ptr object + will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::container::IterableKVList< cds::gc::HP, int, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp(theList.extract( 5 )); + if ( gp ) { + // Deal with gp + // ... + } + // Destructor of gp releases internal HP guard + } + \endcode + */ + template + guarded_ptr extract( K const& key ) + { + return base_class::extract( key ); + } + + /// Extracts the item from the list with comparing functor \p pred + /** + The function is an analog of \p extract(K const&) but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the list. 
+ */ + template + guarded_ptr extract_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::extract_with( key, less_wrapper()); + } + + /// Checks whether the list contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + */ + template + bool contains( Q const& key ) const + { + return base_class::contains( key ); + } + + /// Checks whether the map contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the list. + */ + template + bool contains( Q const& key, Less pred ) const + { + CDS_UNUSED( pred ); + return base_class::contains( key, less_wrapper()); + } + + /// Finds the key \p key and performs an action with it + /** + The function searches an item with key equal to \p key and calls the functor \p f for the item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + The functor may change item.second that is reference to value of node. + Note that the function is only guarantee that \p item cannot be deleted during functor is executing. + The function does not serialize simultaneous access to the list \p item. If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( Q const& key, Func f ) const + { + return base_class::find( key, [&f]( value_type& v, Q const& ) { f( v ); } ); + } + + /// Finds \p key in the list and returns iterator pointed to the item found + /** + If \p key is not found the function returns \p end(). 
+ */ + template + iterator find( Q const& key ) const + { + return base_class::find( key ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \p find(Q&, Func) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q const& key, Less pred, Func f ) const + { + CDS_UNUSED( pred ); + return base_class::find_with( key, less_wrapper(), [&f]( value_type& v, Q const& ) { f( v ); } ); + } + + /// Finds \p key in the list using \p pred predicate for searching and returns iterator pointed to the item found + /** + The function is an analog of \p find(Q&) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + + If \p key is not found the function returns \p end(). + */ + template + iterator find_with( Q const& key, Less pred ) const + { + CDS_UNUSED( pred ); + return base_class::find_with( key, less_wrapper()); + } + + /// Finds the \p key and return the item found + /** + The function searches the item with key equal to \p key + and returns it as \p guarded_ptr. + If \p key is not found the function returns an empty guarded pointer. + + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::container::IterableKVList< cds::gc::HP, int, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp(theList.get( 5 )); + if ( gp ) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + + Note the compare functor specified for class \p Traits template parameter + should accept a parameter of type \p K that can be not the same as \p key_type. 
+ */ + template + guarded_ptr get( K const& key ) const + { + return base_class::get( key ); + } + + /// Finds the \p key and return the item found + /** + The function is an analog of \p get( guarded_ptr& ptr, K const&) + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + guarded_ptr get_with( K const& key, Less pred ) const + { + CDS_UNUSED( pred ); + return base_class::get_with( key, less_wrapper()); + } + + /// Checks if the list is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. + Thus, if you need to use \p %empty() you should provide appropriate (non-empty) \p iterable_list::traits::item_counter + feature. + */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns list's item count + /** + The value returned depends on item counter provided by \p Traits. For \p atomicity::empty_item_counter, + this function always returns 0. 
+ */ + size_t size() const + { + return base_class::size(); + } + + /// Clears the list + void clear() + { + base_class::clear(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + protected: + //@cond + // Split-list support + + template + bool insert_at( head_type& refHead, K&& key ) + { + return base_class::insert_at( refHead, value_type( key_type( std::forward( key )), mapped_type())); + } + + template + bool insert_at( head_type& refHead, K&& key, V&& val ) + { + return base_class::insert_at( refHead, value_type( key_type( std::forward( key )), std::forward( val ))); + } + + template + bool insert_with_at( head_type& refHead, K&& key, Func f ) + { + return base_class::insert_at( refHead, value_type( key_type( std::forward( key )), mapped_type()), f ); + } + + template + bool emplace_at( head_type& refHead, K&& key, Args&&... args ) + { + return base_class::emplace_at( refHead, std::forward(key), std::forward(args)... 
); + } + + template + std::pair update_at( head_type& refHead, K&& key, Func f, bool bAllowInsert ) + { + return base_class::update_at( refHead, value_type( key_type( std::forward( key )), mapped_type()), f, bAllowInsert ); + } + + template + bool erase_at( head_type& refHead, K const& key, Compare cmp ) + { + return base_class::erase_at( refHead, key, cmp ); + } + + template + bool erase_at( head_type& refHead, K const& key, Compare cmp, Func f ) + { + return base_class::erase_at( refHead, key, cmp, f ); + } + template + guarded_ptr extract_at( head_type& refHead, K const& key, Compare cmp ) + { + return base_class::extract_at( refHead, key, cmp ); + } + + template + bool find_at( head_type& refHead, K const& key, Compare cmp ) + { + return base_class::find_at( refHead, key, cmp ); + } + + template + bool find_at( head_type& refHead, K& key, Compare cmp, Func f ) + { + return base_class::find_at( refHead, key, cmp, f ); + } + + template + guarded_ptr get_at( head_type& refHead, K const& key, Compare cmp ) + { + return base_class::get_at( refHead, key, cmp ); + } + + //@endcond + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_IMPL_ITERABLE_KVLIST_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/iterable_list.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/iterable_list.h new file mode 100644 index 0000000..888ea87 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/iterable_list.h @@ -0,0 +1,881 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain 
the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_IMPL_ITERABLE_LIST_H +#define CDSLIB_CONTAINER_IMPL_ITERABLE_LIST_H + +#include +#include + +namespace cds { namespace container { + + /// Iterable ordered list + /** @ingroup cds_nonintrusive_list + \anchor cds_nonintrusive_IterableList_gc + + This lock-free list implementation supports thread-safe iterators. + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + Iterable list is suitable for almost append-only hash table because the list doesn't delete + its internal node when erasing a key but it is marked them as empty to be reused in the future. + However, plenty of empty nodes degrades performance. + + The complexity of searching is O(N). + + Template arguments: + - \p GC - Garbage collector used. + - \p T - type to be stored in the list. + - \p Traits - type traits, default is \p iterable_list::traits. 
+ + Unlike standard container, this implementation does not divide type \p T into key and value part and + may be used as a main building block for hash set algorithms. + The key is a function (or a part) of type \p T, and this function is specified by Traits::compare functor + or Traits::less predicate. + + \p IterableKVList is a key-value version of iterable non-intrusive list that is closer to the C++ std library approach. + + It is possible to declare option-based list with cds::container::iterable_list::make_traits metafunction istead of \p Traits template + argument. For example, the following traits-based declaration of gc::HP iterable list + \code + #include + // Declare comparator for the item + struct my_compare { + int operator ()( int i1, int i2 ) + { + return i1 - i2; + } + }; + + // Declare traits + struct my_traits: public cds::container::iterable_list::traits + { + typedef my_compare compare; + }; + + // Declare traits-based list + typedef cds::container::IterableList< cds::gc::HP, int, my_traits > traits_based_list; + \endcode + + is equivalent for the following option-based list + \code + #include + + // my_compare is the same + + // Declare option-based list + typedef cds::container::IterableList< cds::gc::HP, int, + typename cds::container::iterable_list::make_traits< + cds::container::opt::compare< my_compare > // item comparator option + >::type + > option_based_list; + \endcode + + \par Usage + There are different specializations of this template for each garbage collecting schema used. 
+ You should include appropriate .h-file depending on GC you are using: + - for gc::HP: \code #include \endcode + - for gc::DHP: \code #include \endcode + - for \ref cds_urcu_desc "RCU": \code #include \endcode + */ + template < + typename GC, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = iterable_list::traits +#else + typename Traits +#endif + > + class IterableList: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::IterableList< GC, T, Traits > +#else + protected details::make_iterable_list< GC, T, Traits >::type +#endif + { + //@cond + typedef details::make_iterable_list< GC, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + typedef T value_type; ///< Type of value stored in the list + typedef Traits traits; ///< List traits + + typedef typename base_class::gc gc; ///< Garbage collector used + typedef typename base_class::back_off back_off; ///< Back-off strategy used + typedef typename maker::data_allocator_type allocator_type; ///< Allocator type used for allocate/deallocate data + typedef typename base_class::item_counter item_counter; ///< Item counting policy used + typedef typename maker::key_comparator key_comparator; ///< key comparison functor + typedef typename base_class::memory_model memory_model; ///< Memory ordering. 
See \p cds::opt::memory_model option + typedef typename base_class::stat stat; ///< Internal statistics + + static constexpr const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the algorithm + + //@cond + // Rebind traits (split-list support) + template + struct rebind_traits { + typedef IterableList< + gc + , value_type + , typename cds::opt::make_options< traits, Options...>::type + > type; + }; + + // Stat selector + template + using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >; + //@endcond + + protected: + //@cond + typedef typename maker::cxx_data_allocator cxx_data_allocator; + typedef typename maker::data_disposer data_disposer; + typedef typename base_class::node_type head_type; + //@endcond + + public: + /// Guarded pointer + typedef typename base_class::guarded_ptr guarded_ptr; + + protected: + //@cond + template + class iterator_type: protected base_class::template iterator_type + { + typedef typename base_class::template iterator_type iterator_base; + friend class IterableList; + + iterator_type( iterator_base it ) + : iterator_base( it ) + {} + + public: + typedef typename iterator_base::value_ptr value_ptr; + typedef typename iterator_base::value_ref value_ref; + + iterator_type() + {} + + iterator_type( iterator_type const& src ) + : iterator_base( src ) + {} + + value_ptr operator ->() const + { + return iterator_base::operator ->(); + } + + value_ref operator *() const + { + return iterator_base::operator *(); + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base::operator ++(); + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base::operator ==(i); + } + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base::operator !=(i); + } + }; + //@endcond + + public: + ///@name Thread-safe forward iterators + //@{ + /// Forward iterator + /** + The forward 
iterator for iterable list has some features: + - it has no post-increment operator + - to protect the value, the iterator contains a GC-specific guard. + For some GC (like as \p gc::HP), a guard is a limited resource per thread, so an exception (or assertion) "no free guard" + may be thrown if the limit of guard count per thread is exceeded. + - The iterator cannot be moved across thread boundary since it contains thread-private GC's guard. + - Iterator is thread-safe: even if an element the iterator points to is removed, the iterator stays valid because + it contains the guard keeping the value from to be recycled. + + The iterator interface: + \code + class iterator { + public: + // Default constructor + iterator(); + + // Copy constructor + iterator( iterator const& src ); + + // Dereference operator + value_type * operator ->() const; + + // Dereference operator + value_type& operator *() const; + + // Preincrement operator + iterator& operator ++(); + + // Assignment operator + iterator& operator = (iterator const& src); + + // Equality operators + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + + @note For two iterators pointed to the same element the value can be different; + this code + \code + if ( it1 == it2 ) + assert( &(*it1) == &(*it2)); + \endcode + can throw assertion. The point is that the iterator stores the value of element which can be modified later by other thread. + The guard inside the iterator prevents recycling that value so the iterator's value remains valid even after such changing. + Other iterator can observe modified value of the element. 
+ */ + typedef iterator_type iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( base_class::begin()); + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + Internally, end returning value equals to \p nullptr. + + The returned value can be used only to control reaching the end of the list. + For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator( base_class::end()); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator begin() const + { + return const_iterator( base_class::cbegin()); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator cbegin() const + { + return const_iterator( base_class::cbegin()); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator end() const + { + return const_iterator( base_class::cend()); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator cend() const + { + return const_iterator( base_class::cend()); + } + //@} + + public: + /// Default constructor + /** + Initialize empty list + */ + IterableList() + {} + + //@cond + template >::value >> + explicit IterableList( Stat& st ) + : base_class( st ) + {} + //@endcond + + /// List destructor + /** + Clears the list + */ + ~IterableList() + {} + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the list. 
+ + The type \p Q should contain least the complete key of the node. + The object of \ref value_type should be constructible from \p val of type \p Q. + In trivial case, \p Q is equal to \ref value_type. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( Q&& val ) + { + return insert_at( head(), std::forward( val )); + } + + /// Inserts new node + /** + This function inserts new node with default-constructed value and then it calls + \p func functor with signature + \code + void func( value_type& data ); + \endcode + + The argument \p data of user-defined functor \p func is the reference + to the list's item inserted. User-defined functor \p func should guarantee that during changing + item's value no any other changes could be made on this list's item by concurrent threads. + The user-defined functor is called only if inserting is success. + + The type \p Q should contain the complete key of the node. + The object of \p value_type should be constructible from \p key of type \p Q. + + The function allows to split creating of new item into two part: + - create item from \p key with initializing key-fields only; + - insert new item into the list; + - if inserting is successful, initialize non-key fields of item by calling \p func functor + + The method can be useful if complete initialization of object of \p value_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + bool insert( Q&& key, Func func ) + { + return insert_at( head(), std::forward( key ), func ); + } + + /// Updates data by \p key + /** + The operation performs inserting or replacing the element with lock-free manner. + + If the \p key not found in the list, then the new item created from \p key + will be inserted iff \p bAllowInsert is \p true. 
+ Otherwise, if \p key is found, the functor \p func is called with item found. + + The functor \p func is called after inserting or replacing, it signature is: + \code + void func( value_type& val, value_type * old ); + \endcode + where + - \p val - a new data constructed from \p key + - \p old - old value that will be retired. If new item has been inserted then \p old is \p nullptr. + + The functor may change non-key fields of \p val; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + Returns std::pair where \p first is true if operation is successful, + \p second is true if new item has been added or \p false if the item with such \p key + already exists. + + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + std::pair update( Q&& key, Func func, bool bAllowInsert = true ) + { + return update_at( head(), std::forward( key ), func, bAllowInsert ); + } + + /// Insert or update + /** + The operation performs inserting or updating data with lock-free manner. + + If the item \p key is not found in the list, then \p key is inserted + iff \p bInsert is \p true. + Otherwise, the current element is changed to \p key, the old element will be retired later. + + \p value_type should be constructible from \p key. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if \p key has been added or \p false if the item with that key + already in the list. + */ + template + std::pair upsert( Q&& key, bool bInsert = true ) + { + return update_at( head(), std::forward( key ), []( value_type&, value_type* ) {}, bInsert ); + } + + /// Inserts data of type \p value_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool emplace( Args&&... args ) + { + return emplace_at( head(), std::forward(args)... 
); + } + + /// Delete \p key from the list + /** + Since the key of IterableList's item type \p value_type is not explicitly specified, + template parameter \p Q sould contain the complete key to search in the list. + The list item comparator should be able to compare the type \p value_type + and the type \p Q. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key ) + { + return erase_at( head(), key, key_comparator(), [](value_type const&){} ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \p erase(Q const&) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return erase_at( head(), key, typename maker::template less_wrapper::type(), [](value_type const&){} ); + } + + /// Deletes \p key from the list + /** + The function searches an item with key \p key, calls \p f functor with item found + and deletes it. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(const value_type& val) { ... } + }; + \endcode + + Since the key of IterableList's item type \p value_type is not explicitly specified, + template parameter \p Q should contain the complete key to search in the list. + The list item comparator should be able to compare the type \p value_type of list item + and the type \p Q. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key, Func f ) + { + return erase_at( head(), key, key_comparator(), f ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \p erase(Q const&, Func) but \p pred is used for key comparing. 
+ \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return erase_at( head(), key, typename maker::template less_wrapper::type(), f ); + } + + /// Deletes the item pointed by iterator \p iter + /** + Returns \p true if the operation is successful, \p false otherwise. + The function can return \p false if the node the iterator points to has already been deleted + by other thread. + + The function does not invalidate the iterator, it remains valid and can be used for further traversing. + */ + bool erase_at( iterator const& iter ) + { + return base_class::erase_at( iter ); + } + + /// Extracts the item from the list with specified \p key + /** + The function searches an item with key equal to \p key, + unlinks it from the list, and returns it as \p guarded_ptr. + If \p key is not found the function returns an empty guarded pointer. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::container::IterableList< cds::gc::HP, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp(theList.extract( 5 )); + if ( gp ) { + // Deal with gp + // ... + } + // Destructor of gp releases internal HP guard and frees the item + } + \endcode + */ + template + guarded_ptr extract( Q const& key ) + { + return extract_at( head(), key, key_comparator()); + } + + /// Extracts the item from the list with comparing functor \p pred + /** + The function is an analog of \p extract(Q const&) but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but it should accept arguments + of type \p value_type and \p Q in any order. 
+ \p pred must imply the same element order as the comparator used for building the list. + */ + template + guarded_ptr extract_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return extract_at( head(), key, typename maker::template less_wrapper::type()); + } + + /// Checks whether the list contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + */ + template + bool contains( Q const& key ) const + { + return find_at( head(), key, key_comparator()); + } + + /// Checks whether the list contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool contains( Q const& key, Less pred ) const + { + CDS_UNUSED( pred ); + return find_at( head(), key, typename maker::template less_wrapper::type()); + } + + /// Finds \p key and perform an action with it + /** + The function searches an item with key equal to \p key and calls the functor \p f for the item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. + + The functor may change non-key fields of \p item. Note that the function is only guarantee + that \p item cannot be deleted during functor is executing. + The function does not serialize simultaneous access to the list \p item. If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The function returns \p true if \p key is found, \p false otherwise. 
+ */ + template + bool find( Q& key, Func f ) const + { + return find_at( head(), key, key_comparator(), f ); + } + //@cond + template + bool find( Q const& key, Func f ) const + { + return find_at( head(), key, key_comparator(), f ); + } + //@endcond + + /// Finds \p key in the list and returns iterator pointed to the item found + /** + If \p key is not found the function returns \p end(). + */ + template + iterator find( Q const& key ) const + { + return find_iterator_at( head(), key, key_comparator()); + } + + /// Finds \p key using \p pred predicate for searching + /** + The function is an analog of \p find(Q&, Func) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q& key, Less pred, Func f ) const + { + CDS_UNUSED( pred ); + return find_at( head(), key, typename maker::template less_wrapper::type(), f ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) const + { + CDS_UNUSED( pred ); + return find_at( head(), key, typename maker::template less_wrapper::type(), f ); + } + //@endcond + + /// Finds \p key in the list using \p pred predicate for searching and returns iterator pointed to the item found + /** + The function is an analog of \p find(Q&) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + + If \p key is not found the function returns \p end(). + */ + template + iterator find_with( Q const& key, Less pred ) const + { + CDS_UNUSED( pred ); + return find_iterator_at( head(), key, cds::opt::details::make_comparator_from_less()); + } + + /// Finds \p key and return the item found + /** \anchor cds_nonintrusive_MichaelList_hp_get + The function searches the item with key equal to \p key + and returns it as \p guarded_ptr. 
+ If \p key is not found the function returns an empty guarded pointer. + + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::container::MichaelList< cds::gc::HP, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp(theList.get( 5 )); + if ( gp ) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard and frees the item + } + \endcode + + Note the compare functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + guarded_ptr get( Q const& key ) const + { + return get_at( head(), key, key_comparator()); + } + + /// Finds \p key and return the item found + /** + The function is an analog of \ref cds_nonintrusive_MichaelList_hp_get "get( Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should accept arguments of type \p value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + guarded_ptr get_with( Q const& key, Less pred ) const + { + CDS_UNUSED( pred ); + return get_at( head(), key, typename maker::template less_wrapper::type()); + } + + /// Checks if the list is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. + Thus, if you need to use \p %empty() you should provide appropriate (non-empty) \p iterable_list::traits::item_counter + feature. + */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns list's item count + /** + The value returned depends on item counter provided by \p Traits. For \p atomicity::empty_item_counter, + this function always returns 0. 
+ */ + size_t size() const + { + return base_class::size(); + } + + /// Clears the list (thread safe, not atomic) + void clear() + { + base_class::clear(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + protected: + //@cond + template + static value_type* alloc_data( Args&&... args ) + { + return cxx_data_allocator().MoveNew( std::forward(args)... ); + } + + static void free_data( value_type* pData ) + { + cxx_data_allocator().Delete( pData ); + } + + typedef std::unique_ptr< value_type, data_disposer > scoped_data_ptr; + + using base_class::head; + //@endcond + + protected: + //@cond + bool insert_node( value_type* pData ) + { + return insert_node_at( head(), pData ); + } + + bool insert_node_at( head_type* pHead, value_type* pData ) + { + assert( pData ); + scoped_data_ptr p( pData ); + if ( base_class::insert_at( pHead, *pData )) { + p.release(); + return true; + } + + return false; + } + + template + bool insert_at( head_type* pHead, Q&& val ) + { + return insert_node_at( pHead, alloc_data( std::forward( val ))); + } + + template + bool insert_at( head_type* pHead, Q&& key, Func f ) + { + scoped_data_ptr pNode( alloc_data( std::forward( key ))); + + if ( base_class::insert_at( pHead, *pNode, f )) { + pNode.release(); + return true; + } + return false; + } + + template + bool emplace_at( head_type* pHead, Args&&... args ) + { + return insert_node_at( pHead, alloc_data( std::forward(args)... 
)); + } + + template + std::pair update_at( head_type* pHead, Q&& key, Func f, bool bAllowInsert ) + { + scoped_data_ptr pData( alloc_data( std::forward( key ))); + + std::pair ret = base_class::update_at( pHead, *pData, f, bAllowInsert ); + if ( ret.first ) + pData.release(); + + return ret; + } + + template + bool erase_at( head_type* pHead, Q const& key, Compare cmp, Func f ) + { + return base_class::erase_at( pHead, key, cmp, f ); + } + + template + guarded_ptr extract_at( head_type* pHead, Q const& key, Compare cmp ) + { + return base_class::extract_at( pHead, key, cmp ); + } + + template + bool find_at( head_type const* pHead, Q const& key, Compare cmp ) const + { + return base_class::find_at( pHead, key, cmp ); + } + + template + bool find_at( head_type const* pHead, Q& val, Compare cmp, Func f ) const + { + return base_class::find_at( pHead, val, cmp, f ); + } + + template + iterator find_iterator_at( head_type const* pHead, Q const& key, Compare cmp ) const + { + return iterator( base_class::find_iterator_at( pHead, key, cmp )); + } + + template + guarded_ptr get_at( head_type const* pHead, Q const& key, Compare cmp ) const + { + return base_class::get_at( pHead, key, cmp ); + } + //@endcond + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_IMPL_ITERABLE_LIST_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/lazy_kvlist.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/lazy_kvlist.h new file mode 100644 index 0000000..49526ca --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/lazy_kvlist.h @@ -0,0 +1,885 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + 
modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_IMPL_LAZY_KVLIST_H +#define CDSLIB_CONTAINER_IMPL_LAZY_KVLIST_H + +#include +#include + +namespace cds { namespace container { + + /// Lazy ordered list (key-value pair) + /** @ingroup cds_nonintrusive_list + \anchor cds_nonintrusive_LazyKVList_gc + + This is key-value variation of non-intrusive LazyList. + Like standard container, this implementation split a value stored into two part - + constant key and alterable value. + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N). + + Template arguments: + - \p GC - garbage collector + - \p Key - key type of an item to be stored in the list. 
It should be copy-constructible + - \p Value - value type to be stored in the list + - \p Traits - type traits, default is \p lazy_list::traits + It is possible to declare option-based list with cds::container::lazy_list::make_traits metafunction istead of \p Traits template + argument. For example, the following traits-based declaration of \p gc::HP lazy list + \code + #include + // Declare comparator for the item + struct my_compare { + int operator ()( int i1, int i2 ) + { + return i1 - i2; + } + }; + + // Declare traits + struct my_traits: public cds::container::lazy_list::traits + { + typedef my_compare compare; + }; + + // Declare traits-based list + typedef cds::container::LazyKVList< cds::gc::HP, int, int, my_traits > traits_based_list; + \endcode + is equal to the following option-based list + \code + #include + + // my_compare is the same + + // Declare option-based list + typedef cds::container::LazyKVList< cds::gc::HP, int, int, + typename cds::container::lazy_list::make_traits< + cds::container::opt::compare< my_compare > // item comparator option + >::type + > option_based_list; + \endcode + + \par Usage + There are different specializations of this template for each garbage collecting schema used. 
+ You should include appropriate .h-file depending on GC you are using: + - for \p gc::HP: + - for \p gc::DHP: + - for \ref cds_urcu_desc "RCU": + - for \p gc::nogc: + */ + template < + typename GC, + typename Key, + typename Value, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = lazy_list::traits +#else + typename Traits +#endif + > + class LazyKVList: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::LazyList< GC, implementation_defined, Traits > +#else + protected details::make_lazy_kvlist< GC, Key, Value, Traits >::type +#endif + { + //@cond + typedef details::make_lazy_kvlist< GC, Key, Value, Traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + typedef GC gc; ///< Garbage collector + typedef Traits traits; ///< Traits +#ifdef CDS_DOXYGEN_INVOKED + typedef Key key_type ; ///< Key type + typedef Value mapped_type ; ///< Type of value stored in the list + typedef std::pair value_type ; ///< key/value pair stored in the list +#else + typedef typename maker::key_type key_type; + typedef typename maker::mapped_type mapped_type; + typedef typename maker::value_type value_type; +#endif + typedef typename base_class::back_off back_off; ///< Back-off strategy + typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter; ///< Item counter type + typedef typename maker::key_comparator key_comparator; ///< key comparing functor + typedef typename base_class::memory_model memory_model; ///< Memory ordering. 
See \p cds::opt::memory_model + typedef typename base_class::stat stat; ///< Internal statistics + + static constexpr const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the algorithm + + //@cond + // Rebind traits (split-list support) + template + struct rebind_traits { + typedef LazyKVList< + gc + , key_type, mapped_type + , typename cds::opt::make_options< traits, Options...>::type + > type; + }; + + // Stat selector + template + using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >; + //@endcond + + protected: + //@cond + typedef typename base_class::value_type node_type; + typedef typename maker::cxx_allocator cxx_allocator; + typedef typename maker::node_deallocator node_deallocator; + typedef typename maker::intrusive_traits::compare intrusive_key_comparator; + + typedef typename base_class::node_type head_type; + //@endcond + + public: + /// Guarded pointer + typedef typename gc::template guarded_ptr< node_type, value_type, details::guarded_ptr_cast_map > guarded_ptr; + + protected: + //@cond + template + static node_type * alloc_node(const K& key) + { + return cxx_allocator().New( key ); + } + + template + static node_type * alloc_node( const K& key, const V& val ) + { + return cxx_allocator().New( key, val ); + } + + template + static node_type * alloc_node( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward(args)... 
); + } + + static void free_node( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + + head_type& head() + { + return base_class::m_Head; + } + + head_type const& head() const + { + return base_class::m_Head; + } + + head_type& tail() + { + return base_class::m_Tail; + } + + head_type const& tail() const + { + return base_class::m_Tail; + } + + //@endcond + + protected: + //@cond + template + class iterator_type: protected base_class::template iterator_type + { + typedef typename base_class::template iterator_type iterator_base; + + iterator_type( head_type const& pNode ) + : iterator_base( const_cast(&pNode)) + {} + iterator_type( head_type const * pNode ) + : iterator_base( const_cast(pNode)) + {} + + friend class LazyKVList; + + public: + typedef typename cds::details::make_const_type::reference value_ref; + typedef typename cds::details::make_const_type::pointer value_ptr; + + typedef typename cds::details::make_const_type::reference pair_ref; + typedef typename cds::details::make_const_type::pointer pair_ptr; + + iterator_type() + {} + + iterator_type( iterator_type const& src ) + : iterator_base( src ) + {} + + key_type const& key() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + assert( p != nullptr ); + return p->m_Data.first; + } + + value_ref val() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + assert( p != nullptr ); + return p->m_Data.second; + } + + pair_ptr operator ->() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + return p ? 
&(p->m_Data) : nullptr; + } + + pair_ref operator *() const + { + typename iterator_base::value_ref p = iterator_base::operator *(); + return p.m_Data; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base::operator ++(); + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base::operator ==(i); + } + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base::operator !=(i); + } + }; + //@endcond + + public: + /// Forward iterator + /** + The forward iterator for lazy list has some features: + - it has no post-increment operator + - to protect the value, the iterator contains a GC-specific guard + another guard is required locally for increment operator. + For some GC (\p gc::HP), a guard is limited resource per thread, so an exception (or assertion) "no free guard" + may be thrown if a limit of guard count per thread is exceeded. + - The iterator cannot be moved across thread boundary since it contains GC's guard that is thread-private GC data. + - Iterator ensures thread-safety even if you delete the item that iterator points to. However, in case of concurrent + deleting operations it is no guarantee that you iterate all item in the list. + + @warning Use this iterator on the concurrent container for debugging purpose only. 
+ + The iterator interface to access item data: + - operator -> - returns a pointer to \ref value_type for iterator + - operator * - returns a reference (a const reference for \p const_iterator) to \ref value_type for iterator + - const key_type& key() - returns a key reference for iterator + - mapped_type& val() - retuns a value reference for iterator (const reference for \p const_iterator) + + For both functions the iterator should not be equal to end() + */ + typedef iterator_type iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + ///@name Forward iterators (only for debugging purpose) + //@{ + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + iterator it( head()); + ++it ; // skip dummy head + return it; + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + Internally, end returning value equals to \p nullptr. + + The returned value can be used only to control reaching the end of the list. 
+ For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator( tail()); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator begin() const + { + const_iterator it( head()); + ++it; // skip dummy head + return it; + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator cbegin() const + { + const_iterator it( head()); + ++it; // skip dummy head + return it; + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator end() const + { + return const_iterator( tail()); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator cend() const + { + return const_iterator( tail()); + } + //@} + + public: + /// Default constructor + LazyKVList() + {} + + //@cond + template >::value >> + explicit LazyKVList( Stat& st ) + : base_class( st ) + {} + //@endcond + + /// Destructor clears the list + ~LazyKVList() + { + clear(); + } + + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the list. + + Preconditions: + - The \ref key_type should be constructible from value of type \p K. + In trivial case, \p K is equal to \ref key_type. + - The \ref mapped_type should be default-constructible. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( K&& key ) + { + return insert_at( head(), std::forward( key )); + } + + /// Inserts new node with a key and a value + /** + The function creates a node with \p key and value \p val, and then inserts the node created into the list. + + Preconditions: + - The \ref key_type should be constructible from \p key of type \p K. + - The \ref mapped_type should be constructible from \p val of type \p V. + + Returns \p true if inserting successful, \p false otherwise. 
+ */ + template + bool insert( K&& key, V&& val ) + { + // We cannot use insert with functor here + // because we cannot lock inserted node for updating + // Therefore, we use separate function + return insert_at( head(), std::forward( key ), std::forward( val )); + } + + /// Inserts new node and initializes it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the list's item inserted. item.second is a reference to item's value that may be changed. + The user-defined functor is called only if inserting is successful. + + The \p key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the list; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + */ + template + bool insert_with( K&& key, Func func ) + { + return insert_with_at( head(), std::forward( key ), func ); + } + + /// Inserts data of type \ref mapped_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool emplace( Args&&... args ) + { + return emplace_at( head(), std::forward(args)... ); + } + + /// Updates data by \p key + /** + The operation performs inserting or replacing the element with lock-free manner. + + If the \p key not found in the list, then the new item created from \p key + will be inserted iff \p bAllowInsert is \p true. + (note that in this case the \ref key_type should be constructible from type \p K). 
+ Otherwise, if \p key is found, the functor \p func is called with item found. + + The functor \p Func signature is: + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - the item found or inserted + + The functor may change any fields of the \p item.second of \p mapped_type; + during \p func call \p item is locked so it is safe to modify the item in + multi-threaded environment. + + Returns std::pair where \p first is true if operation is successful, + \p second is true if new item has been added or \p false if the item with \p key + already exists. + */ + template + std::pair update( K&& key, Func f, bool bAllowInsert = true ) + { + return update_at( head(), std::forward( key ), f, bAllowInsert ); + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( const K& key, Func f ) + { + return update( key, f, true ); + } + //@endcond + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_LazyKVList_hp_erase_val + + Returns \p true if \p key is found and has been deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + return erase_at( head(), key, intrusive_key_comparator()); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyKVList_hp_erase_val "erase(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. 
+ */ + template + bool erase_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return erase_at( head(), key, typename maker::template less_wrapper::type()); + } + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_LazyKVList_hp_erase_func + The function searches an item with key \p key, calls \p f functor with item found + and deletes it. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type& val) { ... } + }; + \endcode + + Returns \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key, Func f ) + { + return erase_at( head(), key, intrusive_key_comparator(), f ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyKVList_hp_erase_func "erase(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( K const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return erase_at( head(), key, typename maker::template less_wrapper::type(), f ); + } + + /// Extracts the item from the list with specified \p key + /** \anchor cds_nonintrusive_LazyKVList_hp_extract + The function searches an item with key equal to \p key, + unlinks it from the list, and returns it as \p guarded_ptr. + If \p key is not found the function returns an empty guarded pointer. + + Note the compare functor should accept a parameter of type \p K that can be not the same as \p key_type. + + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::container::LazyKVList< cds::gc::HP, int, foo, my_traits > ord_list; + ord_list theList; + // ... 
+ { + ord_list::guarded_ptr gp( theList.extract( 5 )); + if ( gp ) { + // Deal with gp + // ... + } + // Destructor of gp releases internal HP guard and frees the item + } + \endcode + */ + template + guarded_ptr extract( K const& key ) + { + return extract_at( head(), key, intrusive_key_comparator()); + } + + /// Extracts the item from the list with comparing functor \p pred + /** + The function is an analog of \ref cds_nonintrusive_LazyKVList_hp_extract "extract(K const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + guarded_ptr extract_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return extract_at( head(), key, typename maker::template less_wrapper::type()); + } + + /// Checks whether the list contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + */ + template + bool contains( Q const& key ) + { + return find_at( head(), key, intrusive_key_comparator()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( Q const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the map contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the list. 
+ */ + template + bool contains( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return find_at( head(), key, typename maker::template less_wrapper::type()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Finds the key \p key and performs an action with it + /** \anchor cds_nonintrusive_LazyKVList_hp_find_func + The function searches an item with key equal to \p key and calls the functor \p f for the item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + The functor may change item.second that is reference to value of node. + Note that the function is only guarantee that \p item cannot be deleted during functor is executing. + The function does not serialize simultaneous access to the list \p item. If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( Q const& key, Func f ) + { + return find_at( head(), key, intrusive_key_comparator(), f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyKVList_hp_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. 
+ */ + template + bool find_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return find_at( head(), key, typename maker::template less_wrapper::type(), f ); + } + + /// Finds \p key and return the item found + /** \anchor cds_nonintrusive_LazyKVList_hp_get + The function searches the item with key equal to \p key + and returns the item found as a guarded pointer. + If \p key is not found the functions returns an empty \p guarded_ptr. + + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::container::LazyKVList< cds::gc::HP, int, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp( theList.get( 5 )); + if ( gp ) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard and frees the item + } + \endcode + + Note the compare functor specified for class \p Traits template parameter + should accept a parameter of type \p K that can be not the same as \p key_type. + */ + template + guarded_ptr get( K const& key ) + { + return get_at( head(), key, intrusive_key_comparator()); + } + + /// Finds the key \p val and return the item found + /** + The function is an analog of \ref cds_nonintrusive_LazyKVList_hp_get "get(K const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + guarded_ptr get_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return get_at( head(), key, typename maker::template less_wrapper::type()); + } + + /// Checks if the list is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns list's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. 
+ + @note Even if you use real item counter and it returns 0, this fact is not mean that the list + is empty. To check list emptyness use \ref empty() method. + */ + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Clears the list + void clear() + { + base_class::clear(); + } + + protected: + //@cond + bool insert_node_at( head_type& refHead, node_type * pNode ) + { + assert( pNode != nullptr ); + scoped_node_ptr p( pNode ); + + if ( base_class::insert_at( &refHead, *p )) { + p.release(); + return true; + } + + return false; + } + + template + bool insert_at( head_type& refHead, K&& key ) + { + return insert_node_at( refHead, alloc_node( std::forward( key ))); + } + + template + bool insert_at( head_type& refHead, K&& key, V&& val ) + { + return insert_node_at( refHead, alloc_node( std::forward( key ), std::forward( val ))); + } + + template + bool insert_with_at( head_type& refHead, K&& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( std::forward( key ))); + + if ( base_class::insert_at( &refHead, *pNode, [&f](node_type& node){ f( node.m_Data ); } )) { + pNode.release(); + return true; + } + return false; + } + + template + bool emplace_at( head_type& refHead, Args&&... args ) + { + return insert_node_at( refHead, alloc_node( std::forward(args)... 
)); + } + + template + bool erase_at( head_type& refHead, K const& key, Compare cmp ) + { + return base_class::erase_at( &refHead, key, cmp ); + } + + template + bool erase_at( head_type& refHead, K const& key, Compare cmp, Func f ) + { + return base_class::erase_at( &refHead, key, cmp, [&f](node_type const & node){f( const_cast(node.m_Data)); }); + } + + template + guarded_ptr extract_at( head_type& refHead, K const& key, Compare cmp ) + { + return base_class::extract_at( &refHead, key, cmp ); + } + + template + std::pair update_at( head_type& refHead, K&& key, Func f, bool bAllowInsert ) + { + scoped_node_ptr pNode( alloc_node( std::forward( key ))); + + std::pair ret = base_class::update_at( &refHead, *pNode, + [&f]( bool bNew, node_type& node, node_type& ){ f( bNew, node.m_Data ); }, + bAllowInsert ); + if ( ret.first && ret.second ) + pNode.release(); + + return ret; + } + + template + bool find_at( head_type& refHead, K const& key, Compare cmp ) + { + return base_class::find_at( &refHead, key, cmp ); + } + + template + bool find_at( head_type& refHead, K& key, Compare cmp, Func f ) + { + return base_class::find_at( &refHead, key, cmp, [&f]( node_type& node, K& ){ f( node.m_Data ); }); + } + + template + guarded_ptr get_at( head_type& refHead, K const& key, Compare cmp ) + { + return base_class::get_at( &refHead, key, cmp ); + } + + //@endcond + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_IMPL_LAZY_KVLIST_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/lazy_list.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/lazy_list.h new file mode 100644 index 0000000..d5a77c8 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/lazy_list.h @@ -0,0 +1,868 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: 
http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_IMPL_LAZY_LIST_H +#define CDSLIB_CONTAINER_IMPL_LAZY_LIST_H + +#include +#include + +namespace cds { namespace container { + + /// Lazy ordered list + /** @ingroup cds_nonintrusive_list + @anchor cds_nonintrusive_LazyList_gc + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N). + + Source: + - [2005] Steve Heller, Maurice Herlihy, Victor Luchangco, Mark Moir, William N. 
Scherer III, and Nir Shavit + "A Lazy Concurrent List-Based Set Algorithm" + + The lazy list is based on an optimistic locking scheme for inserts and removes, + eliminating the need to use the equivalent of an atomically markable + reference. It also has a novel wait-free membership \p find() operation + that does not need to perform cleanup operations and is more efficient. + + It is non-intrusive version of \p cds::intrusive::LazyList class. + + Template arguments: + - \p GC - garbage collector: \p gc::HP, \p gp::DHP + - \p T - type to be stored in the list. + - \p Traits - type traits, default is \p lazy_list::traits. + It is possible to declare option-based list with \p lazy_list::make_traits metafunction istead of \p Traits template + argument. For example, the following traits-based declaration of \p gc::HP lazy list + \code + #include + // Declare comparator for the item + struct my_compare { + int operator ()( int i1, int i2 ) + { + return i1 - i2; + } + }; + + // Declare traits + struct my_traits: public cds::container::lazy_list::traits + { + typedef my_compare compare; + }; + + // Declare traits-based list + typedef cds::container::LazyList< cds::gc::HP, int, my_traits > traits_based_list; + \endcode + is equal to the following option-based list: + \code + #include + + // my_compare is the same + + // Declare option-based list + typedef cds::container::LazyList< cds::gc::HP, int, + typename cds::container::lazy_list::make_traits< + cds::container::opt::compare< my_compare > // item comparator option + >::type + > option_based_list; + \endcode + + Unlike standard container, this implementation does not divide type \p T into key and value part and + may be used as main building block for hash set algorithms. + + The key is a function (or a part) of type \p T, and the comparing function is specified by \p Traits::compare functor + or \p Traits::less predicate. 
+ + \p LazyKVList is a key-value version of lazy non-intrusive list that is closer to the C++ std library approach. + + \par Usage + There are different specializations of this template for each garbage collecting schema used. + You should include appropriate .h-file depending on GC you are using: + - for gc::HP: + - for gc::DHP: + - for \ref cds_urcu_desc "RCU": + - for gc::nogc: + */ + template < + typename GC, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = lazy_list::traits +#else + typename Traits +#endif + > + class LazyList: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::LazyList< GC, T, Traits > +#else + protected details::make_lazy_list< GC, T, Traits >::type +#endif + { + //@cond + typedef details::make_lazy_list< GC, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + typedef GC gc; ///< Garbage collector used + typedef T value_type; ///< Type of value stored in the list + typedef Traits traits; ///< List traits + + typedef typename base_class::back_off back_off; ///< Back-off strategy used + typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter; ///< Item counting policy used + typedef typename maker::key_comparator key_comparator; ///< key comparison functor + typedef typename base_class::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename base_class::stat stat; ///< Internal statistics + + static constexpr const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the algorithm + + //@cond + // Rebind traits (split-list support) + template + struct rebind_traits { + typedef LazyList< + gc + , value_type + , typename cds::opt::make_options< traits, Options...>::type + > type; + }; + + // Stat selector + template + using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >; + //@endcond + + protected: + //@cond + typedef typename base_class::value_type node_type; + typedef typename maker::cxx_allocator cxx_allocator; + typedef typename maker::node_deallocator node_deallocator; + typedef typename maker::intrusive_traits::compare intrusive_key_comparator; + + typedef typename base_class::node_type head_type; + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + + //@endcond + + public: + /// Guarded pointer + typedef typename gc::template guarded_ptr< node_type, value_type, details::guarded_ptr_cast_set > guarded_ptr; + + protected: + //@cond + template + class iterator_type: protected base_class::template iterator_type + { + typedef typename base_class::template iterator_type iterator_base; + + iterator_type( head_type const& pNode ) + : iterator_base( const_cast( &pNode )) + {} + + iterator_type( head_type const * pNode ) + : iterator_base( const_cast( pNode )) + {} + + friend class LazyList; + + public: + typedef typename cds::details::make_const_type::pointer value_ptr; + typedef typename cds::details::make_const_type::reference value_ref; + + iterator_type() + {} + + iterator_type( iterator_type const& src ) + : iterator_base( src ) + {} + + value_ptr operator ->() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + return p ? 
&(p->m_Value) : nullptr; + } + + value_ref operator *() const + { + return (iterator_base::operator *()).m_Value; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base::operator ++(); + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base::operator ==(i); + } + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base::operator !=(i); + } + }; + //@endcond + + public: + ///@name Forward iterators (only for debugging purpose) + //@{ + /// Forward iterator + /** + The forward iterator for lazy list has some features: + - it has no post-increment operator + - to protect the value, the iterator contains a GC-specific guard + another guard is required locally for increment operator. + For some GC (\p gc::HP), a guard is limited resource per thread, so an exception (or assertion) "no free guard" + may be thrown if a limit of guard count per thread is exceeded. + - The iterator cannot be moved across thread boundary since it contains GC's guard that is thread-private GC data. + - Iterator ensures thread-safety even if you delete the item that iterator points to. However, in case of concurrent + deleting operations it is no guarantee that you iterate all item in the list. + Moreover, a crash is possible when you try to iterate the next element that has been deleted by concurrent thread. + + @warning Use this iterator on the concurrent container for debugging purpose only. 
+ */ + typedef iterator_type iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + iterator it( head()); + ++it ; // skip dummy head node + return it; + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + + The returned value can be used only to control reaching the end of the list. + For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator( tail()); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator begin() const + { + const_iterator it( head()); + ++it ; // skip dummy head node + return it; + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator cbegin() const + { + const_iterator it( head()); + ++it ; // skip dummy head node + return it; + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator end() const + { + return const_iterator( tail()); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator cend() const + { + return const_iterator( tail()); + } + //@} + + public: + /// Default constructor + LazyList() + {} + + //@cond + template >::value >> + explicit LazyList( Stat& st ) + : base_class( st ) + {} + //@endcond + + /// Destructor clears the list + ~LazyList() + { + clear(); + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the list. + + The type \p Q should contain as minimum the complete key of the node. 
+ The object of \ref value_type should be constructible from \p val of type \p Q. + In trivial case, \p Q is equal to \ref value_type. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( Q&& val ) + { + return insert_at( head(), std::forward( val )); + } + + /// Inserts new node + /** + This function inserts new node with default-constructed value and then it calls + \p func functor with signature + \code void func( value_type& item ) ;\endcode + + The argument \p item of user-defined functor \p func is the reference + to the list's item inserted. + When \p func is called it has exclusive access to the item. + The user-defined functor is called only if the inserting is success. + + The type \p Q should contain the complete key of the node. + The object of \p value_type should be constructible from \p key of type \p Q. + + The function allows to split creating of new item into two part: + - create item from \p key with initializing key-fields only; + - insert new item into the list; + - if inserting is successful, initialize non-key fields of item by calling \p func functor + + This can be useful if complete initialization of object of \p value_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + */ + template + bool insert( Q&& key, Func func ) + { + return insert_at( head(), std::forward( key ), func ); + } + + /// Inserts data of type \p value_type constructed from \p args + /** + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool emplace( Args&&... args ) + { + return emplace_at( head(), std::forward(args)... ); + } + + /// Updates data by \p key + /** + The operation performs inserting or replacing the element with lock-free manner. + + If the \p key not found in the list, then the new item created from \p key + will be inserted iff \p bAllowInsert is \p true. 
+ Otherwise, if \p key is found, the functor \p func is called with item found. + + The functor \p Func signature is: + \code + struct my_functor { + void operator()( bool bNew, value_type& item, Q const& key ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + - \p key - argument \p key passed into the \p %update() function + + The functor may change non-key fields of the \p item; + during \p func call \p item is locked so it is safe to modify the item in + multi-threaded environment. + + Returns std::pair where \p first is true if operation is successful, + \p second is true if new item has been added or \p false if the item with \p key + already exists. + */ + template + std::pair update( Q const& key, Func func, bool bAllowInsert = true ) + { + return update_at( head(), key, func, bAllowInsert ); + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( Q const& key, Func f ) + { + return update( key, f, true ); + } + //@endcond + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_LazyList_hp_erase_val + Since the key of LazyList's item type \p T is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The list item comparator should be able to compare the type \p T of list item + and the type \p Q. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key ) + { + return erase_at( head(), key, intrusive_key_comparator(), [](value_type const&){} ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyList_hp_erase_val "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. 
+ */ + template + bool erase_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return erase_at( head(), key, typename maker::template less_wrapper::type(), [](value_type const&){} ); + } + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_LazyList_hp_erase_func + The function searches an item with key \p key, calls \p f functor with item found + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(const value_type& val) { ... } + }; + \endcode + + Since the key of LazyList's item type \p T is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The list item comparator should be able to compare the type \p T of list item + and the type \p Q. + + Return \p true if key is found and deleted, \p false otherwise + + See also: \ref erase + */ + template + bool erase( Q const& key, Func f ) + { + return erase_at( head(), key, intrusive_key_comparator(), f ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyList_hp_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return erase_at( head(), key, typename maker::template less_wrapper::type(), f ); + } + + /// Extracts the item from the list with specified \p key + /** \anchor cds_nonintrusive_LazyList_hp_extract + The function searches an item with key equal to \p key, + unlinks it from the list, and returns it as \p guarded_ptr. + If \p key is not found the function returns an empty guarded pointer. 
+ + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::container::LazyList< cds::gc::HP, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp(theList.extract( 5 )); + if ( gp ) { + // Deal with gp + // ... + } + // Destructor of gp releases internal HP guard and frees the item + } + \endcode + */ + template + guarded_ptr extract( Q const& key ) + { + return extract_at( head(), key, intrusive_key_comparator()); + } + + /// Extracts the item from the list with comparing functor \p pred + /** + The function is an analog of \ref cds_nonintrusive_LazyList_hp_extract "extract(Q const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + guarded_ptr extract_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return extract_at( head(), key, typename maker::template less_wrapper::type()); + } + + /// Checks whether the list contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + */ + template + bool contains( Q const& key ) + { + return find_at( head(), key, intrusive_key_comparator()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( Q const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the list contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. 
+ \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool contains( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return find_at( head(), key, typename maker::template less_wrapper::type()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + /// Finds the key \p key and performs an action with it + /** \anchor cds_nonintrusive_LazyList_hp_find_func + The function searches an item with key equal to \p key and calls the functor \p f for the item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. + + The functor may change non-key fields of \p item. Note that the function is only guarantee + that \p item cannot be deleted during functor is executing. + The function does not serialize simultaneous access to the list \p item. If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor + may modify both arguments. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( Q& key, Func f ) + { + return find_at( head(), key, intrusive_key_comparator(), f ); + } + //@cond + template + bool find( Q const& key, Func f ) + { + return find_at( head(), key, intrusive_key_comparator(), f ); + } + //@endcond + + /// Finds the key \p key using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyList_hp_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. 
+ \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return find_at( head(), key, typename maker::template less_wrapper::type(), f ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return find_at( head(), key, typename maker::template less_wrapper::type(), f ); + } + //@endcond + + /// Finds the key \p key and return the item found + /** \anchor cds_nonintrusive_LazyList_hp_get + The function searches the item with key equal to \p key + and returns the item found as \p guarded_ptr. + If \p key is not found the function returns an empty guarded pointer. + + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::container::LazyList< cds::gc::HP, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp( theList.get( 5 )); + if ( gp ) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard and frees the item + } + \endcode + + Note the compare functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + guarded_ptr get( Q const& key ) + { + return get_at( head(), key, intrusive_key_comparator()); + } + + /// Finds the key \p key and return the item found + /** + The function is an analog of \ref cds_nonintrusive_LazyList_hp_get "get( Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. 
+ */ + template + guarded_ptr get_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return get_at( head(), key, typename maker::template less_wrapper::type()); + } + + /// Checks whether the list is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns list's item count + /** + The value returned depends on \p Traits::item_counter type. For \p atomicity::empty_item_counter, + this function always returns 0. + + @note Even if you use real item counter and it returns 0, this fact is not mean that the list + is empty. To check list emptyness use \ref empty() method. + */ + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Clears the list + void clear() + { + base_class::clear(); + } + + protected: + //@cond + static value_type& node_to_value( node_type& n ) + { + return n.m_Value; + } + + static value_type const& node_to_value( node_type const& n ) + { + return n.m_Value; + } + + template + static node_type * alloc_node( Q const& v ) + { + return cxx_allocator().New( v ); + } + + template + static node_type * alloc_node( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward( args )... 
); + } + + static void free_node( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + + head_type& head() + { + return base_class::m_Head; + } + + head_type const& head() const + { + return base_class::m_Head; + } + + head_type& tail() + { + return base_class::m_Tail; + } + + head_type const& tail() const + { + return base_class::m_Tail; + } + + bool insert_node( node_type * pNode ) + { + return insert_node_at( head(), pNode ); + } + + bool insert_node_at( head_type& refHead, node_type * pNode ) + { + assert( pNode != nullptr ); + scoped_node_ptr p( pNode ); + + if ( base_class::insert_at( &refHead, *pNode )) { + p.release(); + return true; + } + + return false; + } + + template + bool insert_at( head_type& refHead, Q&& val ) + { + return insert_node_at( refHead, alloc_node( std::forward( val ))); + } + + template + bool emplace_at( head_type& refHead, Args&&... args ) + { + return insert_node_at( refHead, alloc_node( std::forward(args)... )); + } + + template + bool insert_at( head_type& refHead, Q&& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( std::forward( key ))); + + if ( base_class::insert_at( &refHead, *pNode, [&f](node_type& node){ f( node_to_value(node)); } )) { + pNode.release(); + return true; + } + return false; + } + + template + bool erase_at( head_type& refHead, Q const& key, Compare cmp, Func f ) + { + return base_class::erase_at( &refHead, key, cmp, [&f](node_type const& node){ f( node_to_value(node)); } ); + } + + template + guarded_ptr extract_at( head_type& refHead, Q const& key, Compare cmp ) + { + return base_class::extract_at( &refHead, key, cmp ); + } + + template + std::pair update_at( head_type& refHead, Q const& key, Func f, bool bAllowInsert ) + { + scoped_node_ptr pNode( alloc_node( key )); + + std::pair ret = base_class::update_at( &refHead, *pNode, + [&f, &key](bool bNew, node_type& node, node_type&) { f( bNew, node_to_value(node), key );}, + bAllowInsert ); + if ( ret.first && ret.second ) + pNode.release(); + 
+ return ret; + } + + template + bool find_at( head_type& refHead, Q const& key, Compare cmp ) + { + return base_class::find_at( &refHead, key, cmp ); + } + + template + bool find_at( head_type& refHead, Q& val, Compare cmp, Func f ) + { + return base_class::find_at( &refHead, val, cmp, [&f](node_type& node, Q& v){ f( node_to_value(node), v ); }); + } + + template + guarded_ptr get_at( head_type& refHead, Q const& key, Compare cmp ) + { + return base_class::get_at( &refHead, key, cmp ); + } + + //@endcond + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_IMPL_LAZY_LIST_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/michael_kvlist.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/michael_kvlist.h new file mode 100644 index 0000000..04819f8 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/michael_kvlist.h @@ -0,0 +1,886 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_IMPL_MICHAEL_KVLIST_H +#define CDSLIB_CONTAINER_IMPL_MICHAEL_KVLIST_H + +#include +#include + +namespace cds { namespace container { + + /// Michael's ordered list for key-value pair + /** @ingroup cds_nonintrusive_list + \anchor cds_nonintrusive_MichaelKVList_gc + + This is key-value variation of non-intrusive MichaelList. + Like standard container, this implementation split a value stored into two part - + constant key and alterable value. + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N) where \p N is the item count in the list, not in the + hash table. + + Template arguments: + - \p GC - garbage collector used + - \p Key - key type of an item stored in the list. It should be copy-constructible + - \p Value - value type stored in a list + - \p Traits - type traits, default is \p michael_list::traits + + It is possible to declare option-based list with \p cds::container::michael_list::make_traits metafunction instead of \p Traits template + argument. 
For example, the following traits-based declaration of \p gc::HP Michael's list + \code + #include + // Declare comparator for the item + struct my_compare { + int operator ()( int i1, int i2 ) + { + return i1 - i2; + } + }; + + // Declare traits + struct my_traits: public cds::container::michael_list::traits + { + typedef my_compare compare; + }; + + // Declare traits-based list + typedef cds::container::MichaelKVList< cds::gc::HP, int, int, my_traits > traits_based_list; + \endcode + is equivalent for the following option-based list + \code + #include + + // my_compare is the same + + // Declare option-based list + typedef cds::container::MichaelKVList< cds::gc::HP, int, int, + typename cds::container::michael_list::make_traits< + cds::container::opt::compare< my_compare > // item comparator option + >::type + > option_based_list; + \endcode + + \par Usage + There are different specializations of this template for each garbage collecting schema used. + You should include appropriate .h-file depending on GC you are using: + - for gc::HP: \code #include \endcode + - for gc::DHP: \code #include \endcode + - for \ref cds_urcu_desc "RCU": \code #include \endcode + - for gc::nogc: \code #include \endcode + */ + template < + typename GC, + typename Key, + typename Value, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = michael_list::traits +#else + typename Traits +#endif + > + class MichaelKVList: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::MichaelList< GC, implementation_defined, Traits > +#else + protected details::make_michael_kvlist< GC, Key, Value, Traits >::type +#endif + { + //@cond + typedef details::make_michael_kvlist< GC, Key, Value, Traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: +#ifdef CDS_DOXYGEN_INVOKED + typedef Key key_type ; ///< Key type + typedef Value mapped_type ; ///< Type of value stored in the list + typedef std::pair value_type ; ///< key/value pair stored in the list +#else + typedef typename 
maker::key_type key_type; + typedef typename maker::value_type mapped_type; + typedef typename maker::pair_type value_type; +#endif + + typedef typename base_class::gc gc; ///< Garbage collector used + typedef Traits traits; ///< List traits + typedef typename base_class::back_off back_off; ///< Back-off strategy used + typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter; ///< Item counting policy used + typedef typename maker::key_comparator key_comparator; ///< key comparison functor + typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option + typedef typename base_class::stat stat; ///< Internal statistics + + static constexpr const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the algorithm + + //@cond + // Rebind traits (split-list support) + template + struct rebind_traits { + typedef MichaelKVList< + gc + , key_type, mapped_type + , typename cds::opt::make_options< traits, Options...>::type + > type; + }; + + // Stat selector + template + using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >; + //@endcond + + protected: + //@cond + typedef typename base_class::value_type node_type; + typedef typename maker::cxx_allocator cxx_allocator; + typedef typename maker::node_deallocator node_deallocator; + typedef typename maker::intrusive_traits::compare intrusive_key_comparator; + + typedef typename base_class::atomic_node_ptr head_type; + //@endcond + + public: + /// Guarded pointer + typedef typename gc::template guarded_ptr< node_type, value_type, details::guarded_ptr_cast_map > guarded_ptr; + + protected: + //@cond + template + static node_type * alloc_node(const K& key) + { + return cxx_allocator().New( key ); + } + + template + static node_type * alloc_node( const K& key, const V& val ) + { + return 
cxx_allocator().New( key, val ); + } + + template + static node_type * alloc_node( K&& key, Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward(key), std::forward(args)...); + } + + static void free_node( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + + head_type& head() + { + return base_class::m_pHead; + } + + head_type const& head() const + { + return base_class::m_pHead; + } + //@endcond + + protected: + //@cond + template + class iterator_type: protected base_class::template iterator_type + { + typedef typename base_class::template iterator_type iterator_base; + + iterator_type( head_type const& pNode ) + : iterator_base( pNode ) + {} + + friend class MichaelKVList; + + public: + typedef typename cds::details::make_const_type::reference value_ref; + typedef typename cds::details::make_const_type::pointer value_ptr; + + typedef typename cds::details::make_const_type::reference pair_ref; + typedef typename cds::details::make_const_type::pointer pair_ptr; + + iterator_type() + {} + + iterator_type( iterator_type const& src ) + : iterator_base( src ) + {} + + key_type const& key() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + assert( p != nullptr ); + return p->m_Data.first; + } + + pair_ptr operator ->() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + return p ? 
&(p->m_Data) : nullptr; + } + + pair_ref operator *() const + { + typename iterator_base::value_ref p = iterator_base::operator *(); + return p.m_Data; + } + + value_ref val() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + assert( p != nullptr ); + return p->m_Data.second; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base::operator ++(); + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base::operator ==(i); + } + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base::operator !=(i); + } + }; + //@endcond + + public: + /// Forward iterator + /** + The forward iterator for Michael's list has some features: + - it has no post-increment operator + - to protect the value, the iterator contains a GC-specific guard + another guard is required locally for increment operator. + For some GC (\p gc::HP), a guard is limited resource per thread, so an exception (or assertion) "no free guard" + may be thrown if a limit of guard count per thread is exceeded. + - The iterator cannot be moved across thread boundary since it contains GC's guard that is thread-private GC data. + - Iterator ensures thread-safety even if you delete the item that iterator points to. However, in case of concurrent + deleting operations it is no guarantee that you iterate all item in the list. + + @warning Use this iterator on the concurrent container for debugging purpose only. 
+ + The iterator interface to access item data: + - operator -> - returns a pointer to \ref value_type for iterator + - operator * - returns a reference (a const reference for \p const_iterator) to \ref value_type for iterator + - const key_type& key() - returns a key reference for iterator + - mapped_type& val() - retuns a value reference for iterator (const reference for \p const_iterator) + + For both functions the iterator should not be equal to end() + */ + typedef iterator_type iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + ///@name Forward iterators (only for debugging purpose) + //@{ + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( head()); + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + Internally, end returning value equals to \p nullptr. + + The returned value can be used only to control reaching the end of the list. 
+ For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator(); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator begin() const + { + return const_iterator( head()); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator cbegin() const + { + return const_iterator( head()); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator end() const + { + return const_iterator(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator cend() const + { + return const_iterator(); + } + //@} + + public: + /// Default constructor + /** + Initializes empty list + */ + MichaelKVList() + {} + + //@cond + template >::value >> + explicit MichaelKVList( Stat& st ) + : base_class( st ) + {} + //@endcond + + /// List destructor + /** + Clears the list + */ + ~MichaelKVList() + { + clear(); + } + + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the list. + + Preconditions: + - The \p key_type should be constructible from value of type \p K. + In trivial case, \p K is equal to \p key_type. + - The \p mapped_type should be default-constructible. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( K&& key ) + { + return insert_at( head(), std::forward( key )); + } + + /// Inserts new node with a key and a value + /** + The function creates a node with \p key and value \p val, and then inserts the node created into the list. + + Preconditions: + - The \p key_type should be constructible from \p key of type \p K. + - The \p mapped_type should be constructible from \p val of type \p V. + + Returns \p true if inserting successful, \p false otherwise. 
+ */ + template + bool insert( K&& key, V&& val ) + { + // We cannot use insert with functor here + // because we cannot lock inserted node for updating + // Therefore, we use separate function + return insert_at( head(), std::forward( key ), std::forward( val )); + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the item inserted. item.second is a reference to item's value that may be changed. + User-defined functor \p func should guarantee that during changing item's value no any other changes + could be made on this list's item by concurrent threads. + The user-defined functor is called only if inserting is successful. + + The \p key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create a new item from \p key; + - insert the new item into the list; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + bool insert_with( K&& key, Func func ) + { + return insert_with_at( head(), std::forward( key ), func ); + } + + /// Updates data by \p key + /** + The operation performs inserting or replacing the element with lock-free manner. + + If the \p key not found in the list, then the new item created from \p key + will be inserted iff \p bAllowInsert is \p true. + (note that in this case the \ref key_type should be constructible from type \p K). 
+ Otherwise, if \p key is found, the functor \p func is called with item found. + + The functor \p Func signature is: + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - the item found or inserted + + The functor may change any fields of the \p item.second of \p mapped_type; + however, \p func must guarantee that during changing no any other modifications + could be made on this item by concurrent threads. + + Returns std::pair where \p first is true if operation is successful, + \p second is true if new item has been added or \p false if the item with \p key + already exists. + + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + std::pair update( K&& key, Func f, bool bAllowInsert = true ) + { + return update_at( head(), std::forward( key ), f, bAllowInsert ); + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( K const& key, Func f ) + { + return update( key, f, true ); + } + //@endcond + + /// Inserts a new node using move semantics + /** + \p key_type field of new item is constructed from \p key argument, + \p mapped_type field is done from \p args. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool emplace( K&& key, Args&&... args ) + { + return emplace_at( head(), std::forward(key), std::forward(args)... 
); + } + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_MichaelKVList_hp_erase_val + + Returns \p true if \p key is found and has been deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + return erase_at( head(), key, intrusive_key_comparator()); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelKVList_hp_erase_val "erase(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return erase_at( head(), key, typename maker::template less_wrapper::type()); + } + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_MichaelKVList_hp_erase_func + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type& val) { ... } + }; + \endcode + + Return \p true if key is found and deleted, \p false otherwise + + See also: \ref erase + */ + template + bool erase( K const& key, Func f ) + { + return erase_at( head(), key, intrusive_key_comparator(), f ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelKVList_hp_erase_func "erase(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. 
+ */ + template + bool erase_with( K const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return erase_at( head(), key, typename maker::template less_wrapper::type(), f ); + } + + /// Extracts the item from the list with specified \p key + /** \anchor cds_nonintrusive_MichaelKVList_hp_extract + The function searches an item with key equal to \p key, + unlinks it from the list, and returns it as \p guarded_ptr. + If \p key is not found the function returns an empty guarded pointer. + + Note the compare functor should accept a parameter of type \p K that can be not the same as \p key_type. + + The \p disposer specified in \p Traits class template parameter is called automatically + by garbage collector \p GC specified in class' template parameters when returned \p guarded_ptr object + will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::container::MichaelKVList< cds::gc::HP, int, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp(theList.extract( 5 )); + if ( gp ) { + // Deal with gp + // ... + } + // Destructor of gp releases internal HP guard + } + \endcode + */ + template + guarded_ptr extract( K const& key ) + { + return extract_at( head(), key, intrusive_key_comparator()); + } + + /// Extracts the item from the list with comparing functor \p pred + /** + The function is an analog of \ref cds_nonintrusive_MichaelKVList_hp_extract "extract(K const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the list. 
+ */ + template + guarded_ptr extract_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return extract_at( head(), key, typename maker::template less_wrapper::type()); + } + + /// Checks whether the list contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + */ + template + bool contains( Q const& key ) + { + return find_at( head(), key, intrusive_key_comparator()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( Q const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the map contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the list. + */ + template + bool contains( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return find_at( head(), key, typename maker::template less_wrapper::type()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return contains( key, pred ); + } + //@endcond + + /// Finds the key \p key and performs an action with it + /** \anchor cds_nonintrusive_MichaelKVList_hp_find_func + The function searches an item with key equal to \p key and calls the functor \p f for the item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + The functor may change item.second that is reference to value of node. + Note that the function is only guarantee that \p item cannot be deleted during functor is executing. + The function does not serialize simultaneous access to the list \p item. 
If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( Q const& key, Func f ) + { + return find_at( head(), key, intrusive_key_comparator(), f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelKVList_hp_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return find_at( head(), key, typename maker::template less_wrapper::type(), f ); + } + + /// Finds the \p key and return the item found + /** \anchor cds_nonintrusive_MichaelKVList_hp_get + The function searches the item with key equal to \p key + and returns it as \p guarded_ptr. + If \p key is not found the function returns an empty guarded pointer. + + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::container::MichaelKVList< cds::gc::HP, int, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp(theList.get( 5 )); + if ( gp ) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + + Note the compare functor specified for class \p Traits template parameter + should accept a parameter of type \p K that can be not the same as \p key_type. + */ + template + guarded_ptr get( K const& key ) + { + return get_at( head(), key, intrusive_key_comparator()); + } + + /// Finds the \p key and return the item found + /** + The function is an analog of \ref cds_nonintrusive_MichaelKVList_hp_get "get( guarded_ptr& ptr, K const&)" + but \p pred is used for comparing the keys. 
+ + \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + guarded_ptr get_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return get_at( head(), key, typename maker::template less_wrapper::type()); + } + + /// Checks if the list is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns list's item count + /** + The value returned depends on item counter provided by \p Traits. For \p atomicity::empty_item_counter, + this function always returns 0. + + @note Even if you use real item counter and it returns 0, this fact is not mean that the list + is empty. To check list emptyness use \p empty() method. + */ + size_t size() const + { + return base_class::size(); + } + + /// Clears the list + void clear() + { + base_class::clear(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + protected: + //@cond + bool insert_node_at( head_type& refHead, node_type * pNode ) + { + assert( pNode != nullptr ); + scoped_node_ptr p( pNode ); + if ( base_class::insert_at( refHead, *pNode )) { + p.release(); + return true; + } + return false; + } + + template + bool insert_at( head_type& refHead, K&& key ) + { + return insert_node_at( refHead, alloc_node( std::forward( key ))); + } + + template + bool insert_at( head_type& refHead, K&& key, V&& val ) + { + return insert_node_at( refHead, alloc_node( std::forward( key ), std::forward( val ))); + } + + template + bool insert_with_at( head_type& refHead, K&& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( std::forward( key ))); + + if ( base_class::insert_at( refHead, *pNode, [&f](node_type& node){ f( node.m_Data ); })) { + pNode.release(); + return true; + } + return false; + } + + template + bool emplace_at( head_type& refHead, 
K&& key, Args&&... args ) + { + return insert_node_at( refHead, alloc_node( std::forward(key), std::forward(args)... )); + } + + template + std::pair update_at( head_type& refHead, K&& key, Func f, bool bAllowInsert ) + { + scoped_node_ptr pNode( alloc_node( std::forward( key ))); + + std::pair ret = base_class::update_at( refHead, *pNode, + [&f]( bool bNew, node_type& node, node_type& ){ f( bNew, node.m_Data ); }, + bAllowInsert ); + if ( ret.first && ret.second ) + pNode.release(); + + return ret; + } + + template + bool erase_at( head_type& refHead, K const& key, Compare cmp ) + { + return base_class::erase_at( refHead, key, cmp ); + } + + template + bool erase_at( head_type& refHead, K const& key, Compare cmp, Func f ) + { + return base_class::erase_at( refHead, key, cmp, [&f]( node_type const & node ){ f( const_cast(node.m_Data)); }); + } + template + guarded_ptr extract_at( head_type& refHead, K const& key, Compare cmp ) + { + return base_class::extract_at( refHead, key, cmp ); + } + + template + bool find_at( head_type& refHead, K const& key, Compare cmp ) + { + return base_class::find_at( refHead, key, cmp ); + } + + template + bool find_at( head_type& refHead, K& key, Compare cmp, Func f ) + { + return base_class::find_at( refHead, key, cmp, [&f](node_type& node, K const&){ f( node.m_Data ); }); + } + + template + guarded_ptr get_at( head_type& refHead, K const& key, Compare cmp ) + { + return base_class::get_at( refHead, key, cmp ); + } + + //@endcond + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_IMPL_MICHAEL_KVLIST_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/michael_list.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/michael_list.h new file mode 100644 index 0000000..721a305 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/michael_list.h @@ -0,0 +1,847 @@ +/* + This file is a part of libcds - Concurrent Data Structures 
library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_IMPL_MICHAEL_LIST_H +#define CDSLIB_CONTAINER_IMPL_MICHAEL_LIST_H + +#include +#include + +namespace cds { namespace container { + + /// Michael's ordered list + /** @ingroup cds_nonintrusive_list + \anchor cds_nonintrusive_MichaelList_gc + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N), where \p N is the item count in the list, not in the + hash table. 
+ + Source: + - [2002] Maged Michael "High performance dynamic lock-free hash tables and list-based sets" + + This class is non-intrusive version of cds::intrusive::MichaelList class + + Template arguments: + - \p GC - garbage collector used + - \p T - type stored in the list. The type must be default- and copy-constructible. + - \p Traits - type traits, default is \p michael_list::traits + + Unlike standard container, this implementation does not divide type \p T into key and value part and + may be used as a main building block for hash set algorithms. + The key is a function (or a part) of type \p T, and this function is specified by Traits::compare functor + or Traits::less predicate + + MichaelKVList is a key-value version of Michael's non-intrusive list that is closer to the C++ std library approach. + + It is possible to declare option-based list with cds::container::michael_list::make_traits metafunction istead of \p Traits template + argument. For example, the following traits-based declaration of gc::HP Michael's list + \code + #include + // Declare comparator for the item + struct my_compare { + int operator ()( int i1, int i2 ) + { + return i1 - i2; + } + }; + + // Declare traits + struct my_traits: public cds::container::michael_list::traits + { + typedef my_compare compare; + }; + + // Declare traits-based list + typedef cds::container::MichaelList< cds::gc::HP, int, my_traits > traits_based_list; + \endcode + + is equivalent for the following option-based list + \code + #include + + // my_compare is the same + + // Declare option-based list + typedef cds::container::MichaelList< cds::gc::HP, int, + typename cds::container::michael_list::make_traits< + cds::container::opt::compare< my_compare > // item comparator option + >::type + > option_based_list; + \endcode + + \par Usage + There are different specializations of this template for each garbage collecting schema used. 
+ You should include appropriate .h-file depending on GC you are using: + - for gc::HP: \code #include \endcode + - for gc::DHP: \code #include \endcode + - for \ref cds_urcu_desc "RCU": \code #include \endcode + - for gc::nogc: \code #include \endcode + */ + template < + typename GC, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = michael_list::traits +#else + typename Traits +#endif + > + class MichaelList: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::MichaelList< GC, T, Traits > +#else + protected details::make_michael_list< GC, T, Traits >::type +#endif + { + //@cond + typedef details::make_michael_list< GC, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + typedef T value_type; ///< Type of value stored in the list + typedef Traits traits; ///< List traits + + typedef typename base_class::gc gc; ///< Garbage collector used + typedef typename base_class::back_off back_off; ///< Back-off strategy used + typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter; ///< Item counting policy used + typedef typename maker::key_comparator key_comparator; ///< key comparison functor + typedef typename base_class::memory_model memory_model; ///< Memory ordering. 
See \p cds::opt::memory_model option + typedef typename base_class::stat stat; ///< Internal statistics + + static constexpr const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the algorithm + + //@cond + // Rebind traits (split-list support) + template + struct rebind_traits { + typedef MichaelList< + gc + , value_type + , typename cds::opt::make_options< traits, Options...>::type + > type; + }; + + // Stat selector + template + using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >; + //@endcond + + protected: + //@cond + typedef typename base_class::value_type node_type; + typedef typename maker::cxx_allocator cxx_allocator; + typedef typename maker::node_deallocator node_deallocator; + typedef typename maker::intrusive_traits::compare intrusive_key_comparator; + + typedef typename base_class::atomic_node_ptr head_type; + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + //@endcond + + public: + /// Guarded pointer + typedef typename gc::template guarded_ptr< node_type, value_type, details::guarded_ptr_cast_set > guarded_ptr; + + protected: + //@cond + template + class iterator_type: protected base_class::template iterator_type + { + typedef typename base_class::template iterator_type iterator_base; + + iterator_type( head_type const& pNode ) + : iterator_base( pNode ) + {} + + friend class MichaelList; + + public: + typedef typename cds::details::make_const_type::pointer value_ptr; + typedef typename cds::details::make_const_type::reference value_ref; + + iterator_type() + {} + + iterator_type( iterator_type const& src ) + : iterator_base( src ) + {} + + value_ptr operator ->() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + return p ? 
&(p->m_Value) : nullptr; + } + + value_ref operator *() const + { + return (iterator_base::operator *()).m_Value; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base::operator ++(); + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base::operator ==(i); + } + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base::operator !=(i); + } + }; + //@endcond + + public: + ///@name Forward iterators (only for debugging purpose) + //@{ + /// Forward iterator + /** + The forward iterator for Michael's list has some features: + - it has no post-increment operator + - to protect the value, the iterator contains a GC-specific guard + another guard is required locally for increment operator. + For some GC (\p gc::HP), a guard is limited resource per thread, so an exception (or assertion) "no free guard" + may be thrown if a limit of guard count per thread is exceeded. + - The iterator cannot be moved across thread boundary since it contains GC's guard that is thread-private GC data. + - Iterator ensures thread-safety even if you delete the item that iterator points to. However, in case of concurrent + deleting operations it is no guarantee that you iterate all item in the list. + Moreover, a crash is possible when you try to iterate the next element that has been deleted by concurrent thread. + + @warning Use this iterator on the concurrent container for debugging purpose only. 
+ */ + typedef iterator_type iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( head()); + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + Internally, end returning value equals to \p nullptr. + + The returned value can be used only to control reaching the end of the list. + For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator(); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator begin() const + { + return const_iterator( head()); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator cbegin() const + { + return const_iterator( head()); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator end() const + { + return const_iterator(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator cend() const + { + return const_iterator(); + } + //@} + + public: + /// Default constructor + /** + Initialize empty list + */ + MichaelList() + {} + + //@cond + template >::value >> + explicit MichaelList( Stat& st ) + : base_class( st ) + {} + //@endcond + + /// List destructor + /** + Clears the list + */ + ~MichaelList() + { + clear(); + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the list. + + The type \p Q should contain least the complete key of the node. + The object of \ref value_type should be constructible from \p val of type \p Q. 
+ In trivial case, \p Q is equal to \ref value_type. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( Q&& val ) + { + return insert_at( head(), std::forward( val )); + } + + /// Inserts new node + /** + This function inserts new node with default-constructed value and then it calls + \p func functor with signature + \code void func( value_type& itemValue ) ;\endcode + + The argument \p itemValue of user-defined functor \p func is the reference + to the list's item inserted. User-defined functor \p func should guarantee that during changing + item's value no any other changes could be made on this list's item by concurrent threads. + The user-defined functor is called only if inserting is success. + + The type \p Q should contain the complete key of the node. + The object of \p value_type should be constructible from \p key of type \p Q. + + The function allows to split creating of new item into two part: + - create item from \p key with initializing key-fields only; + - insert new item into the list; + - if inserting is successful, initialize non-key fields of item by calling \p func functor + + The method can be useful if complete initialization of object of \p value_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + bool insert( Q&& key, Func func ) + { + return insert_at( head(), std::forward(key), func ); + } + + /// Updates data by \p key + /** + The operation performs inserting or replacing the element with lock-free manner. + + If the \p key not found in the list, then the new item created from \p key + will be inserted iff \p bAllowInsert is \p true. + Otherwise, if \p key is found, the functor \p func is called with item found. 
+ + The functor \p Func signature is: + \code + struct my_functor { + void operator()( bool bNew, value_type& item, Q const& key ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + - \p key - argument \p key passed into the \p %update() function + + The functor may change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + Returns std::pair where \p first is true if operation is successful, + \p second is true if new item has been added or \p false if the item with \p key + already exists. + + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + std::pair update( Q const& key, Func func, bool bAllowInsert = true ) + { + return update_at( head(), key, func, bAllowInsert ); + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( Q const& key, Func func ) + { + return update( key, func ); + } + //@endcond + + /// Inserts data of type \p value_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool emplace( Args&&... args ) + { + return emplace_at( head(), std::forward(args)... ); + } + + /// Delete \p key from the list + /** \anchor cds_nonintrusive_MichealList_hp_erase_val + Since the key of MichaelList's item type \p value_type is not explicitly specified, + template parameter \p Q sould contain the complete key to search in the list. + The list item comparator should be able to compare the type \p value_type + and the type \p Q. 
+ + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key ) + { + return erase_at( head(), key, intrusive_key_comparator(), [](value_type const&){} ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichealList_hp_erase_val "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return erase_at( head(), key, typename maker::template less_wrapper::type(), [](value_type const&){} ); + } + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_MichaelList_hp_erase_func + The function searches an item with key \p key, calls \p f functor with item found + and deletes it. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(const value_type& val) { ... } + }; + \endcode + + Since the key of MichaelList's item type \p value_type is not explicitly specified, + template parameter \p Q should contain the complete key to search in the list. + The list item comparator should be able to compare the type \p value_type of list item + and the type \p Q. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key, Func f ) + { + return erase_at( head(), key, intrusive_key_comparator(), f ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelList_hp_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. 
+ */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return erase_at( head(), key, typename maker::template less_wrapper::type(), f ); + } + + /// Extracts the item from the list with specified \p key + /** \anchor cds_nonintrusive_MichaelList_hp_extract + The function searches an item with key equal to \p key, + unlinks it from the list, and returns it as \p guarded_ptr. + If \p key is not found the function returns an empty guarded pointer. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::container::MichaelList< cds::gc::HP, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp(theList.extract( 5 )); + if ( gp ) { + // Deal with gp + // ... + } + // Destructor of gp releases internal HP guard and frees the item + } + \endcode + */ + template + guarded_ptr extract( Q const& key ) + { + return extract_at( head(), key, intrusive_key_comparator()); + } + + /// Extracts the item from the list with comparing functor \p pred + /** + The function is an analog of \ref cds_nonintrusive_MichaelList_hp_extract "extract(Q const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but it should accept arguments of type \p value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + guarded_ptr extract_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return extract_at( head(), key, typename maker::template less_wrapper::type()); + } + + /// Checks whether the list contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. 
+ */ + template + bool contains( Q const& key ) + { + return find_at( head(), key, intrusive_key_comparator()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( Q const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the list contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool contains( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return find_at( head(), key, typename maker::template less_wrapper::type()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Finds \p key and perform an action with it + /** \anchor cds_nonintrusive_MichaelList_hp_find_func + The function searches an item with key equal to \p key and calls the functor \p f for the item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. + + The functor may change non-key fields of \p item. Note that the function is only guarantee + that \p item cannot be deleted during functor is executing. + The function does not serialize simultaneous access to the list \p item. If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The function returns \p true if \p key is found, \p false otherwise. 
+ */ + template + bool find( Q& key, Func f ) + { + return find_at( head(), key, intrusive_key_comparator(), f ); + } + //@cond + template + bool find( Q const& key, Func f ) + { + return find_at( head(), key, intrusive_key_comparator(), f ); + } + //@endcond + + /// Finds \p key using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelList_hp_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return find_at( head(), key, typename maker::template less_wrapper::type(), f ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return find_at( head(), key, typename maker::template less_wrapper::type(), f ); + } + //@endcond + + /// Finds \p key and return the item found + /** + The function searches the item with key equal to \p key + and returns it as \p guarded_ptr. + If \p key is not found the function returns an empty guarded pointer. + + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::container::MichaelList< cds::gc::HP, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp(theList.get( 5 )); + if ( gp ) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard and frees the item + } + \endcode + + Note the compare functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. 
+ */ + template + guarded_ptr get( Q const& key ) + { + return get_at( head(), key, intrusive_key_comparator()); + } + + /// Finds \p key and return the item found + /** + The function is an analog of \p get( Q const&) + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should accept arguments of type \p value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + guarded_ptr get_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return get_at( head(), key, typename maker::template less_wrapper::type()); + } + + /// Check if the list is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns list's item count + /** + The value returned depends on item counter provided by \p Traits. For \p atomicity::empty_item_counter, + this function always returns 0. + + @note Even if you use real item counter and it returns 0, this fact is not mean that the list + is empty. To check list emptyness use \p empty() method. + */ + size_t size() const + { + return base_class::size(); + } + + /// Clears the list + void clear() + { + base_class::clear(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + protected: + //@cond + static value_type& node_to_value( node_type& n ) + { + return n.m_Value; + } + static value_type const& node_to_value( node_type const& n ) + { + return n.m_Value; + } + + template + static node_type * alloc_node( Q const& v ) + { + return cxx_allocator().New( v ); + } + + template + static node_type * alloc_node( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward( args )... 
); + } + + static void free_node( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + + head_type& head() + { + return base_class::m_pHead; + } + + head_type const& head() const + { + return base_class::m_pHead; + } + + bool insert_node( node_type * pNode ) + { + return insert_node_at( head(), pNode ); + } + + bool insert_node_at( head_type& refHead, node_type * pNode ) + { + assert( pNode ); + scoped_node_ptr p(pNode); + if ( base_class::insert_at( refHead, *pNode )) { + p.release(); + return true; + } + + return false; + } + + template + bool insert_at( head_type& refHead, Q&& val ) + { + return insert_node_at( refHead, alloc_node( std::forward(val))); + } + + template + bool insert_at( head_type& refHead, Q&& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( std::forward( key ))); + + if ( base_class::insert_at( refHead, *pNode, [&f]( node_type& node ) { f( node_to_value(node)); } )) { + pNode.release(); + return true; + } + return false; + } + + template + bool emplace_at( head_type& refHead, Args&&... args ) + { + return insert_node_at( refHead, alloc_node( std::forward(args) ... 
)); + } + + template + bool erase_at( head_type& refHead, Q const& key, Compare cmp, Func f ) + { + return base_class::erase_at( refHead, key, cmp, [&f](node_type const& node){ f( node_to_value(node)); } ); + } + + template + guarded_ptr extract_at( head_type& refHead, Q const& key, Compare cmp ) + { + return base_class::extract_at( refHead, key, cmp ); + } + + template + std::pair update_at( head_type& refHead, Q const& key, Func f, bool bAllowInsert ) + { + scoped_node_ptr pNode( alloc_node( key )); + + std::pair ret = base_class::update_at( refHead, *pNode, + [&f, &key](bool bNew, node_type& node, node_type&){ f( bNew, node_to_value(node), key );}, + bAllowInsert ); + if ( ret.first && ret.second ) + pNode.release(); + + return ret; + } + + template + bool find_at( head_type& refHead, Q const& key, Compare cmp ) + { + return base_class::find_at( refHead, key, cmp ); + } + + template + bool find_at( head_type& refHead, Q& val, Compare cmp, Func f ) + { + return base_class::find_at( refHead, val, cmp, [&f](node_type& node, Q& v){ f( node_to_value(node), v ); }); + } + + template + guarded_ptr get_at( head_type& refHead, Q const& key, Compare cmp ) + { + return base_class::get_at( refHead, key, cmp ); + } + + //@endcond + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_IMPL_MICHAEL_LIST_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/skip_list_map.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/skip_list_map.h new file mode 100644 index 0000000..5b58f7b --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/skip_list_map.h @@ -0,0 +1,703 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, 
with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_IMPL_SKIP_LIST_MAP_H +#define CDSLIB_CONTAINER_IMPL_SKIP_LIST_MAP_H + +#include + +namespace cds { namespace container { + + /// Lock-free skip-list map + /** @ingroup cds_nonintrusive_map + \anchor cds_nonintrusive_SkipListMap_hp + + The implementation of well-known probabilistic data structure called skip-list + invented by W.Pugh in his papers: + - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees + - [1990] W.Pugh A Skip List Cookbook + + A skip-list is a probabilistic data structure that provides expected logarithmic + time search without the need of rebalance. The skip-list is a collection of sorted + linked list. Nodes are ordered by key. Each node is linked into a subset of the lists. 
+ Each list has a level, ranging from 0 to 32. The bottom-level list contains + all the nodes, and each higher-level list is a sublist of the lower-level lists. + Each node is created with a random top level (with a random height), and belongs + to all lists up to that level. The probability that a node has the height 1 is 1/2. + The probability that a node has the height N is 1/2 ** N (more precisely, + the distribution depends on an random generator provided, but our generators + have this property). + + The lock-free variant of skip-list is implemented according to book + - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming", + chapter 14.4 "A Lock-Free Concurrent Skiplist" + + Template arguments: + - \p GC - Garbage collector used. + - \p K - type of a key to be stored in the list. + - \p T - type of a value to be stored in the list. + - \p Traits - map traits, default is \p skip_list::traits + It is possible to declare option-based list with \p cds::container::skip_list::make_traits metafunction + istead of \p Traits template argument. + + Like STL map class, \p %SkipListMap stores the key-value pair as std:pair< K const, T>. + + @warning The skip-list requires up to 67 hazard pointers that may be critical for some GCs for which + the guard count is limited (like \p gc::HP). Those GCs should be explicitly initialized with + hazard pointer enough: \code cds::gc::HP myhp( 67 ) \endcode. Otherwise an run-time exception may be raised + when you try to create skip-list object. + + @note There are several specializations of \p %SkipListMap for each \p GC. 
You should include: + - for \p gc::HP garbage collector + - for \p gc::DHP garbage collector + - for \ref cds_nonintrusive_SkipListMap_rcu "RCU type" + - for \ref cds_nonintrusive_SkipListMap_nogc "non-deletable SkipListMap" + */ + template < + typename GC, + typename Key, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = skip_list::traits +#else + typename Traits +#endif + > + class SkipListMap: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::SkipListSet< GC, std::pair, Traits > +#else + protected details::make_skip_list_map< GC, Key, T, Traits >::type +#endif + { + //@cond + typedef details::make_skip_list_map< GC, Key, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + public: + typedef GC gc; ///< Garbage collector + typedef Key key_type; ///< Key type + typedef T mapped_type; ///< Mapped type + typedef Traits traits; ///< Map traits +# ifdef CDS_DOXYGEN_INVOKED + typedef std::pair< Key const, T> value_type; ///< Key-value pair to be stored in the map +# else + typedef typename maker::value_type value_type; +# endif + + typedef typename base_class::back_off back_off; ///< Back-off strategy + typedef typename traits::allocator allocator_type; ///< Allocator type used for allocate/deallocate the skip-list nodes + typedef typename base_class::item_counter item_counter; ///< Item counting policy used + typedef typename maker::key_comparator key_comparator; ///< key comparison functor + typedef typename base_class::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model + typedef typename traits::random_level_generator random_level_generator ; ///< random level generator + typedef typename traits::stat stat; ///< internal statistics type + + static size_t const c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the skip-list + + protected: + //@cond + typedef typename maker::node_type node_type; + typedef typename maker::node_allocator node_allocator; + + 
typedef std::unique_ptr< node_type, typename maker::node_deallocator > scoped_node_ptr; + //@endcond + + public: + /// Guarded pointer + typedef typename gc::template guarded_ptr< node_type, value_type, details::guarded_ptr_cast_set > guarded_ptr; + + protected: + //@cond + unsigned int random_level() + { + return base_class::random_level(); + } + //@endcond + + public: + /// Default ctor + SkipListMap() + : base_class() + {} + + /// Destructor destroys the set object + ~SkipListMap() + {} + + public: + ///@name Forward iterators (only for debugging purpose) + //@{ + /// Iterator type + /** + The forward iterator has some features: + - it is ordered + - it has no post-increment operator + - to protect the value, the iterator contains a GC-specific guard + another guard is required locally for increment operator. + For some GC (like as \p gc::HP), a guard is a limited resource per thread, so an exception (or assertion) "no free guard" + may be thrown if the limit of guard count per thread is exceeded. + - The iterator cannot be moved across thread boundary because it contains thread-private GC's guard. + - Iterator ensures thread-safety even if you delete the item the iterator points to. However, in case of concurrent + deleting operations there is no guarantee that you iterate all item in the list. + Moreover, a crash is possible when you try to iterate the next element that has been deleted by concurrent thread. + + @warning Use this iterator on the concurrent container for debugging purpose only. + + @note \p end() and \p cend() are not dereferenceable. 
+ + The iterator interface: + \code + class iterator { + public: + // Default constructor + iterator(); + + // Copy construtor + iterator( iterator const& src ); + + // Dereference operator + value_type * operator ->() const; + + // Dereference operator + value_type& operator *() const; + + // Preincrement operator + iterator& operator ++(); + + // Assignment operator + iterator& operator = (iterator const& src); + + // Equality operators + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + */ + typedef skip_list::details::iterator< typename base_class::iterator > iterator; + + /// Const forward iterator type + typedef skip_list::details::iterator< typename base_class::const_iterator > const_iterator; + + /// Returns a forward iterator addressing the first element in a map + iterator begin() + { + return iterator( base_class::begin()); + } + + /// Returns a forward const iterator addressing the first element in a map + const_iterator begin() const + { + return cbegin(); + } + + /// Returns a forward const iterator addressing the first element in a map + const_iterator cbegin() const + { + return const_iterator( base_class::cbegin()); + } + + /// Returns a forward iterator that addresses the location succeeding the last element in a map. + iterator end() + { + return iterator( base_class::end()); + } + + /// Returns a forward const iterator that addresses the location succeeding the last element in a map. + const_iterator end() const + { + return cend(); + } + + /// Returns a forward const iterator that addresses the location succeeding the last element in a map. + const_iterator cend() const + { + return const_iterator( base_class::cend()); + } + //@} + + public: + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the map. 
+ + Preconditions: + - The \p key_type should be constructible from a value of type \p K. + In trivial case, \p K is equal to \p key_type. + - The \p mapped_type should be default-constructible. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( K const& key ) + { + return insert_with( key, [](value_type&){} ); + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the map. + + Preconditions: + - The \p key_type should be constructible from \p key of type \p K. + - The \p value_type should be constructible from \p val of type \p V. + + Returns \p true if \p val is inserted into the set, \p false otherwise. + */ + template + bool insert( K const& key, V const& val ) + { + return insert_with( key, [&val]( value_type& item ) { item.second = val; } ); + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the map's item inserted: + - item.first is a const reference to item's key that cannot be changed. + - item.second is a reference to item's value that may be changed. + + \p key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p value_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. 
+ */ + template + bool insert_with( K const& key, Func func ) + { + scoped_node_ptr pNode( node_allocator().New( random_level(), key )); + if ( base_class::insert( *pNode, [&func]( node_type& item ) { func( item.m_Value ); } )) { + pNode.release(); + return true; + } + return false; + } + + /// For key \p key inserts data of type \p value_type created in-place from std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool emplace( K&& key, Args&&... args ) + { + scoped_node_ptr pNode( node_allocator().New( random_level(), std::forward(key), std::forward(args)... )); + if ( base_class::insert( *pNode )) { + pNode.release(); + return true; + } + return false; + } + + /// Updates data by \p key + /** + The operation performs inserting or changing data with lock-free manner. + + If the \p key not found in the map, then the new item created from \p key + will be inserted into the map iff \p bInsert is \p true + (note that in this case the \ref key_type should be constructible from type \p K). + Otherwise, if \p key is found, the functor \p func is called with item found. + The functor \p Func signature: + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + where: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the map + + The functor may change any fields of the \p item.second that is \ref value_type. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if new item has been added or \p false if \p key already exists. 
+ + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + std::pair update( K const& key, Func func, bool bInsert = true ) + { + scoped_node_ptr pNode( node_allocator().New( random_level(), key )); + std::pair res = base_class::update( *pNode, + [&func](bool bNew, node_type& item, node_type const& ){ func( bNew, item.m_Value );}, + bInsert + ); + if ( res.first && res.second ) + pNode.release(); + return res; + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( K const& key, Func func ) + { + return update( key, func, true ); + } + //@endcond + + /// Delete \p key from the map + /** \anchor cds_nonintrusive_SkipListMap_erase_val + + Return \p true if \p key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + return base_class::erase(key); + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListMap_erase_val "erase(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::erase_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >()); + } + + /// Delete \p key from the map + /** \anchor cds_nonintrusive_SkipListMap_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type& item) { ... 
} + }; + \endcode + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key, Func f ) + { + return base_class::erase( key, [&f]( node_type& node) { f( node.m_Value ); } ); + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListMap_erase_func "erase(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( K const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::erase_with( key, + cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >(), + [&f]( node_type& node) { f( node.m_Value ); } ); + } + + /// Extracts the item from the map with specified \p key + /** \anchor cds_nonintrusive_SkipListMap_hp_extract + The function searches an item with key equal to \p key in the map, + unlinks it from the map, and returns a guarded pointer to the item found. + If \p key is not found the function returns an empty guarded pointer. + + Note the compare functor should accept a parameter of type \p K that can be not the same as \p key_type. + + The item extracted is freed automatically by garbage collector \p GC + when returned \p guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::container::SkipListMap< cds::gc::HP, int, foo, my_traits > skip_list; + skip_list theList; + // ... + { + skip_list::guarded_ptr gp( theList.extract( 5 )); + if ( gp ) { + // Deal with gp + // ... 
+ } + // Destructor of gp releases internal HP guard and frees the pointer + } + \endcode + */ + template + guarded_ptr extract( K const& key ) + { + return base_class::extract_( key, typename base_class::key_comparator()); + } + + /// Extracts the item from the map with comparing functor \p pred + /** + The function is an analog of \ref cds_nonintrusive_SkipListMap_hp_extract "extract(K const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the map. + */ + template + guarded_ptr extract_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + typedef cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor > wrapped_less; + return base_class::extract_( key, cds::opt::details::make_comparator_from_less()); + } + + /// Extracts an item with minimal key from the map + /** + The function searches an item with minimal key, unlinks it, and returns an guarded pointer to the item found. + If the skip-list is empty the function returns an empty guarded pointer. + + The item extracted is freed automatically by garbage collector \p GC + when returned \p guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::continer::SkipListMap< cds::gc::HP, int, foo, my_traits > skip_list; + skip_list theList; + // ... + { + skip_list::guarded_ptr gp( theList.extract_min()); + if ( gp ) { + // Deal with gp + //... 
+ } + // Destructor of gp releases internal HP guard and then frees the pointer + } + \endcode + */ + guarded_ptr extract_min() + { + return base_class::extract_min_(); + } + + /// Extracts an item with maximal key from the map + /** + The function searches an item with maximal key, unlinks it, and returns a guarded pointer to item found. + If the skip-list is empty the function returns an empty \p guarded_ptr. + + The item found is freed by garbage collector \p GC automatically + when returned \p guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::container::SkipListMap< cds::gc::HP, int, foo, my_traits > skip_list; + skip_list theList; + // ... + { + skip_list::guarded_ptr gp( theList.extract_max()); + if ( gp ) { + // Deal with gp + //... + } + // Destructor of gp releases internal HP guard and then frees the pointer + } + \endcode + */ + guarded_ptr extract_max() + { + return base_class::extract_max_(); + } + + /// Find the key \p key + /** \anchor cds_nonintrusive_SkipListMap_find_cfunc + + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + The functor may change \p item.second. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( K const& key, Func f ) + { + return base_class::find( key, [&f](node_type& item, K const& ) { f( item.m_Value );}); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListMap_find_cfunc "find(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. 
+ \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool find_with( K const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::find_with( key, + cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >(), + [&f](node_type& item, K const& ) { f( item.m_Value );}); + } + + /// Checks whether the map contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + */ + template + bool contains( K const& key ) + { + return base_class::contains( key ); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( K const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the set contains \p key using \p pred predicate for searching + /** + The function is similar to contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool contains( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::contains( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( K const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Finds the key \p key and return the item found + /** \anchor cds_nonintrusive_SkipListMap_hp_get + The function searches the item with key equal to \p key + and returns a guarded pointer to the item found. + If \p key is not found the function returns an empty guarded pointer. + + It is safe when a concurrent thread erases the item returned as \p guarded_ptr. + In this case the item will be freed later by garbage collector \p GC automatically + when \p guarded_ptr object will be destroyed or released. 
+ @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::container::SkipListMap< cds::gc::HP, int, foo, my_traits > skip_list; + skip_list theList; + // ... + { + skip_list::guarded_ptr gp( theList.get( 5 )); + if ( gp ) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + + Note the compare functor specified for class \p Traits template parameter + should accept a parameter of type \p K that can be not the same as \p value_type. + */ + template + guarded_ptr get( K const& key ) + { + return base_class::get_with_( key, typename base_class::key_comparator()); + } + + /// Finds the key \p key and return the item found + /** + The function is an analog of \ref cds_nonintrusive_SkipListMap_hp_get "get( K const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the map. 
+ */ + template + guarded_ptr get_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + typedef cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor > wrapped_less; + return base_class::get_with_( key, cds::opt::details::make_comparator_from_less< wrapped_less >()); + } + + /// Clears the map + void clear() + { + base_class::clear(); + } + + /// Checks if the map is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the map + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + }; +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_IMPL_SKIP_LIST_MAP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/skip_list_set.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/skip_list_set.h new file mode 100644 index 0000000..7901f7d --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/impl/skip_list_set.h @@ -0,0 +1,760 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_IMPL_SKIP_LIST_SET_H +#define CDSLIB_CONTAINER_IMPL_SKIP_LIST_SET_H + +#include +#include + +namespace cds { namespace container { + + /// Lock-free skip-list set + /** @ingroup cds_nonintrusive_set + \anchor cds_nonintrusive_SkipListSet_hp + + The implementation of well-known probabilistic data structure called skip-list + invented by W.Pugh in his papers: + - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees + - [1990] W.Pugh A Skip List Cookbook + + A skip-list is a probabilistic data structure that provides expected logarithmic + time search without the need of rebalance. The skip-list is a collection of sorted + linked list. Nodes are ordered by key. Each node is linked into a subset of the lists. + Each list has a level, ranging from 0 to 32. The bottom-level list contains + all the nodes, and each higher-level list is a sublist of the lower-level lists. + Each node is created with a random top level (with a random height), and belongs + to all lists up to that level. The probability that a node has the height 1 is 1/2. 
+ The probability that a node has the height N is 1/2 ** N (more precisely, + the distribution depends on an random generator provided, but our generators + have this property). + + The lock-free variant of skip-list is implemented according to book + - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming", + chapter 14.4 "A Lock-Free Concurrent Skiplist" + + Template arguments: + - \p GC - Garbage collector used. + - \p T - type to be stored in the list. + - \p Traits - set traits, default is \p skip_list::traits. + It is possible to declare option-based list with \p cds::container::skip_list::make_traits metafunction + istead of \p Traits template argument. + + @warning The skip-list requires up to 67 hazard pointers that may be critical for some GCs for which + the guard count is limited (like as \p gc::HP). Those GCs should be explicitly initialized with + hazard pointer enough: \code cds::gc::HP myhp( 67 ) \endcode. Otherwise an run-time exception may be raised + when you try to create skip-list object. + + @note There are several specializations of \p %SkipListSet for each \p GC. You should include: + - for \p gc::HP garbage collector + - for \p gc::DHP garbage collector + - for \ref cds_nonintrusive_SkipListSet_rcu "RCU type" + - for \ref cds_nonintrusive_SkipListSet_nogc "non-deletable SkipListSet" + + Iterators + + The class supports a forward iterator (\ref iterator and \ref const_iterator). + The iteration is ordered. + The iterator object is thread-safe: the element pointed by the iterator object is guarded, + so, the element cannot be reclaimed while the iterator object is alive. + However, passing an iterator object between threads is dangerous. + + \warning Due to concurrent nature of skip-list set it is not guarantee that you can iterate + all elements in the set: any concurrent deletion can exclude the element + pointed by the iterator from the set, and your iteration can be terminated + before end of the set. 
Therefore, such iteration is more suitable for debugging purpose only + + Remember, each iterator object requires 2 additional hazard pointers, that may be + a limited resource for \p GC like \p gc::HP (for \p gc::DHP the count of + guards is unlimited). + + The iterator class supports the following minimalistic interface: + \code + struct iterator { + // Default ctor + iterator(); + + // Copy ctor + iterator( iterator const& s); + + value_type * operator ->() const; + value_type& operator *() const; + + // Pre-increment + iterator& operator ++(); + + // Copy assignment + iterator& operator = (const iterator& src); + + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + Note, the iterator object returned by \p end(), \p cend() member functions points to \p nullptr and should not be dereferenced. + */ + template < + typename GC, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = skip_list::traits +#else + typename Traits +#endif + > + class SkipListSet: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::SkipListSet< GC, T, Traits > +#else + protected details::make_skip_list_set< GC, T, Traits >::type +#endif + { + //@cond + typedef details::make_skip_list_set< GC, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + public: + typedef GC gc; ///< Garbage collector used + typedef T value_type; ///< @anchor cds_containewr_SkipListSet_value_type Value type to be stored in the set + typedef Traits traits; ///< Options specified + + typedef typename base_class::back_off back_off; ///< Back-off strategy + typedef typename traits::allocator allocator_type; ///< Allocator type used for allocate/deallocate the skip-list nodes + typedef typename base_class::item_counter item_counter; ///< Item counting policy used + typedef typename maker::key_comparator key_comparator; ///< key comparison functor + typedef typename base_class::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename traits::random_level_generator random_level_generator; ///< random level generator + typedef typename traits::stat stat; ///< internal statistics type + + static size_t const c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the skip-list + + protected: + //@cond + typedef typename maker::node_type node_type; + typedef typename maker::node_allocator node_allocator; + + typedef std::unique_ptr< node_type, typename maker::node_deallocator > scoped_node_ptr; + //@endcond + + public: + /// Guarded pointer + typedef typename gc::template guarded_ptr< node_type, value_type, details::guarded_ptr_cast_set > guarded_ptr; + + protected: + //@cond + unsigned int random_level() + { + return base_class::random_level(); + } + //@endcond + + public: + /// Default ctor + SkipListSet() + : base_class() + {} + + /// Destructor destroys the set object + ~SkipListSet() + {} + + public: + ///@name Forward iterators (only for debugging purpose) + //@{ + /// Iterator type + /** + The forward iterator has some features: + - it has no post-increment operator + - to protect the value, the iterator contains a GC-specific guard + another guard is required locally for increment operator. + For some GC (like as \p gc::HP), a guard is a limited resource per thread, so an exception (or assertion) "no free guard" + may be thrown if the limit of guard count per thread is exceeded. + - The iterator cannot be moved across thread boundary because it contains thread-private GC's guard. + - Iterator ensures thread-safety even if you delete the item the iterator points to. However, in case of concurrent + deleting operations there is no guarantee that you iterate all item in the list. + Moreover, a crash is possible when you try to iterate the next element that has been deleted by concurrent thread. + + @warning Use this iterator on the concurrent container for debugging purpose only. 
+ + The iterator interface: + \code + class iterator { + public: + // Default constructor + iterator(); + + // Copy construtor + iterator( iterator const& src ); + + // Dereference operator + value_type * operator ->() const; + + // Dereference operator + value_type& operator *() const; + + // Preincrement operator + iterator& operator ++(); + + // Assignment operator + iterator& operator = (iterator const& src); + + // Equality operators + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + */ + typedef skip_list::details::iterator< typename base_class::iterator > iterator; + + /// Const iterator type + typedef skip_list::details::iterator< typename base_class::const_iterator > const_iterator; + + /// Returns a forward iterator addressing the first element in a set + iterator begin() + { + return iterator( base_class::begin()); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator begin() const + { + return const_iterator( base_class::begin()); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator cbegin() const + { + return const_iterator( base_class::cbegin()); + } + + /// Returns a forward iterator that addresses the location succeeding the last element in a set. + iterator end() + { + return iterator( base_class::end()); + } + + /// Returns a forward const iterator that addresses the location succeeding the last element in a set. + const_iterator end() const + { + return const_iterator( base_class::end()); + } + + /// Returns a forward const iterator that addresses the location succeeding the last element in a set. + const_iterator cend() const + { + return const_iterator( base_class::cend()); + } + //@} + + public: + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the set. 
+ + The type \p Q should contain as minimum the complete key for the node. + The object of \ref value_type should be constructible from a value of type \p Q. + In trivial case, \p Q is equal to \ref value_type. + + Returns \p true if \p val is inserted into the set, \p false otherwise. + */ + template + bool insert( Q const& val ) + { + scoped_node_ptr sp( node_allocator().New( random_level(), val )); + if ( base_class::insert( *sp.get())) { + sp.release(); + return true; + } + return false; + } + + /// Inserts new node + /** + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-fields of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this set's item by concurrent threads. + The user-defined functor is called only if the inserting is success. + */ + template + bool insert( Q const& val, Func f ) + { + scoped_node_ptr sp( node_allocator().New( random_level(), val )); + if ( base_class::insert( *sp.get(), [&f]( node_type& v ) { f( v.m_Value ); } )) { + sp.release(); + return true; + } + return false; + } + + /// Updates the item + /** + The operation performs inserting or changing data with lock-free manner. + + If the \p val key not found in the set, then the new item created from \p val + will be inserted into the set iff \p bInsert is \p true. + Otherwise, if \p val is found, the functor \p func will be called with the item found. 
+ + The functor \p Func signature: + \code + struct my_functor { + void operator()( bool bNew, value_type& item, const Q& val ); + }; + \endcode + where: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p key passed into the \p %update() function + + The functor may change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + Returns std::pair where \p first is \p true if operation is successful, + i.e. the item has been inserted or updated, + \p second is \p true if new item has been added or \p false if the item with key equal to \p val + already exists. + + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + std::pair update( const Q& val, Func func, bool bInsert = true ) + { + scoped_node_ptr sp( node_allocator().New( random_level(), val )); + std::pair bRes = base_class::update( *sp, + [&func, &val](bool bNew, node_type& node, node_type&){ func( bNew, node.m_Value, val ); }, + bInsert ); + if ( bRes.first && bRes.second ) + sp.release(); + return bRes; + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( const Q& val, Func func ) + { + return update( val, func, true ); + } + //@endcond + + /// Inserts data of type \p value_type created in-place from std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool emplace( Args&&... args ) + { + scoped_node_ptr sp( node_allocator().New( random_level(), std::forward(args)... )); + if ( base_class::insert( *sp.get())) { + sp.release(); + return true; + } + return false; + } + + /// Delete \p key from the set + /** \anchor cds_nonintrusive_SkipListSet_erase_val + + The set item comparator should be able to compare the type \p value_type + and the type \p Q. 
+ + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key ) + { + return base_class::erase( key ); + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListSet_erase_val "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::erase_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >()); + } + + /// Delete \p key from the set + /** \anchor cds_nonintrusive_SkipListSet_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type const& val); + }; + \endcode + + Since the key of \p value_type is not explicitly specified, + template parameter \p Q defines the key type to search in the list. + The list item comparator should be able to compare the type \p T of list item + and the type \p Q. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key, Func f ) + { + return base_class::erase( key, [&f]( node_type const& node) { f( node.m_Value ); } ); + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListSet_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. 
+ */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::erase_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >(), + [&f]( node_type const& node) { f( node.m_Value ); } ); + } + + /// Extracts the item from the set with specified \p key + /** \anchor cds_nonintrusive_SkipListSet_hp_extract + The function searches an item with key equal to \p key in the set, + unlinks it from the set, and returns it as \p guarded_ptr. + If \p key is not found the function returns an empty guarded pointer. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + The item extracted is freed automatically by garbage collector \p GC + when returned \p guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::container::SkipListSet< cds::gc::HP, foo, my_traits > skip_list; + skip_list theList; + // ... + { + skip_list::guarded_ptr gp(theList.extract( 5 )) + if ( gp ) { + // Deal with gp + // ... + } + // Destructor of gp releases internal HP guard and frees the pointer + } + \endcode + */ + template + guarded_ptr extract( Q const& key ) + { + return base_class::extract_( key, typename base_class::key_comparator()); + } + + /// Extracts the item from the set with comparing functor \p pred + /** + The function is an analog of \ref cds_nonintrusive_SkipListSet_hp_extract "extract(Q const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. 
+ */ + template + guarded_ptr extract_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + typedef cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor > wrapped_less; + return base_class::extract_( key, cds::opt::details::make_comparator_from_less()); + } + + /// Extracts an item with minimal key from the set + /** + The function searches an item with minimal key, unlinks it, and returns pointer to the item found as \p guarded_ptr. + If the skip-list is empty the function returns an empty guarded pointer. + + The item extracted is freed automatically by garbage collector \p GC + when returned \p guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::continer::SkipListSet< cds::gc::HP, foo, my_traits > skip_list; + skip_list theList; + // ... + { + skip_list::guarded_ptr gp( theList.extract_min()); + if ( gp ) { + // Deal with gp + //... + } + // Destructor of gp releases internal HP guard and then frees the pointer + } + \endcode + */ + guarded_ptr extract_min() + { + return base_class::extract_min_(); + } + + /// Extracts an item with maximal key from the set + /** + The function searches an item with maximal key, unlinks it, and returns the pointer to item found as \p guarded_ptr. + If the skip-list is empty the function returns an empty guarded pointer. + + The item found is freed by garbage collector \p GC automatically + when returned \p guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::container::SkipListSet< cds::gc::HP, foo, my_traits > skip_list; + skip_list theList; + // ... + { + skip_list::guarded_ptr gp( theList.extract_max()); + if ( gp ) { + // Deal with gp + //... 
+ } + // Destructor of gp releases internal HP guard and then frees the pointer + } + \endcode + */ + guarded_ptr extract_max() + { + return base_class::extract_max_(); + } + + /// Find the \p key + /** \anchor cds_nonintrusive_SkipListSet_find_func + + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. + + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that may be not the same as \p value_type. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( Q& key, Func f ) + { + return base_class::find( key, [&f]( node_type& node, Q& v ) { f( node.m_Value, v ); }); + } + //@cond + template + bool find( Q const& key, Func f ) + { + return base_class::find( key, [&f]( node_type& node, Q& v ) { f( node.m_Value, v ); } ); + } + //@endcond + + /// Finds \p key using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListSet_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. 
+ */ + template + bool find_with( Q& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::find_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >(), + [&f]( node_type& node, Q& v ) { f( node.m_Value, v ); } ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::find_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >(), + [&f]( node_type& node, Q const& v ) { f( node.m_Value, v ); } ); + } + //@endcond + + /// Checks whether the set contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + */ + template + bool contains( Q const& key ) + { + return base_class::contains( key ); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( Q const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the set contains \p key using \p pred predicate for searching + /** + The function is similar to contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool contains( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::contains( key, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Finds \p key and return the item found + /** \anchor cds_nonintrusive_SkipListSet_hp_get + The function searches the item with key equal to \p key + and returns a guarded pointer to the item found. + If \p key is not found the function returns an empty guarded pointer. 
+ + It is safe when a concurrent thread erases the item returned in \p result guarded pointer. + In this case the item will be freed later by garbage collector \p GC automatically + when \p guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::container::SkipListSet< cds::gc::HP, foo, my_traits > skip_list; + skip_list theList; + // ... + { + skip_list::guarded_ptr gp( theList.get( 5 )); + if ( theList.get( 5 )) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + + Note the compare functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + guarded_ptr get( Q const& key ) + { + return base_class::get_with_( key, typename base_class::key_comparator()); + } + + /// Finds \p key and return the item found + /** + The function is an analog of \ref cds_nonintrusive_SkipListSet_hp_get "get(Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + guarded_ptr get_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + typedef cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor > wrapped_less; + return base_class::get_with_( key, cds::opt::details::make_comparator_from_less< wrapped_less >()); + } + + /// Clears the set (not atomic). + /** + The function deletes all items from the set. + The function is not atomic, thus, in multi-threaded environment with parallel insertions + this sequence + \code + set.clear(); + assert( set.empty()); + \endcode + the assertion could be raised. 
+ + For each item the \ref disposer provided by \p Traits template parameter will be called. + */ + void clear() + { + base_class::clear(); + } + + /// Checks if the set is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the set + /** + The value returned depends on item counter type provided by \p Traits template parameter. + If it is \p atomicity::empty_item_counter this function always returns 0. + Therefore, the function is not suitable for checking the set emptiness, use \p empty() + member function for this purpose. + */ + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_IMPL_SKIP_LIST_SET_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/iterable_kvlist_dhp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/iterable_kvlist_dhp.h new file mode 100644 index 0000000..dcbc6cf --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/iterable_kvlist_dhp.h @@ -0,0 +1,39 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_ITERABLE_KVLIST_DHP_H +#define CDSLIB_CONTAINER_ITERABLE_KVLIST_DHP_H + +#include +#include +#include +#include + +#endif // #ifndef CDSLIB_CONTAINER_ITERABLE_KVLIST_DHP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/iterable_kvlist_hp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/iterable_kvlist_hp.h new file mode 100644 index 0000000..a77744d --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/iterable_kvlist_hp.h @@ -0,0 +1,39 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_ITERABLE_KVLIST_HP_H +#define CDSLIB_CONTAINER_ITERABLE_KVLIST_HP_H + +#include +#include +#include +#include + +#endif // #ifndef CDSLIB_CONTAINER_ITERABLE_KVLIST_HP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/iterable_list_dhp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/iterable_list_dhp.h new file mode 100644 index 0000000..e625c2f --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/iterable_list_dhp.h @@ -0,0 +1,39 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the 
above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_CONTAINER_ITERABLE_LIST_DHP_H +#define CDSLIB_CONTAINER_ITERABLE_LIST_DHP_H + +#include +#include +#include +#include + +#endif // #ifndef CDSLIB_CONTAINER_ITERABLE_LIST_DHP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/iterable_list_hp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/iterable_list_hp.h new file mode 100644 index 0000000..b67fd9b --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/iterable_list_hp.h @@ -0,0 +1,39 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_ITERABLE_LIST_HP_H +#define CDSLIB_CONTAINER_ITERABLE_LIST_HP_H + +#include +#include +#include +#include + +#endif // #ifndef CDSLIB_CONTAINER_ITERABLE_LIST_HP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_kvlist_dhp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_kvlist_dhp.h new file mode 100644 index 0000000..e528085 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_kvlist_dhp.h @@ -0,0 +1,39 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_LAZY_KVLIST_DHP_H +#define CDSLIB_CONTAINER_LAZY_KVLIST_DHP_H + +#include +#include +#include +#include + +#endif // #ifndef CDSLIB_CONTAINER_LAZY_KVLIST_DHP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_kvlist_hp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_kvlist_hp.h new file mode 100644 index 0000000..39d3025 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_kvlist_hp.h @@ -0,0 +1,39 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_LAZY_KVLIST_HP_H +#define CDSLIB_CONTAINER_LAZY_KVLIST_HP_H + +#include +#include +#include +#include + +#endif // #ifndef CDSLIB_CONTAINER_LAZY_KVLIST_HP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_kvlist_nogc.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_kvlist_nogc.h new file mode 100644 index 0000000..2837b76 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_kvlist_nogc.h @@ -0,0 +1,642 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright 
notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_LAZY_KVLIST_NOGC_H +#define CDSLIB_CONTAINER_LAZY_KVLIST_NOGC_H + +#include +#include +#include +#include + +namespace cds { namespace container { + + /// Lazy ordered list (key-value pair, template specialization for gc::nogc) + /** @ingroup cds_nonintrusive_list + @anchor cds_nonintrusive_LazyKVList_nogc + + This specialization is append-only list when no item + reclamation may be performed. The class does not support deleting of list's item. 
+ + See @ref cds_nonintrusive_LazyList_gc "cds::container::LazyList" + */ + template < + typename Key, + typename Value, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = lazy_list::traits +#else + typename Traits +#endif + > + class LazyKVList: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::LazyList< gc::nogc, implementation_defined, Traits > +#else + protected details::make_lazy_kvlist< cds::gc::nogc, Key, Value, Traits >::type +#endif + { + //@cond + typedef details::make_lazy_kvlist< cds::gc::nogc, Key, Value, Traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + typedef Traits traits; ///< List traits + typedef cds::gc::nogc gc; ///< Garbage collector +#ifdef CDS_DOXYGEN_INVOKED + typedef Key key_type ; ///< Key type + typedef Value mapped_type ; ///< Type of value stored in the list + typedef std::pair value_type ; ///< key/value pair stored in the list +#else + typedef typename maker::key_type key_type; + typedef typename maker::mapped_type mapped_type; + typedef typename maker::value_type value_type; +#endif + typedef typename base_class::back_off back_off; ///< Back-off strategy used + typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter; ///< Item counting policy used + typedef typename maker::key_comparator key_comparator; ///< key comparison functor + typedef typename base_class::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename base_class::stat stat; ///< Internal statistics + static constexpr bool const c_bSort = base_class::c_bSort; ///< List type: ordered (\p true) or unordered (\p false) + + //@cond + // Rebind traits (split-list support) + template + struct rebind_traits { + typedef LazyKVList< + gc + , key_type, mapped_type + , typename cds::opt::make_options< traits, Options...>::type + > type; + }; + + // Stat selector + template + using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >; + //@endcond + + protected: + //@cond + typedef typename base_class::value_type node_type; + typedef typename maker::cxx_allocator cxx_allocator; + typedef typename maker::node_deallocator node_deallocator; + typedef typename base_class::key_comparator intrusive_key_comparator; + typedef typename base_class::node_type head_type; + //@endcond + + protected: + //@cond + template + static node_type * alloc_node(const K& key) + { + return cxx_allocator().New( key ); + } + + template + static node_type * alloc_node( const K& key, const V& val ) + { + return cxx_allocator().New( key, val ); + } + + template + static node_type * alloc_node( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward(args)... 
); + } + + static void free_node( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + + head_type& head() + { + return base_class::m_Head; + } + + head_type const& head() const + { + return base_class::m_Head; + } + + head_type& tail() + { + return base_class::m_Tail; + } + + head_type const& tail() const + { + return base_class::m_Tail; + } + //@endcond + + protected: + //@cond + template + class iterator_type: protected base_class::template iterator_type + { + typedef typename base_class::template iterator_type iterator_base; + + iterator_type( head_type const& refNode ) + : iterator_base( const_cast( &refNode )) + {} + + explicit iterator_type( const iterator_base& it ) + : iterator_base( it ) + {} + + friend class LazyKVList; + + protected: + explicit iterator_type( node_type& pNode ) + : iterator_base( &pNode ) + {} + + public: + typedef typename cds::details::make_const_type::reference value_ref; + typedef typename cds::details::make_const_type::pointer value_ptr; + + typedef typename cds::details::make_const_type::reference pair_ref; + typedef typename cds::details::make_const_type::pointer pair_ptr; + + iterator_type() + : iterator_base() + {} + + iterator_type( const iterator_type& src ) + : iterator_base( src ) + {} + + key_type const& key() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + assert( p != nullptr ); + return p->m_Data.first; + } + + value_ref val() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + assert( p != nullptr ); + return p->m_Data.second; + } + + pair_ptr operator ->() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + return p ? 
&(p->m_Data) : nullptr; + } + + pair_ref operator *() const + { + typename iterator_base::value_ref p = iterator_base::operator *(); + return p.m_Data; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base::operator ++(); + return *this; + } + + /// Post-increment + iterator_type operator ++(int) + { + return iterator_base::operator ++(0); + } + + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base::operator ==(i); + } + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base::operator !=(i); + } + }; + //@endcond + + public: + ///@name Forward iterators + //@{ + /// Forward iterator + /** + The forward iterator is safe: you may use it in multi-threaded enviromnent without any synchronization. + + The forward iterator for lazy list based on \p gc::nogc has pre- and post-increment operators. + + The iterator interface to access item data: + - operator -> - returns a pointer to \p value_type + - operator * - returns a reference (a const reference for \p const_iterator) to \p value_type + - const key_type& key() - returns a key reference for iterator + - mapped_type& val() - retuns a value reference for iterator (const reference for \p const_iterator) + + For both functions the iterator should not be equal to \p end() + */ + typedef iterator_type iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + iterator it( head()); + ++it ; // skip dummy head + return it; + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + Internally, end returning value equals to nullptr. 
+ + The returned value can be used only to control reaching the end of the list. + For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator( tail()); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator begin() const + { + const_iterator it( head()); + ++it ; // skip dummy head + return it; + } + /// Returns a forward const iterator addressing the first element in a list + const_iterator cbegin() const + { + const_iterator it( head()); + ++it ; // skip dummy head + return it; + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator end() const + { + return const_iterator( tail()); + } + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator cend() const + { + return const_iterator( tail()); + } + //@} + + protected: + //@cond + iterator node_to_iterator( node_type * pNode ) + { + if ( pNode ) + return iterator( *pNode ); + return end(); + } + //@endcond + + public: + /// Default constructor + LazyKVList() + {} + + //@cond + template >::value >> + explicit LazyKVList( Stat& st ) + : base_class( st ) + {} + //@endcond + + /// Desctructor clears the list + ~LazyKVList() + { + clear(); + } + + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the list. + + Preconditions: + - The \ref key_type should be constructible from value of type \p K. + In trivial case, \p K is equal to \ref key_type. + - The \ref mapped_type should be default-constructible. 
+ + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert( const K& key ) + { + return node_to_iterator( insert_at( head(), key )); + } + + /// Inserts new node with a key and a value + /** + The function creates a node with \p key and value \p val, and then inserts the node created into the list. + + Preconditions: + - The \ref key_type should be constructible from \p key of type \p K. + - The \ref mapped_type should be constructible from \p val of type \p V. + + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert( const K& key, const V& val ) + { + // We cannot use insert with functor here + // because we cannot lock inserted node for updating + // Therefore, we use separate function + return node_to_iterator( insert_at( head(), key, val )); + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code void func( value_type& item ) ; endcode + or + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the list's item inserted. item.second is a reference to item's value that may be changed. + The user-defined functor is called only if the inserting is successful. + + The key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the list; + - if inserting is successful, initialize the value of item by calling \p f functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. 
+ + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert_with( const K& key, Func func ) + { + return node_to_iterator( insert_with_at( head(), key, func )); + } + + /// Updates the item + /** + If \p key is not in the list and \p bAllowInsert is \p true, + + the function inserts a new item. + Otherwise, the function returns an iterator pointing to the item found. + + Returns std::pair where \p first is an iterator pointing to + item found or inserted, \p second is true if new item has been added or \p false if the item + already is in the list. + */ + template + std::pair update( const K& key, bool bAllowInsert = true ) + { + std::pair< node_type *, bool > ret = update_at( head(), key, bAllowInsert ); + return std::make_pair( node_to_iterator( ret.first ), ret.second ); + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( const K& key ) + { + return update( key, true ); + } + //@endcond + + /// Inserts data of type \ref mapped_type constructed with std::forward(args)... + /** + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator emplace( Args&&... args ) + { + return node_to_iterator( emplace_at( head(), std::forward(args)... 
)); + } + + /// Checks whether the list contains \p key + /** + The function searches the item with key equal to \p key + and returns an iterator pointed to item found if the key is found, + and \ref end() otherwise + */ + template + iterator contains( Q const& key ) + { + return node_to_iterator( find_at( head(), key, intrusive_key_comparator())); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + iterator find( Q const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the map contains \p key using \p pred predicate for searching (ordered list version) + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the list. + */ + template + typename std::enable_if::type contains( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return node_to_iterator( find_at( head(), key, typename maker::template less_wrapper::type())); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + typename std::enable_if::type find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Finds the key \p val using \p equal predicate for searching (unordered list version) + /** + The function is an analog of contains( key ) but \p equal is used for key comparing. + \p Equal functor has the interface like \p std::equal_to. 
+ */ + template + typename std::enable_if::type contains( Q const& key, Equal equal ) + { + CDS_UNUSED( equal ); + return node_to_iterator( find_at( head(), key, typename maker::template equal_to_wrapper::type())); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + typename std::enable_if::type find_with( Q const& key, Equal equal ) + { + return contains( key, equal ); + } + //@endcond + + /// Check if the list is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns list's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. + + @note Even if you use real item counter and it returns 0, this fact is not mean that the list + is empty. To check list emptyness use \ref empty() method. + */ + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Clears the list + /** + Post-condition: the list is empty + */ + void clear() + { + base_class::clear(); + } + + protected: + //@cond + node_type * insert_node_at( head_type& refHead, node_type * pNode ) + { + assert( pNode != nullptr ); + scoped_node_ptr p( pNode ); + if ( base_class::insert_at( &refHead, *p )) + return p.release(); + + return nullptr; + } + + template + node_type * insert_at( head_type& refHead, const K& key ) + { + return insert_node_at( refHead, alloc_node( key )); + } + + template + node_type * insert_at( head_type& refHead, const K& key, const V& val ) + { + return insert_node_at( refHead, alloc_node( key, val )); + } + + template + node_type * insert_with_at( head_type& refHead, const K& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( key )); + + if ( base_class::insert_at( &refHead, *pNode )) { + f( pNode->m_Data ); + return pNode.release(); + } + + return nullptr; + } + + + template + std::pair< node_type *, bool > 
update_at( head_type& refHead, const K& key, bool bAllowInsert ) + { + scoped_node_ptr pNode( alloc_node( key )); + node_type * pItemFound = nullptr; + + std::pair ret = base_class::update_at( &refHead, *pNode, + [&pItemFound](bool, node_type& item, node_type&){ pItemFound = &item; }, + bAllowInsert ); + + if ( ret.second ) + pNode.release(); + + return std::make_pair( pItemFound, ret.second ); + } + + template + node_type * emplace_at( head_type& refHead, Args&&... args ) + { + return insert_node_at( refHead, alloc_node( std::forward(args)... )); + } + + template + node_type * find_at( head_type& refHead, const K& key, Compare cmp ) + { + return base_class::find_at( &refHead, key, cmp ); + } + + /* + template + bool find_at( head_type& refHead, K& key, Compare cmp, Func f ) + { + return base_class::find_at( &refHead, key, cmp, [&f]( node_type& node, K const& ){ f( node.m_Data ); }); + } + */ + //@endcond + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_LAZY_KVLIST_NOGC_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_kvlist_rcu.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_kvlist_rcu.h new file mode 100644 index 0000000..4d2c025 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_kvlist_rcu.h @@ -0,0 +1,905 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_LAZY_KVLIST_RCU_H +#define CDSLIB_CONTAINER_LAZY_KVLIST_RCU_H + +#include +#include +#include +#include + +namespace cds { namespace container { + + /// Lazy ordered list (key-value pair), template specialization for \ref cds_urcu_desc "RCU" + /** @ingroup cds_nonintrusive_list + \anchor cds_nonintrusive_LazyKVList_rcu + + This is key-value variation of non-intrusive \p %LazyList. + Like standard container, this implementation split a value stored into two part - + constant key and alterable value. + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N). + + Template arguments: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p Key - key type of an item to be stored in the list. 
It should be copy-constructible + - \p Value - value type to be stored in the list + - \p Traits - type traits, default is \p lazy_list::traits + It is possible to declare option-based list with \p lazy_list::make_traits metafunction istead of \p Traits template + argument. For example, the following traits-based declaration of \p gc::HP lazy list + \code + #include + #include + // Declare comparator for the item + struct my_compare { + int operator ()( int i1, int i2 ) + { + return i1 - i2; + } + }; + + // Declare traits + struct my_traits: public cds::container::lazy_list::traits + { + typedef my_compare compare; + }; + + // Declare traits-based list + typedef cds::container::LazyKVList< cds::urcu::gc< cds::urcu::general_threaded<> >, int, int, my_traits > traits_based_list; + \endcode + is equal to the following option-based list + \code + #include + #include + + // my_compare is the same + + // Declare option-based list + typedef cds::container::LazyKVList< cds::urcu::gc< cds::urcu::general_threaded<> >, int, int, + typename cds::container::lazy_list::make_traits< + cds::container::opt::compare< my_compare > // item comparator option + >::type + > option_based_list; + \endcode + + @note Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. 
+ */ + template < + typename RCU, + typename Key, + typename Value, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = lazy_list::traits +#else + typename Traits +#endif + > + class LazyKVList< cds::urcu::gc, Key, Value, Traits >: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::LazyList< cds::urcu::gc, implementation_defined, Traits > +#else + protected details::make_lazy_kvlist< cds::urcu::gc, Key, Value, Traits >::type +#endif + { + //@cond + typedef details::make_lazy_kvlist< cds::urcu::gc, Key, Value, Traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + typedef cds::urcu::gc gc; ///< Garbage collector + typedef Traits traits; ///< List traits +#ifdef CDS_DOXYGEN_INVOKED + typedef Key key_type ; ///< Key type + typedef Value mapped_type ; ///< Type of value stored in the list + typedef std::pair value_type ; ///< key/value pair stored in the list +#else + typedef typename maker::key_type key_type; + typedef typename maker::mapped_type mapped_type; + typedef typename maker::value_type value_type; +#endif + typedef typename base_class::back_off back_off; ///< Back-off strategy used + typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter; ///< Item counting policy used + typedef typename maker::key_comparator key_comparator; ///< key comparison functor + typedef typename base_class::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename base_class::stat stat; ///< Internal statistics + typedef typename base_class::rcu_check_deadlock rcu_check_deadlock ; ///< RCU deadlock checking policy + + typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock + static constexpr const bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; ///< Group of \p extract_xxx functions require external locking + + //@cond + // Rebind traits (split-list support) + template + struct rebind_traits { + typedef LazyKVList< + gc + , key_type, mapped_type + , typename cds::opt::make_options< traits, Options...>::type + > type; + }; + + // Stat selector + template + using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >; + //@endcond + + protected: + //@cond + typedef typename base_class::value_type node_type; + typedef typename maker::cxx_allocator cxx_allocator; + typedef typename maker::node_deallocator node_deallocator; + typedef typename maker::intrusive_traits::compare intrusive_key_comparator; + + typedef typename base_class::node_type head_type; + //@endcond + + public: + /// pointer to extracted node + using exempt_ptr = cds::urcu::exempt_ptr< gc, node_type, value_type, typename maker::intrusive_traits::disposer, + cds::urcu::details::conventional_exempt_pair_cast + >; + /// Type of \p get() member function return value + typedef value_type * raw_ptr; + + protected: + //@cond + template + static node_type * alloc_node(const K& key) + { + return cxx_allocator().New( key ); + } + + template + static node_type * alloc_node( const K& key, const V& val ) + { + return cxx_allocator().New( key, val ); + } + + template + static node_type * alloc_node( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward(args)... 
); + } + + static void free_node( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + + head_type& head() + { + return base_class::m_Head; + } + + head_type& head() const + { + return const_cast( base_class::m_Head ); + } + + head_type& tail() + { + return base_class::m_Tail; + } + + head_type& tail() const + { + return const_cast( base_class::m_Tail ); + } + + //@endcond + + protected: + //@cond + template + class iterator_type: protected base_class::template iterator_type + { + typedef typename base_class::template iterator_type iterator_base; + + iterator_type( head_type const& pNode ) + : iterator_base( const_cast(&pNode)) + {} + iterator_type( head_type const * pNode ) + : iterator_base( const_cast(pNode)) + {} + + friend class LazyKVList; + + public: + typedef typename cds::details::make_const_type::reference value_ref; + typedef typename cds::details::make_const_type::pointer value_ptr; + + typedef typename cds::details::make_const_type::reference pair_ref; + typedef typename cds::details::make_const_type::pointer pair_ptr; + + iterator_type() + {} + + iterator_type( iterator_type const& src ) + : iterator_base( src ) + {} + + key_type const& key() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + assert( p != nullptr ); + return p->m_Data.first; + } + + value_ref val() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + assert( p != nullptr ); + return p->m_Data.second; + } + + pair_ptr operator ->() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + return p ? 
&(p->m_Data) : nullptr; + } + + pair_ref operator *() const + { + typename iterator_base::value_ref p = iterator_base::operator *(); + return p.m_Data; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base::operator ++(); + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base::operator ==(i); + } + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base::operator !=(i); + } + }; + //@endcond + + public: + ///@name Forward iterators + //@{ + /// Forward iterator + /** + You may safely use iterators in multi-threaded environment only under external RCU lock. + Otherwise, a program crash is possible if another thread deletes the item the iterator points to. + */ + typedef iterator_type iterator; + + /// Const forward iterator + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + iterator it( head()); + ++it ; // skip dummy head + return it; + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + Internally, end returning value pointing to dummy tail node. + + The returned value can be used only to control reaching the end of the list. 
+ For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator( tail()); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator begin() const + { + const_iterator it( head()); + ++it; // skip dummy head + return it; + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator cbegin() const + { + const_iterator it( head()); + ++it; // skip dummy head + return it; + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator end() const + { + return const_iterator( tail()); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator cend() const + { + return const_iterator( tail()); + } + //@} + + public: + /// Default constructor + LazyKVList() + {} + + //@cond + template >::value >> + explicit LazyKVList( Stat& st ) + : base_class( st ) + {} + //@endcond + + /// Destructor clears the list + ~LazyKVList() + { + clear(); + } + + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the list. + + Preconditions: + - The \ref key_type should be constructible from value of type \p K. + In trivial case, \p K is equal to \p key_type. + - The \ref mapped_type should be default-constructible. + + The function makes RCU lock internally. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( const K& key ) + { + return insert_at( head(), key ); + } + + /// Inserts new node with a key and a value + /** + The function creates a node with \p key and value \p val, and then inserts the node created into the list. + + Preconditions: + - The \p key_type should be constructible from \p key of type \p K. + - The \p mapped_type should be constructible from \p val of type \p V. 
+ + The function makes RCU lock internally. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( const K& key, const V& val ) + { + return insert_at( head(), key, val ); + } + + /// Inserts new node and initializes it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the list's item inserted. item.second is a reference to item's value that may be changed. + The user-defined functor is called only if inserting is successful. + + The key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the list; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + The function makes RCU lock internally. + + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + bool insert_with( const K& key, Func func ) + { + return insert_with_at( head(), key, func ); + } + + /// Inserts data of type \p mapped_type constructed from \p args + /** + Returns \p true if inserting successful, \p false otherwise. + + The function makes RCU lock internally. + */ + template + bool emplace( Args&&... args ) + { + return emplace_at( head(), std::forward(args)... ); + } + + /// Updates data by \p key + /** + The operation performs inserting or replacing the element with lock-free manner. + + If the \p key not found in the list, then the new item created from \p key + will be inserted iff \p bAllowInsert is \p true. 
+ (note that in this case the \ref key_type should be constructible from type \p K). + Otherwise, if \p key is found, the functor \p func is called with item found. + + The functor \p Func signature is: + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - the item found or inserted + + The functor may change any fields of the \p item.second of \p mapped_type; + during \p func call \p item is locked so it is safe to modify the item in + multi-threaded environment. + + The function applies RCU lock internally. + + Returns std::pair where \p first is true if operation is successful, + \p second is true if new item has been added or \p false if the item with \p key + already exists. + */ + template + std::pair update( const K& key, Func func, bool bAllowInsert = true ) + { + return update_at( head(), key, func, bAllowInsert ); + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( const K& key, Func f ) + { + return update( key, f, true ); + } + //@endcond + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_LazyKVList_rcu_erase + + RCU \p synchronize method can be called. RCU should not be locked. + + Returns \p true if \p key is found and has been deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + return erase_at( head(), key, intrusive_key_comparator()); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyKVList_rcu_erase "erase(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. 
+ */ + template + bool erase_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return erase_at( head(), key, typename maker::template less_wrapper::type()); + } + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_LazyKVList_rcu_erase_func + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type& val) { ... } + }; + \endcode + + RCU \p synchronize method can be called. RCU should not be locked. + + Returns \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key, Func f ) + { + return erase_at( head(), key, intrusive_key_comparator(), f ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyKVList_rcu_erase_func "erase(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( K const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return erase_at( head(), key, typename maker::template less_wrapper::type(), f ); + } + + /// Extracts an item from the list + /** + @anchor cds_nonintrusive_LazyKVList_rcu_extract + The function searches an item with key equal to \p key in the list, + unlinks it from the list, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. + If \p key is not found the function returns an empty \p exempt_ptr. + + @note The function does NOT call RCU read-side lock or synchronization, + and does NOT dispose the item found. It just excludes the item from the list + and returns a pointer to item found. + You should manually lock RCU before calling this function. 
+ + \code + #include + #include + + typedef cds::urcu::gc< general_buffered<> > rcu; + typedef cds::container::LazyKVList< rcu, int, Foo > rcu_lazy_list; + + rcu_lazy_list theList; + // ... + + rcu_lazy_list::exempt_ptr p; + { + // first, we should lock RCU + rcu_lazy_list::rcu_lock sl; + + // Now, you can apply extract function + // Note that you must not delete the item found inside the RCU lock + p = theList.extract( 10 ); + if ( !p ) { + // do something with p + ... + } + } + // Outside RCU lock section we may safely release extracted pointer. + // release() passes the pointer to RCU reclamation cycle. + p.release(); + \endcode + */ + template + exempt_ptr extract( K const& key ) + { + return exempt_ptr( extract_at( head(), key, intrusive_key_comparator())); + } + + /// Extracts an item from the list using \p pred predicate for searching + /** + This function is the analog for \p extract(K const&). + The \p pred is a predicate used for key comparing. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as \ref key_comparator. + */ + template + exempt_ptr extract_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return exempt_ptr( extract_at( head(), key, typename maker::template less_wrapper::type())); + } + + /// Checks whether the list contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + The function applies RCU lock internally. + */ + template + bool contains( Q const& key ) const + { + return find_at( head(), key, intrusive_key_comparator()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( Q const& key ) const + { + return contains( key ); + } + //@endcond + + /// Checks whether the map contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. 
+ \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the list. + + The function applies RCU lock internally. + */ + template + bool contains( Q const& key, Less pred ) const + { + CDS_UNUSED( pred ); + return find_at( head(), key, typename maker::template less_wrapper::type()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( Q const& key, Less pred ) const + { + return contains( key, pred ); + } + //@endcond + + /// Finds the key \p key and performs an action with it + /** \anchor cds_nonintrusive_LazyKVList_rcu_find_func + The function searches an item with key equal to \p key and calls the functor \p f for the item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + The functor may change item.second that is reference to value of node. + Note that the function is only guarantee that \p item cannot be deleted during functor is executing. + The function does not serialize simultaneous access to the list \p item. If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The function applies RCU lock internally. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( Q const& key, Func f ) const + { + return find_at( head(), key, intrusive_key_comparator(), f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyKVList_rcu_find_func "find(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. 
+ */ + template + bool find_with( Q const& key, Less pred, Func f ) const + { + CDS_UNUSED( pred ); + return find_at( head(), key, typename maker::template less_wrapper::type(), f ); + } + + /// Finds \p key and return the item found + /** \anchor cds_nonintrusive_LazyKVList_rcu_get + The function searches the item with \p key and returns the pointer to item found. + If \p key is not found it returns \p nullptr. + + Note the compare functor should accept a parameter of type \p K that can be not the same as \p key_type. + + RCU should be locked before call of this function. + Returned item is valid only while RCU is locked: + \code + typedef cds::container::LazyKVList< cds::urcu::gc< cds::urcu::general_buffered<> >, int, foo, my_traits > ord_list; + ord_list theList; + // ... + { + // Lock RCU + ord_list::rcu_lock lock; + + ord_list::value_type * pVal = theList.get( 5 ); + if ( pVal ) { + // Deal with pVal + //... + } + // Unlock RCU by rcu_lock destructor + // pVal can be freed at any time after RCU has been unlocked + } + \endcode + */ + template + value_type * get( K const& key ) const + { + return get_at( head(), key, intrusive_key_comparator()); + } + + /// Finds \p key and return the item found + /** + The function is an analog of \ref cds_nonintrusive_LazyKVList_rcu_get "get(K const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + value_type * get_with( K const& key, Less pred ) const + { + CDS_UNUSED( pred ); + return get_at( head(), key, typename maker::template less_wrapper::type()); + } + + /// Checks if the list is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns list's item count + /** + The value returned depends on opt::item_counter option. 
For atomicity::empty_item_counter, + this function always returns 0. + + @note Even if you use real item counter and it returns 0, this fact is not mean that the list + is empty. To check list emptyness use \ref empty() method. + */ + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Clears the list + void clear() + { + base_class::clear(); + } + + protected: + //@cond + bool insert_node_at( head_type& refHead, node_type * pNode ) + { + assert( pNode != nullptr ); + scoped_node_ptr p( pNode ); + + if ( base_class::insert_at( &refHead, *p )) { + p.release(); + return true; + } + + return false; + } + + template + bool insert_at( head_type& refHead, const K& key ) + { + return insert_node_at( refHead, alloc_node( key )); + } + + template + bool insert_at( head_type& refHead, const K& key, const V& val ) + { + return insert_node_at( refHead, alloc_node( key, val )); + } + + template + bool insert_with_at( head_type& refHead, const K& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( key )); + + if ( base_class::insert_at( &refHead, *pNode, [&f](node_type& node){ f( node.m_Data ); } )) { + pNode.release(); + return true; + } + return false; + } + + template + bool emplace_at( head_type& refHead, Args&&... args ) + { + return insert_node_at( refHead, alloc_node( std::forward(args)... 
)); + } + + template + bool erase_at( head_type& refHead, K const& key, Compare cmp ) + { + return base_class::erase_at( &refHead, key, cmp ); + } + + template + bool erase_at( head_type& refHead, K const & key, Compare cmp, Func f ) + { + return base_class::erase_at( &refHead, key, cmp, [&f](node_type const & node){f( const_cast(node.m_Data)); }); + } + + template + std::pair update_at( head_type& refHead, const K& key, Func f, bool bAllowInsert ) + { + scoped_node_ptr pNode( alloc_node( key )); + + std::pair ret = base_class::update_at( &refHead, *pNode, + [&f]( bool bNew, node_type& node, node_type& ){ f( bNew, node.m_Data ); }, + bAllowInsert ); + if ( ret.first && ret.second ) + pNode.release(); + + return ret; + } + + template + node_type * extract_at( head_type& refHead, K const& key, Compare cmp ) + { + return base_class::extract_at( &refHead, key, cmp ); + } + + template + bool find_at( head_type& refHead, K const& key, Compare cmp ) const + { + return base_class::find_at( &refHead, key, cmp, [](node_type&, K const&) {} ); + } + + template + bool find_at( head_type& refHead, K& key, Compare cmp, Func f ) const + { + return base_class::find_at( &refHead, key, cmp, [&f]( node_type& node, K& ){ f( node.m_Data ); }); + } + + template + value_type * get_at( head_type& refHead, K const& val, Compare cmp ) const + { + node_type * pNode = base_class::get_at( &refHead, val, cmp ); + return pNode ? 
&pNode->m_Data : nullptr; + } + + //@endcond + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_LAZY_KVLIST_RCU_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_list_dhp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_list_dhp.h new file mode 100644 index 0000000..9939a00 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_list_dhp.h @@ -0,0 +1,39 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_LAZY_LIST_DHP_H +#define CDSLIB_CONTAINER_LAZY_LIST_DHP_H + +#include +#include +#include +#include + +#endif // #ifndef CDSLIB_CONTAINER_LAZY_LIST_DHP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_list_hp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_list_hp.h new file mode 100644 index 0000000..f4c7709 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_list_hp.h @@ -0,0 +1,39 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_LAZY_LIST_HP_H +#define CDSLIB_CONTAINER_LAZY_LIST_HP_H + +#include +#include +#include +#include + +#endif // #ifndef CDSLIB_CONTAINER_LAZY_LIST_HP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_list_nogc.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_list_nogc.h new file mode 100644 index 0000000..c0edd09 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_list_nogc.h @@ -0,0 +1,526 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_LAZY_LIST_NOGC_H +#define CDSLIB_CONTAINER_LAZY_LIST_NOGC_H + +#include +#include +#include +#include + +namespace cds { namespace container { + + /// Lazy ordered single-linked list (template specialization for gc::nogc) + /** @ingroup cds_nonintrusive_list + \anchor cds_nonintrusive_LazyList_nogc + + This specialization is so-called append-only when no item + reclamation may be performed. The class does not support deleting of list item. + + The list can be ordered if \p Traits::sort is \p true that is default + or unordered otherwise. Unordered list can be maintained by \p equal_to + relationship (\p Traits::equal_to), but for the ordered list \p less + or \p compare relations should be specified in \p Traits. 
+ + See @ref cds_nonintrusive_LazyList_gc "cds::container::LazyList" + */ + template < + typename T, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = lazy_list::traits +#else + typename Traits +#endif + > + class LazyList: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::LazyList< gc::nogc, T, Traits > +#else + protected details::make_lazy_list< cds::gc::nogc, T, Traits >::type +#endif + { + //@cond + typedef details::make_lazy_list< cds::gc::nogc, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + typedef cds::gc::nogc gc; ///< Garbage collector + typedef T value_type; ///< Type of value stored in the list + typedef Traits traits; ///< List traits + + typedef typename base_class::back_off back_off; ///< Back-off strategy used + typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter; ///< Item counting policy used + typedef typename maker::key_comparator key_comparator; ///< key comparing functor + typedef typename base_class::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename base_class::stat stat; ///< Internal statistics + + static constexpr bool const c_bSort = base_class::c_bSort; ///< List type: ordered (\p true) or unordered (\p false) + + //@cond + // Rebind traits (split-list support) + template + struct rebind_traits { + typedef LazyList< + gc + , value_type + , typename cds::opt::make_options< traits, Options...>::type + > type; + }; + + // Stat selector + template + using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >; + //@endcond + + protected: + //@cond + typedef typename base_class::value_type node_type; + typedef typename maker::cxx_allocator cxx_allocator; + typedef typename maker::node_deallocator node_deallocator; + typedef typename base_class::key_comparator intrusive_key_comparator; + + typedef typename base_class::node_type head_type; + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + //@endcond + + protected: + //@cond + template + class iterator_type: protected base_class::template iterator_type + { + typedef typename base_class::template iterator_type iterator_base; + + iterator_type( head_type const& pNode ) + : iterator_base( const_cast(&pNode)) + {} + + explicit iterator_type( const iterator_base& it ) + : iterator_base( it ) + {} + + friend class LazyList; + + protected: + explicit iterator_type( node_type& pNode ) + : iterator_base( &pNode ) + {} + + public: + typedef typename cds::details::make_const_type::pointer value_ptr; + typedef typename cds::details::make_const_type::reference value_ref; + + iterator_type() + {} + + iterator_type( const iterator_type& src ) + : iterator_base( src ) + {} + + value_ptr operator ->() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + return p ? 
&(p->m_Value) : nullptr; + } + + value_ref operator *() const + { + return (iterator_base::operator *()).m_Value; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base::operator ++(); + return *this; + } + + /// Post-increment + iterator_type operator ++(int) + { + return iterator_base::operator ++(0); + } + + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base::operator ==(i); + } + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base::operator !=(i); + } + }; + //@endcond + + public: + ///@name Forward iterators + //@{ + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + typedef iterator_type iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + iterator it( head()); + ++it ; // skip dummy head node + return it; + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + + The returned value can be used only to control reaching the end of the list. 
+ For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator( tail()); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator begin() const + { + const_iterator it( head()); + ++it ; // skip dummy head node + return it; + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator cbegin() const + { + const_iterator it( head()); + ++it ; // skip dummy head node + return it; + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator end() const + { + return const_iterator( tail()); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator cend() const + { + return const_iterator( tail()); + } + //@} + + public: + /// Default constructor + LazyList() + {} + + //@cond + template >::value >> + explicit LazyList( Stat& st ) + : base_class( st ) + {} + //@endcond + + /// Desctructor clears the list + ~LazyList() + { + clear(); + } + + /// Inserts new node + /** + The function inserts \p val in the list if the list does not contain + an item with key equal to \p val. + + Return an iterator pointing to inserted item if success \ref end() otherwise + */ + template + iterator insert( Q&& val ) + { + return node_to_iterator( insert_at( head(), std::forward( val ))); + } + + /// Inserts data of type \p value_type created from \p args + /** + Return an iterator pointing to inserted item if success \ref end() otherwise + */ + template + iterator emplace( Args&&... args ) + { + return node_to_iterator( emplace_at( head(), std::forward(args)... )); + } + + /// Updates the item + /** + If \p key is not in the list and \p bAllowInsert is \p true, + the function inserts a new item. + Otherwise, the function returns an iterator pointing to the item found. 
+ + Returns std::pair where \p first is an iterator pointing to + item found or inserted, \p second is true if new item has been added or \p false if the item + already is in the list. + */ + template + std::pair update( Q&& val, bool bAllowInsert = true ) + { + std::pair< node_type *, bool > ret = update_at( head(), std::forward( val ), bAllowInsert ); + return std::make_pair( node_to_iterator( ret.first ), ret.second ); + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( Q const& val ) + { + return update( val, true ); + } + //@endcond + + /// Checks whether the list contains \p key + /** + The function searches the item with key equal to \p key + and returns an iterator pointed to item found if the key is found, + and \ref end() otherwise + */ + template + iterator contains( Q const& key ) + { + return node_to_iterator( find_at( head(), key, intrusive_key_comparator())); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + iterator find( Q const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the map contains \p key using \p pred predicate for searching (ordered list version) + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the list. 
+ */ + template + typename std::enable_if::type contains( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return node_to_iterator( find_at( head(), key, typename maker::template less_wrapper::type())); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + typename std::enable_if::type find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Finds the key \p val using \p equal predicate for searching (unordered list version) + /** + The function is an analog of contains( key ) but \p equal is used for key comparing. + \p Equal functor has the interface like \p std::equal_to. + */ + template + typename std::enable_if::type contains( Q const& key, Equal equal ) + { + CDS_UNUSED( equal ); + return node_to_iterator( find_at( head(), key, typename maker::template equal_to_wrapper::type())); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + typename std::enable_if::type find_with( Q const& key, Equal equal ) + { + return contains( key, equal ); + } + //@endcond + + /// Check if the list is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns list's item count + /** + The value returned depends on \p Traits::item_counter type. For \p atomicity::empty_item_counter, + this function always returns 0. + + @note Even if you use real item counter and it returns 0, this fact is not mean that the list + is empty. To check list emptyness use \ref empty() method. 
+ */ + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Clears the list + void clear() + { + base_class::clear(); + } + + protected: + //@cond + static value_type& node_to_value( node_type& n ) + { + return n.m_Value; + } + + static node_type * alloc_node() + { + return cxx_allocator().New(); + } + + static node_type * alloc_node( value_type const& v ) + { + return cxx_allocator().New( v ); + } + + template + static node_type * alloc_node( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward( args )... ); + } + + static void free_node( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + + head_type& head() + { + return base_class::m_Head; + } + + head_type const& head() const + { + return base_class::m_Head; + } + + head_type& tail() + { + return base_class::m_Tail; + } + + head_type const& tail() const + { + return base_class::m_Tail; + } + + iterator node_to_iterator( node_type * pNode ) + { + if ( pNode ) + return iterator( *pNode ); + return end(); + } + + iterator insert_node( node_type * pNode ) + { + return node_to_iterator( insert_node_at( head(), pNode )); + } + + node_type * insert_node_at( head_type& refHead, node_type * pNode ) + { + assert( pNode != nullptr ); + scoped_node_ptr p( pNode ); + if ( base_class::insert_at( &refHead, *p )) + return p.release(); + + return nullptr; + } + + template + node_type * insert_at( head_type& refHead, Q&& val ) + { + return insert_node_at( refHead, alloc_node( std::forward( val ))); + } + + template + node_type * emplace_at( head_type& refHead, Args&&... args ) + { + return insert_node_at( refHead, alloc_node( std::forward(args)... 
)); + } + + template + std::pair< node_type *, bool > update_at( head_type& refHead, Q&& val, bool bAllowInsert ) + { + scoped_node_ptr pNode( alloc_node( std::forward( val ))); + node_type * pItemFound = nullptr; + + std::pair ret = base_class::update_at( &refHead, *pNode, + [&pItemFound](bool, node_type& item, node_type&) { pItemFound = &item; }, + bAllowInsert ); + + if ( ret.second ) + pNode.release(); + + return std::make_pair( pItemFound, ret.second ); + } + + template + node_type * find_at( head_type& refHead, Q const& key, Compare cmp ) + { + return base_class::find_at( &refHead, key, cmp ); + } + + //@endcond + }; +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_LAZY_LIST_NOGC_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_list_rcu.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_list_rcu.h new file mode 100644 index 0000000..3a3c0a7 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/lazy_list_rcu.h @@ -0,0 +1,891 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_LAZY_LIST_RCU_H +#define CDSLIB_CONTAINER_LAZY_LIST_RCU_H + +#include +#include +#include +#include +#include + +namespace cds { namespace container { + + /// Lazy ordered list (template specialization for \ref cds_urcu_desc "RCU") + /** @ingroup cds_nonintrusive_list + \anchor cds_nonintrusive_LazyList_rcu + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N). + + Source: + - [2005] Steve Heller, Maurice Herlihy, Victor Luchangco, Mark Moir, William N. Scherer III, and Nir Shavit + "A Lazy Concurrent List-Based Set Algorithm" + + The lazy list is based on an optimistic locking scheme for inserts and removes, + eliminating the need to use the equivalent of an atomically markable + reference. It also has a novel wait-free membership \p find operation + that does not need to perform cleanup operations and is more efficient. + + It is non-intrusive version of \p cds::intrusive::LazyList class + + Template arguments: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p T - type to be stored in the list. 
+ - \p Traits - type traits, default is lazy_list::traits + It is possible to declare option-based list with cds::container::lazy_list::make_traits metafunction istead of \p Traits template + argument. For example, the following traits-based declaration of \p gc::HP lazy list + \code + #include + #include + // Declare comparator for the item + struct my_compare { + int operator ()( int i1, int i2 ) + { + return i1 - i2; + } + }; + + // Declare traits + struct my_traits: public cds::container::lazy_list::traits + { + typedef my_compare compare; + }; + + // Declare traits-based list + typedef cds::container::LazyList< cds::urcu::gc< cds::urcu::general_instant<> >, int, my_traits > traits_based_list; + \endcode + is equal to the following option-based list + \code + #include + #include + + // my_compare is the same + + // Declare option-based list + typedef cds::container::LazyList< cds::urcu::gc< cds::urcu::general_instant<> >, int, + typename cds::container::lazy_list::make_traits< + cds::container::opt::compare< my_compare > // item comparator option + >::type + > option_based_list; + \endcode + + The implementation does not divide type \p T into key and value part and + may be used as main building block for some hash set containers. + The key is a function (or a part) of type \p T, and this function is specified by \p Traits::compare functor + or \p Traits::less predicate + + \ref cds_nonintrusive_LazyKVList_rcu "LazyKVList" is a key-value version + of lazy non-intrusive list that is closer to the C++ std library approach. + + @note Before including you should include + appropriate RCU header file, see \ref cds_urcu_gc "RCU type" for list + of existing RCU class and corresponding header files. 
+ */ + template < + typename RCU, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = lazy_list::traits +#else + typename Traits +#endif + > + class LazyList< cds::urcu::gc, T, Traits >: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::LazyList< cds::urcu::gc, T, Traits > +#else + protected details::make_lazy_list< cds::urcu::gc, T, Traits >::type +#endif + { + //@cond + typedef details::make_lazy_list< cds::urcu::gc, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + typedef cds::urcu::gc gc; ///< Garbage collector + typedef T value_type; ///< Type of value stored in the list + typedef Traits traits; ///< List traits + + typedef typename base_class::back_off back_off; ///< Back-off strategy + typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter; ///< Item counting policy used + typedef typename maker::key_comparator key_comparator; ///< key compare functor + typedef typename base_class::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename base_class::stat stat; ///< Internal statistics + typedef typename base_class::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy + + typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock + static constexpr const bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; ///< Group of \p extract_xxx functions require external locking + + //@cond + // Rebind traits (split-list support) + template + struct rebind_traits { + typedef LazyList< + gc + , value_type + , typename cds::opt::make_options< traits, Options...>::type + > type; + }; + + // Stat selector + template + using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >; + //@endcond + + protected: + //@cond + typedef typename base_class::value_type node_type; + typedef typename maker::cxx_allocator cxx_allocator; + typedef typename maker::node_deallocator node_deallocator; + typedef typename maker::intrusive_traits::compare intrusive_key_comparator; + + typedef typename base_class::node_type head_type; + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + //@endcond + + public: + using exempt_ptr = cds::urcu::exempt_ptr< gc, node_type, value_type, typename maker::intrusive_traits::disposer >; ///< pointer to extracted node + /// Type of \p get() member function return value + typedef value_type * raw_ptr; + + protected: + //@cond + template + class iterator_type: protected base_class::template iterator_type + { + typedef typename base_class::template iterator_type iterator_base; + + iterator_type( head_type const& pNode ) + : iterator_base( const_cast( &pNode )) + {} + + iterator_type( head_type const * pNode ) + : iterator_base( const_cast( pNode )) + {} + + friend class LazyList; + + public: + typedef typename cds::details::make_const_type::pointer value_ptr; + typedef 
typename cds::details::make_const_type::reference value_ref; + + iterator_type() + {} + + iterator_type( iterator_type const& src ) + : iterator_base( src ) + {} + + value_ptr operator ->() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + return p ? &(p->m_Value) : nullptr; + } + + value_ref operator *() const + { + return (iterator_base::operator *()).m_Value; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base::operator ++(); + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base::operator ==(i); + } + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base::operator !=(i); + } + }; + //@endcond + + public: + ///@name Forward iterators (only for debugging purpose) + //@{ + /// Forward iterator + /** + You may safely use iterators in multi-threaded environment only under RCU lock. + Otherwise, a crash is possible if another thread deletes the item the iterator points to. + */ + typedef iterator_type iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + iterator it( head()); + ++it ; // skip dummy head node + return it; + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + + The returned value can be used only to control reaching the end of the list. 
+ For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator( tail()); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator begin() const + { + const_iterator it( head()); + ++it ; // skip dummy head node + return it; + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator cbegin() const + { + const_iterator it( head()); + ++it ; // skip dummy head node + return it; + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator end() const + { + return const_iterator( tail()); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator cend() const + { + return const_iterator( tail()); + } + //@} + + public: + /// Default constructor + LazyList() + {} + + //@cond + template >::value >> + explicit LazyList( Stat& st ) + : base_class( st ) + {} + //@endcond + + /// Desctructor clears the list + ~LazyList() + { + clear(); + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the list. + + The type \p Q should contain as minimum the complete key of the node. + The object of \p value_type should be constructible from \p val of type \p Q. + In trivial case, \p Q is equal to \p value_type. + + The function makes RCU lock internally. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( Q&& val ) + { + return insert_at( head(), std::forward( val )); + } + + /// Inserts new node + /** + This function inserts new node with default-constructed value and then it calls + \p func functor with signature + \code void func( value_type& itemValue ) ;\endcode + + The argument \p itemValue of user-defined functor \p func is the reference + to the list's item inserted. 
+ The user-defined functor is called only if the inserting is success. + + The type \p Q should contain the complete key of the node. + The object of \ref value_type should be constructible from \p key of type \p Q. + + The function allows to split creating of new item into two part: + - create item from \p key with initializing key-fields only; + - insert new item into the list; + - if inserting is successful, initialize non-key fields of item by calling \p f functor + + This can be useful if complete initialization of object of \p value_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + The function makes RCU lock internally. + */ + template + bool insert( Q&& key, Func func ) + { + return insert_at( head(), std::forward( key ), func ); + } + + /// Inserts data of type \p value_type constructed from \p args + /** + Returns \p true if inserting successful, \p false otherwise. + + The function makes RCU lock internally. + */ + template + bool emplace( Args&&... args ) + { + return emplace_at( head(), std::forward(args)... ); + } + + /// Updates data by \p key + /** + The operation performs inserting or replacing the element with lock-free manner. + + If the \p key not found in the list, then the new item created from \p key + will be inserted iff \p bAllowInsert is \p true. + Otherwise, if \p key is found, the functor \p func is called with item found. + + The functor \p Func signature is: + \code + struct my_functor { + void operator()( bool bNew, value_type& item, Q const& val ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + - \p val - argument \p key passed into the \p %update() function + + The functor may change non-key fields of the \p item; + during \p func call \p item is locked so it is safe to modify the item in + multi-threaded environment. + + The function applies RCU lock internally. 
+ + Returns std::pair where \p first is true if operation is successful, + \p second is true if new item has been added or \p false if the item with \p key + already exists. + */ + template + std::pair update( Q const& key, Func func, bool bAllowInsert = true ) + { + return update_at( head(), key, func, bAllowInsert ); + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( Q const& key, Func f ) + { + return update( key, f, true ); + } + //@endcond + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_LazyList_rcu_erase + Since the key of LazyList's item type \p T is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The list item comparator should be able to compare the type \p T of list item + and the type \p Q. + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key ) + { + return erase_at( head(), key, intrusive_key_comparator(), [](value_type const&){} ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyList_rcu_erase "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return erase_at( head(), key, typename maker::template less_wrapper::type(), [](value_type const&){} ); + } + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_LazyList_rcu_erase_func + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. 
+ + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type const& val) { ... } + }; + \endcode + + Since the key of LazyList's item type \p T is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The list item comparator should be able to compare the type \p T of list item + and the type \p Q. + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key, Func f ) + { + return erase_at( head(), key, intrusive_key_comparator(), f ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyList_rcu_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return erase_at( head(), key, typename maker::template less_wrapper::type(), f ); + } + + /// Extracts an item from the list + /** + @anchor cds_nonintrusive_LazyList_rcu_extract + The function searches an item with key equal to \p key in the list, + unlinks it from the list, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to an item found. + If the item with the key equal to \p key is not found the function returns an empty \p exempt_ptr. + + @note The function does NOT call RCU read-side lock or synchronization, + and does NOT dispose the item found. It just excludes the item from the list + and returns a pointer to item found. + You should lock RCU before calling this function. + + \code + #include + #include + + typedef cds::urcu::gc< general_buffered<> > rcu; + typedef cds::container::LazyList< rcu, Foo > rcu_lazy_list; + + rcu_lazy_list theList; + // ... 
+ + rcu_lazy_list::exempt_ptr p; + { + // first, we should lock RCU + rcu_lazy_list::rcu_lock sl; + + // Now, you can apply extract function + // Note that you must not delete the item found inside the RCU lock + p = theList.extract( 10 ); + if ( p ) { + // do something with p + ... + } + } + // Outside RCU lock section we may safely release extracted pointer. + // release() passes the pointer to RCU reclamation cycle. + p.release(); + \endcode + */ + template + exempt_ptr extract( Q const& key ) + { + return exempt_ptr(extract_at( head(), key, intrusive_key_comparator())); + } + + /// Extracts an item from the list using \p pred predicate for searching + /** + This function is the analog for \p extract(Q const&). + + The \p pred is a predicate used for key comparing. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as \ref key_comparator. + */ + template + exempt_ptr extract_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return exempt_ptr( extract_at( head(), key, typename maker::template less_wrapper::type())); + } + + /// Checks whether the list contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + The function applies RCU lock internally. + */ + template + bool contains( Q const& key ) const + { + return find_at( head(), key, intrusive_key_comparator()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( Q const& key ) const + { + return contains( key ); + } + //@endcond + + /// Checks whether the list contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. 
+ */ + template + bool contains( Q const& key, Less pred ) const + { + CDS_UNUSED( pred ); + return find_at( head(), key, typename maker::template less_wrapper::type()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( Q const& key, Less pred ) const + { + return contains( key, pred ); + } + //@endcond + + /// Finds the key \p key and performs an action with it + /** \anchor cds_nonintrusive_LazyList_rcu_find_func + The function searches an item with key equal to \p key and calls the functor \p f for the item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the \p find() function argument. + + The functor may change non-key fields of \p item. Note that the function is only guarantee + that \p item cannot be deleted during functor is executing. + The function does not serialize simultaneous access to the list \p item. If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor + may modify both arguments. + + The function makes RCU lock internally. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( Q& key, Func f ) const + { + return find_at( head(), key, intrusive_key_comparator(), f ); + } + //@cond + template + bool find( Q const& key, Func f ) const + { + return find_at( head(), key, intrusive_key_comparator(), f ); + } + //@endcond + + /// Finds the key \p key using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_LazyList_rcu_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. 
+ */ + template + bool find_with( Q& key, Less pred, Func f ) const + { + CDS_UNUSED( pred ); + return find_at( head(), key, typename maker::template less_wrapper::type(), f ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) const + { + CDS_UNUSED( pred ); + return find_at( head(), key, typename maker::template less_wrapper::type(), f ); + } + //@endcond + + /// Finds the key \p key and return the item found + /** \anchor cds_nonintrusive_LazyList_rcu_get + The function searches the item with key equal to \p key and returns the pointer to item found. + If \p key is not found it returns \p nullptr. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU should be locked before call of this function. + Returned item is valid only while RCU is locked: + \code + typedef cds::container::LazyList< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > ord_list; + ord_list theList; + // ... + { + // Lock RCU + ord_list::rcu_lock lock; + + foo * pVal = theList.get( 5 ); + if ( pVal ) { + // Deal with pVal + //... + } + // Unlock RCU by rcu_lock destructor + // pVal can be freed at any time after RCU has been unlocked + } + \endcode + */ + template + value_type * get( Q const& key ) const + { + return get_at( head(), key, intrusive_key_comparator()); + } + + /// Finds the key \p key and return the item found + /** + The function is an analog of \ref cds_nonintrusive_LazyList_rcu_get "get(Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. 
+ */ + template + value_type * get_with( Q const& key, Less pred ) const + { + CDS_UNUSED( pred ); + return get_at( head(), key, typename maker::template less_wrapper::type()); + } + + /// Checks if the list is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns list's item count + /** + The value returned depends on \p Traits::item_counter type. For \p atomicity::empty_item_counter, + this function always returns 0. + + @note Even if you use real item counter and it returns 0, this fact is not mean that the list + is empty. To check list emptyness use \ref empty() method. + */ + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Clears the list + void clear() + { + base_class::clear(); + } + + protected: + //@cond + bool insert_node( node_type * pNode ) + { + return insert_node_at( head(), pNode ); + } + + bool insert_node_at( head_type& refHead, node_type * pNode ) + { + assert( pNode != nullptr ); + scoped_node_ptr p( pNode ); + + if ( base_class::insert_at( &refHead, *pNode )) { + p.release(); + return true; + } + + return false; + } + + template + bool insert_at( head_type& refHead, Q&& val ) + { + return insert_node_at( refHead, alloc_node( std::forward( val ))); + } + + template + bool emplace_at( head_type& refHead, Args&&... args ) + { + return insert_node_at( refHead, alloc_node( std::forward(args)... 
)); + } + + template + bool insert_at( head_type& refHead, Q&& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( std::forward( key ))); + + if ( base_class::insert_at( &refHead, *pNode, [&f](node_type& node){ f( node_to_value(node)); } )) { + pNode.release(); + return true; + } + return false; + } + + template + bool erase_at( head_type& refHead, Q const& key, Compare cmp, Func f ) + { + return base_class::erase_at( &refHead, key, cmp, [&f](node_type const& node){ f( node_to_value(node)); } ); + } + + template + node_type * extract_at( head_type& refHead, Q const& key, Compare cmp ) + { + return base_class::extract_at( &refHead, key, cmp ); + } + + template + std::pair update_at( head_type& refHead, Q const& key, Func f, bool bAllowInsert ) + { + scoped_node_ptr pNode( alloc_node( key )); + + std::pair ret = base_class::update_at( &refHead, *pNode, + [&f, &key](bool bNew, node_type& node, node_type&){f( bNew, node_to_value(node), key );}, + bAllowInsert ); + if ( ret.first && ret.second ) + pNode.release(); + + return ret; + } + + template + bool find_at( head_type& refHead, Q const& key, Compare cmp ) const + { + return base_class::find_at( &refHead, key, cmp, [](node_type&, Q const&) {} ); + } + + template + bool find_at( head_type& refHead, Q& val, Compare cmp, Func f ) const + { + return base_class::find_at( &refHead, val, cmp, [&f](node_type& node, Q& v){ f( node_to_value(node), v ); }); + } + + template + value_type * get_at( head_type& refHead, Q const& val, Compare cmp ) const + { + node_type * pNode = base_class::get_at( &refHead, val, cmp ); + return pNode ? &pNode->m_Value : nullptr; + } + + static value_type& node_to_value( node_type& n ) + { + return n.m_Value; + } + + static value_type const& node_to_value( node_type const& n ) + { + return n.m_Value; + } + + template + static node_type * alloc_node( Q&& v ) + { + return cxx_allocator().New( std::forward( v )); + } + + template + static node_type * alloc_node( Args&&... 
args ) + { + return cxx_allocator().MoveNew( std::forward( args )... ); + } + + static void free_node( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + + head_type& head() + { + return base_class::m_Head; + } + + head_type& head() const + { + return const_cast(base_class::m_Head); + } + + head_type& tail() + { + return base_class::m_Tail; + } + + head_type const& tail() const + { + return base_class::m_Tail; + } + //@endcond + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_LAZY_LIST_RCU_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_kvlist_dhp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_kvlist_dhp.h new file mode 100644 index 0000000..c38e145 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_kvlist_dhp.h @@ -0,0 +1,39 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_MICHAEL_KVLIST_DHP_H +#define CDSLIB_CONTAINER_MICHAEL_KVLIST_DHP_H + +#include +#include +#include +#include + +#endif // #ifndef CDSLIB_CONTAINER_MICHAEL_KVLIST_DHP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_kvlist_hp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_kvlist_hp.h new file mode 100644 index 0000000..2b242d3 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_kvlist_hp.h @@ -0,0 +1,39 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_MICHAEL_KVLIST_HP_H +#define CDSLIB_CONTAINER_MICHAEL_KVLIST_HP_H + +#include +#include +#include +#include + +#endif // #ifndef CDSLIB_CONTAINER_MICHAEL_KVLIST_HP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_kvlist_nogc.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_kvlist_nogc.h new file mode 100644 index 0000000..2a5106a --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_kvlist_nogc.h @@ -0,0 +1,619 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_MICHAEL_KVLIST_NOGC_H +#define CDSLIB_CONTAINER_MICHAEL_KVLIST_NOGC_H + +#include +#include +#include +#include + +namespace cds { namespace container { + + //@cond + namespace details { + + template + struct make_michael_kvlist_nogc: public make_michael_kvlist + { + typedef make_michael_kvlist base_maker; + typedef typename base_maker::node_type node_type; + + struct intrusive_traits: public base_maker::intrusive_traits + { + typedef typename base_maker::node_deallocator disposer; + }; + + typedef intrusive::MichaelList type; + }; + + } // namespace details + //@endcond + + /// Michael's ordered list (key-value pair, template specialization for gc::nogc) + /** @ingroup cds_nonintrusive_list + @anchor cds_nonintrusive_MichaelKVList_nogc + + This specialization is intended for so-called persistent usage when no item + reclamation may be performed. The class does not support deleting of list item. 
+ + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N). + + See \ref cds_nonintrusive_MichaelList_gc "MichaelList" for description of template parameters. + + The interface of the specialization is a little different. + */ + template < + typename Key, + typename Value, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = michael_list::traits +#else + typename Traits +#endif + > + class MichaelKVList: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::MichaelList< gc::nogc, implementation_defined, Traits > +#else + protected details::make_michael_kvlist_nogc< Key, Value, Traits >::type +#endif + { + //@cond + typedef details::make_michael_kvlist_nogc< Key, Value, Traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + typedef cds::gc::nogc gc; ///< Garbage collector used + typedef Traits traits; ///< List traits + +#ifdef CDS_DOXYGEN_INVOKED + typedef Key key_type ; ///< Key type + typedef Value mapped_type ; ///< Type of value stored in the list + typedef std::pair value_type ; ///< key/value pair stored in the list +#else + typedef typename maker::key_type key_type; + typedef typename maker::value_type mapped_type; + typedef typename maker::pair_type value_type; +#endif + + typedef typename base_class::back_off back_off; ///< Back-off strategy used + typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter; ///< Item counting policy used + typedef typename maker::key_comparator key_comparator; ///< key comparison functor + typedef typename base_class::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename base_class::stat stat; ///< Internal statistics + + //@cond + // Rebind traits (split-list support) + template + struct rebind_traits { + typedef MichaelKVList< + gc + , key_type, mapped_type + , typename cds::opt::make_options< traits, Options...>::type + > type; + }; + + // Stat selector + template + using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >; + //@endcond + + protected: + //@cond + typedef typename base_class::value_type node_type; + typedef typename maker::cxx_allocator cxx_allocator; + typedef typename maker::node_deallocator node_deallocator; + typedef typename maker::intrusive_traits::compare intrusive_key_comparator; + + typedef typename base_class::atomic_node_ptr head_type; + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + //@endcond + + protected: + //@cond + template + class iterator_type: protected base_class::template iterator_type + { + typedef typename base_class::template iterator_type iterator_base; + + iterator_type( head_type const& refNode ) + : iterator_base( refNode ) + {} + + explicit iterator_type( const iterator_base& it ) + : iterator_base( it ) + {} + + friend class MichaelKVList; + + protected: + explicit iterator_type( node_type& pNode ) + : iterator_base( &pNode ) + {} + + public: + typedef typename cds::details::make_const_type::reference value_ref; + typedef typename cds::details::make_const_type::pointer value_ptr; + + typedef typename cds::details::make_const_type::reference pair_ref; + typedef typename cds::details::make_const_type::pointer pair_ptr; + + iterator_type() + : iterator_base() + {} + + iterator_type( const iterator_type& src ) + : iterator_base( src ) + {} + + key_type const& key() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + assert( p != nullptr ); + return 
p->m_Data.first; + } + + value_ref val() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + assert( p != nullptr ); + return p->m_Data.second; + } + + pair_ptr operator ->() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + return p ? &(p->m_Data) : nullptr; + } + + pair_ref operator *() const + { + typename iterator_base::value_ref p = iterator_base::operator *(); + return p.m_Data; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base::operator ++(); + return *this; + } + + /// Post-increment + iterator_type operator ++(int) + { + return iterator_base::operator ++(0); + } + + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base::operator ==(i); + } + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base::operator !=(i); + } + }; + //@endcond + + public: + ///@name Forward iterators + //@{ + /// Forward iterator + /** + The forward iterator is safe: you may use it in multi-threaded enviromnent without any synchronization. + + The forward iterator for Michael's list based on \p gc::nogc has pre- and post-increment operators. + + The iterator interface to access item data: + - operator -> - returns a pointer to \p value_type + - operator * - returns a reference (a const reference for \p const_iterator) to \p value_type + - const key_type& key() - returns a key reference for iterator + - mapped_type& val() - retuns a value reference for iterator (const reference for \p const_iterator) + + For both functions the iterator should not be equal to \p end(). 
+ + @note \p end() iterator is not dereferenceable + */ + typedef iterator_type iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( head()); + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + Internally, end returning value equals to \p nullptr. + + The returned value can be used only to control reaching the end of the list. + For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator(); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator begin() const + { + return const_iterator( head()); + } + /// Returns a forward const iterator addressing the first element in a list + const_iterator cbegin() const + { + return const_iterator( head()); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator end() const + { + return const_iterator(); + } + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator cend() const + { + return const_iterator(); + } + //@} + + public: + /// Default constructor + /** + Initialize empty list + */ + MichaelKVList() + {} + + //@cond + template >::value >> + explicit MichaelKVList( Stat& st ) + : base_class( st ) + {} + //@endcond + + /// List destructor + /** + Clears the list + */ + ~MichaelKVList() + { + clear(); + } + + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the list. 
+ + Preconditions: + - The \ref key_type should be constructible from value of type \p K. + In trivial case, \p K is equal to \ref key_type. + - The \ref mapped_type should be default-constructible. + + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert( const K& key ) + { + return node_to_iterator( insert_at( head(), key )); + } + + /// Inserts new node with a key and a value + /** + The function creates a node with \p key and value \p val, and then inserts the node created into the list. + + Preconditions: + - The \ref key_type should be constructible from \p key of type \p K. + - The \ref mapped_type should be constructible from \p val of type \p V. + + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert( const K& key, const V& val ) + { + // We cannot use insert with functor here + // because we cannot lock inserted node for updating + // Therefore, we use separate function + return node_to_iterator( insert_at( head(), key, val )); + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code void func( value_type& item ); + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the list's item inserted. item.second is a reference to item's value that may be changed. + User-defined functor \p func should guarantee that during changing item's value no any other changes + could be made on this list's item by concurrent threads. + + The key_type should be constructible from value of type \p K. 
+ + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the list; + - if inserting is successful, initialize the value of item by calling \p f functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert_with( const K& key, Func func ) + { + return node_to_iterator( insert_with_at( head(), key, func )); + } + + /// Updates the item + /** + If \p key is not in the list and \p bAllowInsert is \p true, + + the function inserts a new item. + Otherwise, the function returns an iterator pointing to the item found. + + Returns std::pair where \p first is an iterator pointing to + item found or inserted, \p second is true if new item has been added or \p false if the item + already is in the list. + */ + template + std::pair update( K const& key, bool bAllowInsert = true ) + { + std::pair< node_type *, bool > ret = update_at( head(), key, bAllowInsert ); + return std::make_pair( node_to_iterator( ret.first ), ret.second ); + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( K const& key ) + { + return update( key ); + } + //@endcond + + /// Inserts data of type \ref mapped_type constructed with std::forward(args)... + /** + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator emplace( K&& key, Args&&... args ) + { + return node_to_iterator( emplace_at( head(), std::forward(key), std::forward(args)... 
)); + } + + /// Checks whether the list contains \p key + /** + The function searches the item with key equal to \p key + and returns an iterator pointed to item found and \ref end() otherwise + */ + template + iterator contains( Q const& key ) + { + return node_to_iterator( find_at( head(), key, intrusive_key_comparator())); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + iterator find( Q const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the list contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + iterator contains( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return node_to_iterator( find_at( head(), key, typename maker::template less_wrapper::type())); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + iterator find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Check if the list is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns list's item count + /** + The value returned depends on item counter provided by \p Traits. For \p atomicity::empty_item_counter, + this function always returns 0. + + @note Even if you use real item counter and it returns 0, this fact does not mean that the list + is empty. To check list emptyness use \p empty() method. 
+ */ + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Clears the list + void clear() + { + base_class::clear(); + } + + protected: + //@cond + node_type * insert_node_at( head_type& refHead, node_type * pNode ) + { + assert( pNode != nullptr ); + scoped_node_ptr p( pNode ); + if ( base_class::insert_at( refHead, *pNode )) + return p.release(); + return nullptr; + } + + template + node_type * insert_at( head_type& refHead, const K& key ) + { + return insert_node_at( refHead, alloc_node( key )); + } + + template + node_type * insert_at( head_type& refHead, const K& key, const V& val ) + { + return insert_node_at( refHead, alloc_node( key, val )); + } + + template + node_type * insert_with_at( head_type& refHead, const K& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( key )); + + if ( base_class::insert_at( refHead, *pNode )) { + f( pNode->m_Data ); + return pNode.release(); + } + return nullptr; + } + + template + std::pair< node_type *, bool > update_at( head_type& refHead, const K& key, bool bAllowInsert ) + { + scoped_node_ptr pNode( alloc_node( key )); + node_type * pItemFound = nullptr; + + std::pair ret = base_class::update_at( refHead, *pNode, + + [&pItemFound](bool, node_type& item, node_type&){ pItemFound = &item; }, + bAllowInsert ); + + if ( ret.second ) + pNode.release(); + return std::make_pair( pItemFound, ret.second ); + } + + template + node_type * emplace_at( head_type& refHead, K&& key, Args&&... args ) + { + return insert_node_at( refHead, alloc_node( std::forward(key), std::forward(args)... 
)); + } + + template + node_type * find_at( head_type& refHead, K const& key, Compare cmp ) + { + return base_class::find_at( refHead, key, cmp ); + } + + template + static node_type * alloc_node( const K& key ) + { + return cxx_allocator().New( key ); + } + + template + static node_type * alloc_node( const K& key, const V& val ) + { + return cxx_allocator().New( key, val ); + } + + template + static node_type * alloc_node( K&& key, Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward( key ), std::forward( args )... ); + } + + static void free_node( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + + head_type& head() + { + return base_class::m_pHead; + } + + head_type const& head() const + { + return base_class::m_pHead; + } + + iterator node_to_iterator( node_type * pNode ) + { + if ( pNode ) + return iterator( *pNode ); + return end(); + } + //@endcond + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_MICHAEL_KVLIST_NOGC_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_kvlist_rcu.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_kvlist_rcu.h new file mode 100644 index 0000000..e42fe7d --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_kvlist_rcu.h @@ -0,0 +1,954 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_MICHAEL_KVLIST_RCU_H +#define CDSLIB_CONTAINER_MICHAEL_KVLIST_RCU_H + +#include +#include // ref +#include +#include +#include + +namespace cds { namespace container { + + /// Michael's ordered list (key-value pair), template specialization for \ref cds_urcu_desc "RCU" + /** @ingroup cds_nonintrusive_list + \anchor cds_nonintrusive_MichaelKVList_rcu + + This is key-value variation of non-intrusive \ref cds_nonintrusive_MichaelList_rcu "MichaelList". + Like standard container, this implementation split a value stored into two part - + constant key and alterable value. + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N). + + Template arguments: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p Key - key type of an item stored in the list. 
It should be copy-constructible + - \p Value - value type stored in a list + - \p Traits - type traits, default is \p michael_list::traits + + @note Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. + + It is possible to declare option-based list using \p cds::container::michael_list::make_traits metafunction istead of \p Traits template + argument. For example, the following traits-based declaration of Michael's list + \code + #include + #include + // Declare comparator for the item + struct my_compare { + int operator ()( int i1, int i2 ) + { + return i1 - i2; + } + }; + + // Declare traits + struct my_traits: public cds::container::michael_list::traits + { + typedef my_compare compare; + }; + + // Declare traits-based list + typedef cds::container::MichaelKVList< cds::urcu::gc< cds::urcu::general_buffered<> >, int, int, my_traits > traits_based_list; + \endcode + + is equivalent for the following option-based list + \code + #include + #include + + // my_compare is the same + + // Declare option-based list + typedef cds::container::MichaelKVList< cds::urcu::gc< cds::urcu::general_buffered<> >, int, int, + typename cds::container::michael_list::make_traits< + cds::container::opt::compare< my_compare > // item comparator option + >::type + > option_based_list; + \endcode + */ + template < + typename RCU, + typename Key, + typename Value, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = michael_list::traits +#else + typename Traits +#endif + > + class MichaelKVList< cds::urcu::gc, Key, Value, Traits >: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::MichaelList< cds::urcu::gc, implementation_defined, Traits > +#else + protected details::make_michael_kvlist< cds::urcu::gc, Key, Value, Traits >::type +#endif + { + //@cond + typedef details::make_michael_kvlist< cds::urcu::gc, Key, Value, Traits > maker; + typedef typename maker::type base_class; + 
//@endcond + + public: + typedef cds::urcu::gc gc; ///< Garbage collector + +#ifdef CDS_DOXYGEN_INVOKED + typedef Key key_type; ///< Key type + typedef Value mapped_type; ///< Type of value stored in the list + typedef std::pair value_type; ///< key/value pair stored in the list +#else + typedef typename maker::key_type key_type; + typedef typename maker::value_type mapped_type; + typedef typename maker::pair_type value_type; +#endif + typedef Traits traits; ///< List traits + + typedef typename base_class::back_off back_off; ///< Back-off strategy + typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter; ///< Item counting policy + typedef typename maker::key_comparator key_comparator; ///< key comparison functor + typedef typename base_class::memory_model memory_model; ///< Memory ordering. See \p michael_list::traits::memory_model + typedef typename base_class::stat stat; ///< Internal statistics + typedef typename base_class::rcu_check_deadlock rcu_check_deadlock ; ///< RCU deadlock checking policy + + typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock + static constexpr const bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; ///< Group of \p extract_xxx functions do not require external locking + + //@cond + // Rebind traits (split-list support) + template + struct rebind_traits { + typedef MichaelKVList< + gc + , key_type, mapped_type + , typename cds::opt::make_options< traits, Options...>::type + > type; + }; + + // Stat selector + template + using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >; + //@endcond + + protected: + //@cond + typedef typename base_class::value_type node_type; + typedef typename maker::cxx_allocator cxx_allocator; + typedef typename maker::node_deallocator node_deallocator; + typedef typename maker::intrusive_traits::compare intrusive_key_comparator; + + 
typedef typename base_class::atomic_node_ptr head_type; + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + //@endcond + + public: + /// pointer to extracted node + using exempt_ptr = cds::urcu::exempt_ptr< gc, node_type, value_type, typename maker::intrusive_traits::disposer, + cds::urcu::details::conventional_exempt_pair_cast + >; + + private: + //@cond + struct raw_ptr_converter + { + value_type * operator()( node_type * p ) const + { + return p ? &p->m_Data : nullptr; + } + + value_type& operator()( node_type& n ) const + { + return n.m_Data; + } + + value_type const& operator()( node_type const& n ) const + { + return n.m_Data; + } + }; + //@endcond + + public: + /// Result of \p get(), \p get_with() functions - pointer to the node found + typedef cds::urcu::raw_ptr_adaptor< value_type, typename base_class::raw_ptr, raw_ptr_converter > raw_ptr; + + + protected: + //@cond + template + class iterator_type: protected base_class::template iterator_type + { + typedef typename base_class::template iterator_type iterator_base; + + iterator_type( head_type const& pNode ) + : iterator_base( pNode ) + {} + + friend class MichaelKVList; + + public: + typedef typename cds::details::make_const_type::reference value_ref; + typedef typename cds::details::make_const_type::pointer value_ptr; + + typedef typename cds::details::make_const_type::reference pair_ref; + typedef typename cds::details::make_const_type::pointer pair_ptr; + + iterator_type() + {} + + iterator_type( iterator_type const& src ) + : iterator_base( src ) + {} + + key_type const& key() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + assert( p != nullptr ); + return p->m_Data.first; + } + + pair_ptr operator ->() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + return p ? 
&(p->m_Data) : nullptr; + } + + pair_ref operator *() const + { + typename iterator_base::value_ref p = iterator_base::operator *(); + return p.m_Data; + } + + value_ref val() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + assert( p != nullptr ); + return p->m_Data.second; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base::operator ++(); + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base::operator ==(i); + } + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base::operator !=(i); + } + }; + //@endcond + + public: + ///@name Forward iterators + //@{ + /// Forward iterator + /** + You may safely use iterators in multi-threaded environment only under external RCU lock. + Otherwise, a program crash is possible if another thread deletes the item the iterator points to. + */ + typedef iterator_type iterator; + + /// Const forward iterator + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( head()); + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + Internally, end returning value equals to \p nullptr. + + The returned value can be used only to control reaching the end of the list. 
+ For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator(); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator begin() const + { + return const_iterator( head()); + } + /// Returns a forward const iterator addressing the first element in a list + const_iterator cbegin() const + { + return const_iterator( head()); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator end() const + { + return const_iterator(); + } + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator cend() const + { + return const_iterator(); + } + //@} + + public: + /// Default constructor + /** + Initializes empty list + */ + MichaelKVList() + {} + + //@cond + template >::value >> + explicit MichaelKVList( Stat& st ) + : base_class( st ) + {} + //@endcond + + /// List destructor + /** + Clears the list + */ + ~MichaelKVList() + { + clear(); + } + + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the list. + + Preconditions: + - The \ref key_type should be constructible from value of type \p K. + In trivial case, \p K is equal to \ref key_type. + - The \ref mapped_type should be default-constructible. + + The function applies RCU lock internally. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( const K& key ) + { + return insert_at( head(), key ); + } + + /// Inserts new node with a key and a value + /** + The function creates a node with \p key and value \p val, and then inserts the node created into the list. + + Preconditions: + - The \ref key_type should be constructible from \p key of type \p K. + - The \ref mapped_type should be constructible from \p val of type \p V. + + The function applies RCU lock internally. 
+ + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( const K& key, const V& val ) + { + return insert_at( head(), key, val ); + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the list's item inserted. item.second is a reference to item's value that may be changed. + User-defined functor \p func should guarantee that during changing item's value no any other changes + could be made on this list's item by concurrent threads. + + The key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the list; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + The function applies RCU lock internally. + + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + bool insert_with( const K& key, Func func ) + { + return insert_with_at( head(), key, func ); + } + + /// Updates an element with given \p key + /** + The operation performs inserting or changing data with lock-free manner. + + If the \p key not found in the list, then the new item created from \p key + is inserted into the list (note that in this case the \ref key_type should be + copy-constructible from type \p K). + Otherwise, the functor \p func is called with item found. 
+ The functor \p Func may be a function with signature: + \code + void func( bool bNew, value_type& item ); + \endcode + or a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + + The functor may change any fields of the \p item.second that is \ref mapped_type; + however, \p func must guarantee that during changing no any other modifications + could be made on this item by concurrent threads. + + The function applies RCU lock internally. + + Returns std::pair where \p first is true if operation is successful, + \p second is true if new item has been added or \p false if the item with \p key + already is in the list. + + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + /// Updates data by \p key + /** + The operation performs inserting or replacing the element with lock-free manner. + + If the \p key not found in the list, then the new item created from \p key + will be inserted iff \p bAllowInsert is \p true. + (note that in this case the \ref key_type should be constructible from type \p K). + Otherwise, if \p key is found, the functor \p func is called with item found. + + The functor \p Func signature is: + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - the item found or inserted + + The functor may change any fields of the \p item.second that is \ref mapped_type; + however, \p func must guarantee that during changing no any other modifications + could be made on this item by concurrent threads. + + The function applies RCU lock internally. 
+ + Returns std::pair where \p first is true if operation is successful, + \p second is true if new item has been added or \p false if the item with \p key + already exists. + + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + std::pair update( const K& key, Func func, bool bAllowInsert = true ) + { + return update_at( head(), key, func, bAllowInsert ); + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( const K& key, Func f ) + { + return update( key, f, true ); + } + //@endcond + + /// Inserts data of type \ref mapped_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + + The function applies RCU lock internally. + */ + template + bool emplace( K&& key, Args&&... args ) + { + return emplace_at( head(), std::forward(key), std::forward(args)... ); + } + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_MichaelKVList_rcu_erase + + RCU \p synchronize method can be called. RCU should not be locked. + + Returns \p true if \p key is found and has been deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + return erase_at( head(), key, intrusive_key_comparator()); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelKVList_rcu_erase "erase(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return erase_at( head(), key, typename maker::template less_wrapper::type()); + } + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_MichaelKVList_rcu_erase_func + The function searches an item with key \p key, calls \p f functor + and deletes the item. 
If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct functor { + void operator()(value_type& val) { ... } + }; + \endcode + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + + See also: \ref erase + */ + template + bool erase( K const& key, Func f ) + { + return erase_at( head(), key, intrusive_key_comparator(), f ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelKVList_rcu_erase_func "erase(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( K const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return erase_at( head(), key, typename maker::template less_wrapper::type(), f ); + } + + /// Extracts an item from the list + /** + @anchor cds_nonintrusive_MichaelKVList_rcu_extract + The function searches an item with key equal to \p key in the list, + unlinks it from the list, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. + If \p key is not found the function returns an empty \p exempt_ptr. + + @note The function does NOT dispose the item found. + It just excludes the item from the list and returns a pointer to item found. + You shouldn't lock RCU before calling this function. + + \code + #include + #include + + typedef cds::urcu::gc< general_buffered<> > rcu; + typedef cds::container::MichaelKVList< rcu, int, Foo > rcu_michael_list; + + rcu_michael_list theList; + // ... + + rcu_michael_list::exempt_ptr p; + + // The RCU should NOT be locked when extract() is called! + assert( !rcu::is_locked()); + + // extract() call + p = theList.extract( 10 ); + if ( p ) { + // do something with p + ... 
+ } + + // we may safely release extracted pointer here. + // release() passes the pointer to RCU reclamation cycle. + p.release(); + \endcode + */ + template + exempt_ptr extract( K const& key ) + { + return exempt_ptr( extract_at( head(), key, intrusive_key_comparator())); + } + + /// Extracts an item from the list using \p pred predicate for searching + /** + This function is the analog for \p extract(K const&). + The \p pred is a predicate used for key comparing. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as \ref key_comparator. + */ + template + exempt_ptr extract_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return exempt_ptr( extract_at( head(), key, typename maker::template less_wrapper::type())); + } + + /// Checks whether the list contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + The function applies RCU lock internally. + */ + template + bool contains( Q const& key ) + { + return find_at( head(), key, intrusive_key_comparator()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( Q const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the map contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the list. + + The function applies RCU lock internally. 
+ */ + template + bool contains( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return find_at( head(), key, typename maker::template less_wrapper::type()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Finds \p key and performs an action with it + /** \anchor cds_nonintrusive_MichaelKVList_rcu_find_func + The function searches an item with key equal to \p key and calls the functor \p f for the item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + The functor may change item.second that is reference to value of node. + Note that the function is only guarantee that \p item cannot be deleted during functor is executing. + The function does not serialize simultaneous access to the list \p item. If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The function makes RCU lock internally. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( Q const& key, Func f ) + { + return find_at( head(), key, intrusive_key_comparator(), f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelKVList_rcu_find_func "find(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. 
+ */ + template + bool find_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return find_at( head(), key, typename maker::template less_wrapper::type(), f ); + } + + /// Finds \p key and return the item found + /** \anchor cds_nonintrusive_MichaelKVList_rcu_get + The function searches the item with \p key and returns the pointer to item found. + If \p key is not found it returns an empty \p raw_ptr object. + + Note the compare functor should accept a parameter of type \p K that can be not the same as \p key_type. + + RCU should be locked before call of this function. + Returned item is valid only while RCU is locked: + \code + typedef cds::container::MichaelKVList< cds::urcu::gc< cds::urcu::general_buffered<> >, int, foo, my_traits > ord_list; + ord_list theList; + // ... + tyename ord_list::raw_ptr rp; + { + // Lock RCU + ord_list::rcu_lock lock; + + rp = theList.get( 5 ); + if ( rp ) { + // Deal with rp + //... + } + // Unlock RCU by rcu_lock destructor + } + // rp can be released at any time after RCU has been unlocked + rp.release(); + \endcode + */ + template + raw_ptr get( K const& key ) + { + return get_at( head(), key, intrusive_key_comparator()); + } + + /// Finds \p key and return the item found + /** + The function is an analog of \ref cds_nonintrusive_MichaelKVList_rcu_get "get(K const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + raw_ptr get_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return get_at( head(), key, typename maker::template less_wrapper::type()); + } + + /// Checks if the list is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns list's item count + /** + The value returned depends on item counter provided by \p Traits. 
For \p atomicity::empty_item_counter, + this function always returns 0. + + @note Even if you use real item counter and it returns 0, this fact does not mean that the list + is empty. To check list emptyness use \p empty() method. + */ + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Clears the list + /** + Post-condition: the list is empty + */ + void clear() + { + base_class::clear(); + } + + protected: + //@cond + bool insert_node_at( head_type& refHead, node_type * pNode ) + { + assert( pNode != nullptr ); + scoped_node_ptr p( pNode ); + if ( base_class::insert_at( refHead, *pNode )) { + p.release(); + return true; + } + return false; + } + + template + bool insert_at( head_type& refHead, const K& key ) + { + return insert_node_at( refHead, alloc_node( key )); + } + + template + bool insert_at( head_type& refHead, const K& key, const V& val ) + { + return insert_node_at( refHead, alloc_node( key, val )); + } + + template + bool insert_with_at( head_type& refHead, const K& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( key )); + + if ( base_class::insert_at( refHead, *pNode, [&f](node_type& node){ f( node.m_Data ); })) { + pNode.release(); + return true; + } + return false; + } + + template + bool emplace_at( head_type& refHead, K&& key, Args&&... args ) + { + return insert_node_at( refHead, alloc_node( std::forward(key), std::forward(args)... 
)); + } + + template + std::pair update_at( head_type& refHead, const K& key, Func f, bool bAllowInsert ) + { + scoped_node_ptr pNode( alloc_node( key )); + + std::pair ret = base_class::update_at( refHead, *pNode, + [&f]( bool bNew, node_type& node, node_type& ){ f( bNew, node.m_Data ); }, + bAllowInsert ); + if ( ret.first && ret.second ) + pNode.release(); + + return ret; + } + + template + bool erase_at( head_type& refHead, K const& key, Compare cmp ) + { + return base_class::erase_at( refHead, key, cmp ); + } + + template + bool erase_at( head_type& refHead, K const& key, Compare cmp, Func f ) + { + return base_class::erase_at( refHead, key, cmp, [&f]( node_type const & node ){ f( const_cast(node.m_Data)); }); + } + + template + node_type * extract_at( head_type& refHead, K const& key, Compare cmp ) + { + return base_class::extract_at( refHead, key, cmp ); + } + + template + bool find_at( head_type& refHead, K const& key, Compare cmp ) + { + return base_class::find_at( refHead, key, cmp, [](node_type&, K const&) {} ); + } + + template + bool find_at( head_type& refHead, K& key, Compare cmp, Func f ) + { + return base_class::find_at( refHead, key, cmp, [&f](node_type& node, K const&){ f( node.m_Data ); }); + } + + template + raw_ptr get_at( head_type& refHead, K const& val, Compare cmp ) + { + return raw_ptr( base_class::get_at( refHead, val, cmp )); + } + + template + static node_type * alloc_node( const K& key ) + { + return cxx_allocator().New( key ); + } + + template + static node_type * alloc_node( const K& key, const V& val ) + { + return cxx_allocator().New( key, val ); + } + + template + static node_type * alloc_node( K&& key, Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward( key ), std::forward( args )... 
); + } + + static void free_node( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + + head_type& head() + { + return base_class::m_pHead; + } + + head_type& head() const + { + return const_cast(base_class::m_pHead); + } + //@endcond + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_MICHAEL_KVLIST_RCU_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_list_dhp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_list_dhp.h new file mode 100644 index 0000000..3935843 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_list_dhp.h @@ -0,0 +1,39 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_MICHAEL_LIST_DHP_H +#define CDSLIB_CONTAINER_MICHAEL_LIST_DHP_H + +#include +#include +#include +#include + +#endif // #ifndef CDSLIB_CONTAINER_MICHAEL_LIST_DHP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_list_hp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_list_hp.h new file mode 100644 index 0000000..046ed4e --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_list_hp.h @@ -0,0 +1,39 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_MICHAEL_LIST_HP_H +#define CDSLIB_CONTAINER_MICHAEL_LIST_HP_H + +#include +#include +#include +#include + +#endif // #ifndef CDSLIB_CONTAINER_MICHAEL_LIST_HP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_list_nogc.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_list_nogc.h new file mode 100644 index 0000000..747d58d --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_list_nogc.h @@ -0,0 +1,510 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_MICHAEL_LIST_NOGC_H +#define CDSLIB_CONTAINER_MICHAEL_LIST_NOGC_H + +#include +#include +#include +#include + +namespace cds { namespace container { + + //@cond + namespace details { + + template + struct make_michael_list_nogc: public make_michael_list + { + typedef make_michael_list base_maker; + typedef typename base_maker::node_type node_type; + + struct intrusive_traits: public base_maker::intrusive_traits + { + typedef typename base_maker::node_deallocator disposer; + }; + + typedef intrusive::MichaelList type; + }; + + } // namespace details + //@endcond + + /// Michael's lock-free ordered single-linked list (template specialization for \p gc::nogc) + /** @ingroup cds_nonintrusive_list + \anchor cds_nonintrusive_MichaelList_nogc + + This specialization is intended for so-called append-only usage when no item + reclamation may be performed. The class does not support deleting of list item. 
+ Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N). + + See \ref cds_nonintrusive_MichaelList_gc "MichaelList" for description of template parameters. + */ + template + class MichaelList: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::MichaelList< gc::nogc, T, Traits > +#else + protected details::make_michael_list_nogc< T, Traits >::type +#endif + { + //@cond + typedef details::make_michael_list_nogc< T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + typedef cds::gc::nogc gc; ///< Garbage collector used + typedef T value_type; ///< Type of value stored in the list + typedef Traits traits; ///< List traits + + typedef typename base_class::back_off back_off; ///< Back-off strategy used + typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter; ///< Item counting policy used + typedef typename maker::key_comparator key_comparator; ///< key comparison functor + typedef typename base_class::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename base_class::stat stat; ///< Internal statistics + + //@cond + // Rebind traits (split-list support) + template + struct rebind_traits { + typedef MichaelList< + gc + , value_type + , typename cds::opt::make_options< traits, Options...>::type + > type; + }; + + // Stat selector + template + using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >; + //@endcond + + protected: + //@cond + typedef typename base_class::value_type node_type; + typedef typename maker::cxx_allocator cxx_allocator; + typedef typename maker::node_deallocator node_deallocator; + typedef typename maker::intrusive_traits::compare intrusive_key_comparator; + + typedef typename base_class::atomic_node_ptr head_type; + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + //@endcond + + protected: + //@cond + template + class iterator_type: protected base_class::template iterator_type + { + typedef typename base_class::template iterator_type iterator_base; + + iterator_type( head_type const& pNode ) + : iterator_base( pNode ) + {} + + explicit iterator_type( const iterator_base& it ) + : iterator_base( it ) + {} + + friend class MichaelList; + + protected: + explicit iterator_type( node_type& pNode ) + : iterator_base( &pNode ) + {} + + public: + typedef typename cds::details::make_const_type::pointer value_ptr; + typedef typename cds::details::make_const_type::reference value_ref; + + iterator_type() + {} + + iterator_type( const iterator_type& src ) + : iterator_base( src ) + {} + + value_ptr operator ->() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + return p ? 
&(p->m_Value) : nullptr; + } + + value_ref operator *() const + { + return (iterator_base::operator *()).m_Value; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base::operator ++(); + return *this; + } + + /// Post-increment + iterator_type operator ++(int) + { + return iterator_base::operator ++(0); + } + + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base::operator ==(i); + } + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base::operator !=(i); + } + }; + //@endcond + + public: + ///@name Forward iterators + //@{ + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + typedef iterator_type iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( head()); + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + Internally, end returning value equals to \p nullptr. + + The returned value can be used only to control reaching the end of the list. 
+ For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator(); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator begin() const + { + return const_iterator( head()); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator cbegin() const + { + return const_iterator( head()); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator end() const + { + return const_iterator(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator cend() const + { + return const_iterator(); + } + //@} + + public: + /// Default constructor + /** + Initialize empty list + */ + MichaelList() + {} + + //@cond + template >::value >> + explicit MichaelList( Stat& st ) + : base_class( st ) + {} + //@endcond + + /// List destructor + /** + Clears the list + */ + ~MichaelList() + { + clear(); + } + + /// Inserts new node + /** + The function inserts \p val in the list if the list does not contain + an item with key equal to \p val. + + Return an iterator pointing to inserted item if success \ref end() otherwise + */ + template + iterator insert( Q&& val ) + { + return node_to_iterator( insert_at( head(), std::forward( val ))); + } + + /// Updates the item + /** + If \p key is not in the list and \p bAllowInsert is \p true, + the function inserts a new item. + Otherwise, the function returns an iterator pointing to the item found. + + Returns std::pair where \p first is an iterator pointing to + item found or inserted, \p second is true if new item has been added or \p false if the item + already is in the list. 
+ */ + template + std::pair update( Q&& key, bool bAllowInsert = true ) + { + std::pair< node_type *, bool > ret = update_at( head(), std::forward( key ), bAllowInsert ); + return std::make_pair( node_to_iterator( ret.first ), ret.second ); + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( const Q& val ) + { + return update( val, true ); + } + //@endcond + + /// Inserts data of type \ref value_type constructed with std::forward(args)... + /** + Return an iterator pointing to inserted item if success \ref end() otherwise + */ + template + iterator emplace( Args&&... args ) + { + return node_to_iterator( emplace_at( head(), std::forward(args)... )); + } + + /// Checks whether the list contains \p key + /** + The function searches the item with key equal to \p key + and returns an iterator pointed to item found if the key is found, + and \ref end() otherwise + */ + template + iterator contains( Q const& key ) + { + return node_to_iterator( find_at( head(), key, intrusive_key_comparator())); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + iterator find( Q const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the map contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the list. 
+ */ + template + iterator contains( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return node_to_iterator( find_at( head(), key, typename maker::template less_wrapper::type())); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + iterator find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Check if the list is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns list's item count + /** + The value returned depends on item counter provided by \p Traits. For \p atomicity::empty_item_counter, + this function always returns 0. + + @note Even if you use real item counter and it returns 0, this fact does not mean that the list + is empty. To check list emptyness use \p empty() method. + */ + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Clears the list + void clear() + { + base_class::clear(); + } + + protected: + //@cond + static value_type& node_to_value( node_type& n ) + { + return n.m_Value; + } + + static node_type * alloc_node() + { + return cxx_allocator().New(); + } + + static node_type * alloc_node( value_type const& v ) + { + return cxx_allocator().New( v ); + } + + template + static node_type * alloc_node( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward( args )... 
); + } + + static void free_node( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + + head_type& head() + { + return base_class::m_pHead; + } + + head_type const& head() const + { + return base_class::m_pHead; + } + + iterator node_to_iterator( node_type * pNode ) + { + if ( pNode ) + return iterator( *pNode ); + return end(); + } + + iterator insert_node( node_type * pNode ) + { + return node_to_iterator( insert_node_at( head(), pNode )); + } + + node_type * insert_node_at( head_type& refHead, node_type * pNode ) + { + assert( pNode != nullptr ); + scoped_node_ptr p(pNode); + if ( base_class::insert_at( refHead, *pNode )) + return p.release(); + + return nullptr; + } + + template + node_type * insert_at( head_type& refHead, Q&& val ) + { + return insert_node_at( refHead, alloc_node( std::forward( val ))); + } + + template + std::pair< node_type *, bool > update_at( head_type& refHead, Q&& val, bool bAllowInsert ) + { + scoped_node_ptr pNode( alloc_node( std::forward( val ))); + node_type * pItemFound = nullptr; + + std::pair ret = base_class::update_at( refHead, *pNode, + [&pItemFound](bool, node_type& item, node_type&) { pItemFound = &item; }, + bAllowInsert ); + + if ( ret.second ) + pNode.release(); + return std::make_pair( pItemFound, ret.second ); + } + + template + node_type * emplace_at( head_type& refHead, Args&&... 
args ) + { + return insert_node_at( refHead, alloc_node( std::forward(args)...)); + } + + template + node_type * find_at( head_type& refHead, Q const& key, Compare cmp ) + { + return base_class::find_at( refHead, key, cmp ); + } + //@endcond + }; +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_MICHAEL_LIST_NOGC_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_list_rcu.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_list_rcu.h new file mode 100644 index 0000000..a9ae723 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_list_rcu.h @@ -0,0 +1,904 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_MICHAEL_LIST_RCU_H +#define CDSLIB_CONTAINER_MICHAEL_LIST_RCU_H + +#include +#include +#include +#include +#include + +namespace cds { namespace container { + + /// Michael's ordered list (template specialization for \ref cds_urcu_desc "RCU") + /** @ingroup cds_nonintrusive_list + \anchor cds_nonintrusive_MichaelList_rcu + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N). + + Source: + - [2002] Maged Michael "High performance dynamic lock-free hash tables and list-based sets" + + This class is non-intrusive version of \ref cds_intrusive_MichaelList_rcu "cds::intrusive::MichaelList" RCU specialization. + + Template arguments: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p T - type stored in the list. The type must be default- and copy-constructible. + - \p Traits - type traits, default is michael_list::traits + + The implementation does not divide type \p T into key and value part and + may be used as a main building block for hash set containers. + The key is a function (or a part) of type \p T, and this function is specified by Traits::compare functor + or Traits::less predicate. + + \ref cds_nonintrusive_MichaelKVList_rcu "MichaelKVList" is a key-value version of Michael's + non-intrusive list that is closer to the C++ std library approach. 
+ + @note Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. + + It is possible to declare option-based list with cds::container::michael_list::make_traits metafunction istead of \p Traits template + argument. For example, the following traits-based declaration of Michael's list + + \code + #include + #include + // Declare comparator for the item + struct my_compare { + int operator ()( int i1, int i2 ) + { + return i1 - i2; + } + }; + + // Declare traits + struct my_traits: public cds::container::michael_list::traits + { + typedef my_compare compare; + }; + + // Declare traits-based list + typedef cds::container::MichaelList< cds::urcu::gc< cds::urcu::general_buffered<> >, int, my_traits > traits_based_list; + \endcode + + is equivalent for the following option-based list + \code + #include + #include + + // my_compare is the same + + // Declare option-based list + typedef cds::container::MichaelList< cds::urcu::gc< cds::urcu::general_buffered<> >, int, + typename cds::container::michael_list::make_traits< + cds::container::opt::compare< my_compare > // item comparator option + >::type + > option_based_list; + \endcode + + Template argument list \p Options of cds::container::michael_list::make_traits metafunction are: + - opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the opt::less is used. + - opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::empty is used. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that is no item counting. + - opt::allocator - the allocator used for creating and freeing list's item. Default is \ref CDS_DEFAULT_ALLOCATOR macro. + - opt::memory_model - C++ memory ordering model. 
Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + - opt::rcu_check_deadlock - a deadlock checking policy. Default is opt::v::rcu_throw_deadlock + */ + template < + typename RCU, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = michael_list::traits +#else + typename Traits +#endif + > + class MichaelList< cds::urcu::gc, T, Traits > : +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::MichaelList< cds::urcu::gc, T, Traits > +#else + protected details::make_michael_list< cds::urcu::gc, T, Traits >::type +#endif + { + //@cond + typedef details::make_michael_list< cds::urcu::gc, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + typedef cds::urcu::gc gc; ///< RCU + typedef T value_type; ///< Type of value stored in the list + typedef Traits traits; ///< List traits + + typedef typename base_class::back_off back_off; ///< Back-off strategy used + typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter; ///< Item counting policy used + typedef typename maker::key_comparator key_comparator; ///< key comparison functor + typedef typename base_class::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename base_class::stat stat; ///< Internal statistics + typedef typename base_class::rcu_check_deadlock rcu_check_deadlock ; ///< RCU deadlock checking policy + + typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock + static constexpr const bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; ///< Group of \p extract_xxx functions do not require external locking + + //@cond + // Rebind traits (split-list support) + template + struct rebind_traits { + typedef MichaelList< + gc + , value_type + , typename cds::opt::make_options< traits, Options...>::type + > type; + }; + + // Stat selector + template + using select_stat_wrapper = typename base_class::template select_stat_wrapper< Stat >; + //@endcond + + protected: + //@cond + typedef typename base_class::value_type node_type; + typedef typename maker::cxx_allocator cxx_allocator; + typedef typename maker::node_deallocator node_deallocator; + typedef typename maker::intrusive_traits::compare intrusive_key_comparator; + + typedef typename base_class::atomic_node_ptr head_type; + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + //@endcond + + private: + //@cond + struct raw_ptr_converter + { + value_type * operator()( node_type * p ) const + { + return p ? 
&p->m_Value : nullptr; + } + + value_type& operator()( node_type& n ) const + { + return n.m_Value; + } + + value_type const& operator()( node_type const& n ) const + { + return n.m_Value; + } + }; + //@endcond + + public: + ///< pointer to extracted node + using exempt_ptr = cds::urcu::exempt_ptr< gc, node_type, value_type, typename maker::intrusive_traits::disposer >; + + /// Result of \p get(), \p get_with() functions - pointer to the node found + typedef cds::urcu::raw_ptr_adaptor< value_type, typename base_class::raw_ptr, raw_ptr_converter > raw_ptr; + + protected: + //@cond + template + class iterator_type: protected base_class::template iterator_type + { + typedef typename base_class::template iterator_type iterator_base; + + iterator_type( head_type const& pNode ) + : iterator_base( pNode ) + {} + + friend class MichaelList; + + public: + typedef typename cds::details::make_const_type::pointer value_ptr; + typedef typename cds::details::make_const_type::reference value_ref; + + iterator_type() + {} + + iterator_type( iterator_type const& src ) + : iterator_base( src ) + {} + + value_ptr operator ->() const + { + typename iterator_base::value_ptr p = iterator_base::operator ->(); + return p ? &(p->m_Value) : nullptr; + } + + value_ref operator *() const + { + return (iterator_base::operator *()).m_Value; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base::operator ++(); + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base::operator ==(i); + } + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base::operator !=(i); + } + }; + //@endcond + + public: + ///@name Forward iterators (only for debugging purpose) + //@{ + /// Forward iterator + /** + You may safely use iterators in multi-threaded environment only under RCU lock. + Otherwise, a crash is possible if another thread deletes the item the iterator points to. 
+ */ + typedef iterator_type iterator; + + /// Const forward iterator + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( head()); + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + Internally, end returning value equals to \p nullptr. + + The returned value can be used only to control reaching the end of the list. + For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator(); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator begin() const + { + return const_iterator( head()); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator cbegin() const + { + return const_iterator( head()); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator end() const + { + return const_iterator(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator cend() const + { + return const_iterator(); + } + //@} + + public: + /// Default constructor + /** + Initialize empty list + */ + MichaelList() + {} + + //@cond + template >::value >> + explicit MichaelList( Stat& st ) + : base_class( st ) + {} + //@endcond + + /// List destructor + /** + Clears the list + */ + ~MichaelList() + { + clear(); + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the list. + + The type \p Q should contain as minimum the complete key of the node. + The object of \ref value_type should be constructible from \p val of type \p Q. + In trivial case, \p Q is equal to \ref value_type. 
+ + The function makes RCU lock internally. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( Q&& val ) + { + return insert_at( head(), std::forward( val )); + } + + /// Inserts new node + /** + This function inserts new node with default-constructed value and then it calls + \p func functor with signature + \code void func( value_type& itemValue ) ;\endcode + + The argument \p itemValue of user-defined functor \p func is the reference + to the list's item inserted. User-defined functor \p func should guarantee that during changing + item's value no any other changes could be made on this list's item by concurrent threads. + + The type \p Q should contain the complete key of the node. + The object of \ref value_type should be constructible from \p key of type \p Q. + + The function allows to split creating of new item into two part: + - create item from \p key with initializing key-fields only; + - insert new item into the list; + - if inserting is successful, initialize non-key fields of item by calling \p f functor + + This can be useful if complete initialization of object of \p value_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + The function makes RCU lock internally. + + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + bool insert( Q&& key, Func func ) + { + return insert_at( head(), std::forward( key ), func ); + } + + /// Updates data by \p key + /** + The operation performs inserting or replacing the element with lock-free manner. + + If the \p key not found in the list, then the new item created from \p key + will be inserted iff \p bAllowInsert is \p true. + Otherwise, if \p key is found, the functor \p func is called with item found. 
+ + The functor \p Func signature is: + \code + struct my_functor { + void operator()( bool bNew, value_type& item, Q const& val ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + - \p val - argument \p key passed into the \p %update() function + + The functor may change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + The function applies RCU lock internally. + + Returns std::pair where \p first is true if operation is successful, + \p second is true if new item has been added or \p false if the item with \p key + already exists. + + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + std::pair update( Q const& key, Func func, bool bAllowInsert = true ) + { + return update_at( head(), key, func, bAllowInsert ); + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( Q const& key, Func f ) + { + return update( key, f, true ); + } + //@endcond + + /// Inserts data of type \ref value_type constructed from \p args + /** + Returns \p true if inserting successful, \p false otherwise. + + The function makes RCU lock internally. + */ + template + bool emplace( Args&&... args ) + { + return emplace_at( head(), std::forward(args)... ); + } + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_MichealList_rcu_erase_val + Since the key of MichaelList's item type \p value_type is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The list item comparator should be able to compare values of the type \p value_type + and \p Q in any order. + + RCU \p synchronize method can be called. RCU should not be locked. 
+ + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key ) + { + return erase_at( head(), key, intrusive_key_comparator(), [](value_type const&){} ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichealList_rcu_erase_val "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return erase_at( head(), key, typename maker::template less_wrapper::type(), [](value_type const&){} ); + } + + /// Deletes \p key from the list + /** \anchor cds_nonintrusive_MichaelList_rcu_erase_func + The function searches an item with key \p key, calls \p f functor with item found + and deletes it. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct functor { + void operator()(const value_type& val) { ... } + }; + \endcode + + Since the key of MichaelList's item type \p value_type is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The list item comparator should be able to compare the values of type \p value_type + and \p Q in any order. + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key, Func f ) + { + return erase_at( head(), key, intrusive_key_comparator(), f ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelList_rcu_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. 
+ \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return erase_at( head(), key, typename maker::template less_wrapper::type(), f ); + } + + /// Extracts an item from the list + /** + @anchor cds_nonintrusive_MichaelList_rcu_extract + The function searches an item with key equal to \p key in the list, + unlinks it from the list, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. + If the item with the key equal to \p key is not found the function returns an empty \p exempt_ptr. + + @note The function does NOT dispose the item found. It just excludes the item from the list + and returns a pointer to the item. + You shouldn't lock RCU for current thread before calling this function. + + \code + #include + #include + + typedef cds::urcu::gc< general_buffered<> > rcu; + typedef cds::container::MichaelList< rcu, Foo > rcu_michael_list; + + rcu_michael_list theList; + // ... + + rcu_michael_list::exempt_ptr p; + + // The RCU should NOT be locked when extract() is called! + assert( !rcu::is_locked()); + + // extract() call + p = theList.extract( 10 ) + if ( p ) { + // do something with p + ... + } + + // we may safely release extracted pointer here. + // release() passes the pointer to RCU reclamation cycle. + p.release(); + \endcode + */ + template + exempt_ptr extract( Q const& key ) + { + return exempt_ptr( extract_at( head(), key, intrusive_key_comparator())); + } + + /// Extracts an item from the list using \p pred predicate for searching + /** + This function is the analog for \p extract(Q const&). + + The \p pred is a predicate used for key comparing. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as \ref key_comparator. 
+ */ + template + exempt_ptr extract_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return exempt_ptr( extract_at( head(), key, typename maker::template less_wrapper::type())); + } + + /// Checks whether the list contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + The function applies RCU lock internally. + */ + template + bool contains( Q const& key ) + { + return find_at( head(), key, intrusive_key_comparator()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( Q const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the list contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool contains( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return find_at( head(), key, typename maker::template less_wrapper::type()); + } + //@cond + // Deprecatd, use contains() + template + bool find_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return contains( key, pred ); + } + //@endcond + + /// Finds the key \p key and performs an action with it + /** \anchor cds_nonintrusive_MichaelList_rcu_find_func + The function searches an item with key equal to \p key and calls the functor \p f for the item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the \p %find() function argument. + + The functor may change non-key fields of \p item. Note that the function is only guarantee + that \p item cannot be deleted during functor is executing. + The function does not serialize simultaneous access to the list \p item. 
If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The function makes RCU lock internally. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& key, Func f ) + { + return find_at( head(), key, intrusive_key_comparator(), f ); + } + //@cond + template + bool find( Q const& key, Func f ) + { + return find_at( head(), key, intrusive_key_comparator(), f ); + } + //@endcond + + /// Finds the key \p key using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelList_rcu_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return find_at( head(), key, typename maker::template less_wrapper::type(), f ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return find_at( head(), key, typename maker::template less_wrapper::type(), f ); + } + //@endcond + + /// Finds the key \p key and return the item found + /** \anchor cds_nonintrusive_MichaelList_rcu_get + The function searches the item with key equal to \p key and returns the pointer to item found. + If \p key is not found it returns an empty \p raw_ptr. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU should be locked before call of this function. + Returned item is valid only while RCU is locked: + \code + typedef cds::container::MichaelList< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > ord_list; + ord_list theList; + // ... + typename ord_list::raw_ptr rp; + { + // Lock RCU + ord_list::rcu_lock lock; + + rp = theList.get( 5 ); + if ( rp ) { + // Deal with rp + //... 
+ } + // Unlock RCU by rcu_lock destructor + // A value owned by rp can be freed at any time after RCU has been unlocked + } + // You can manually release rp after RCU-locked section + rp.release(); + \endcode + */ + template + raw_ptr get( Q const& key ) + { + return get_at( head(), key, intrusive_key_comparator()); + } + + /// Finds \p key and return the item found + /** + The function is an analog of \ref cds_nonintrusive_MichaelList_rcu_get "get(Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + raw_ptr get_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return get_at( head(), key, typename maker::template less_wrapper::type()); + } + + /// Checks if the list is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns list's item count + /** + The value returned depends on item counter provided by \p Traits. For \p atomicity::empty_item_counter, + this function always returns 0. + + @note Even if you use real item counter and it returns 0, this fact does not mean that the list + is empty. To check list emptyness use \p empty() method. 
+ */ + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Clears the list + void clear() + { + base_class::clear(); + } + + protected: + //@cond + bool insert_node( node_type * pNode ) + { + return insert_node_at( head(), pNode ); + } + + bool insert_node_at( head_type& refHead, node_type * pNode ) + { + assert( pNode ); + scoped_node_ptr p(pNode); + if ( base_class::insert_at( refHead, *pNode )) { + p.release(); + return true; + } + + return false; + } + + template + bool insert_at( head_type& refHead, Q&& val ) + { + return insert_node_at( refHead, alloc_node( std::forward( val ))); + } + + template + bool insert_at( head_type& refHead, Q&& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( std::forward( key ))); + + if ( base_class::insert_at( refHead, *pNode, [&f]( node_type& node ) { f( node_to_value(node)); } )) { + pNode.release(); + return true; + } + return false; + } + + template + bool emplace_at( head_type& refHead, Args&&... args ) + { + return insert_node_at( refHead, alloc_node( std::forward(args) ... 
)); + } + + template + bool erase_at( head_type& refHead, Q const& key, Compare cmp, Func f ) + { + return base_class::erase_at( refHead, key, cmp, [&f](node_type const& node){ f( node_to_value(node)); } ); + } + + template + std::pair update_at( head_type& refHead, Q const& key, Func f, bool bAllowInsert ) + { + scoped_node_ptr pNode( alloc_node( key )); + + std::pair ret = base_class::update_at( refHead, *pNode, + [&f, &key](bool bNew, node_type& node, node_type&){ f( bNew, node_to_value(node), key );}, + bAllowInsert ); + if ( ret.first && ret.second ) + pNode.release(); + + return ret; + } + + template + node_type * extract_at( head_type& refHead, Q const& key, Compare cmp ) + { + return base_class::extract_at( refHead, key, cmp ); + } + + template + bool find_at( head_type& refHead, Q const& key, Compare cmp ) + { + return base_class::find_at( refHead, key, cmp, [](node_type&, Q const &) {} ); + } + + template + bool find_at( head_type& refHead, Q& val, Compare cmp, Func f ) + { + return base_class::find_at( refHead, val, cmp, [&f](node_type& node, Q& v){ f( node_to_value(node), v ); }); + } + + template + raw_ptr get_at( head_type& refHead, Q const& val, Compare cmp ) + { + return raw_ptr( base_class::get_at( refHead, val, cmp )); + } + + static value_type& node_to_value( node_type& n ) + { + return n.m_Value; + } + static value_type const& node_to_value( node_type const& n ) + { + return n.m_Value; + } + + template + static node_type * alloc_node( Q&& v ) + { + return cxx_allocator().New( std::forward( v )); + } + + template + static node_type * alloc_node( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward( args )... 
); + } + + static void free_node( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + + head_type& head() + { + return base_class::m_pHead; + } + + head_type& head() const + { + return const_cast(base_class::m_pHead); + } + //@endcond + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_MICHAEL_LIST_RCU_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_map.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_map.h new file mode 100644 index 0000000..114158b --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_map.h @@ -0,0 +1,1007 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_MICHAEL_MAP_H +#define CDSLIB_CONTAINER_MICHAEL_MAP_H + +#include +#include +#include + +namespace cds { namespace container { + + /// Michael's hash map + /** @ingroup cds_nonintrusive_map + \anchor cds_nonintrusive_MichaelHashMap_hp + + Source: + - [2002] Maged Michael "High performance dynamic lock-free hash tables and list-based sets" + + Michael's hash table algorithm is based on lock-free ordered list and it is very simple. + The main structure is an array \p T of size \p M. Each element in \p T is basically a pointer + to a hash bucket, implemented as a singly linked list. The array of buckets cannot be dynamically expanded. + However, each bucket may contain unbounded number of items. + + Template parameters are: + - \p GC - Garbage collector used. You may use any \ref cds_garbage_collector "Garbage collector" + from the \p libcds library. + Note the \p GC must be the same as the GC used for \p OrderedList + - \p OrderedList - ordered key-value list implementation used as bucket for hash map, for example, \p MichaelKVList, + \p LazyKVList, \p IterableKVList. The ordered list implementation specifies the \p Key and \p Value types + stored in the hash-map, the reclamation schema \p GC used by hash-map, the comparison functor for the type \p Key + and other features specific for the ordered list. + - \p Traits - map traits, default is \p michael_map::traits. 
+ Instead of defining \p Traits struct you may use option-based syntax with \p michael_map::make_traits metafunction. + + Many of the class function take a key argument of type \p K that in general is not \p key_type. + \p key_type and an argument of template type \p K must meet the following requirements: + - \p key_type should be constructible from value of type \p K; + - the hash functor should be able to calculate correct hash value from argument \p key of type \p K: + hash( key_type(key)) == hash( key ) + - values of type \p key_type and \p K should be comparable + + There are the specializations: + - for \ref cds_urcu_desc "RCU" - declared in cds/container/michael_map_rcu.h, + see \ref cds_nonintrusive_MichaelHashMap_rcu "MichaelHashMap". + - for \p cds::gc::nogc declared in cds/container/michael_map_nogc.h, + see \ref cds_nonintrusive_MichaelHashMap_nogc "MichaelHashMap". + + \anchor cds_nonintrusive_MichaelHashMap_how_touse + How to use + + Suppose, you want to make \p int to \p int map for Hazard Pointer garbage collector. You should + choose suitable ordered list class that will be used as a bucket for the map; it may be \p MichaelKVList. + \code + #include // MichaelKVList for gc::HP + #include // MichaelHashMap + + // List traits based on std::less predicate + struct list_traits: public cds::container::michael_list::traits + { + typedef std::less less; + }; + + // Ordered list + typedef cds::container::MichaelKVList< cds::gc::HP, int, int, list_traits> int2int_list; + + // Map traits + struct map_traits: public cds::container::michael_map::traits + { + struct hash { + size_t operator()( int i ) const + { + return cds::opt::v::hash()( i ); + } + } + }; + + // Your map + typedef cds::container::MichaelHashMap< cds::gc::HP, int2int_list, map_traits > int2int_map; + + // Now you can use int2int_map class + + int main() + { + int2int_map theMap; + + theMap.insert( 100 ); + ... 
+ } + \endcode + + You may use option-based declaration: + \code + #include // MichaelKVList for gc::HP + #include // MichaelHashMap + + // Ordered list + typedef cds::container::MichaelKVList< cds::gc::HP, int, int, + typename cds::container::michael_list::make_traits< + cds::container::opt::less< std::less > // item comparator option + >::type + > int2int_list; + + // Map + typedef cds::container::MichaelHashMap< cds::gc::HP, int2int_list, + cds::container::michael_map::make_traits< + cc::opt::hash< cds::opt::v::hash > + > + > int2int_map; + \endcode + */ + template < + class GC, + class OrderedList, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = michael_map::traits +#else + class Traits +#endif + > + class MichaelHashMap + { + public: + typedef GC gc; ///< Garbage collector + typedef OrderedList ordered_list; ///< type of ordered list to be used as a bucket + typedef Traits traits; ///< Map traits + + typedef typename ordered_list::key_type key_type; ///< key type + typedef typename ordered_list::mapped_type mapped_type; ///< value type + typedef typename ordered_list::value_type value_type; ///< key/value pair stored in the map + typedef typename traits::allocator allocator; ///< Bucket table allocator + + typedef typename ordered_list::key_comparator key_comparator; ///< key compare functor +#ifdef CDS_DOXYGEN_INVOKED + typedef typename ordered_list::stat stat; ///< Internal statistics + /// Guarded pointer - a result of \p get() and \p extract() functions + typedef typename ordered_list::guarded_ptr guarded_ptr; +#endif + + /// Hash functor for \ref key_type and all its derivatives that you use + typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash; + typedef typename traits::item_counter item_counter; ///< Item counter type + + // GC and OrderedList::gc must be the same + static_assert( std::is_same::value, "GC and OrderedList::gc must be the same"); + + static constexpr const size_t c_nHazardPtrCount = 
ordered_list::c_nHazardPtrCount; ///< Count of hazard pointer required + + //@cond + typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat; + + typedef typename ordered_list::template rebind_traits< + cds::opt::item_counter< cds::atomicity::empty_item_counter > + , cds::opt::stat< typename bucket_stat::wrapped_stat > + >::type internal_bucket_type; + + typedef typename internal_bucket_type::guarded_ptr guarded_ptr; + typedef typename allocator::template rebind< internal_bucket_type >::other bucket_table_allocator; + typedef typename bucket_stat::stat stat; + //@endcond + + protected: + //@cond + const size_t m_nHashBitmask; + internal_bucket_type* m_Buckets; ///< bucket table + hash m_HashFunctor; ///< Hash functor + item_counter m_ItemCounter; ///< Item counter + stat m_Stat; ///< Internal statistics + //@endcond + + protected: + //@cond + /// Forward iterator + template + class iterator_type: protected cds::intrusive::michael_set::details::iterator< internal_bucket_type, IsConst > + { + typedef cds::intrusive::michael_set::details::iterator< internal_bucket_type, IsConst > base_class; + friend class MichaelHashMap; + + protected: + typedef typename base_class::bucket_ptr bucket_ptr; + typedef typename base_class::list_iterator list_iterator; + + public: + /// Value pointer type (const for const_iterator) + typedef typename cds::details::make_const_type::pointer value_ptr; + /// Value reference type (const for const_iterator) + typedef typename cds::details::make_const_type::reference value_ref; + + /// Key-value pair pointer type (const for const_iterator) + typedef typename cds::details::make_const_type::pointer pair_ptr; + /// Key-value pair reference type (const for const_iterator) + typedef typename cds::details::make_const_type::reference pair_ref; + + protected: + iterator_type( list_iterator const& it, bucket_ptr pFirst, bucket_ptr pLast ) + : base_class( it, pFirst, pLast ) + {} + + public: + /// Default 
ctor + iterator_type() + : base_class() + {} + + /// Copy ctor + iterator_type( const iterator_type& src ) + : base_class( src ) + {} + + /// Dereference operator + pair_ptr operator ->() const + { + assert( base_class::m_pCurBucket != nullptr ); + return base_class::m_itList.operator ->(); + } + + /// Dereference operator + pair_ref operator *() const + { + assert( base_class::m_pCurBucket != nullptr ); + return base_class::m_itList.operator *(); + } + + /// Pre-increment + iterator_type& operator ++() + { + base_class::operator++(); + return *this; + } + + /// Assignment operator + iterator_type& operator = (const iterator_type& src) + { + base_class::operator =(src); + return *this; + } + + /// Returns current bucket (debug function) + bucket_ptr bucket() const + { + return base_class::bucket(); + } + + /// Equality operator + template + bool operator ==(iterator_type const& i ) const + { + return base_class::operator ==( i ); + } + /// Equality operator + template + bool operator !=(iterator_type const& i ) const + { + return !( *this == i ); + } + }; + //@endcond + + public: + ///@name Forward iterators (only for debugging purpose) + //@{ + /// Forward iterator + /** + The forward iterator for Michael's map has some features: + - it has no post-increment operator + - to protect the value, the iterator contains a GC-specific guard + another guard is required locally for increment operator. + For some GC (like as \p gc::HP), a guard is a limited resource per thread, so an exception (or assertion) "no free guard" + may be thrown if the limit of guard count per thread is exceeded. + - The iterator cannot be moved across thread boundary because it contains thread-private GC's guard. + + Iterator thread safety depends on type of \p OrderedList: + - for \p MichaelKVList and \p LazyKVList: iterator guarantees safety even if you delete the item that iterator points to + because that item is guarded by hazard pointer. 
+ However, in case of concurrent deleting operations it is no guarantee that you iterate all item in the map. + Moreover, a crash is possible when you try to iterate the next element that has been deleted by concurrent thread. + Use this iterator on the concurrent container for debugging purpose only. + - for \p IterableList: iterator is thread-safe. You may use it freely in concurrent environment. + + The iterator interface: + \code + class iterator { + public: + // Default constructor + iterator(); + + // Copy construtor + iterator( iterator const& src ); + + // Dereference operator + value_type * operator ->() const; + + // Dereference operator + value_type& operator *() const; + + // Preincrement operator + iterator& operator ++(); + + // Assignment operator + iterator& operator = (iterator const& src); + + // Equality operators + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + + @note The iterator object returned by \p end(), \p cend() member functions points to \p nullptr and should not be dereferenced. + */ + typedef iterator_type< false > iterator; + + /// Const forward iterator + typedef iterator_type< true > const_iterator; + + /// Returns a forward iterator addressing the first element in a map + /** + For empty map \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( bucket_begin()->begin(), bucket_begin(), bucket_end()); + } + + /// Returns an iterator that addresses the location succeeding the last element in a map + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the map. 
+ For empty map \code begin() == end() \endcode + */ + iterator end() + { + return iterator( bucket_end()[-1].end(), bucket_end() - 1, bucket_end()); + } + + /// Returns a forward const iterator addressing the first element in a map + const_iterator begin() const + { + return get_const_begin(); + } + /// Returns a forward const iterator addressing the first element in a map + const_iterator cbegin() const + { + return get_const_begin(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a map + const_iterator end() const + { + return get_const_end(); + } + /// Returns an const iterator that addresses the location succeeding the last element in a map + const_iterator cend() const + { + return get_const_end(); + } + //@} + + public: + /// Initializes the map + /** @anchor cds_nonintrusive_MichaelHashMap_hp_ctor + The Michael's hash map is non-expandable container. You should point the average count of items \p nMaxItemCount + when you create an object. + \p nLoadFactor parameter defines average count of items per bucket and it should be small number between 1 and 10. + Remember, since the bucket implementation is an ordered list, searching in the bucket is linear [O(nLoadFactor)]. + Note, that many popular STL hash map implementation uses load factor 1. + + The ctor defines hash table size as rounding nMacItemCount / nLoadFactor up to nearest power of two. 
+ */ + MichaelHashMap( + size_t nMaxItemCount, ///< estimation of max item count in the hash map + size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket + ) + : m_nHashBitmask( michael_map::details::init_hash_bitmask( nMaxItemCount, nLoadFactor )) + , m_Buckets( bucket_table_allocator().allocate( bucket_count())) + { + for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) + construct_bucket( it ); + } + + /// Clears hash map and destroys it + ~MichaelHashMap() + { + clear(); + + for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) + it->~internal_bucket_type(); + bucket_table_allocator().deallocate( m_Buckets, bucket_count()); + } + + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the map. + + Preconditions: + - The \p key_type should be constructible from value of type \p K. + In trivial case, \p K is equal to \p key_type. + - The \p mapped_type should be default-constructible. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( K&& key ) + { + const bool bRet = bucket( key ).insert( std::forward( key )); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the map. + + Preconditions: + - The \p key_type should be constructible from \p key of type \p K. + - The \p mapped_type should be constructible from \p val of type \p V. + + Returns \p true if \p val is inserted into the map, \p false otherwise. 
+ */ + template + bool insert( K&& key, V&& val ) + { + const bool bRet = bucket( key ).insert( std::forward( key ), std::forward( val )); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the map's item inserted: + - item.first is a const reference to item's key that cannot be changed. + - item.second is a reference to item's value that may be changed. + + The user-defined functor is called only if inserting is successful. + + The \p key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + @warning For \ref cds_nonintrusive_MichaelKVList_gc "MichaelKVList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". + \ref cds_nonintrusive_LazyKVList_gc "LazyKVList" provides exclusive access to inserted item and does not require any node-level + synchronization. + */ + template + bool insert_with( K&& key, Func func ) + { + const bool bRet = bucket( key ).insert_with( std::forward( key ), func ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Updates data by \p key + /** + The operation performs inserting or replacing the element with lock-free manner. 
+ + If the \p key not found in the map, then the new item created from \p key + will be inserted into the map iff \p bAllowInsert is \p true. + (note that in this case the \ref key_type should be constructible from type \p K). + Otherwise, if \p key is found, the functor \p func is called with item found. + + The functor \p func signature depends on \p OrderedList: + + for \p MichaelKVList, \p LazyKVList + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - the item found or inserted + + The functor may change any fields of the \p item.second that is \p mapped_type. + + for \p IterableKVList + \code + void func( value_type& val, value_type * old ); + \endcode + where + - \p val - a new data constructed from \p key + - \p old - old value that will be retired. If new item has been inserted then \p old is \p nullptr. + + The functor may change non-key fields of \p val; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + @return std::pair where \p first is true if operation is successful, + \p second is true if new item has been added or \p false if the item with \p key + already exists. + + @warning For \ref cds_nonintrusive_MichaelKVList_gc "MichaelKVList" and \ref cds_nonintrusive_IterableKVList_gc "IterableKVList" + as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". + \ref cds_nonintrusive_LazyKVList_gc "LazyKVList" provides exclusive access to inserted item and does not require any node-level + synchronization. 
+ */ + template + std::pair update( K&& key, Func func, bool bAllowInsert = true ) + { + std::pair bRet = bucket( key ).update( std::forward( key ), func, bAllowInsert ); + if ( bRet.first && bRet.second ) + ++m_ItemCounter; + return bRet; + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( K const& key, Func func ) + { + std::pair bRet = bucket( key ).update( key, func, true ); + if ( bRet.first && bRet.second ) + ++m_ItemCounter; + return bRet; + } + //@endcond + + /// Inserts or updates the node (only for \p IterableKVList) + /** + The operation performs inserting or changing data with lock-free manner. + + If \p key is not found in the map, then \p key is inserted iff \p bAllowInsert is \p true. + Otherwise, the current element is changed to \p val, the old element will be retired later. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if \p val has been added or \p false if the item with that key + already in the map. + */ + template +#ifdef CDS_DOXYGEN_INVOKED + std::pair +#else + typename std::enable_if< + std::is_same< Q, Q>::value && is_iterable_list< ordered_list >::value, + std::pair + >::type +#endif + upsert( Q&& key, V&& val, bool bAllowInsert = true ) + { + std::pair bRet = bucket( val ).upsert( std::forward( key ), std::forward( val ), bAllowInsert ); + if ( bRet.second ) + ++m_ItemCounter; + return bRet; + } + + /// For key \p key inserts data of type \p mapped_type created from \p args + /** + \p key_type should be constructible from type \p K + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool emplace( K&& key, Args&&... args ) + { + const bool bRet = bucket( key ).emplace( std::forward(key), std::forward(args)... 
); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Deletes \p key from the map + /** \anchor cds_nonintrusive_MichaelMap_erase_val + + Return \p true if \p key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + const bool bRet = bucket( key ).erase( key ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelMap_erase_val "erase(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( K const& key, Less pred ) + { + const bool bRet = bucket( key ).erase_with( key, pred ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes \p key from the map + /** \anchor cds_nonintrusive_MichaelMap_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type& item) { ... } + }; + \endcode + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key, Func f ) + { + const bool bRet = bucket( key ).erase( key, f ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelMap_erase_func "erase(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. 
+ */ + template + bool erase_with( K const& key, Less pred, Func f ) + { + const bool bRet = bucket( key ).erase_with( key, pred, f ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes the item pointed by iterator \p iter (only for \p IterableList based map) + /** + Returns \p true if the operation is successful, \p false otherwise. + The function can return \p false if the node the iterator points to has already been deleted + by other thread. + + The function does not invalidate the iterator, it remains valid and can be used for further traversing. + + @note \p %erase_at() is supported only for \p %MichaelHashMap based on \p IterableList. + */ +#ifdef CDS_DOXYGEN_INVOKED + bool erase_at( iterator const& iter ) +#else + template + typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, bool >::type + erase_at( Iterator const& iter ) +#endif + { + assert( iter != end()); + assert( iter.bucket() != nullptr ); + + if ( iter.bucket()->erase_at( iter.underlying_iterator())) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Extracts the item with specified \p key + /** \anchor cds_nonintrusive_MichaelHashMap_hp_extract + The function searches an item with key equal to \p key, + unlinks it from the map, and returns it as \p guarded_ptr. + If \p key is not found the function returns an empty guarded pointer. + + Note the compare functor should accept a parameter of type \p K that may be not the same as \p key_type. + + The extracted item is freed automatically when returned \p guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::container::MichaelHashMap< your_template_args > michael_map; + michael_map theMap; + // ... + { + michael_map::guarded_ptr gp( theMap.extract( 5 )); + if ( gp ) { + // Deal with gp + // ... 
+ } + // Destructor of gp releases internal HP guard + } + \endcode + */ + template + guarded_ptr extract( K const& key ) + { + guarded_ptr gp( bucket( key ).extract( key )); + if ( gp ) + --m_ItemCounter; + return gp; + } + + /// Extracts the item using compare functor \p pred + /** + The function is an analog of \ref cds_nonintrusive_MichaelHashMap_hp_extract "extract(K const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \p key_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the map. + */ + template + guarded_ptr extract_with( K const& key, Less pred ) + { + guarded_ptr gp( bucket( key ).extract_with( key, pred )); + if ( gp ) + --m_ItemCounter; + return gp; + } + + /// Finds the key \p key + /** \anchor cds_nonintrusive_MichaelMap_find_cfunc + + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + The functor may change \p item.second. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the map's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( K const& key, Func f ) + { + return bucket( key ).find( key, f ); + } + + /// Finds \p key and returns iterator pointed to the item found (only for \p IterableList) + /** + If \p key is not found the function returns \p end(). 
+ + @note This function is supported only for map based on \p IterableList + */ + template +#ifdef CDS_DOXYGEN_INVOKED + iterator +#else + typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type +#endif + find( K const& key ) + { + auto& b = bucket( key ); + auto it = b.find( key ); + if ( it == b.end()) + return end(); + return iterator( it, &b, bucket_end()); + } + + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelMap_find_cfunc "find(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool find_with( K const& key, Less pred, Func f ) + { + return bucket( key ).find_with( key, pred, f ); + } + + /// Finds \p key using \p pred predicate and returns iterator pointed to the item found (only for \p IterableList) + /** + The function is an analog of \p find(K&) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + + If \p key is not found the function returns \p end(). + + @note This function is supported only for map based on \p IterableList + */ + template +#ifdef CDS_DOXYGEN_INVOKED + iterator +#else + typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type +#endif + find_with( K const& key, Less pred ) + { + auto& b = bucket( key ); + auto it = b.find_with( key, pred ); + if ( it == b.end()) + return end(); + return iterator( it, &b, bucket_end()); + } + + /// Checks whether the map contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. 
+ */ + template + bool contains( K const& key ) + { + return bucket( key ).contains( key ); + } + + /// Checks whether the map contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool contains( K const& key, Less pred ) + { + return bucket( key ).contains( key, pred ); + } + + /// Finds \p key and return the item found + /** \anchor cds_nonintrusive_MichaelHashMap_hp_get + The function searches the item with key equal to \p key + and returns the guarded pointer to the item found. + If \p key is not found the function returns an empty guarded pointer, + + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::container::MichaeHashMap< your_template_params > michael_map; + michael_map theMap; + // ... + { + michael_map::guarded_ptr gp( theMap.get( 5 )); + if ( gp ) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + + Note the compare functor specified for \p OrderedList template parameter + should accept a parameter of type \p K that can be not the same as \p key_type. + */ + template + guarded_ptr get( K const& key ) + { + return bucket( key ).get( key ); + } + + /// Finds \p key and return the item found + /** + The function is an analog of \ref cds_nonintrusive_MichaelHashMap_hp_get "get( K const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \p key_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the map. 
+ */ + template + guarded_ptr get_with( K const& key, Less pred ) + { + return bucket( key ).get_with( key, pred ); + } + + /// Clears the map (not atomic) + void clear() + { + for ( size_t i = 0; i < bucket_count(); ++i ) + m_Buckets[i].clear(); + m_ItemCounter.reset(); + } + + /// Checks if the map is empty + /** + @warning If you use \p atomicity::empty_item_counter in \p traits::item_counter, + the function always returns \p true. + */ + bool empty() const + { + return size() == 0; + } + + /// Returns item count in the map + /** + If you use \p atomicity::empty_item_counter in \p traits::item_counter, + the function always returns 0. + */ + size_t size() const + { + return m_ItemCounter; + } + + /// Returns the size of hash table + /** + Since \p %MichaelHashMap cannot dynamically extend the hash table size, + the value returned is an constant depending on object initialization parameters; + see \p MichaelHashMap::MichaelHashMap for explanation. + */ + size_t bucket_count() const + { + return m_nHashBitmask + 1; + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + protected: + //@cond + /// Calculates hash value of \p key + template + size_t hash_value( Q const& key ) const + { + return m_HashFunctor( key ) & m_nHashBitmask; + } + + /// Returns the bucket (ordered list) for \p key + template + internal_bucket_type& bucket( Q const& key ) + { + return m_Buckets[hash_value( key )]; + } + //@endcond + + private: + //@cond + internal_bucket_type* bucket_begin() const + { + return m_Buckets; + } + + internal_bucket_type* bucket_end() const + { + return m_Buckets + bucket_count(); + } + + const_iterator get_const_begin() const + { + return const_iterator( bucket_begin()->cbegin(), bucket_begin(), bucket_end()); + } + const_iterator get_const_end() const + { + return const_iterator( (bucket_end() - 1)->cend(), bucket_end() - 1, bucket_end()); + } + + template + typename std::enable_if< Stat::empty 
>::type construct_bucket( internal_bucket_type* b ) + { + new (b) internal_bucket_type; + } + + template + typename std::enable_if< !Stat::empty >::type construct_bucket( internal_bucket_type* b ) + { + new (b) internal_bucket_type( m_Stat ); + } + //@endcond + }; +}} // namespace cds::container + +#endif // ifndef CDSLIB_CONTAINER_MICHAEL_MAP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_map_nogc.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_map_nogc.h new file mode 100644 index 0000000..610eef3 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_map_nogc.h @@ -0,0 +1,607 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_MICHAEL_MAP_NOGC_H +#define CDSLIB_CONTAINER_MICHAEL_MAP_NOGC_H + +#include +#include +#include + +namespace cds { namespace container { + + /// Michael's hash map (template specialization for \p cds::gc::nogc) + /** @ingroup cds_nonintrusive_map + \anchor cds_nonintrusive_MichaelHashMap_nogc + + This specialization is so-called append-only when no item + reclamation may be performed. The class does not support deleting of map item. + + See @ref cds_nonintrusive_MichaelHashMap_hp "MichaelHashMap" for description of template parameters. 
+ */ + template < + class OrderedList, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = michael_map::traits +#else + class Traits +#endif + > + class MichaelHashMap + { + public: + typedef cds::gc::nogc gc; ///< No garbage collector + typedef OrderedList ordered_list; ///< type of ordered list used as a bucket implementation + typedef Traits traits; ///< Map traits + + typedef typename ordered_list::key_type key_type; ///< key type + typedef typename ordered_list::mapped_type mapped_type; ///< type of value to be stored in the map + typedef typename ordered_list::value_type value_type; ///< Pair used as the some functor's argument + + typedef typename ordered_list::key_comparator key_comparator; ///< key comparing functor + + /// Hash functor for \ref key_type and all its derivatives that you use + typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash; + typedef typename traits::item_counter item_counter; ///< Item counter type + typedef typename traits::allocator allocator; ///< Bucket table allocator + +#ifdef CDS_DOXYGEN_INVOKED + typedef typename ordered_list::stat stat; ///< Internal statistics +#endif + + // GC and OrderedList::gc must be the same + static_assert(std::is_same::value, "GC and OrderedList::gc must be the same"); + + protected: + //@cond + typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat; + + typedef typename ordered_list::template rebind_traits< + cds::opt::item_counter< cds::atomicity::empty_item_counter > + , cds::opt::stat< typename bucket_stat::wrapped_stat > + >::type internal_bucket_type; + + /// Bucket table allocator + typedef typename allocator::template rebind< internal_bucket_type >::other bucket_table_allocator; + + typedef typename internal_bucket_type::iterator bucket_iterator; + typedef typename internal_bucket_type::const_iterator bucket_const_iterator; + //@endcond + + public: + //@cond + typedef typename bucket_stat::stat stat; + //@endcond + + 
protected: + //@cond + const size_t m_nHashBitmask; + hash m_HashFunctor; ///< Hash functor + internal_bucket_type* m_Buckets; ///< bucket table + item_counter m_ItemCounter; ///< Item counter + stat m_Stat; ///< Internal statistics + //@endcond + + protected: + //@cond + template + class iterator_type: private cds::intrusive::michael_set::details::iterator< internal_bucket_type, IsConst > + { + typedef cds::intrusive::michael_set::details::iterator< internal_bucket_type, IsConst > base_class; + friend class MichaelHashMap; + + protected: + typedef typename base_class::bucket_ptr bucket_ptr; + typedef typename base_class::list_iterator list_iterator; + + public: + /// Value pointer type (const for const_iterator) + typedef typename cds::details::make_const_type::pointer value_ptr; + /// Value reference type (const for const_iterator) + typedef typename cds::details::make_const_type::reference value_ref; + + /// Key-value pair pointer type (const for const_iterator) + typedef typename cds::details::make_const_type::pointer pair_ptr; + /// Key-value pair reference type (const for const_iterator) + typedef typename cds::details::make_const_type::reference pair_ref; + + protected: + iterator_type( list_iterator const& it, bucket_ptr pFirst, bucket_ptr pLast ) + : base_class( it, pFirst, pLast ) + {} + + public: + /// Default ctor + iterator_type() + : base_class() + {} + + /// Copy ctor + iterator_type( const iterator_type& src ) + : base_class( src ) + {} + + /// Dereference operator + pair_ptr operator ->() const + { + assert( base_class::m_pCurBucket != nullptr ); + return base_class::m_itList.operator ->(); + } + + /// Dereference operator + pair_ref operator *() const + { + assert( base_class::m_pCurBucket != nullptr ); + return base_class::m_itList.operator *(); + } + + /// Pre-increment + iterator_type& operator ++() + { + base_class::operator++(); + return *this; + } + + /// Assignment operator + iterator_type& operator = (const iterator_type& src) + { + 
base_class::operator =(src); + return *this; + } + + /// Returns current bucket (debug function) + bucket_ptr bucket() const + { + return base_class::bucket(); + } + + /// Equality operator + template + bool operator ==(iterator_type const& i ) const + { + return base_class::operator ==( i ); + } + /// Equality operator + template + bool operator !=(iterator_type const& i ) const + { + return !( *this == i ); + } + }; + //@endcond + + public: + ///@name Forward iterators + //@{ + /// Forward iterator + /** + The forward iterator for Michael's map is based on \p OrderedList forward iterator and has some features: + - it has no post-increment operator + - it iterates items in unordered fashion + + The iterator interface: + \code + class iterator { + public: + // Default constructor + iterator(); + + // Copy construtor + iterator( iterator const& src ); + + // Dereference operator + value_type * operator ->() const; + + // Dereference operator + value_type& operator *() const; + + // Preincrement operator + iterator& operator ++(); + + // Assignment operator + iterator& operator = (iterator const& src); + + // Equality operators + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + */ + typedef iterator_type< false > iterator; + + /// Const forward iterator + typedef iterator_type< true > const_iterator; + + /// Returns a forward iterator addressing the first element in a set + /** + For empty set \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( m_Buckets[0].begin(), m_Buckets, m_Buckets + bucket_count()); + } + + /// Returns an iterator that addresses the location succeeding the last element in a set + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the set. 
+ For empty set \code begin() == end() \endcode + */ + iterator end() + { + return iterator( m_Buckets[bucket_count() - 1].end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count()); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator begin() const + { + return get_const_begin(); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator cbegin() const + { + return get_const_begin(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a set + const_iterator end() const + { + return get_const_end(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a set + const_iterator cend() const + { + return get_const_end(); + } + //@} + + public: + /// Initialize the map + /** + The Michael's hash map is non-expandable container. You should point the average count of items \p nMaxItemCount + when you create an object. + \p nLoadFactor parameter defines average count of items per bucket and it should be small number between 1 and 10. + Remember, since the bucket implementation is an ordered list, searching in the bucket is linear [O(nLoadFactor)]. + Note, that many popular STL hash map implementation uses load factor 1. + + The ctor defines hash table size as rounding nMacItemCount / nLoadFactor up to nearest power of two. 
+ */ + MichaelHashMap( + size_t nMaxItemCount, ///< estimation of max item count in the hash set + size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket + ) : m_nHashBitmask( michael_map::details::init_hash_bitmask( nMaxItemCount, nLoadFactor )) + , m_Buckets( bucket_table_allocator().allocate( bucket_count())) + { + for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) + construct_bucket( it ); + } + + /// Clears hash set and destroys it + ~MichaelHashMap() + { + clear(); + for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) + it->~internal_bucket_type(); + bucket_table_allocator().deallocate( m_Buckets, bucket_count()); + } + + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from value of type \p K. + In trivial case, \p K is equal to \ref key_type. + - The \ref mapped_type should be default-constructible. + + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert( const K& key ) + { + internal_bucket_type& refBucket = bucket( key ); + bucket_iterator it = refBucket.insert( key ); + + if ( it != refBucket.end()) { + ++m_ItemCounter; + return iterator( it, &refBucket, m_Buckets + bucket_count()); + } + + return end(); + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from \p key of type \p K. + - The \ref mapped_type should be constructible from \p val of type \p V. 
+ + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert( K const& key, V const& val ) + { + internal_bucket_type& refBucket = bucket( key ); + bucket_iterator it = refBucket.insert( key, val ); + + if ( it != refBucket.end()) { + ++m_ItemCounter; + return iterator( it, &refBucket, m_Buckets + bucket_count()); + } + + return end(); + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the map's item inserted. item.second is a reference to item's value that may be changed. + + The user-defined functor it is called only if the inserting is successful. + The \p key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p f functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + + @warning For \ref cds_nonintrusive_MichaelKVList_nogc "MichaelKVList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". + \ref cds_nonintrusive_LazyKVList_nogc "LazyKVList" provides exclusive access to inserted item and does not require any node-level + synchronization. 
+ */ + template + iterator insert_with( const K& key, Func func ) + { + internal_bucket_type& refBucket = bucket( key ); + bucket_iterator it = refBucket.insert_with( key, func ); + + if ( it != refBucket.end()) { + ++m_ItemCounter; + return iterator( it, &refBucket, m_Buckets + bucket_count()); + } + + return end(); + } + + /// For key \p key inserts data of type \p mapped_type created from \p args + /** + \p key_type should be constructible from type \p K + + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator emplace( K&& key, Args&&... args ) + { + internal_bucket_type& refBucket = bucket( key ); + bucket_iterator it = refBucket.emplace( std::forward(key), std::forward(args)... ); + + if ( it != refBucket.end()) { + ++m_ItemCounter; + return iterator( it, &refBucket, m_Buckets + bucket_count()); + } + + return end(); + } + + /// Updates the item + /** + If \p key is not in the map and \p bAllowInsert is \p true, the function inserts a new item. + Otherwise, the function returns an iterator pointing to the item found. + + Returns std::pair where \p first is an iterator pointing to + item found or inserted (if inserting is not allowed and \p key is not found, the iterator will be \p end()), + + \p second is true if new item has been added or \p false if the item + already is in the map. + + @warning For \ref cds_nonintrusive_MichaelKVList_nogc "MichaelKVList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". + \ref cds_nonintrusive_LazyKVList_nogc "LazyKVList" provides exclusive access to inserted item and does not require any node-level + synchronization. 
+ */ + template + std::pair update( const K& key, bool bAllowInsert = true ) + { + internal_bucket_type& refBucket = bucket( key ); + std::pair ret = refBucket.update( key, bAllowInsert ); + + if ( ret.second ) + ++m_ItemCounter; + else if ( ret.first == refBucket.end()) + return std::make_pair( end(), false ); + return std::make_pair( iterator( ret.first, &refBucket, m_Buckets + bucket_count()), ret.second ); + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( K const& key ) + { + return update( key, true ); + } + //@endcond + + /// Checks whether the map contains \p key + /** + The function searches the item with key equal to \p key + and returns an iterator pointed to item found and \ref end() otherwise + */ + template + iterator contains( K const& key ) + { + internal_bucket_type& refBucket = bucket( key ); + bucket_iterator it = refBucket.contains( key ); + + if ( it != refBucket.end()) + return iterator( it, &refBucket, m_Buckets + bucket_count()); + + return end(); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + iterator find( K const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the map contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the map. + Hash functor specified in \p Traits should accept parameters of type \p K. 
+ */ + template + iterator contains( K const& key, Less pred ) + { + internal_bucket_type& refBucket = bucket( key ); + bucket_iterator it = refBucket.contains( key, pred ); + + if ( it != refBucket.end()) + return iterator( it, &refBucket, m_Buckets + bucket_count()); + + return end(); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + iterator find_with( K const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Clears the map (not atomic) + void clear() + { + for ( size_t i = 0; i < bucket_count(); ++i ) + m_Buckets[i].clear(); + m_ItemCounter.reset(); + } + + /// Checks whether the map is empty + /** + @warning If you use \p atomicity::empty_item_counter in \p traits::item_counter, + the function always returns \p true. + */ + bool empty() const + { + return size() == 0; + } + + /// Returns item count in the map + /** + If you use \p atomicity::empty_item_counter in \p traits::item_counter, + the function always returns 0. + */ + size_t size() const + { + return m_ItemCounter; + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + /// Returns the size of hash table + /** + Since \p %MichaelHashMap cannot dynamically extend the hash table size, + the value returned is an constant depending on object initialization parameters; + see \p MichaelHashMap::MichaelHashMap for explanation. 
+ */ + size_t bucket_count() const + { + return m_nHashBitmask + 1; + } + + protected: + //@cond + /// Calculates hash value of \p key + template + size_t hash_value( K const & key ) const + { + return m_HashFunctor( key ) & m_nHashBitmask; + } + + /// Returns the bucket (ordered list) for \p key + template + internal_bucket_type& bucket( K const& key ) + { + return m_Buckets[hash_value( key )]; + } + //@endcond + + private: + //@cond + const_iterator get_const_begin() const + { + return const_iterator( const_cast(m_Buckets[0]).begin(), m_Buckets, m_Buckets + bucket_count()); + } + const_iterator get_const_end() const + { + return const_iterator( const_cast(m_Buckets[bucket_count() - 1]).end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count()); + } + + template + typename std::enable_if< Stat::empty >::type construct_bucket( internal_bucket_type* b ) + { + new (b) internal_bucket_type; + } + + template + typename std::enable_if< !Stat::empty >::type construct_bucket( internal_bucket_type* b ) + { + new (b) internal_bucket_type( m_Stat ); + } + //@endcond + }; +}} // namespace cds::container + +#endif // ifndef CDSLIB_CONTAINER_MICHAEL_MAP_NOGC_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_map_rcu.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_map_rcu.h new file mode 100644 index 0000000..d445b6e --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_map_rcu.h @@ -0,0 +1,872 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, 
this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_MICHAEL_MAP_RCU_H +#define CDSLIB_CONTAINER_MICHAEL_MAP_RCU_H + +#include +#include + +namespace cds { namespace container { + + /// Michael's hash map (template specialization for \ref cds_urcu_desc "RCU") + /** @ingroup cds_nonintrusive_map + \anchor cds_nonintrusive_MichaelHashMap_rcu + + Source: + - [2002] Maged Michael "High performance dynamic lock-free hash tables and list-based sets" + + Michael's hash table algorithm is based on lock-free ordered list and it is very simple. + The main structure is an array \p T of size \p M. Each element in \p T is basically a pointer + to a hash bucket, implemented as a singly linked list. The array of buckets cannot be dynamically expanded. + However, each bucket may contain unbounded number of items. 
+ + Template parameters are: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p OrderedList - ordered key-value list implementation used as bucket for hash map, for example, \p MichaelKVList. + The ordered list implementation specifies the \p Key and \p Value types stored in the hash-map, the reclamation + schema \p GC used by hash-map, the comparison functor for the type \p Key and other features specific for + the ordered list. + - \p Traits - map traits, default is \p michael_map::traits. + Instead of defining \p Traits struct you may use option-based syntax with \p michael_map::make_traits metafunction + + Many of the class function take a key argument of type \p K that in general is not \p key_type. + \p key_type and an argument of template type \p K must meet the following requirements: + - \p key_type should be constructible from value of type \p K; + - the hash functor should be able to calculate correct hash value from argument \p key of type \p K: + hash( key_type(key)) == hash( key ) + - values of type \p key_type and \p K should be comparable + + How to use + + The tips about how to use Michael's map see \ref cds_nonintrusive_MichaelHashMap_how_touse "MichaelHashMap". + Remember, that you should include RCU-related header file (for example, cds/urcu/general_buffered.h) + before including cds/container/michael_map_rcu.h. 
+ */ + template < + class RCU, + class OrderedList, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = michael_map::traits +#else + class Traits +#endif + > + class MichaelHashMap< cds::urcu::gc< RCU >, OrderedList, Traits > + { + public: + typedef cds::urcu::gc< RCU > gc; ///< RCU used as garbage collector + typedef OrderedList ordered_list; ///< type of ordered list used as a bucket implementation + typedef Traits traits; ///< Map traits + + typedef typename ordered_list::key_type key_type; ///< key type + typedef typename ordered_list::mapped_type mapped_type; ///< value type + typedef typename ordered_list::value_type value_type; ///< key/value pair stored in the list + typedef typename ordered_list::key_comparator key_comparator;///< key comparison functor +#ifdef CDS_DOXYGEN_INVOKED + typedef typename ordered_list::stat stat; ///< Internal statistics + typedef typename ordered_list::exempt_ptr exempt_ptr; ///< pointer to extracted node + /// Type of \p get() member function return value + typedef typename ordered_list::raw_ptr raw_ptr; + typedef typename ordered_list::rcu_lock rcu_lock; ///< RCU scoped lock +#endif + + /// Hash functor for \ref key_type and all its derivatives that you use + typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash; + typedef typename traits::item_counter item_counter; ///< Item counter type + typedef typename traits::allocator allocator; ///< Bucket table allocator + + /// Group of \p extract_xxx functions require external locking if underlying ordered list requires that + static constexpr const bool c_bExtractLockExternal = ordered_list::c_bExtractLockExternal; + + // GC and OrderedList::gc must be the same + static_assert(std::is_same::value, "GC and OrderedList::gc must be the same"); + + protected: + //@cond + typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat; + + typedef typename ordered_list::template rebind_traits< + cds::opt::item_counter< 
cds::atomicity::empty_item_counter > + , cds::opt::stat< typename bucket_stat::wrapped_stat > + >::type internal_bucket_type; + + /// Bucket table allocator + typedef typename allocator::template rebind< internal_bucket_type >::other bucket_table_allocator; + //@endcond + + public: + //@cond + typedef typename bucket_stat::stat stat; + typedef typename internal_bucket_type::exempt_ptr exempt_ptr; + typedef typename internal_bucket_type::raw_ptr raw_ptr; + typedef typename internal_bucket_type::rcu_lock rcu_lock; + //@endcond + + protected: + //@cond + const size_t m_nHashBitmask; + hash m_HashFunctor; ///< Hash functor + internal_bucket_type* m_Buckets; ///< bucket table + item_counter m_ItemCounter; ///< Item counter + stat m_Stat; ///< Internal statistics + //@endcond + + protected: + //@cond + template + class iterator_type: private cds::intrusive::michael_set::details::iterator< internal_bucket_type, IsConst > + { + typedef cds::intrusive::michael_set::details::iterator< internal_bucket_type, IsConst > base_class; + friend class MichaelHashMap; + + protected: + typedef typename base_class::bucket_ptr bucket_ptr; + typedef typename base_class::list_iterator list_iterator; + + public: + /// Value pointer type (const for const_iterator) + typedef typename cds::details::make_const_type::pointer value_ptr; + /// Value reference type (const for const_iterator) + typedef typename cds::details::make_const_type::reference value_ref; + + /// Key-value pair pointer type (const for const_iterator) + typedef typename cds::details::make_const_type::pointer pair_ptr; + /// Key-value pair reference type (const for const_iterator) + typedef typename cds::details::make_const_type::reference pair_ref; + + protected: + iterator_type( list_iterator const& it, bucket_ptr pFirst, bucket_ptr pLast ) + : base_class( it, pFirst, pLast ) + {} + + public: + /// Default ctor + iterator_type() + : base_class() + {} + + /// Copy ctor + iterator_type( const iterator_type& src ) + : 
base_class( src ) + {} + + /// Dereference operator + pair_ptr operator ->() const + { + assert( base_class::m_pCurBucket != nullptr ); + return base_class::m_itList.operator ->(); + } + + /// Dereference operator + pair_ref operator *() const + { + assert( base_class::m_pCurBucket != nullptr ); + return base_class::m_itList.operator *(); + } + + /// Pre-increment + iterator_type& operator ++() + { + base_class::operator++(); + return *this; + } + + /// Assignment operator + iterator_type& operator = (const iterator_type& src) + { + base_class::operator =(src); + return *this; + } + + /// Returns current bucket (debug function) + bucket_ptr bucket() const + { + return base_class::bucket(); + } + + /// Equality operator + template + bool operator ==(iterator_type const& i ) + { + return base_class::operator ==( i ); + } + /// Equality operator + template + bool operator !=(iterator_type const& i ) + { + return !( *this == i ); + } + }; + //@endcond + + public: + ///@name Forward iterators (thread-safe under RCU lock) + //@{ + + /// Forward iterator + /** + The forward iterator for Michael's map is based on \p OrderedList forward iterator and has some features: + - it has no post-increment operator + - it iterates items in unordered fashion + + You may safely use iterators in multi-threaded environment only under RCU lock. + Otherwise, a crash is possible if another thread deletes the element the iterator points to. 
+ + The iterator interface: + \code + class iterator { + public: + // Default constructor + iterator(); + + // Copy construtor + iterator( iterator const& src ); + + // Dereference operator + value_type * operator ->() const; + + // Dereference operator + value_type& operator *() const; + + // Preincrement operator + iterator& operator ++(); + + // Assignment operator + iterator& operator = (iterator const& src); + + // Equality operators + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + */ + typedef iterator_type< false > iterator; + + /// Const forward iterator + typedef iterator_type< true > const_iterator; + + /// Returns a forward iterator addressing the first element in a map + /** + For empty map \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( m_Buckets[0].begin(), m_Buckets, m_Buckets + bucket_count()); + } + + /// Returns an iterator that addresses the location succeeding the last element in a map + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the map. 
+ For empty map \code begin() == end() \endcode + */ + iterator end() + { + return iterator( m_Buckets[bucket_count() - 1].end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count()); + } + + /// Returns a forward const iterator addressing the first element in a map + const_iterator begin() const + { + return get_const_begin(); + } + + /// Returns a forward const iterator addressing the first element in a map + const_iterator cbegin() const + { + return get_const_begin(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a map + const_iterator end() const + { + return get_const_end(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a map + const_iterator cend() const + { + return get_const_end(); + } + //@} + + public: + /// Initializes the map + /** + The Michael's hash map is non-expandable container. You should point the average count of items \p nMaxItemCount + when you create an object. + \p nLoadFactor parameter defines average count of items per bucket and it should be small number between 1 and 10. + Remember, since the bucket implementation is an ordered list, searching in the bucket is linear [O(nLoadFactor)]. + Note, that many popular STL hash map implementation uses load factor 1. + + The ctor defines hash table size as rounding nMacItemCount / nLoadFactor up to nearest power of two. 
+ */ + MichaelHashMap( + size_t nMaxItemCount, ///< estimation of max item count in the hash map + size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket + ) : m_nHashBitmask( michael_map::details::init_hash_bitmask( nMaxItemCount, nLoadFactor )) + , m_Buckets( bucket_table_allocator().allocate( bucket_count())) + { + for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) + construct_bucket( it ); + } + + /// Clears hash map and destroys it + ~MichaelHashMap() + { + clear(); + for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) + it->~internal_bucket_type(); + bucket_table_allocator().deallocate( m_Buckets, bucket_count()); + } + + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the map. + + Preconditions: + - The \p key_type should be constructible from value of type \p K. + In trivial case, \p K is equal to \ref key_type. + - The \p mapped_type should be default-constructible. + + The function applies RCU lock internally. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( const K& key ) + { + const bool bRet = bucket( key ).insert( key ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the map. + + Preconditions: + - The \p key_type should be constructible from \p key of type \p K. + - The \p mapped_type should be constructible from \p val of type \p V. + + The function applies RCU lock internally. + + Returns \p true if \p val is inserted into the map, \p false otherwise. 
+ */ + template + bool insert( K const& key, V const& val ) + { + const bool bRet = bucket( key ).insert( key, val ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the map's item inserted: + - item.first is a const reference to item's key that cannot be changed. + - item.second is a reference to item's value that may be changed. + + The user-defined functor is called only if inserting is successful. + + The key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + The function applies RCU lock internally. + + @warning For \ref cds_nonintrusive_MichaelKVList_rcu "MichaelKVList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". + \ref cds_nonintrusive_LazyKVList_rcu "LazyKVList" provides exclusive access to inserted item and does not require any node-level + synchronization. + */ + template + bool insert_with( const K& key, Func func ) + { + const bool bRet = bucket( key ).insert_with( key, func ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Updates data by \p key + /** + The operation performs inserting or replacing the element with lock-free manner. 
+ + If the \p key not found in the map, then the new item created from \p key + will be inserted into the map iff \p bAllowInsert is \p true. + (note that in this case the \ref key_type should be constructible from type \p K). + Otherwise, if \p key is found, the functor \p func is called with item found. + + The functor \p Func signature is: + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - the item found or inserted + + The functor may change any fields of the \p item.second that is \p mapped_type. + + The function applies RCU lock internally. + + Returns std::pair where \p first is true if operation is successful, + \p second is true if new item has been added or \p false if the item with \p key + already exists. + + @warning For \ref cds_nonintrusive_MichaelKVList_rcu "MichaelKVList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". + \ref cds_nonintrusive_LazyKVList_rcu "LazyKVList" provides exclusive access to inserted item and does not require any node-level + synchronization. + */ + template + std::pair update( K const& key, Func func, bool bAllowInsert = true ) + { + std::pair bRet = bucket( key ).update( key, func, bAllowInsert ); + if ( bRet.second ) + ++m_ItemCounter; + return bRet; + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( K const& key, Func func ) + { + return update( key, func, true ); + } + //@endcond + + /// For key \p key inserts data of type \p mapped_type created from \p args + /** + \p key_type should be constructible from type \p K + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool emplace( K&& key, Args&&... args ) + { + const bool bRet = bucket( key ).emplace( std::forward(key), std::forward(args)... 
); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Deletes \p key from the map + /** \anchor cds_nonintrusive_MichaelMap_rcu_erase_val + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if \p key is found and deleted, \p false otherwise + */ + template + bool erase( const K& key ) + { + const bool bRet = bucket( key ).erase( key ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelMap_rcu_erase_val "erase(K const&)" + but \p pred is used for key comparing. + \p Less predicate has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( const K& key, Less pred ) + { + const bool bRet = bucket( key ).erase_with( key, pred ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes \p key from the map + /** \anchor cds_nonintrusive_MichaelMap_rcu_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type& item) { ... } + }; + \endcode + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( const K& key, Func f ) + { + const bool bRet = bucket( key ).erase( key, f ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelMap_rcu_erase_func "erase(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. 
+ \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( const K& key, Less pred, Func f ) + { + const bool bRet = bucket( key ).erase_with( key, pred, f ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Extracts an item from the map + /** \anchor cds_nonintrusive_MichaelHashMap_rcu_extract + The function searches an item with key equal to \p key, + unlinks it from the map, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. + If the item is not found the function return an empty \p exempt_ptr. + + The function just excludes the key from the map and returns a pointer to item found. + Depends on \p ordered_list you should or should not lock RCU before calling of this function: + - for the set based on \ref cds_nonintrusive_MichaelList_rcu "MichaelList" RCU should not be locked + - for the set based on \ref cds_nonintrusive_LazyList_rcu "LazyList" RCU should be locked + See ordered list implementation for details. + + \code + #include + #include + #include + + typedef cds::urcu::gc< general_buffered<> > rcu; + typedef cds::container::MichaelKVList< rcu, int, Foo > rcu_michael_list; + typedef cds::container::MichaelHashMap< rcu, rcu_michael_list, foo_traits > rcu_michael_map; + + rcu_michael_map theMap; + // ... + + rcu_michael_map::exempt_ptr p; + + // For MichaelList we should not lock RCU + + // Note that you must not delete the item found inside the RCU lock + p = theMap.extract( 10 ); + if ( p ) { + // do something with p + ... 
+ } + + // We may safely release p here + // release() passes the pointer to RCU reclamation cycle + p.release(); + \endcode + */ + template + exempt_ptr extract( K const& key ) + { + exempt_ptr p = bucket( key ).extract( key ); + if ( p ) + --m_ItemCounter; + return p; + } + + /// Extracts an item from the map using \p pred predicate for searching + /** + The function is an analog of \p extract(K const&) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the map. + */ + template + exempt_ptr extract_with( K const& key, Less pred ) + { + exempt_ptr p = bucket( key ).extract_with( key, pred ); + if ( p ) + --m_ItemCounter; + return p; + } + + /// Finds the key \p key + /** \anchor cds_nonintrusive_MichaelMap_rcu_find_cfunc + + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + The functor may change \p item.second. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the map's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The function applies RCU lock internally. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( K const& key, Func f ) + { + return bucket( key ).find( key, f ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichaelMap_rcu_find_cfunc "find(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. 
+ \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool find_with( K const& key, Less pred, Func f ) + { + return bucket( key ).find_with( key, pred, f ); + } + + /// Checks whether the map contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + The function applies RCU lock internally. + */ + template + bool contains( K const& key ) + { + return bucket( key ).contains( key ); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( K const& key ) + { + return bucket( key ).contains( key ); + } + //@endcond + + /// Checks whether the map contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool contains( K const& key, Less pred ) + { + return bucket( key ).contains( key, pred ); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( K const& key, Less pred ) + { + return bucket( key ).contains( key, pred ); + } + //@endcond + + /// Finds \p key and return the item found + /** \anchor cds_nonintrusive_MichaelHashMap_rcu_get + The function searches the item with key equal to \p key and returns the pointer to item found. + If \p key is not found it returns \p nullptr. + Note the type of returned value depends on underlying \p ordered_list. + For details, see documentation of ordered list you use. + + Note the compare functor should accept a parameter of type \p K that can be not the same as \p key_type. + + RCU should be locked before call of this function. 
+ Returned item is valid only while RCU is locked: + \code + typedef cds::container::MichaelHashMap< your_template_parameters > hash_map; + hash_map theMap; + // ... + typename hash_map::raw_ptr gp; + { + // Lock RCU + hash_map::rcu_lock lock; + + gp = theMap.get( 5 ); + if ( gp ) { + // Deal with gp + //... + } + // Unlock RCU by rcu_lock destructor + // gp can be reclaimed at any time after RCU has been unlocked + } + \endcode + */ + template + raw_ptr get( K const& key ) + { + return bucket( key ).get( key ); + } + + /// Finds \p key and return the item found + /** + The function is an analog of \ref cds_nonintrusive_MichaelHashMap_rcu_get "get(K const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the map. + */ + template + raw_ptr get_with( K const& key, Less pred ) + { + return bucket( key ).get_with( key, pred ); + } + + /// Clears the map (not atomic) + /** + The function erases all items from the map. + + The function is not atomic. It cleans up each bucket and then resets the item counter to zero. + If there are a thread that performs insertion while \p clear is working the result is undefined in general case: + empty() may return \p true but the map may contain item(s). + Therefore, \p clear may be used only for debugging purposes. + + RCU \p synchronize method can be called. RCU should not be locked. + */ + void clear() + { + for ( size_t i = 0; i < bucket_count(); ++i ) + m_Buckets[i].clear(); + m_ItemCounter.reset(); + } + + /// Checks if the map is empty + /** + @warning If you use \p atomicity::empty_item_counter in \p traits::item_counter, + the function always returns \p true. 
+ */ + bool empty() const + { + return size() == 0; + } + + /// Returns item count in the map + /** + @warning If you use \p atomicity::empty_item_counter in \p traits::item_counter, + the function always returns 0. + */ + size_t size() const + { + return m_ItemCounter; + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + /// Returns the size of hash table + /** + Since \p %MichaelHashMap cannot dynamically extend the hash table size, + the value returned is an constant depending on object initialization parameters; + see \p MichaelHashMap::MichaelHashMap for explanation. + */ + size_t bucket_count() const + { + return m_nHashBitmask + 1; + } + + protected: + //@cond + /// Calculates hash value of \p key + template + size_t hash_value( Q const& key ) const + { + return m_HashFunctor( key ) & m_nHashBitmask; + } + + /// Returns the bucket (ordered list) for \p key + template + internal_bucket_type& bucket( Q const& key ) + { + return m_Buckets[hash_value( key )]; + } + template + internal_bucket_type const& bucket( Q const& key ) const + { + return m_Buckets[hash_value( key )]; + } + //@endcond + private: + //@cond + const_iterator get_const_begin() const + { + return const_iterator( const_cast(m_Buckets[0]).begin(), m_Buckets, m_Buckets + bucket_count()); + } + const_iterator get_const_end() const + { + return const_iterator( const_cast(m_Buckets[bucket_count() - 1]).end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count()); + } + + template + typename std::enable_if< Stat::empty >::type construct_bucket( internal_bucket_type* bkt ) + { + new (bkt) internal_bucket_type; + } + + template + typename std::enable_if< !Stat::empty >::type construct_bucket( internal_bucket_type* bkt ) + { + new (bkt) internal_bucket_type( m_Stat ); + } + //@endcond + }; +}} // namespace cds::container + +#endif // ifndef CDSLIB_CONTAINER_MICHAEL_MAP_RCU_H diff --git 
a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_set.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_set.h new file mode 100644 index 0000000..1cf0cf5 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_set.h @@ -0,0 +1,994 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_CONTAINER_MICHAEL_SET_H +#define CDSLIB_CONTAINER_MICHAEL_SET_H + +#include +#include +#include + +namespace cds { namespace container { + + /// Michael's hash set + /** @ingroup cds_nonintrusive_set + \anchor cds_nonintrusive_MichaelHashSet_hp + + Source: + - [2002] Maged Michael "High performance dynamic lock-free hash tables and list-based sets" + + Michael's hash table algorithm is based on lock-free ordered list and it is very simple. + The main structure is an array \p T of size \p M. Each element in \p T is basically a pointer + to a hash bucket, implemented as a singly linked list. The array of buckets cannot be dynamically expanded. + However, each bucket may contain unbounded number of items. + + Template parameters are: + - \p GC - Garbage collector used. You may use any \ref cds_garbage_collector "Garbage collector" + from the \p libcds library. + Note the \p GC must be the same as the \p GC used for \p OrderedList + - \p OrderedList - ordered list implementation used as bucket for hash set, possible implementations: + \p MichaelList, \p LazyList, \p IterableList. + The ordered list implementation specifies the type \p T to be stored in the hash-set, + the comparing functor for the type \p T and other features specific for the ordered list. + - \p Traits - set traits, default is \p michael_set::traits. + Instead of defining \p Traits struct you may use option-based syntax with \p michael_set::make_traits metafunction. + + There are the specializations: + - for \ref cds_urcu_desc "RCU" - declared in cd/container/michael_set_rcu.h, + see \ref cds_nonintrusive_MichaelHashSet_rcu "MichaelHashSet". + - for \ref cds::gc::nogc declared in cds/container/michael_set_nogc.h, + see \ref cds_nonintrusive_MichaelHashSet_nogc "MichaelHashSet". + + \anchor cds_nonintrusive_MichaelHashSet_hash_functor + Hash functor + + Some member functions of Michael's hash set accept the key parameter of type \p Q which differs from node type \p value_type. 
+ It is expected that type \p Q contains full key of node type \p value_type, and if keys of type \p Q and \p value_type + are equal the hash values of these keys must be equal too. + + The hash functor \p Traits::hash should accept parameters of both type: + \code + // Our node type + struct Foo { + std::string key_; // key field + // ... other fields + }; + + // Hash functor + struct fooHash { + size_t operator()( const std::string& s ) const + { + return std::hash( s ); + } + + size_t operator()( const Foo& f ) const + { + return (*this)( f.key_ ); + } + }; + \endcode + + How to use + + Suppose, we have the following type \p Foo that we want to store in our \p %MichaelHashSet: + \code + struct Foo { + int nKey; // key field + int nVal; // value field + }; + \endcode + + To use \p %MichaelHashSet for \p Foo values, you should first choose suitable ordered list class + that will be used as a bucket for the set. We will use \p gc::DHP reclamation schema and + \p MichaelList as a bucket type. Also, for ordered list we should develop a comparator for our \p Foo + struct. + \code + #include + #include + + namespace cc = cds::container; + + // Foo comparator + struct Foo_cmp { + int operator ()(Foo const& v1, Foo const& v2 ) const + { + if ( std::less( v1.nKey, v2.nKey )) + return -1; + return std::less(v2.nKey, v1.nKey) ? 1 : 0; + } + }; + + // Our ordered list + typedef cc::MichaelList< cds::gc::DHP, Foo, + typename cc::michael_list::make_traits< + cc::opt::compare< Foo_cmp > // item comparator option + >::type + > bucket_list; + + // Hash functor for Foo + struct foo_hash { + size_t operator ()( int i ) const + { + return std::hash( i ); + } + size_t operator()( Foo const& i ) const + { + return std::hash( i.nKey ); + } + }; + + // Declare set type. + // Note that \p GC template parameter of ordered list must be equal \p GC for the set. 
+ typedef cc::MichaelHashSet< cds::gc::DHP, bucket_list, + cc::michael_set::make_traits< + cc::opt::hash< foo_hash > + >::type + > foo_set; + + // Set variable + foo_set fooSet; + \endcode + */ + template < + class GC, + class OrderedList, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = michael_set::traits +#else + class Traits +#endif + > + class MichaelHashSet + { + public: + typedef GC gc; ///< Garbage collector + typedef OrderedList ordered_list; ///< type of ordered list used as a bucket implementation + typedef Traits traits; ///< Set traits + + typedef typename ordered_list::value_type value_type; ///< type of value to be stored in the list + typedef typename ordered_list::key_comparator key_comparator; ///< key comparison functor +#ifdef CDS_DOXYGEN_INVOKED + typedef typename ordered_list::stat stat; ///< Internal statistics +#endif + + /// Hash functor for \ref value_type and all its derivatives that you use + typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash; + typedef typename traits::item_counter item_counter; ///< Item counter type + typedef typename traits::allocator allocator; ///< Bucket table allocator + + static constexpr const size_t c_nHazardPtrCount = ordered_list::c_nHazardPtrCount; ///< Count of hazard pointer required + + // GC and OrderedList::gc must be the same + static_assert( std::is_same::value, "GC and OrderedList::gc must be the same"); + + //@cond + typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat; + + typedef typename ordered_list::template rebind_traits< + cds::opt::item_counter< cds::atomicity::empty_item_counter > + , cds::opt::stat< typename bucket_stat::wrapped_stat > + >::type internal_bucket_type; + + /// Bucket table allocator + typedef typename allocator::template rebind< internal_bucket_type >::other bucket_table_allocator; + + typedef typename bucket_stat::stat stat; + //@endcond + + /// Guarded pointer - a result of \p get() and \p 
extract() functions + typedef typename internal_bucket_type::guarded_ptr guarded_ptr; + + protected: + //@cond + size_t const m_nHashBitmask; + internal_bucket_type * m_Buckets; ///< bucket table + hash m_HashFunctor; ///< Hash functor + item_counter m_ItemCounter; ///< Item counter + stat m_Stat; ///< Internal statistics + //@endcond + + public: + ///@name Forward iterators + //@{ + /// Forward iterator + /** + The forward iterator for Michael's set has some features: + - it has no post-increment operator + - to protect the value, the iterator contains a GC-specific guard + another guard is required locally for increment operator. + For some GC (like as \p gc::HP), a guard is a limited resource per thread, so an exception (or assertion) "no free guard" + may be thrown if the limit of guard count per thread is exceeded. + - The iterator cannot be moved across thread boundary because it contains thread-private GC's guard. + + Iterator thread safety depends on type of \p OrderedList: + - for \p MichaelList and \p LazyList: iterator guarantees safety even if you delete the item that iterator points to + because that item is guarded by hazard pointer. + However, in case of concurrent deleting operations it is no guarantee that you iterate all item in the set. + Moreover, a crash is possible when you try to iterate the next element that has been deleted by concurrent thread. + Use this iterator on the concurrent container for debugging purpose only. + - for \p IterableList: iterator is thread-safe. You may use it freely in concurrent environment. 
+ + The iterator interface: + \code + class iterator { + public: + // Default constructor + iterator(); + + // Copy construtor + iterator( iterator const& src ); + + // Dereference operator + value_type * operator ->() const; + + // Dereference operator + value_type& operator *() const; + + // Preincrement operator + iterator& operator ++(); + + // Assignment operator + iterator& operator = (iterator const& src); + + // Equality operators + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + */ + + /// Forward iterator + typedef michael_set::details::iterator< internal_bucket_type, false > iterator; + + /// Const forward iterator + typedef michael_set::details::iterator< internal_bucket_type, true > const_iterator; + + /// Returns a forward iterator addressing the first element in a set + /** + For empty set \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( bucket_begin()->begin(), bucket_begin(), bucket_end()); + } + + /// Returns an iterator that addresses the location succeeding the last element in a set + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the set. 
+ For empty set \code begin() == end() \endcode + */ + iterator end() + { + return iterator( bucket_end()[-1].end(), bucket_end() - 1, bucket_end()); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator begin() const + { + return get_const_begin(); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator cbegin() const + { + return get_const_begin(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a set + const_iterator end() const + { + return get_const_end(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a set + const_iterator cend() const + { + return get_const_end(); + } + //@} + + public: + /// Initialize hash set + /** + The Michael's hash set is non-expandable container. You should point the average count of items \p nMaxItemCount + when you create an object. + \p nLoadFactor parameter defines average count of items per bucket and it should be small number between 1 and 10. + Remember, since the bucket implementation is an ordered list, searching in the bucket is linear [O(nLoadFactor)]. + + The ctor defines hash table size as rounding nMaxItemCount / nLoadFactor up to nearest power of two. 
+ */ + MichaelHashSet( + size_t nMaxItemCount, ///< estimation of max item count in the hash set + size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket + ) : m_nHashBitmask( michael_set::details::init_hash_bitmask( nMaxItemCount, nLoadFactor )) + , m_Buckets( bucket_table_allocator().allocate( bucket_count())) + { + for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) + construct_bucket( it ); + } + + /// Clears hash set and destroys it + ~MichaelHashSet() + { + clear(); + + for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) + it->~internal_bucket_type(); + bucket_table_allocator().deallocate( m_Buckets, bucket_count()); + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the set. + + The type \p Q should contain as minimum the complete key for the node. + The object of \ref value_type should be constructible from a value of type \p Q. + In trivial case, \p Q is equal to \ref value_type. + + Returns \p true if \p val is inserted into the set, \p false otherwise. + */ + template + bool insert( Q&& val ) + { + const bool bRet = bucket( val ).insert( std::forward( val )); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Inserts new node + /** + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-fields of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. + The user-defined functor is called only if the inserting is success. + + @warning For \ref cds_nonintrusive_MichaelList_gc "MichaelList" and \ref cds_nonintrusive_IterableList_gc "IterableList" + as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". 
+ @ref cds_nonintrusive_LazyList_gc "LazyList" provides exclusive access to inserted item and does not require any node-level + synchronization. + */ + template + bool insert( Q&& val, Func f ) + { + const bool bRet = bucket( val ).insert( std::forward( val ), f ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Updates the element + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val not found in the set, then \p val is inserted iff \p bAllowInsert is \p true. + Otherwise, the functor \p func is called with item found. + + The functor \p func signature depends of \p OrderedList: + + for \p MichaelList, \p LazyList + \code + struct functor { + void operator()( bool bNew, value_type& item, Q const& val ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p %update() function + + The functor may change non-key fields of the \p item. + + for \p IterableList + \code + void func( value_type& val, value_type * old ); + \endcode + where + - \p val - a new data constructed from \p key + - \p old - old value that will be retired. If new item has been inserted then \p old is \p nullptr. + + @return std::pair where \p first is \p true if operation is successful, + \p second is \p true if new item has been added or \p false if the item with \p key + already is in the set. + + @warning For \ref cds_intrusive_MichaelList_hp "MichaelList" and \ref cds_nonintrusive_IterableList_gc "IterableList" + as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". + \ref cds_intrusive_LazyList_hp "LazyList" provides exclusive access to inserted item and does not require any node-level + synchronization. 
+ */ + template + std::pair update( Q&& val, Func func, bool bAllowUpdate = true ) + { + std::pair bRet = bucket( val ).update( std::forward( val ), func, bAllowUpdate ); + if ( bRet.second ) + ++m_ItemCounter; + return bRet; + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( const Q& val, Func func ) + { + return update( val, func, true ); + } + //@endcond + + /// Inserts or updates the node (only for \p IterableList) + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val is not found in the set, then \p val is inserted iff \p bAllowInsert is \p true. + Otherwise, the current element is changed to \p val, the old element will be retired later. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if \p val has been added or \p false if the item with that key + already in the set. + */ + template +#ifdef CDS_DOXYGEN_INVOKED + std::pair +#else + typename std::enable_if< + std::is_same< Q, Q>::value && is_iterable_list< ordered_list >::value, + std::pair + >::type +#endif + upsert( Q&& val, bool bAllowInsert = true ) + { + std::pair bRet = bucket( val ).upsert( std::forward( val ), bAllowInsert ); + if ( bRet.second ) + ++m_ItemCounter; + return bRet; + } + + /// Inserts data of type \p value_type constructed from \p args + /** + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool emplace( Args&&... args ) + { + bool bRet = bucket_emplace( std::forward(args)... ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Deletes \p key from the set + /** + Since the key of MichaelHashSet's item type \p value_type is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The set item comparator should be able to compare the type \p value_type + and the type \p Q. + + Return \p true if key is found and deleted, \p false otherwise. 
+ */ + template + bool erase( Q const& key ) + { + const bool bRet = bucket( key ).erase( key ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \p erase(Q const&) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred ) + { + const bool bRet = bucket( key ).erase_with( key, pred ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes \p key from the set + /** + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type& item); + }; + \endcode + where \p item - the item found. + + Since the key of %MichaelHashSet's \p value_type is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The list item comparator should be able to compare the type \p T of list item + and the type \p Q. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key, Func f ) + { + const bool bRet = bucket( key ).erase( key, f ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \p erase(Q const&, Func) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. 
+ */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + const bool bRet = bucket( key ).erase_with( key, pred, f ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes the item pointed by iterator \p iter (only for \p IterableList based set) + /** + Returns \p true if the operation is successful, \p false otherwise. + The function can return \p false if the node the iterator points to has already been deleted + by other thread. + + The function does not invalidate the iterator, it remains valid and can be used for further traversing. + + @note \p %erase_at() is supported only for \p %MichaelHashSet based on \p IterableList. + */ +#ifdef CDS_DOXYGEN_INVOKED + bool erase_at( iterator const& iter ) +#else + template + typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, bool >::type + erase_at( Iterator const& iter ) +#endif + { + assert( iter != end()); + assert( iter.bucket() != nullptr ); + + if ( iter.bucket()->erase_at( iter.underlying_iterator())) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Extracts the item with specified \p key + /** \anchor cds_nonintrusive_MichaelHashSet_hp_extract + The function searches an item with key equal to \p key, + unlinks it from the set, and returns it as \p guarded_ptr. + If \p key is not found the function returns an empty guadd pointer. + + Note the compare functor should accept a parameter of type \p Q that may be not the same as \p value_type. + + The extracted item is freed automatically when returned \p guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::container::MichaelHashSet< your_template_args > michael_set; + michael_set theSet; + // ... + { + typename michael_set::guarded_ptr gp( theSet.extract( 5 )); + if ( gp ) { + // Deal with gp + // ... 
+ } + // Destructor of gp releases internal HP guard + } + \endcode + */ + template + guarded_ptr extract( Q const& key ) + { + guarded_ptr gp( bucket( key ).extract( key )); + if ( gp ) + --m_ItemCounter; + return gp; + } + + /// Extracts the item using compare functor \p pred + /** + The function is an analog of \p extract(Q const&) + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments + of type \p value_type and \p Q in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + guarded_ptr extract_with( Q const& key, Less pred ) + { + guarded_ptr gp( bucket( key ).extract_with( key, pred )); + if ( gp ) + --m_ItemCounter; + return gp; + } + + /// Finds the key \p key + /** + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. + + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that may be not the same as \p value_type. + + The function returns \p true if \p key is found, \p false otherwise. 
+ */ + template + bool find( Q& key, Func f ) + { + return bucket( key ).find( key, f ); + } + //@cond + template + bool find( Q const& key, Func f ) + { + return bucket( key ).find( key, f ); + } + //@endcond + + /// Finds \p key and returns iterator pointed to the item found (only for \p IterableList) + /** + If \p key is not found the function returns \p end(). + + @note This function is supported only for the set based on \p IterableList + */ + template +#ifdef CDS_DOXYGEN_INVOKED + iterator +#else + typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type +#endif + find( Q& key ) + { + internal_bucket_type& b = bucket( key ); + typename internal_bucket_type::iterator it = b.find( key ); + if ( it == b.end()) + return end(); + return iterator( it, &b, bucket_end()); + } + //@cond + template + typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type + find( Q const& key ) + { + internal_bucket_type& b = bucket( key ); + typename internal_bucket_type::iterator it = b.find( key ); + if ( it == b.end()) + return end(); + return iterator( it, &b, bucket_end()); + } + //@endcond + + /// Finds the key \p key using \p pred predicate for searching + /** + The function is an analog of \p find(Q&, Func) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q& key, Less pred, Func f ) + { + return bucket( key ).find_with( key, pred, f ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) + { + return bucket( key ).find_with( key, pred, f ); + } + //@endcond + + /// Finds \p key using \p pred predicate and returns iterator pointed to the item found (only for \p IterableList) + /** + The function is an analog of \p find(Q&) but \p pred is used for key comparing. 
+ \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + + If \p key is not found the function returns \p end(). + + @note This function is supported only for the set based on \p IterableList + */ + template +#ifdef CDS_DOXYGEN_INVOKED + iterator +#else + typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type +#endif + find_with( Q& key, Less pred ) + { + internal_bucket_type& b = bucket( key ); + typename internal_bucket_type::iterator it = b.find_with( key, pred ); + if ( it == b.end()) + return end(); + return iterator( it, &b, bucket_end()); + } + //@cond + template + typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type + find_with( Q const& key, Less pred ) + { + internal_bucket_type& b = bucket( key ); + typename internal_bucket_type::iterator it = b.find_with( key, pred ); + if ( it == b.end()) + return end(); + return iterator( it, &b, bucket_end()); + } + //@endcond + + /// Checks whether the set contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if the key is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool contains( Q const& key ) + { + return bucket( key ).contains( key ); + } + + /// Checks whether the set contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. 
+ */ + template + bool contains( Q const& key, Less pred ) + { + return bucket( key ).contains( key, pred ); + } + + /// Finds the key \p key and return the item found + /** \anchor cds_nonintrusive_MichaelHashSet_hp_get + The function searches the item with key equal to \p key + and returns the guarded pointer to the item found. + If \p key is not found the functin returns an empty guarded pointer. + + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::container::MichaeHashSet< your_template_params > michael_set; + michael_set theSet; + // ... + { + typename michael_set::guarded_ptr gp( theSet.get( 5 )); + if ( gp ) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + + Note the compare functor specified for \p OrderedList template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + guarded_ptr get( Q const& key ) + { + return bucket( key ).get( key ); + } + + /// Finds the key \p key and return the item found + /** + The function is an analog of \ref cds_nonintrusive_MichaelHashSet_hp_get "get( Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + guarded_ptr get_with( Q const& key, Less pred ) + { + return bucket( key ).get_with( key, pred ); + } + + /// Clears the set (non-atomic) + /** + The function erases all items from the set. + + The function is not atomic. It cleans up each bucket and then resets the item counter to zero. + If there are a thread that performs insertion while \p clear is working the result is undefined in general case: + empty() may return \p true but the set may contain item(s). 
+ Therefore, \p clear may be used only for debugging purposes. + */ + void clear() + { + for ( size_t i = 0; i < bucket_count(); ++i ) + m_Buckets[i].clear(); + m_ItemCounter.reset(); + } + + /// Checks if the set is empty + /** + @warning If you use \p atomicity::empty_item_counter in \p traits::item_counter, + the function always returns \p true. + */ + bool empty() const + { + return size() == 0; + } + + /// Returns item count in the set + /** + @warning If you use \p atomicity::empty_item_counter in \p traits::item_counter, + the function always returns 0. + */ + size_t size() const + { + return m_ItemCounter; + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + /// Returns the size of hash table + /** + Since MichaelHashSet cannot dynamically extend the hash table size, + the value returned is an constant depending on object initialization parameters; + see MichaelHashSet::MichaelHashSet for explanation. + */ + size_t bucket_count() const + { + return m_nHashBitmask + 1; + } + + protected: + //@cond + /// Calculates hash value of \p key + template + size_t hash_value( Q const& key ) const + { + return m_HashFunctor( key ) & m_nHashBitmask; + } + + /// Returns the bucket (ordered list) for \p key + template + internal_bucket_type& bucket( Q const& key ) + { + return m_Buckets[ hash_value( key ) ]; + } + template + internal_bucket_type const& bucket( Q const& key ) const + { + return m_Buckets[hash_value( key )]; + } + //@endcond + + private: + //@cond + internal_bucket_type* bucket_begin() const + { + return m_Buckets; + } + + internal_bucket_type* bucket_end() const + { + return m_Buckets + bucket_count(); + } + + const_iterator get_const_begin() const + { + return const_iterator( bucket_begin()->cbegin(), bucket_begin(), bucket_end()); + } + const_iterator get_const_end() const + { + return const_iterator(( bucket_end() -1 )->cend(), bucket_end() - 1, bucket_end()); + } + + template + 
typename std::enable_if< Stat::empty >::type construct_bucket( internal_bucket_type* b ) + { + new (b) internal_bucket_type; + } + + template + typename std::enable_if< !Stat::empty >::type construct_bucket( internal_bucket_type* b ) + { + new (b) internal_bucket_type( m_Stat ); + } + + template + typename std::enable_if< !is_iterable_list::value, bool>::type + bucket_emplace( Args&&... args ) + { + class list_accessor: public List + { + public: + using List::alloc_node; + using List::node_to_value; + using List::insert_node; + }; + + auto pNode = list_accessor::alloc_node( std::forward( args )... ); + assert( pNode != nullptr ); + return static_cast( bucket( list_accessor::node_to_value( *pNode ))).insert_node( pNode ); + } + + template + typename std::enable_if< is_iterable_list::value, bool>::type + bucket_emplace( Args&&... args ) + { + class list_accessor: public List + { + public: + using List::alloc_data; + using List::insert_node; + }; + + auto pData = list_accessor::alloc_data( std::forward( args )... 
); + assert( pData != nullptr ); + return static_cast( bucket( *pData )).insert_node( pData ); + } + //@endcond + }; + +}} // namespace cds::container + +#endif // ifndef CDSLIB_CONTAINER_MICHAEL_SET_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_set_nogc.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_set_nogc.h new file mode 100644 index 0000000..5874dd2 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_set_nogc.h @@ -0,0 +1,457 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_MICHAEL_SET_NOGC_H +#define CDSLIB_CONTAINER_MICHAEL_SET_NOGC_H + +#include +#include + +namespace cds { namespace container { + + /// Michael's hash set (template specialization for gc::nogc) + /** @ingroup cds_nonintrusive_set + \anchor cds_nonintrusive_MichaelHashSet_nogc + + This specialization is so-called append-only when no item + reclamation may be performed. The class does not support deleting of list item. + + See \ref cds_nonintrusive_MichaelHashSet_hp "MichaelHashSet" for description of template parameters. + The template parameter \p OrderedList should be any \p gc::nogc -derived ordered list, for example, + \ref cds_nonintrusive_MichaelList_nogc "append-only MichaelList". 
+ */ + template < + class OrderedList, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = michael_set::traits +#else + class Traits +#endif + > + class MichaelHashSet< cds::gc::nogc, OrderedList, Traits > + { + public: + typedef cds::gc::nogc gc; ///< Garbage collector + typedef OrderedList ordered_list; ///< type of ordered list to be used as a bucket implementation + typedef Traits traits; ///< Set traits + + typedef typename ordered_list::value_type value_type; ///< type of value stored in the list + typedef typename ordered_list::key_comparator key_comparator; ///< key comparison functor +#ifdef CDS_DOXYGEN_INVOKED + typedef typename ordered_list::stat stat; ///< Internal statistics +#endif + + /// Hash functor for \ref value_type and all its derivatives that you use + typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash; + typedef typename traits::item_counter item_counter; ///< Item counter type + typedef typename traits::allocator allocator; ///< Bucket table allocator + + // GC and OrderedList::gc must be the same + static_assert(std::is_same::value, "GC and OrderedList::gc must be the same"); + + protected: + //@cond + typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat; + + typedef typename ordered_list::template rebind_traits< + cds::opt::item_counter< cds::atomicity::empty_item_counter > + , cds::opt::stat< typename bucket_stat::wrapped_stat > + >::type internal_bucket_type_; + + class internal_bucket_type: public internal_bucket_type_ + { + typedef internal_bucket_type_ base_class; + public: + using base_class::base_class; + using typename base_class::node_type; + using base_class::alloc_node; + using base_class::insert_node; + using base_class::node_to_value; + }; + + /// Bucket table allocator + typedef typename allocator::template rebind< internal_bucket_type >::other bucket_table_allocator; + + typedef typename internal_bucket_type::iterator bucket_iterator; + typedef typename 
internal_bucket_type::const_iterator bucket_const_iterator; + //@endcond + + public: + //@cond + typedef typename bucket_stat::stat stat; + //@endcond + + protected: + //@cond + const size_t m_nHashBitmask; + hash m_HashFunctor; ///< Hash functor + internal_bucket_type* m_Buckets; ///< bucket table + item_counter m_ItemCounter; ///< Item counter + stat m_Stat; ///< Internal statistics + //@endcond + + public: + ///@name Forward iterators + //@{ + /// Forward iterator + /** + The forward iterator for Michael's set is based on \p OrderedList forward iterator and has some features: + - it has no post-increment operator + - it iterates items in unordered fashion + + The iterator interface: + \code + class iterator { + public: + // Default constructor + iterator(); + + // Copy construtor + iterator( iterator const& src ); + + // Dereference operator + value_type * operator ->() const; + + // Dereference operator + value_type& operator *() const; + + // Preincrement operator + iterator& operator ++(); + + // Assignment operator + iterator& operator = (iterator const& src); + + // Equality operators + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + */ + typedef michael_set::details::iterator< internal_bucket_type, false > iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef michael_set::details::iterator< internal_bucket_type, true > const_iterator; + + /// Returns a forward iterator addressing the first element in a set + /** + For empty set \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( m_Buckets[0].begin(), m_Buckets, m_Buckets + bucket_count()); + } + + /// Returns an iterator that addresses the location succeeding the last element in a set + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the set. 
+ For empty set \code begin() == end() \endcode + */ + iterator end() + { + return iterator( m_Buckets[bucket_count() - 1].end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count()); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator begin() const + { + return get_const_begin(); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator cbegin() const + { + return get_const_begin(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a set + const_iterator end() const + { + return get_const_end(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a set + const_iterator cend() const + { + return get_const_end(); + } + //@} + + public: + /// Initialize hash set + /** + The Michael's hash set is non-expandable container. You should point the average count of items \p nMaxItemCount + when you create an object. + \p nLoadFactor parameter defines average count of items per bucket and it should be small number between 1 and 10. + Remember, since the bucket implementation is an ordered list, searching in the bucket is linear [O(nLoadFactor)]. + + The ctor defines hash table size as rounding nMaxItemCount / nLoadFactor up to nearest power of two. 
+ */ + MichaelHashSet( + size_t nMaxItemCount, ///< estimation of max item count in the hash set + size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket + ) : m_nHashBitmask( michael_set::details::init_hash_bitmask( nMaxItemCount, nLoadFactor )) + , m_Buckets( bucket_table_allocator().allocate( bucket_count())) + { + for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) + construct_bucket( it ); + } + + /// Clears hash set and destroys it + ~MichaelHashSet() + { + clear(); + for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) + it->~internal_bucket_type(); + bucket_table_allocator().deallocate( m_Buckets, bucket_count()); + } + + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. + + Return an iterator pointing to inserted item if success, otherwise \ref end() + */ + template + iterator insert( const Q& val ) + { + internal_bucket_type& refBucket = bucket( val ); + bucket_iterator it = refBucket.insert( val ); + + if ( it != refBucket.end()) { + ++m_ItemCounter; + return iterator( it, &refBucket, m_Buckets + bucket_count()); + } + + return end(); + } + + /// Inserts data of type \ref value_type constructed with std::forward(args)... + /** + Return an iterator pointing to inserted item if success \ref end() otherwise + */ + template + iterator emplace( Args&&... args ) + { + typename internal_bucket_type::node_type * pNode = internal_bucket_type::alloc_node( std::forward( args )... ); + internal_bucket_type& refBucket = bucket( internal_bucket_type::node_to_value( *pNode )); + bucket_iterator it = refBucket.insert_node( pNode ); + if ( it != refBucket.end()) { + ++m_ItemCounter; + return iterator( it, &refBucket, m_Buckets + bucket_count()); + } + + return end(); + } + + /// Updates the element + /** + The operation performs inserting or changing data with lock-free manner. 
+ + If the item \p val not found in the set, then \p val is inserted iff \p bAllowInsert is \p true. + + Returns std::pair where \p first is an iterator pointing to + item found or inserted, or \p end() if \p bAllowInsert is \p false, + + \p second is true if new item has been added or \p false if the item is already in the set. + + @warning For \ref cds_intrusive_MichaelList_hp "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". + \ref cds_intrusive_LazyList_hp "LazyList" provides exclusive access to inserted item and does not require any node-level + synchronization. + */ + template + std::pair update( Q const& val, bool bAllowInsert = true ) + { + internal_bucket_type& refBucket = bucket( val ); + std::pair ret = refBucket.update( val, bAllowInsert ); + + if ( ret.first != refBucket.end()) { + if ( ret.second ) + ++m_ItemCounter; + return std::make_pair( iterator( ret.first, &refBucket, m_Buckets + bucket_count()), ret.second ); + } + return std::make_pair( end(), ret.second ); + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( Q const& val ) + { + return update( val, true ); + } + //@endcond + + /// Checks whether the set contains \p key + /** + The function searches the item with key equal to \p key + and returns an iterator pointed to item found if the key is found, + or \ref end() otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. 
+ */ + template + iterator contains( Q const& key ) + { + internal_bucket_type& refBucket = bucket( key ); + bucket_iterator it = refBucket.contains( key ); + if ( it != refBucket.end()) + return iterator( it, &refBucket, m_Buckets + bucket_count()); + + return end(); + } + //@cond + template + CDS_DEPRECATED("use contains()") + iterator find( Q const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the set contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + iterator contains( Q const& key, Less pred ) + { + internal_bucket_type& refBucket = bucket( key ); + bucket_iterator it = refBucket.contains( key, pred ); + if ( it != refBucket.end()) + return iterator( it, &refBucket, m_Buckets + bucket_count()); + + return end(); + } + //@cond + template + CDS_DEPRECATED("use contains()") + iterator find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Clears the set (not atomic) + void clear() + { + for ( size_t i = 0; i < bucket_count(); ++i ) + m_Buckets[i].clear(); + m_ItemCounter.reset(); + } + + /// Checks if the set is empty + /** + @warning If you use \p atomicity::empty_item_counter in \p traits::item_counter, + the function always returns \p true. + */ + bool empty() const + { + return size() == 0; + } + + /// Returns item count in the set + /** + @warning If you use \p atomicity::empty_item_counter in \p traits::item_counter, + the function always returns 0. 
+ */ + size_t size() const + { + return m_ItemCounter; + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + /// Returns the size of hash table + /** + Since \p %MichaelHashSet cannot dynamically extend the hash table size, + the value returned is an constant depending on object initialization parameters; + see MichaelHashSet::MichaelHashSet for explanation. + */ + size_t bucket_count() const + { + return m_nHashBitmask + 1; + } + + protected: + //@cond + /// Calculates hash value of \p key + template + size_t hash_value( const Q& key ) const + { + return m_HashFunctor( key ) & m_nHashBitmask; + } + + /// Returns the bucket (ordered list) for \p key + template + internal_bucket_type& bucket( const Q& key ) + { + return m_Buckets[hash_value( key )]; + } + //@endcond + + private: + //@cond + template + typename std::enable_if< Stat::empty >::type construct_bucket( internal_bucket_type* b ) + { + new (b) internal_bucket_type; + } + + template + typename std::enable_if< !Stat::empty >::type construct_bucket( internal_bucket_type* b ) + { + new (b) internal_bucket_type( m_Stat ); + } + + const_iterator get_const_begin() const + { + return const_iterator( const_cast(m_Buckets[0]).begin(), m_Buckets, m_Buckets + bucket_count()); + } + const_iterator get_const_end() const + { + return const_iterator( const_cast(m_Buckets[bucket_count() - 1]).end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count()); + } + //@endcond + }; + +}} // cds::container + +#endif // ifndef CDSLIB_CONTAINER_MICHAEL_SET_NOGC_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_set_rcu.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_set_rcu.h new file mode 100644 index 0000000..abd82b2 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/michael_set_rcu.h @@ -0,0 +1,824 @@ +/* + This file is a part of libcds - Concurrent Data 
Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_MICHAEL_SET_RCU_H +#define CDSLIB_CONTAINER_MICHAEL_SET_RCU_H + +#include +#include + +namespace cds { namespace container { + + /// Michael's hash set (template specialization for \ref cds_urcu_desc "RCU") + /** @ingroup cds_nonintrusive_set + \anchor cds_nonintrusive_MichaelHashSet_rcu + + Source: + - [2002] Maged Michael "High performance dynamic lock-free hash tables and list-based sets" + + Michael's hash table algorithm is based on lock-free ordered list and it is very simple. 
+ The main structure is an array \p T of size \p M. Each element in \p T is basically a pointer + to a hash bucket, implemented as a singly linked list. The array of buckets cannot be dynamically expanded. + However, each bucket may contain unbounded number of items. + + Template parameters are: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p OrderedList - ordered list implementation used as the bucket for hash set, for example, + \ref cds_nonintrusive_MichaelList_rcu "MichaelList". + The ordered list implementation specifies the type \p T stored in the hash-set, + the comparison functor for the type \p T and other features specific for + the ordered list. + - \p Traits - set traits, default is michael_set::traits. + Instead of defining \p Traits struct you may use option-based syntax with michael_set::make_traits metafunction. + + About hash functor see \ref cds_nonintrusive_MichaelHashSet_hash_functor "MichaelSet hash functor". + + How to use + + Suppose, we have the following type \p Foo that we want to store in your \p %MichaelHashSet: + \code + struct Foo { + int nKey ; // key field + int nVal ; // value field + }; + \endcode + + To use \p %MichaelHashSet for \p Foo values, you should first choose suitable ordered list class + that will be used as a bucket for the set. We will cds::urcu::general_buffered<> RCU type and + MichaelList as a bucket type. + You should include RCU-related header file (cds/urcu/general_buffered.h in this example) + before including cds/container/michael_set_rcu.h. + Also, for ordered list we should develop a comparator for our \p Foo struct. + \code + #include + #include + #include + + namespace cc = cds::container; + + // Foo comparator + struct Foo_cmp { + int operator ()(Foo const& v1, Foo const& v2 ) const + { + if ( std::less( v1.nKey, v2.nKey )) + return -1; + return std::less(v2.nKey, v1.nKey) ? 
1 : 0; + } + }; + + // Ordered list + typedef cc::MichaelList< cds::urcu::gc< cds::urcu::general_buffered<> >, Foo, + typename cc::michael_list::make_traits< + cc::opt::compare< Foo_cmp > // item comparator option + >::type + > bucket_list; + + // Hash functor for Foo + struct foo_hash { + size_t operator ()( int i ) const + { + return std::hash( i ); + } + size_t operator()( Foo const& i ) const + { + return std::hash( i.nKey ); + } + }; + + // Declare the set + // Note that \p RCU template parameter of ordered list must be equal \p RCU for the set. + typedef cc::MichaelHashSet< cds::urcu::gc< cds::urcu::general_buffered<> >, bucket_list, + cc::michael_set::make_traits< + cc::opt::hash< foo_hash > + >::type + > foo_set; + + foo_set fooSet; + \endcode + */ + template < + class RCU, + class OrderedList, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = michael_set::traits +#else + class Traits +#endif + > + class MichaelHashSet< cds::urcu::gc< RCU >, OrderedList, Traits > + { + public: + typedef cds::urcu::gc< RCU > gc; ///< RCU used as garbage collector + typedef OrderedList ordered_list; ///< type of ordered list to be used as a bucket implementation + typedef Traits traits; ///< Set traits + + typedef typename ordered_list::value_type value_type; ///< type of value to be stored in the list + typedef typename ordered_list::key_comparator key_comparator; ///< key comparing functor +#ifdef CDS_DOXYGEN_INVOKED + typedef typename ordered_list::stat stat; ///< Internal statistics + typedef typename ordered_list::exempt_ptr exempt_ptr; ///< pointer to extracted node + typedef typename ordered_list::raw_ptr raw_ptr; ///< Return type of \p get() member function and its derivatives +#endif + + /// Hash functor for \ref value_type and all its derivatives that you use + typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash; + typedef typename traits::item_counter item_counter; ///< Item counter type + typedef typename traits::allocator allocator; ///< 
Bucket table allocator + + typedef typename ordered_list::rcu_lock rcu_lock; ///< RCU scoped lock + /// Group of \p extract_xxx functions require external locking if underlying ordered list requires that + static constexpr const bool c_bExtractLockExternal = ordered_list::c_bExtractLockExternal; + + // GC and OrderedList::gc must be the same + static_assert(std::is_same::value, "GC and OrderedList::gc must be the same"); + + //@cond + typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat; + + typedef typename ordered_list::template rebind_traits< + cds::opt::item_counter< cds::atomicity::empty_item_counter > + , cds::opt::stat< typename bucket_stat::wrapped_stat > + >::type internal_bucket_type_; + + class internal_bucket_type: public internal_bucket_type_ + { + typedef internal_bucket_type_ base_class; + public: + using base_class::base_class; + using typename base_class::node_type; + using base_class::alloc_node; + using base_class::insert_node; + using base_class::node_to_value; + }; + + typedef typename internal_bucket_type::exempt_ptr exempt_ptr; + typedef typename internal_bucket_type::raw_ptr raw_ptr; + typedef typename bucket_stat::stat stat; + //@endcond + + protected: + //@cond + /// Bucket table allocator + typedef typename allocator::template rebind< internal_bucket_type >::other bucket_table_allocator; + + const size_t m_nHashBitmask; + hash m_HashFunctor; ///< Hash functor + internal_bucket_type* m_Buckets; ///< bucket table + item_counter m_ItemCounter; ///< Item counter + stat m_Stat; ///< Internal statistics + //@endcond + + public: + ///@name Forward iterators (thread-safe under RCU lock) + //@{ + /// Forward iterator + /** + The forward iterator for Michael's set is based on \p OrderedList forward iterator and has some features: + - it has no post-increment operator + - it iterates items in unordered fashion + + You may safely use iterators in multi-threaded environment only under RCU lock. 
+ Otherwise, a crash is possible if another thread deletes the element the iterator points to. + + The iterator interface: + \code + class iterator { + public: + // Default constructor + iterator(); + + // Copy construtor + iterator( iterator const& src ); + + // Dereference operator + value_type * operator ->() const; + + // Dereference operator + value_type& operator *() const; + + // Preincrement operator + iterator& operator ++(); + + // Assignment operator + iterator& operator = (iterator const& src); + + // Equality operators + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + */ + typedef michael_set::details::iterator< internal_bucket_type, false > iterator; + + /// Const forward iterator + typedef michael_set::details::iterator< internal_bucket_type, true > const_iterator; + + /// Returns a forward iterator addressing the first element in a set + /** + For empty set \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( m_Buckets[0].begin(), m_Buckets, m_Buckets + bucket_count()); + } + + /// Returns an iterator that addresses the location succeeding the last element in a set + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the set. 
+ For empty set \code begin() == end() \endcode + */ + iterator end() + { + return iterator( m_Buckets[bucket_count() - 1].end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count()); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator begin() const + { + return get_const_begin(); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator cbegin() const + { + return get_const_begin(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a set + const_iterator end() const + { + return get_const_end(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a set + const_iterator cend() const + { + return get_const_end(); + } + //@} + + public: + /// Initialize hash set + /** + The Michael's hash set is non-expandable container. You should point the average count of items \p nMaxItemCount + when you create an object. + \p nLoadFactor parameter defines average count of items per bucket and it should be small number between 1 and 10. + Remember, since the bucket implementation is an ordered list, searching in the bucket is linear [O(nLoadFactor)]. + + The ctor defines hash table size as rounding nMaxItemCount / nLoadFactor up to nearest power of two. 
+ */ + MichaelHashSet( + size_t nMaxItemCount, ///< estimation of max item count in the hash set + size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket + ) : m_nHashBitmask( michael_set::details::init_hash_bitmask( nMaxItemCount, nLoadFactor )) + , m_Buckets( bucket_table_allocator().allocate( bucket_count())) + { + for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) + construct_bucket( it ); + } + + /// Clears hash set and destroys it + ~MichaelHashSet() + { + clear(); + + for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) + it->~internal_bucket_type(); + bucket_table_allocator().deallocate( m_Buckets, bucket_count()); + + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the set. + + The type \p Q should contain as minimum the complete key for the node. + The object of \ref value_type should be constructible from a value of type \p Q. + In trivial case, \p Q is equal to \ref value_type. + + The function applies RCU lock internally. + + Returns \p true if \p val is inserted into the set, \p false otherwise. + */ + template + bool insert( Q&& val ) + { + const bool bRet = bucket( val ).insert( std::forward( val )); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Inserts new node + /** + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-fields of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. + The user-defined functor is called only if the inserting is success. + + The function applies RCU lock internally. + + @warning For \ref cds_nonintrusive_MichaelList_rcu "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". 
+ \ref cds_nonintrusive_LazyList_rcu "LazyList" provides exclusive access to inserted item and does not require any node-level + synchronization. + */ + template + bool insert( Q&& val, Func f ) + { + const bool bRet = bucket( val ).insert( std::forward( val ), f ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Updates the element + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val not found in the set, then \p val is inserted iff \p bAllowInsert is \p true. + Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + struct functor { + void operator()( bool bNew, value_type& item, Q const& val ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p %update() function + + The functor may change non-key fields of the \p item. + + The function applies RCU lock internally. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if new item has been added or \p false if the item with \p key + already is in the set. + + @warning For \ref cds_intrusive_MichaelList_hp "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". + \ref cds_intrusive_LazyList_hp "LazyList" provides exclusive access to inserted item and does not require any node-level + synchronization. 
+ */ + template + std::pair update( Q const& val, Func func, bool bAllowInsert = true ) + { + std::pair bRet = bucket( val ).update( val, func, bAllowInsert ); + if ( bRet.second ) + ++m_ItemCounter; + return bRet; + }//@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( const Q& val, Func func ) + { + return update( val, func, true ); + } + //@endcond + + /// Inserts data of type \p value_type created from \p args + /** + Returns \p true if inserting successful, \p false otherwise. + + The function applies RCU lock internally. + */ + template + bool emplace( Args&&... args ) + { + typename internal_bucket_type::node_type * pNode = internal_bucket_type::alloc_node( std::forward( args )... ); + bool bRet = bucket( internal_bucket_type::node_to_value( *pNode )).insert_node( pNode ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Deletes \p key from the set + /** \anchor cds_nonintrusive_MichealSet_rcu_erase_val + + Since the key of MichaelHashSet's item type \p value_type is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The set item comparator should be able to compare the type \p value_type + and the type \p Q. + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key ) + { + const bool bRet = bucket( key ).erase( key ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichealSet_rcu_erase_val "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. 
+ */ + template + bool erase_with( Q const& key, Less pred ) + { + const bool bRet = bucket( key ).erase_with( key, pred ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes \p key from the set + /** \anchor cds_nonintrusive_MichealSet_rcu_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type const& val); + }; + \endcode + + Since the key of %MichaelHashSet's \p value_type is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The list item comparator should be able to compare the type \p T of list item + and the type \p Q. + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key, Func f ) + { + const bool bRet = bucket( key ).erase( key, f ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichealSet_rcu_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + const bool bRet = bucket( key ).erase_with( key, pred, f ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Extracts an item from the set + /** \anchor cds_nonintrusive_MichaelHashSet_rcu_extract + The function searches an item with key equal to \p key in the set, + unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. 
+ If the item with the key equal to \p key is not found the function return an empty \p exempt_ptr. + + The function just excludes the item from the set and returns a pointer to item found. + Depends on \p ordered_list you should or should not lock RCU before calling of this function: + - for the set based on \ref cds_nonintrusive_MichaelList_rcu "MichaelList" RCU should not be locked + - for the set based on \ref cds_nonintrusive_LazyList_rcu "LazyList" RCU should be locked + See ordered list implementation for details. + + \code + #include + #include + #include + + typedef cds::urcu::gc< general_buffered<> > rcu; + typedef cds::container::MichaelList< rcu, Foo > rcu_michael_list; + typedef cds::container::MichaelHashSet< rcu, rcu_michael_list, foo_traits > rcu_michael_set; + + rcu_michael_set theSet; + // ... + + typename rcu_michael_set::exempt_ptr p; + + // For MichaelList we should not lock RCU + + // Note that you must not delete the item found inside the RCU lock + p = theSet.extract( 10 ); + if ( p ) { + // do something with p + ... + } + + // We may safely release p here + // release() passes the pointer to RCU reclamation cycle + p.release(); + \endcode + */ + template + exempt_ptr extract( Q const& key ) + { + exempt_ptr p = bucket( key ).extract( key ); + if ( p ) + --m_ItemCounter; + return p; + } + + /// Extracts an item from the set using \p pred predicate for searching + /** + The function is an analog of \p extract(Q const&) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. 
+ */ + template + exempt_ptr extract_with( Q const& key, Less pred ) + { + exempt_ptr p = bucket( key ).extract_with( key, pred ); + if ( p ) + --m_ItemCounter; + return p; + } + + /// Finds the key \p key + /** \anchor cds_nonintrusive_MichealSet_rcu_find_func + + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. + + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that may be not the same as \p value_type. + + The function applies RCU lock internally. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( Q& key, Func f ) + { + return bucket( key ).find( key, f ); + } + //@cond + template + bool find( Q const& key, Func f ) + { + return bucket( key ).find( key, f ); + } + //@endcond + + /// Finds the key \p key using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_MichealSet_rcu_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. 
+ */ + template + bool find_with( Q& key, Less pred, Func f ) + { + return bucket( key ).find_with( key, pred, f ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) + { + return bucket( key ).find_with( key, pred, f ); + } + //@endcond + + /// Checks whether the set contains \p key + /** + + The function searches the item with key equal to \p key + and returns \p true if the key is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool contains( Q const& key ) + { + return bucket( key ).contains( key ); + } + //@cond + template + CDS_DEPRECATED("use contains()") + bool find( Q const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the set contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool contains( Q const& key, Less pred ) + { + return bucket( key ).contains( key, pred ); + } + //@cond + template + CDS_DEPRECATED("use contains()") + bool find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Finds the key \p key and return the item found + /** \anchor cds_nonintrusive_MichaelHashSet_rcu_get + The function searches the item with key equal to \p key and returns the pointer to item found. + If \p key is not found it returns \p nullptr. + Note the type of returned value depends on underlying \p ordered_list. + For details, see documentation of ordered list you use. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU should be locked before call of this function. 
+ Returned item is valid only while RCU is locked: + \code + typedef cds::container::MichaelHashSet< your_template_parameters > hash_set; + hash_set theSet; + typename hash_set::raw_ptr gp; + // ... + { + // Lock RCU + hash_set::rcu_lock lock; + + gp = theSet.get( 5 ); + if ( gp ) { + // Deal with pVal + //... + } + // Unlock RCU by rcu_lock destructor + // gp can be reclaimed at any time after RCU has been unlocked + } + \endcode + */ + template + raw_ptr get( Q const& key ) + { + return bucket( key ).get( key ); + } + + /// Finds the key \p key and return the item found + /** + The function is an analog of \ref cds_nonintrusive_MichaelHashSet_rcu_get "get(Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + raw_ptr get_with( Q const& key, Less pred ) + { + return bucket( key ).get_with( key, pred ); + } + + /// Clears the set (not atomic) + void clear() + { + for ( size_t i = 0; i < bucket_count(); ++i ) + m_Buckets[i].clear(); + m_ItemCounter.reset(); + } + + /// Checks if the set is empty + /** + @warning If you use \p atomicity::empty_item_counter in \p traits::item_counter, + the function always returns \p true. + */ + bool empty() const + { + return size() == 0; + } + + /// Returns item count in the set + /** + @warning If you use \p atomicity::empty_item_counter in \p traits::item_counter, + the function always returns 0. 
+ */ + size_t size() const + { + return m_ItemCounter; + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + /// Returns the size of hash table + /** + Since \p %MichaelHashSet cannot dynamically extend the hash table size, + the value returned is an constant depending on object initialization parameters; + see MichaelHashSet::MichaelHashSet for explanation. + */ + size_t bucket_count() const + { + return m_nHashBitmask + 1; + } + + protected: + //@cond + /// Calculates hash value of \p key + template + size_t hash_value( Q const& key ) const + { + return m_HashFunctor( key ) & m_nHashBitmask; + } + + /// Returns the bucket (ordered list) for \p key + template + internal_bucket_type& bucket( Q const& key ) + { + return m_Buckets[hash_value( key )]; + } + template + internal_bucket_type const& bucket( Q const& key ) const + { + return m_Buckets[hash_value( key )]; + } + + template + typename std::enable_if< Stat::empty >::type construct_bucket( internal_bucket_type* bkt ) + { + new (bkt) internal_bucket_type; + } + + template + typename std::enable_if< !Stat::empty >::type construct_bucket( internal_bucket_type* bkt ) + { + new (bkt) internal_bucket_type( m_Stat ); + } + + const_iterator get_const_begin() const + { + return const_iterator( const_cast(m_Buckets[0]).begin(), m_Buckets, m_Buckets + bucket_count()); + } + const_iterator get_const_end() const + { + return const_iterator( const_cast(m_Buckets[bucket_count() - 1]).end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count()); + } + //@endcond + }; + +}} // namespace cds::container + +#endif // ifndef CDSLIB_CONTAINER_MICHAEL_SET_RCU_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/moir_queue.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/moir_queue.h new file mode 100644 index 0000000..dc560bb --- /dev/null +++ 
b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/moir_queue.h @@ -0,0 +1,323 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_CONTAINER_MOIR_QUEUE_H +#define CDSLIB_CONTAINER_MOIR_QUEUE_H + +#include +#include +#include + +namespace cds { namespace container { + + //@cond + namespace details { + template + struct make_moir_queue: public cds::container::details::make_msqueue< GC, T, Traits > + { + typedef cds::container::details::make_msqueue< GC, T, Traits > base_class; + typedef cds::intrusive::MoirQueue< GC, typename base_class::node_type, typename base_class::intrusive_traits > type; + }; + } + //@endcond + + /// A variation of Michael & Scott's lock-free queue + /** @ingroup cds_nonintrusive_queue + It is non-intrusive version of \p cds::intrusive::MoirQueue. + + Template arguments: + - \p GC - garbage collector type: \p gc::HP, \p gc::DHP + - \p T - a type stored in the queue. + - \p Traits - queue traits, default is \p msqueue::traits. You can use \p msqueue::make_traits + metafunction to make your traits or just derive your traits from \p %msqueue::traits: + \code + struct myTraits: public cds::container::msqueue::traits { + typedef cds::intrusive::msqueue::stat<> stat; + typedef cds::atomicity::item_counter item_counter; + }; + typedef cds::container::MoirQueue< cds::gc::HP, Foo, myTraits > myQueue; + + // Equivalent make_traits example: + typedef cds::container::MoirQueue< cds::gc::HP, Foo, + typename cds::container::msqueue::make_traits< + cds::opt::stat< cds::container::msqueue::stat<> >, + cds::opt::item_counter< cds::atomicity::item_counter > + >::type + > myQueue; + \endcode + */ + template + class MoirQueue: +#ifdef CDS_DOXYGEN_INVOKED + private intrusive::MoirQueue< GC, intrusive::msqueue::node< T >, Traits > +#else + private details::make_moir_queue< GC, T, Traits >::type +#endif + { + //@cond + typedef details::make_moir_queue< GC, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + /// Rebind template arguments + template + struct rebind { + typedef MoirQueue< GC2, T2, Traits2 > other ; ///< Rebinding result + 
}; + + static constexpr const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the algorithm + + public: + typedef T value_type ; ///< Value type stored in the queue + typedef typename base_class::gc gc; ///< Garbage collector + typedef typename base_class::back_off back_off; ///< Back-off strategy + typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter; ///< Item counting policy used + typedef typename base_class::stat stat; ///< Internal statistics policy used + typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option + + protected: + //@cond + typedef typename maker::node_type node_type; ///< queue node type (derived from intrusive::msqueue::node) + + typedef typename maker::cxx_allocator cxx_allocator; + typedef typename maker::node_deallocator node_deallocator; // deallocate node + typedef typename base_class::node_traits node_traits; + //@endcond + + protected: + ///@cond + static node_type * alloc_node() + { + return cxx_allocator().New(); + } + static node_type * alloc_node( const value_type& val ) + { + return cxx_allocator().New( val ); + } + template + static node_type * alloc_node_move( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward( args )... ); + } + static void free_node( node_type * p ) + { + node_deallocator()( p ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + //@endcond + + public: + /// Initializes empty queue + MoirQueue() + {} + + /// Destructor clears the queue + ~MoirQueue() + {} + + /// Enqueues \p val value into the queue. + /** + The function makes queue node in dynamic memory calling copy constructor for \p val + and then it calls \p intrusive::MoirQueue::enqueue. 
+ Returns \p true if success, \p false otherwise. + */ + bool enqueue( value_type const& val ) + { + scoped_node_ptr p( alloc_node(val)); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + + /// Enqueues \p val value into the queue, move semantics + bool enqueue( value_type&& val ) + { + scoped_node_ptr p( alloc_node_move( std::move( val ))); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + + /// Enqueues \p data to queue using a functor + /** + \p Func is a functor calling to create a new node. + The functor should initialize creating node + and it takes one argument - a reference to a new node of type \ref value_type : + \code + cds:container::MoirQueue< cds::gc::HP, Foo > myQueue; + Bar bar; + myQueue.enqueue_with( [&bar]( Foo& dest ) { dest = bar; } ); + \endcode + */ + template + bool enqueue_with( Func f ) + { + scoped_node_ptr p( alloc_node()); + f( p->m_value ); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + + /// Enqueues data of type \ref value_type constructed with std::forward(args)... + template + bool emplace( Args&&... args ) + { + scoped_node_ptr p( alloc_node_move( std::forward( args )... )); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + + /// Synonym for \p enqueue() function + bool push( value_type const& val ) + { + return enqueue( val ); + } + + /// Synonym for \p enqueue() function, move semantics + bool push( value_type&& val ) + { + return enqueue( std::move( val )); + } + + /// Synonym for \p enqueue_with() function + template + bool push_with( Func f ) + { + return enqueue_with( f ); + } + + /// Dequeues a value from the queue + /** + If queue is not empty, the function returns \p true, \p dest contains copy of + dequeued value. The assignment operator for type \ref value_type is invoked. + If queue is empty, the function returns \p false, \p dest is unchanged. 
+ */ + bool dequeue( value_type& dest ) + { + return dequeue_with( [&dest]( value_type& src ) { + // TSan finds a race between this read of \p src and node_type constructor + // I think, it is wrong + CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN; + dest = std::move( src ); + CDS_TSAN_ANNOTATE_IGNORE_READS_END; + }); + } + + /// Dequeues a value using a functor + /** + \p Func is a functor called to copy dequeued value. + The functor takes one argument - a reference to removed node: + \code + cds:container::MoirQueue< cds::gc::HP, Foo > myQueue; + Bar bar; + myQueue.dequeue_with( [&bar]( Foo& src ) { bar = std::move( src );}); + \endcode + The functor is called only if the queue is not empty. + */ + template + bool dequeue_with( Func f ) + { + typename base_class::dequeue_result res; + if ( base_class::do_dequeue( res )) { + f( node_traits::to_value_ptr( *res.pNext )->m_value ); + base_class::dispose_result( res ); + return true; + } + return false; + } + + /// Synonym for \p dequeue() function + bool pop( value_type& dest ) + { + return dequeue( dest ); + } + + /// Synonym for \p dequeue_with() function + template + bool pop_with( Func f ) + { + return dequeue_with( f ); + } + + /// Clear the queue + /** + The function repeatedly calls \ref dequeue until it returns \p nullptr. + The disposer defined in template \p Traits is called for each item + that can be safely disposed. 
+ */ + void clear() + { + base_class::clear(); + } + + /// Checks if the queue is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns queue's item count (see \ref intrusive::MSQueue::size for explanation) + size_t size() const + { + return base_class::size(); + } + + /// Returns refernce to internal statistics + const stat& statistics() const + { + return base_class::statistics(); + } + + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_MOIR_QUEUE_H + + diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/mspriority_queue.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/mspriority_queue.h new file mode 100644 index 0000000..62c12cf --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/mspriority_queue.h @@ -0,0 +1,344 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_MSPRIORITY_QUEUE_H +#define CDSLIB_CONTAINER_MSPRIORITY_QUEUE_H + +#include +#include +#include + +namespace cds { namespace container { + + /// MSPriorityQueue related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace mspriority_queue { + +#ifdef CDS_DOXYGEN_INVOKED + /// Synonym for \p cds::intrusive::mspriority_queue::stat + typedef cds::intrusive::mspriority_queue::stat<> stat; + + /// Synonym for \p cds::intrusive::mspriority_queue::empty_stat + typedef cds::intrusive::mspriority_queue::empty_stat empty_stat; +#else + using cds::intrusive::mspriority_queue::stat; + using cds::intrusive::mspriority_queue::empty_stat; +#endif + + /// MSPriorityQueue traits + /** + The traits for \p %cds::container::MSPriorityQueue is the same as for + \p cds::intrusive::MSPriorityQueue (see \p cds::intrusive::mspriority_queue::traits) + plus some additional properties. + */ + struct traits: public cds::intrusive::mspriority_queue::traits + { + /// The allocator use to allocate memory for values + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// Move policy + /** + The move policy used in \p MSPriorityQueue::pop() function to move item's value. + Default is \p opt::v::assignment_move_policy. + */ + typedef cds::opt::v::assignment_move_policy move_policy; + }; + + /// Metafunction converting option list to traits + /** + \p Options are: + - \p opt::buffer - the buffer type for heap array. 
Possible type are: \p opt::v::initiaized_static_buffer, \p opt::v::initialized_dynamic_buffer. + Default is \p %opt::v::initialized_dynamic_buffer. + You may specify any type of values for the buffer since at instantiation time + the \p buffer::rebind member metafunction is called to change the type of values stored in the buffer. + - \p opt::compare - priority compare functor. No default functor is provided. + If the option is not specified, the \p opt::less is used. + - \p opt::less - specifies binary predicate used for priority compare. Default is \p std::less. + - \p opt::lock_type - lock type. Default is \p cds::sync::spin. + - \p opt::back_off - back-off strategy. Default is \p cds::backoff::yield + - \p opt::allocator - allocator (like \p std::allocator) for the values of queue's items. + Default is \ref CDS_DEFAULT_ALLOCATOR + - \p opt::move_policy - policy for moving item's value. Default is \p opt::v::assignment_move_policy. + If the compiler supports move semantics it would be better to specify the move policy + based on the move semantics for type \p T. + - \p opt::stat - internal statistics. Available types: \p mspriority_queue::stat, \p mspriority_queue::empty_stat (the default, no overhead) + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + ,Options... + >::type type; +# endif + }; + } // namespace mspriority_queue + + /// Michael & Scott array-based lock-based concurrent priority queue heap + /** @ingroup cds_nonintrusive_priority_queue + Source: + - [1996] G.Hunt, M.Michael, S. Parthasarathy, M.Scott + "An efficient algorithm for concurrent priority queue heaps" + + \p %MSPriorityQueue augments the standard array-based heap data structure with + a mutual-exclusion lock on the heap's size and locks on each node in the heap. 
+ Each node also has a tag that indicates whether + it is empty, valid, or in a transient state due to an update to the heap + by an inserting thread. + The algorithm allows concurrent insertions and deletions in opposite directions, + without risking deadlock and without the need for special server threads. + It also uses a "bit-reversal" technique to scatter accesses across the fringe + of the tree to reduce contention. + On large heaps the algorithm achieves significant performance improvements + over serialized single-lock algorithm, for various insertion/deletion + workloads. For small heaps it still performs well, but not as well as + single-lock algorithm. + + Template parameters: + - \p T - type to be stored in the list. The priority is a part of \p T type. + - \p Traits - the traits. See \p mspriority_queue::traits for explanation. + It is possible to declare option-based queue with \p mspriority_queue::make_traits + metafunction instead of \p Traits template argument. + */ + template + class MSPriorityQueue: protected cds::intrusive::MSPriorityQueue< T, Traits > + { + //@cond + typedef cds::intrusive::MSPriorityQueue< T, Traits > base_class; + //@endcond + public: + typedef T value_type ; ///< Value type stored in the queue + typedef Traits traits ; ///< Traits template parameter + + typedef typename base_class::key_comparator key_comparator; ///< priority comparing functor based on opt::compare and opt::less option setter. 
+ typedef typename base_class::lock_type lock_type; ///< heap's size lock type + typedef typename base_class::back_off back_off ; ///< Back-off strategy + typedef typename traits::stat stat; ///< internal statistics type, see \p intrusive::mspriority_queue::traits::stat + typedef typename base_class::item_counter item_counter;///< Item counter type + typedef typename traits::allocator::template rebind::other allocator_type; ///< Value allocator + typedef typename traits::move_policy move_policy; ///< Move policy for type \p T + + protected: + //@cond + typedef cds::details::Allocator< value_type, allocator_type > cxx_allocator; + + struct value_deleter { + void operator()( value_type * p ) const + { + cxx_allocator().Delete( p ); + } + }; + typedef std::unique_ptr scoped_ptr; + //@endcond + + public: + /// Constructs empty priority queue + /** + For \p cds::opt::v::initialized_static_buffer the \p nCapacity parameter is ignored. + */ + MSPriorityQueue( size_t nCapacity ) + : base_class( nCapacity ) + {} + + /// Clears priority queue and destructs the object + ~MSPriorityQueue() + { + clear(); + } + + /// Inserts an item into priority queue + /** + If the priority queue is full, the function returns \p false, + no item has been added. + Otherwise, the function inserts the copy of \p val into the heap + and returns \p true. + + The function use copy constructor to create new heap item from \p val. + */ + bool push( value_type const& val ) + { + scoped_ptr pVal( cxx_allocator().New( val )); + if ( base_class::push( *(pVal.get()))) { + pVal.release(); + return true; + } + return false; + } + + /// Inserts an item into the queue using a functor + /** + \p Func is a functor called to create node. 
+ The functor \p f takes one argument - a reference to a new node of type \ref value_type : + \code + cds::container::MSPriorityQueue< Foo > myQueue; + Bar bar; + myQueue.push_with( [&bar]( Foo& dest ) { dest = bar; } ); + \endcode + */ + template + bool push_with( Func f ) + { + scoped_ptr pVal( cxx_allocator().New()); + f( *pVal ); + if ( base_class::push( *pVal )) { + pVal.release(); + return true; + } + return false; + } + + /// Inserts a item into priority queue + /** + If the priority queue is full, the function returns \p false, + no item has been added. + Otherwise, the function inserts a new item created from \p args arguments + into the heap and returns \p true. + */ + template + bool emplace( Args&&... args ) + { + scoped_ptr pVal( cxx_allocator().MoveNew( std::forward(args)... )); + if ( base_class::push( *(pVal.get()))) { + pVal.release(); + return true; + } + return false; + } + + /// Extracts item with high priority + /** + If the priority queue is empty, the function returns \p false. + Otherwise, it returns \p true and \p dest contains the copy of extracted item. + The item is deleted from the heap. + + The function uses \ref move_policy to move extracted value from the heap's top + to \p dest. + + The function is equivalent of such call: + \code + pop_with( dest, [&dest]( value_type& src ) { move_policy()(dest, src); } ); + \endcode + */ + bool pop( value_type& dest ) + { + return pop_with( [&dest]( value_type& src ) { move_policy()(dest, std::move(src)); }); + } + + /// Extracts an item with high priority + /** + If the priority queue is empty, the function returns \p false. + Otherwise, it returns \p true and \p dest contains the copy of extracted item. + The item is deleted from the heap. + + \p Func is a functor called to copy popped value. 
+ The functor takes one argument - a reference to removed node: + \code + cds:container::MSPriorityQueue< Foo > myQueue; + Bar bar; + myQueue.pop_with( [&bar]( Foo& src ) { bar = std::move( src );}); + \endcode + */ + template + bool pop_with( Func f ) + { + value_type * pVal = base_class::pop(); + if ( pVal ) { + f( *pVal ); + cxx_allocator().Delete( pVal ); + return true; + } + return false; + } + + /// Clears the queue (not atomic) + /** + This function is not atomic, but thread-safe + */ + void clear() + { + base_class::clear_with( []( value_type& src ) { value_deleter()(&src); } ); + } + + /// Clears the queue (not atomic) + /** + This function is not atomic, but thread-safe. + + For each item removed the functor \p f is called. + \p Func interface is: + \code + struct clear_functor + { + void operator()( value_type& item ); + }; + \endcode + */ + template + void clear_with( Func f ) + { + base_class::clear_with( [&f]( value_type& val ) { f(val); value_deleter()( &val ); } ); + } + + /// Checks is the priority queue is empty + bool empty() const + { + return base_class::empty(); + } + + /// Checks if the priority queue is full + bool full() const + { + return base_class::full(); + } + + /// Returns current size of priority queue + size_t size() const + { + return base_class::size(); + } + + /// Return capacity of the priority queue + size_t capacity() const + { + return base_class::capacity(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_MSPRIORITY_QUEUE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/msqueue.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/msqueue.h new file mode 100644 index 0000000..fa51eb9 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/msqueue.h @@ -0,0 +1,435 @@ +/* + This file is a part 
of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_CONTAINER_MSQUEUE_H +#define CDSLIB_CONTAINER_MSQUEUE_H + +#include +#include +#include + +namespace cds { namespace container { + + /// MSQueue related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace msqueue { + /// Internal statistics + template ::counter_type > + using stat = cds::intrusive::msqueue::stat< Counter >; + + /// Dummy internal statistics + typedef cds::intrusive::msqueue::empty_stat empty_stat; + + /// MSQueue default type traits + struct traits + { + /// Node allocator + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// Back-off strategy + typedef cds::backoff::empty back_off; + + /// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter to enable item counting + typedef atomicity::empty_item_counter item_counter; + + /// Internal statistics (by default, disabled) + /** + Possible option value are: \p msqueue::stat, \p msqueue::empty_stat (the default), + user-provided class that supports \p %msqueue::stat interface. + */ + typedef msqueue::empty_stat stat; + + /// C++ memory ordering model + /** + Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + */ + typedef opt::v::relaxed_ordering memory_model; + + /// Padding for internal critical atomic data. Default is \p opt::cache_line_padding + enum { padding = opt::cache_line_padding }; + }; + + /// Metafunction converting option list to \p msqueue::traits + /** + Supported \p Options are: + - \p opt::allocator - allocator (like \p std::allocator) used for allocating queue nodes. Default is \ref CDS_DEFAULT_ALLOCATOR + - \p opt::back_off - back-off strategy used, default is \p cds::backoff::empty. + - \p opt::item_counter - the type of item counting feature. 
Default is \p cds::atomicity::empty_item_counter (item counting disabled) + To enable item counting use \p cds::atomicity::item_counter + - \p opt::stat - the type to gather internal statistics. + Possible statistics types are: \p msqueue::stat, \p msqueue::empty_stat, user-provided class that supports \p %msqueue::stat interface. + Default is \p %msqueue::empty_stat. + - \p opt::padding - padding for internal critical atomic data. Default is \p opt::cache_line_padding + - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + + Example: declare \p %MSQueue with item counting and internal statistics + \code + typedef cds::container::MSQueue< cds::gc::HP, Foo, + typename cds::container::msqueue::make_traits< + cds::opt::item_counter< cds::atomicity::item_counter >, + cds::opt::stat< cds::container::msqueue::stat<> > + >::type + > myQueue; + \endcode + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + , Options... + >::type type; +# endif + }; + } // namespace msqueue + + //@cond + namespace details { + template + struct make_msqueue + { + typedef GC gc; + typedef T value_type; + typedef Traits traits; + + struct node_type : public intrusive::msqueue::node< gc > + { + value_type m_value; + + node_type( value_type const& val ) + : m_value( val ) + {} + + template + node_type( Args&&... args ) + : m_value( std::forward( args )... 
) + {} + }; + + typedef typename traits::allocator::template rebind::other allocator_type; + typedef cds::details::Allocator< node_type, allocator_type > cxx_allocator; + + struct node_deallocator + { + void operator ()( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + }; + + struct intrusive_traits : public traits + { + typedef cds::intrusive::msqueue::base_hook< cds::opt::gc > hook; + typedef node_deallocator disposer; + static constexpr const cds::intrusive::opt::link_check_type link_checker = cds::intrusive::msqueue::traits::link_checker; + }; + + typedef intrusive::MSQueue< gc, node_type, intrusive_traits > type; + }; + } + //@endcond + + /// Michael & Scott lock-free queue + /** @ingroup cds_nonintrusive_queue + It is non-intrusive version of Michael & Scott's queue algorithm based on intrusive implementation + \p cds::intrusive::MSQueue. + + Template arguments: + - \p GC - garbage collector type: \p gc::HP, \p gc::DHP + - \p T is a type stored in the queue. + - \p Traits - queue traits, default is \p msqueue::traits. 
You can use \p msqueue::make_traits + metafunction to make your traits or just derive your traits from \p %msqueue::traits: + \code + struct myTraits: public cds::container::msqueue::traits { + typedef cds::intrusive::msqueue::stat<> stat; + typedef cds::atomicity::item_counter item_counter; + }; + typedef cds::container::MSQueue< cds::gc::HP, Foo, myTraits > myQueue; + + // Equivalent make_traits example: + typedef cds::container::MSQueue< cds::gc::HP, Foo, + typename cds::container::msqueue::make_traits< + cds::opt::stat< cds::container::msqueue::stat<> >, + cds::opt::item_counter< cds::atomicity::item_counter > + >::type + > myQueue; + \endcode + */ + template + class MSQueue: +#ifdef CDS_DOXYGEN_INVOKED + private intrusive::MSQueue< GC, cds::intrusive::msqueue::node< T >, Traits > +#else + private details::make_msqueue< GC, T, Traits >::type +#endif + { + //@cond + typedef details::make_msqueue< GC, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + /// Rebind template arguments + template + struct rebind { + typedef MSQueue< GC2, T2, Traits2> other ; ///< Rebinding result + }; + + public: + typedef T value_type; ///< Value type stored in the queue + typedef Traits traits; ///< Queue traits + + typedef typename base_class::gc gc; ///< Garbage collector used + typedef typename base_class::back_off back_off; ///< Back-off strategy used + typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter; ///< Item counting policy used + typedef typename base_class::stat stat; ///< Internal statistics policy used + typedef typename base_class::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option + + static constexpr const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the algorithm + + protected: + //@cond + typedef typename maker::node_type node_type; ///< queue node type (derived from \p intrusive::msqueue::node) + + typedef typename maker::cxx_allocator cxx_allocator; + typedef typename maker::node_deallocator node_deallocator; // deallocate node + typedef typename base_class::node_traits node_traits; + //@endcond + + protected: + ///@cond + static node_type * alloc_node() + { + return cxx_allocator().New(); + } + static node_type * alloc_node( value_type const& val ) + { + return cxx_allocator().New( val ); + } + template + static node_type * alloc_node_move( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward( args )... ); + } + static void free_node( node_type * p ) + { + node_deallocator()( p ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + //@endcond + + public: + /// Initializes empty queue + MSQueue() + {} + + /// Destructor clears the queue + ~MSQueue() + {} + + /// Enqueues \p val value into the queue. + /** + The function makes queue node in dynamic memory calling copy constructor for \p val + and then it calls \p intrusive::MSQueue::enqueue. + Returns \p true if success, \p false otherwise. + */ + bool enqueue( value_type const& val ) + { + scoped_node_ptr p( alloc_node(val)); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + + /// Enqueues \p val in the queue, move semantics + bool enqueue( value_type&& val ) + { + scoped_node_ptr p( alloc_node_move( std::move( val ))); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + + /// Enqueues data to the queue using a functor + /** + \p Func is a functor called to create node. 
+ The functor \p f takes one argument - a reference to a new node of type \ref value_type : + \code + cds::container::MSQueue< cds::gc::HP, Foo > myQueue; + Bar bar; + myQueue.enqueue_with( [&bar]( Foo& dest ) { dest = bar; } ); + \endcode + */ + template + bool enqueue_with( Func f ) + { + scoped_node_ptr p( alloc_node()); + f( p->m_value ); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + + /// Enqueues data of type \ref value_type constructed from std::forward(args)... + template + bool emplace( Args&&... args ) + { + scoped_node_ptr p( alloc_node_move( std::forward( args )... )); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + + /// Synonym for \p enqueue() function + bool push( value_type const& val ) + { + return enqueue( val ); + } + + /// Synonym for \p enqueue() function + bool push( value_type&& val ) + { + return enqueue( std::move( val )); + } + + /// Synonym for \p enqueue_with() function + template + bool push_with( Func f ) + { + return enqueue_with( f ); + } + + /// Dequeues a value from the queue + /** + If queue is not empty, the function returns \p true, \p dest contains copy of + dequeued value. The assignment operator for type \ref value_type is invoked. + If queue is empty, the function returns \p false, \p dest is unchanged. + */ + bool dequeue( value_type& dest ) + { + return dequeue_with( [&dest]( value_type& src ) { + // TSan finds a race between this read of \p src and node_type constructor + // I think, it is wrong + CDS_TSAN_ANNOTATE_IGNORE_READS_BEGIN; + dest = std::move( src ); + CDS_TSAN_ANNOTATE_IGNORE_READS_END; + }); + } + + /// Dequeues a value using a functor + /** + \p Func is a functor called to copy dequeued value. 
+ The functor takes one argument - a reference to removed node: + \code + cds:container::MSQueue< cds::gc::HP, Foo > myQueue; + Bar bar; + myQueue.dequeue_with( [&bar]( Foo& src ) { bar = std::move( src );}); + \endcode + The functor is called only if the queue is not empty. + */ + template + bool dequeue_with( Func f ) + { + typename base_class::dequeue_result res; + if ( base_class::do_dequeue( res )) { + f( node_traits::to_value_ptr( *res.pNext )->m_value ); + base_class::dispose_result( res ); + return true; + } + return false; + } + + /// Synonym for \p dequeue() function + bool pop( value_type& dest ) + { + return dequeue( dest ); + } + + /// Synonym for \p dequeue_with() function + template + bool pop_with( Func f ) + { + return dequeue_with( f ); + } + + /// Clear the queue + /** + The function repeatedly calls \ref dequeue until it returns \p nullptr. + */ + void clear() + { + base_class::clear(); + } + + /// Checks if the queue is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns queue's item count (see \ref intrusive::MSQueue::size for explanation) + /** \copydetails cds::intrusive::MSQueue::size() + */ + size_t size() const + { + return base_class::size(); + } + + /// Returns reference to internal statistics + const stat& statistics() const + { + return base_class::statistics(); + } + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_MSQUEUE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/optimistic_queue.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/optimistic_queue.h new file mode 100644 index 0000000..6dcc0d0 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/optimistic_queue.h @@ -0,0 +1,432 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: 
http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_CONTAINER_OPTIMISTIC_QUEUE_H +#define CDSLIB_CONTAINER_OPTIMISTIC_QUEUE_H + +#include +#include +#include + +namespace cds { namespace container { + + /// OptimisticQueue related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace optimistic_queue { + /// Internal statistics + template ::counter_type > + using stat = cds::intrusive::optimistic_queue::stat< Counter >; + + /// Dummy internal statistics + typedef cds::intrusive::optimistic_queue::empty_stat empty_stat; + + /// MSQueue default type traits + struct traits + { + /// Node allocator + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// Back-off strategy + typedef cds::backoff::empty back_off; + + /// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter to enable item counting + typedef atomicity::empty_item_counter item_counter; + + /// Internal statistics (by default, disabled) + /** + Possible option value are: \p optimistic_queue::stat, \p optimistic_queue::empty_stat (the default), + user-provided class that supports \p %optimistic_queue::stat interface. + */ + typedef optimistic_queue::empty_stat stat; + + /// C++ memory ordering model + /** + Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + */ + typedef opt::v::relaxed_ordering memory_model; + + /// Padding for internal critical atomic data. Default is \p opt::cache_line_padding + enum { padding = opt::cache_line_padding }; + }; + + /// Metafunction converting option list to \p msqueue::traits + /** + Supported \p Options are: + - \p opt::allocator - allocator (like \p std::allocator) used for allocating queue nodes. Default is \ref CDS_DEFAULT_ALLOCATOR + - \p opt::back_off - back-off strategy used, default is \p cds::backoff::empty. + - \p opt::item_counter - the type of item counting feature. 
Default is \p cds::atomicity::empty_item_counter (item counting disabled) + To enable item counting use \p cds::atomicity::item_counter + - \p opt::stat - the type to gather internal statistics. + Possible statistics types are: \p optimistic_queue::stat, \p optimistic_queue::empty_stat, + user-provided class that supports \p %optimistic_queue::stat interface. + Default is \p %optimistic_queue::empty_stat. + - \p opt::padding - padding for internal critical atomic data. Default is \p opt::cache_line_padding + - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + + Example: declare \p OptimisticQueue with item counting and internal statistics + \code + typedef cds::container::OptimisticQueue< cds::gc::HP, Foo, + typename cds::container::optimistic_queue::make_traits< + cds::opt::item_counter< cds::atomicity::item_counter >, + cds::opt::stat< cds::container::optimistic_queue::stat<> > + >::type + > myQueue; + \endcode + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + , Options... + >::type type; +# endif + }; + } // namespace optimistic_queue + + //@cond + namespace details { + template + struct make_optimistic_queue + { + typedef GC gc; + typedef T value_type; + typedef Traits traits; + + struct node_type: public cds::intrusive::optimistic_queue::node< gc > + { + value_type m_value; + + node_type( value_type const& val ) + : m_value( val ) + {} + + template + node_type( Args&&... args ) + : m_value( std::forward(args)...) 
+ {} + }; + + typedef typename traits::allocator::template rebind::other allocator_type; + typedef cds::details::Allocator< node_type, allocator_type > cxx_allocator; + + struct node_deallocator + { + void operator ()( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + }; + + struct intrusive_traits : public traits + { + typedef cds::intrusive::optimistic_queue::base_hook< opt::gc > hook; + typedef node_deallocator disposer; + static constexpr const opt::link_check_type link_checker = cds::intrusive::optimistic_queue::traits::link_checker; + }; + + typedef intrusive::OptimisticQueue< gc, node_type, intrusive_traits > type; + }; + } // namespace details + //@endcond + + /// Optimistic queue + /** @ingroup cds_nonintrusive_queue + Implementation of Ladan-Mozes & Shavit optimistic queue algorithm. + - [2008] Edya Ladan-Mozes, Nir Shavit "An Optimistic Approach to Lock-Free FIFO Queues" + + Template arguments: + - \p GC - garbage collector type: \p gc::HP, \p gc::DHP. + - \p T - type of values to be stored in the queue + - \p Traits - queue traits, default is \p optimistic_queue::traits. 
You can use \p optimistic_queue::make_traits + metafunction to make your traits or just derive your traits from \p %optimistic_queue::traits: + \code + struct myTraits: public cds::container::optimistic_queue::traits { + typedef cds::intrusive::optimistic_queue::stat<> stat; + typedef cds::atomicity::item_counter item_counter; + }; + typedef cds::container::OptimisticQueue< cds::gc::HP, Foo, myTraits > myQueue; + + // Equivalent make_traits example: + typedef cds::container::OptimisticQueue< cds::gc::HP, Foo, + typename cds::container::optimistic_queue::make_traits< + cds::opt::stat< cds::container::optimistic_queue::stat<> >, + cds::opt::item_counter< cds::atomicity::item_counter > + >::type + > myQueue; + \endcode + */ + template + class OptimisticQueue: +#ifdef CDS_DOXYGEN_INVOKED + private intrusive::OptimisticQueue< GC, cds::intrusive::optimistic_queue::node< T >, Traits > +#else + private details::make_optimistic_queue< GC, T, Traits >::type +#endif + { + //@cond + typedef details::make_optimistic_queue< GC, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + /// Rebind template arguments + template + struct rebind { + typedef OptimisticQueue< GC2, T2, Traits2 > other ; ///< Rebinding result + }; + + public: + typedef GC gc; ///< Garbage collector + typedef T value_type; ///< Value type to be stored in the queue + typedef Traits traits; ///< Queue traits + + typedef typename base_class::back_off back_off; ///< Back-off strategy used + typedef typename maker::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the nodes + typedef typename base_class::item_counter item_counter; ///< Item counting policy used + typedef typename base_class::stat stat; ///< Internal statistics policy used + typedef typename base_class::memory_model memory_model; ///< Memory ordering. 
See \p cds::opt::memory_model option + + static constexpr const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; ///< Count of hazard pointer required for the algorithm + + protected: + //@cond + typedef typename maker::node_type node_type; ///< queue node type (derived from intrusive::optimistic_queue::node) + typedef typename maker::cxx_allocator cxx_allocator; + typedef typename maker::node_deallocator node_deallocator; // deallocate node + typedef typename base_class::node_traits node_traits; + //@endcond + + protected: + ///@cond + static node_type * alloc_node() + { + return cxx_allocator().New(); + } + static node_type * alloc_node( const value_type& val ) + { + return cxx_allocator().New( val ); + } + template + static node_type * alloc_node_move( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward( args )... ); + } + static void free_node( node_type * p ) + { + node_deallocator()( p ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + //@endcond + + public: + /// Initializes empty queue + OptimisticQueue() + {} + + /// Destructor clears the queue + ~OptimisticQueue() + {} + + /// Enqueues \p val value into the queue. + /** + The function makes queue node in dynamic memory calling copy constructor for \p val + and then it calls \p intrusive::OptimisticQueue::enqueue. + Returns \p true if success, \p false otherwise. 
+ */ + bool enqueue( const value_type& val ) + { + scoped_node_ptr p( alloc_node(val)); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + + /// Enqueues \p val value into the queue, move semntics + bool enqueue( value_type&& val ) + { + scoped_node_ptr p( alloc_node_move( std::move( val ))); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + + /// Enqueues \p data to queue using a functor + /** + \p Func is a functor called to create node. + The functor \p f takes one argument - a reference to a new node of type \ref value_type : + \code + cds::container::OptimisticQueue< cds::gc::HP, Foo > myQueue; + Bar bar; + myQueue.enqueue_with( [&bar]( Foo& dest ) { dest = bar; } ); + \endcode + */ + template + bool enqueue_with( Func f ) + { + scoped_node_ptr p( alloc_node()); + f( p->m_value ); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + + /// Enqueues data of type \ref value_type constructed with std::forward(args)... + template + bool emplace( Args&&... args ) + { + scoped_node_ptr p( alloc_node_move( std::forward(args)... )); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + + /// Synonym for \p enqueue( const value_type& ) function + bool push( const value_type& val ) + { + return enqueue( val ); + } + + /// Synonym for \p enqueue( value_type&& ) function + bool push( value_type&& val ) + { + return enqueue( std::move( val )); + } + + /// Synonym for \p enqueue_with() function + template + bool push_with( Func f ) + { + return enqueue_with( f ); + } + + /// Dequeues a value from the queue + /** + If queue is not empty, the function returns \p true, \p dest contains copy of + dequeued value. The assignment operator for type \p value_type is invoked. + + If queue is empty, the function returns \p false, \p dest is unchanged. 
+ */ + bool dequeue( value_type& dest ) + { + return dequeue_with( [&dest]( value_type& src ) { dest = std::move( src ); }); + } + + /// Dequeues a value using a functor + /** + \p Func is a functor called to copy dequeued value. + The functor takes one argument - a reference to removed node: + \code + cds:container::OptimisticQueue< cds::gc::HP, Foo > myQueue; + Bar bar; + myQueue.dequeue_with( [&bar]( Foo& src ) { bar = std::move( src );}); + \endcode + The functor is called only if the queue is not empty. + */ + template + bool dequeue_with( Func f ) + { + typename base_class::dequeue_result res; + if ( base_class::do_dequeue( res )) { + f( node_traits::to_value_ptr( *res.pNext )->m_value ); + + base_class::dispose_result( res ); + + return true; + } + return false; + } + + /// Synonym for \ref dequeue() function + bool pop( value_type& dest ) + { + return dequeue( dest ); + } + + /// Synonym for template version of \p dequeue_with() function + template + bool pop_with( Func f ) + { + return dequeue_with( f ); + } + + /// Checks if the queue is empty + bool empty() const + { + return base_class::empty(); + } + + /// Clear the queue + /** + The function repeatedly calls \ref dequeue until it returns \p nullptr. 
+ */ + void clear() + { + base_class::clear(); + } + + /// Returns queue's item count + /** \copydetails cds::intrusive::OptimisticQueue::size() + */ + size_t size() const + { + return base_class::size(); + } + + /// Returns reference to internal statistics + const stat& statistics() const + { + return base_class::statistics(); + } + }; + +}} // namespace cds::container + +#endif //#ifndef CDSLIB_CONTAINER_OPTIMISTIC_QUEUE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/rwqueue.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/rwqueue.h new file mode 100644 index 0000000..9dd8307 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/rwqueue.h @@ -0,0 +1,415 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_RWQUEUE_H +#define CDSLIB_CONTAINER_RWQUEUE_H + +#include // unique_lock +#include +#include +#include + +namespace cds { namespace container { + /// RWQueue related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace rwqueue { + /// RWQueue default type traits + struct traits + { + /// Lock policy + typedef cds::sync::spin lock_type; + + /// Node allocator + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter to enable item counting + typedef cds::atomicity::empty_item_counter item_counter; + + /// Padding for internal critical atomic data. Default is \p opt::cache_line_padding + enum { padding = opt::cache_line_padding }; + }; + + /// Metafunction converting option list to \p rwqueue::traits + /** + Supported \p Options are: + - opt::lock_type - lock policy, default is \p cds::sync::spin. Any type satisfied \p Mutex C++ concept may be used. + - opt::allocator - allocator (like \p std::allocator) used for allocating queue nodes. Default is \ref CDS_DEFAULT_ALLOCATOR + - opt::item_counter - the type of item counting feature. Default is \p cds::atomicity::empty_item_counter (item counting disabled) + To enable item counting use \p cds::atomicity::item_counter. + - \p opt::padding - padding for internal critical data. 
Default is \p opt::cache_line_padding + + Example: declare mutex-based \p %RWQueue with item counting + \code + typedef cds::container::RWQueue< Foo, + typename cds::container::rwqueue::make_traits< + cds::opt::item_counter< cds::atomicity::item_counter >, + cds::opt::lock_type< std::mutex > + >::type + > myQueue; + \endcode + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + , Options... + >::type type; +# endif + }; + + } // namespace rwqueue + + /// Michael & Scott blocking queue with fine-grained synchronization schema + /** @ingroup cds_nonintrusive_queue + The queue has two different locks: one for reading and one for writing. + Therefore, one writer and one reader can simultaneously access to the queue. + The queue does not require any garbage collector. + + Source + - [1998] Maged Michael, Michael Scott "Simple, fast, and practical non-blocking + and blocking concurrent queue algorithms" + + Template arguments + - \p T - value type to be stored in the queue + - \p Traits - queue traits, default is \p rwqueue::traits. 
You can use \p rwqueue::make_traits + metafunction to make your traits or just derive your traits from \p %rwqueue::traits: + \code + struct myTraits: public cds::container::rwqueue::traits { + typedef cds::atomicity::item_counter item_counter; + }; + typedef cds::container::RWQueue< Foo, myTraits > myQueue; + + // Equivalent make_traits example: + typedef cds::container::RWQueue< Foo, + typename cds::container::rwqueue::make_traits< + cds::opt::item_counter< cds::atomicity::item_counter > + >::type + > myQueue; + \endcode + */ + template + class RWQueue + { + public: + /// Rebind template arguments + template + struct rebind { + typedef RWQueue< T2, Traits2 > other ; ///< Rebinding result + }; + + public: + typedef T value_type; ///< Type of value to be stored in the queue + typedef Traits traits; ///< Queue traits + + typedef typename traits::lock_type lock_type; ///< Locking primitive + typedef typename traits::item_counter item_counter; ///< Item counting policy used + + protected: + //@cond + /// Node type + struct node_type + { + atomics::atomic< node_type *> m_pNext; ///< Pointer to the next node in the queue + value_type m_value; ///< Value stored in the node + + node_type( value_type const& v ) + : m_pNext( nullptr ) + , m_value(v) + {} + + node_type() + : m_pNext( nullptr ) + {} + + template + node_type( Args&&... args ) + : m_pNext( nullptr ) + , m_value( std::forward(args)...) 
+ {} + }; + //@endcond + + public: + typedef typename traits::allocator::template rebind::other allocator_type; ///< Allocator type used for allocate/deallocate the queue nodes + + protected: + //@cond + typedef std::unique_lock scoped_lock; + typedef cds::details::Allocator< node_type, allocator_type > node_allocator; + + struct head_type { + mutable lock_type lock; + node_type * ptr; + }; + + head_type m_Head; + typename opt::details::apply_padding< head_type, traits::padding >::padding_type pad_; + head_type m_Tail; + + item_counter m_ItemCounter; + //@endcond + + protected: + //@cond + static node_type * alloc_node() + { + return node_allocator().New(); + } + + static node_type * alloc_node( T const& data ) + { + return node_allocator().New( data ); + } + + template + static node_type * alloc_node_move( Args&&... args ) + { + return node_allocator().MoveNew( std::forward( args )... ); + } + + static void free_node( node_type * pNode ) + { + node_allocator().Delete( pNode ); + } + + bool enqueue_node( node_type * p ) + { + assert( p != nullptr ); + { + scoped_lock lock( m_Tail.lock ); + m_Tail.ptr->m_pNext.store( p, atomics::memory_order_release ); + m_Tail.ptr = p; + } + ++m_ItemCounter; + return true; + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + //@endcond + + public: + /// Makes empty queue + RWQueue() + { + node_type * pNode = alloc_node(); + m_Head.ptr = + m_Tail.ptr = pNode; + } + + /// Destructor clears queue + ~RWQueue() + { + clear(); + assert( m_Head.ptr == m_Tail.ptr ); + free_node( m_Head.ptr ); + } + + /// Enqueues \p data. 
Always return \a true + bool enqueue( value_type const& data ) + { + scoped_node_ptr p( alloc_node( data )); + if ( enqueue_node( p.get())) { + p.release(); + return true; + } + return false; + } + + /// Enqueues \p data, move semantics + bool enqueue( value_type&& data ) + { + scoped_node_ptr p( alloc_node_move( std::move( data ))); + if ( enqueue_node( p.get())) { + p.release(); + return true; + } + return false; + } + + /// Enqueues \p data to the queue using a functor + /** + \p Func is a functor called to create node. + The functor \p f takes one argument - a reference to a new node of type \ref value_type : + \code + cds::container::RWQueue< cds::gc::HP, Foo > myQueue; + Bar bar; + myQueue.enqueue_with( [&bar]( Foo& dest ) { dest = bar; } ); + \endcode + */ + template + bool enqueue_with( Func f ) + { + scoped_node_ptr p( alloc_node()); + f( p->m_value ); + if ( enqueue_node( p.get())) { + p.release(); + return true; + } + return false; + } + + /// Enqueues data of type \ref value_type constructed with std::forward(args)... + template + bool emplace( Args&&... args ) + { + scoped_node_ptr p( alloc_node_move( std::forward(args)... )); + if ( enqueue_node( p.get())) { + p.release(); + return true; + } + return false; + } + + /// Synonym for \p enqueue( value_type const& ) function + bool push( value_type const& val ) + { + return enqueue( val ); + } + + /// Synonym for \p enqueue( value_type&& ) function + bool push( value_type&& val ) + { + return enqueue( std::move( val )); + } + + /// Synonym for \p enqueue_with() function + template + bool push_with( Func f ) + { + return enqueue_with( f ); + } + + /// Dequeues a value to \p dest. + /** + If queue is empty returns \a false, \p dest can be corrupted. 
+ If queue is not empty returns \a true, \p dest contains the value dequeued + */ + bool dequeue( value_type& dest ) + { + return dequeue_with( [&dest]( value_type& src ) { dest = std::move( src ); }); + } + + /// Dequeues a value using a functor + /** + \p Func is a functor called to copy dequeued value. + The functor takes one argument - a reference to removed node: + \code + cds:container::RWQueue< cds::gc::HP, Foo > myQueue; + Bar bar; + myQueue.dequeue_with( [&bar]( Foo& src ) { bar = std::move( src );}); + \endcode + The functor is called only if the queue is not empty. + */ + template + bool dequeue_with( Func f ) + { + node_type * pNode; + { + scoped_lock lock( m_Head.lock ); + pNode = m_Head.ptr; + node_type * pNewHead = pNode->m_pNext.load( atomics::memory_order_acquire ); + if ( pNewHead == nullptr ) + return false; + f( pNewHead->m_value ); + m_Head.ptr = pNewHead; + } // unlock here + --m_ItemCounter; + free_node( pNode ); + return true; + } + + /// Synonym for \p dequeue() function + bool pop( value_type& dest ) + { + return dequeue( dest ); + } + + /// Synonym for \p dequeue_with() function + template + bool pop_with( Func f ) + { + return dequeue_with( f ); + } + + /// Checks if queue is empty + bool empty() const + { + scoped_lock lock( m_Head.lock ); + return m_Head.ptr->m_pNext.load( atomics::memory_order_relaxed ) == nullptr; + } + + /// Clears queue + void clear() + { + scoped_lock lockR( m_Head.lock ); + scoped_lock lockW( m_Tail.lock ); + while ( m_Head.ptr->m_pNext.load( atomics::memory_order_relaxed ) != nullptr ) { + node_type * pHead = m_Head.ptr; + m_Head.ptr = m_Head.ptr->m_pNext.load( atomics::memory_order_relaxed ); + free_node( pHead ); + } + m_ItemCounter.reset(); + } + + /// Returns queue's item count + /** + The value returned depends on \p rwqueue::traits::item_counter. For \p atomicity::empty_item_counter, + this function always returns 0. 
+ + @note Even if you use real item counter and it returns 0, this fact is not mean that the queue + is empty. To check queue emptyness use \p empty() method. + */ + size_t size() const + { + return m_ItemCounter.value(); + } + + //@cond + /// The class has no internal statistics. For test consistency only + std::nullptr_t statistics() const + { + return nullptr; + } + //@endcond + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_RWQUEUE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/segmented_queue.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/segmented_queue.h new file mode 100644 index 0000000..192505e --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/segmented_queue.h @@ -0,0 +1,442 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_SEGMENTED_QUEUE_H +#define CDSLIB_CONTAINER_SEGMENTED_QUEUE_H + +#include +#include // ref +#include +#include + +namespace cds { namespace container { + + /// SegmentedQueue -related declarations + namespace segmented_queue { + +# ifdef CDS_DOXYGEN_INVOKED + /// SegmentedQueue internal statistics + typedef cds::intrusive::segmented_queue::stat stat; +# else + using cds::intrusive::segmented_queue::stat; +# endif + + /// SegmentedQueue empty internal statistics (no overhead) + typedef cds::intrusive::segmented_queue::empty_stat empty_stat; + + /// SegmentedQueue default type traits + struct traits { + + /// Item allocator. Default is \ref CDS_DEFAULT_ALLOCATOR + typedef CDS_DEFAULT_ALLOCATOR node_allocator; + + /// Item counter, default is atomicity::item_counter + /** + The item counting is an essential part of segmented queue algorithm. + The \p empty() member function is based on checking size() == 0. + Therefore, dummy item counter like atomicity::empty_item_counter is not the proper counter. + */ + typedef atomicity::item_counter item_counter; + + /// Internal statistics, possible predefined types are \ref stat, \ref empty_stat (the default) + typedef segmented_queue::empty_stat stat; + + /// Memory model, default is opt::v::relaxed_ordering. 
See cds::opt::memory_model for the full list of possible types + typedef opt::v::relaxed_ordering memory_model; + + /// Alignment of critical data, default is cache line alignment. See cds::opt::alignment option specification + enum { alignment = opt::cache_line_alignment }; + + /// Padding of segment data, default is no special padding + /** + The segment is just an array of atomic data pointers, + so, the high load leads to false sharing and performance degradation. + A padding of segment data can eliminate false sharing issue. + On the other hand, the padding leads to increase segment size. + */ + enum { padding = cds::intrusive::segmented_queue::traits::padding }; + + /// Segment allocator. Default is \ref CDS_DEFAULT_ALLOCATOR + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// Lock type used to maintain an internal list of allocated segments + typedef cds::sync::spin lock_type; + + /// Random \ref cds::opt::permutation_generator "permutation generator" for sequence [0, quasi_factor) + typedef cds::opt::v::random2_permutation permutation_generator; + }; + + /// Metafunction converting option list to traits for SegmentedQueue + /** + The metafunction can be useful if a few fields in \p segmented_queue::traits should be changed. + For example: + \code + typedef cds::container::segmented_queue::make_traits< + cds::opt::item_counter< cds::atomicity::item_counter > + >::type my_segmented_queue_traits; + \endcode + This code creates \p %SegmentedQueue type traits with item counting feature, + all other \p segmented_queue::traits members left unchanged. + + \p Options are: + - \p opt::node_allocator - node allocator. + - \p opt::stat - internal statistics, possible type: \p segmented_queue::stat, \p segmented_queue::empty_stat (the default) + - \p opt::item_counter - item counting feature. Note that \p atomicity::empty_item_counetr is not suitable + for segmented queue. + - \p opt::memory_model - memory model, default is \p opt::v::relaxed_ordering. 
+ See option description for the full list of possible models + - \p opt::alignment - the alignment of critical data, see option description for explanation + - \p opt::padding - the padding of segment data, default no special padding. + See \p traits::padding for explanation. + - \p opt::allocator - the allocator used to maintain segments. + - \p opt::lock_type - a mutual exclusion lock type used to maintain internal list of allocated + segments. Default is \p cds::opt::Spin, \p std::mutex is also suitable. + - \p opt::permutation_generator - a random permutation generator for sequence [0, quasi_factor), + default is \p cds::opt::v::random2_permutation + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + ,Options... + >::type type; +# endif + }; + + } // namespace segmented_queue + + //@cond + namespace details { + + template + struct make_segmented_queue + { + typedef GC gc; + typedef T value_type; + typedef Traits original_type_traits; + + typedef cds::details::Allocator< T, typename original_type_traits::node_allocator > cxx_node_allocator; + struct node_disposer { + void operator()( T * p ) + { + cxx_node_allocator().Delete( p ); + } + }; + + struct intrusive_type_traits: public original_type_traits + { + typedef node_disposer disposer; + }; + + typedef cds::intrusive::SegmentedQueue< gc, value_type, intrusive_type_traits > type; + }; + + } // namespace details + //@endcond + + /// Segmented queue + /** @ingroup cds_nonintrusive_queue + + The queue is based on work + - [2010] Afek, Korland, Yanovsky "Quasi-Linearizability: relaxed consistency for improved concurrency" + + In this paper the authors offer a relaxed version of linearizability, so-called quasi-linearizability, + that preserves some of the intuition, provides a flexible way to control the level of 
relaxation + and supports th implementation of more concurrent and scalable data structure. + Intuitively, the linearizability requires each run to be equivalent in some sense to a serial run + of the algorithm. This equivalence to some serial run imposes strong synchronization requirements + that in many cases results in limited scalability and synchronization bottleneck. + + The general idea is that the queue maintains a linked list of segments, each segment is an array of + nodes in the size of the quasi factor, and each node has a deleted boolean marker, which states + if it has been dequeued. Each producer iterates over last segment in the linked list in some random + permutation order. Whet it finds an empty cell it performs a CAS operation attempting to enqueue its + new element. In case the entire segment has been scanned and no available cell is found (implying + that the segment is full), then it attempts to add a new segment to the list. + + The dequeue operation is similar: the consumer iterates over the first segment in the linked list + in some random permutation order. When it finds an item which has not yet been dequeued, it performs + CAS on its deleted marker in order to "delete" it, if succeeded this item is considered dequeued. + In case the entire segment was scanned and all the nodes have already been dequeued (implying that + the segment is empty), then it attempts to remove this segment from the linked list and starts + the same process on the next segment. If there is no next segment, the queue is considered empty. + + Based on the fact that most of the time threads do not add or remove segments, most of the work + is done in parallel on different cells in the segments. This ensures a controlled contention + depending on the segment size, which is quasi factor. + + The segmented queue is an unfair queue since it violates the strong FIFO order but no more than + quasi factor. 
It means that the consumer dequeues any item from the current first segment. + + Template parameters: + - \p GC - a garbage collector, possible types are cds::gc::HP, cds::gc::DHP + - \p T - the type of values stored in the queue + - \p Traits - queue type traits, default is \p segmented_queue::traits. + \p segmented_queue::make_traits metafunction can be used to construct your + type traits. + */ + template + class SegmentedQueue: +#ifdef CDS_DOXYGEN_INVOKED + public cds::intrusive::SegmentedQueue< GC, T, Traits > +#else + public details::make_segmented_queue< GC, T, Traits >::type +#endif + { + //@cond + typedef details::make_segmented_queue< GC, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + public: + typedef GC gc; ///< Garbage collector + typedef T value_type; ///< type of the value stored in the queue + typedef Traits traits; ///< Queue traits + + typedef typename traits::node_allocator node_allocator; ///< Node allocator + typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option + typedef typename base_class::item_counter item_counter; ///< Item counting policy, see cds::opt::item_counter option setter + typedef typename base_class::stat stat ; ///< Internal statistics policy + typedef typename base_class::lock_type lock_type ; ///< Type of mutex for maintaining an internal list of allocated segments. 
+ typedef typename base_class::permutation_generator permutation_generator; ///< Random permutation generator for sequence [0, quasi-factor) + + static const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount ; ///< Count of hazard pointer required for the algorithm + + protected: + //@cond + typedef typename maker::cxx_node_allocator cxx_node_allocator; + typedef std::unique_ptr< value_type, typename maker::node_disposer > scoped_node_ptr; + + static value_type * alloc_node( value_type const& v ) + { + return cxx_node_allocator().New( v ); + } + + static value_type * alloc_node() + { + return cxx_node_allocator().New(); + } + + template + static value_type * alloc_node_move( Args&&... args ) + { + return cxx_node_allocator().MoveNew( std::forward( args )... ); + } + //@endcond + + public: + /// Initializes the empty queue + SegmentedQueue( + size_t nQuasiFactor ///< Quasi factor. If it is not a power of 2 it is rounded up to nearest power of 2. Minimum is 2. + ) + : base_class( nQuasiFactor ) + {} + + /// Clears the queue and deletes all internal data + ~SegmentedQueue() + {} + + /// Inserts a new element at last segment of the queue + /** + The function makes queue node in dynamic memory calling copy constructor for \p val + and then it calls intrusive::SEgmentedQueue::enqueue. + Returns \p true if success, \p false otherwise. + */ + bool enqueue( value_type const& val ) + { + scoped_node_ptr p( alloc_node(val)); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + + /// Inserts a new element at last segment of the queue, move semantics + bool enqueue( value_type&& val ) + { + scoped_node_ptr p( alloc_node_move( std::move( val ))); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + + /// Enqueues data to the queue using a functor + /** + \p Func is a functor called to create node. 
+ The functor \p f takes one argument - a reference to a new node of type \ref value_type : + \code + cds::container::SegmentedQueue< cds::gc::HP, Foo > myQueue; + Bar bar; + myQueue.enqueue_with( [&bar]( Foo& dest ) { dest = bar; } ); + \endcode + */ + template + bool enqueue_with( Func f ) + { + scoped_node_ptr p( alloc_node()); + f( *p ); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + + + /// Synonym for \p enqueue( value_type const& ) member function + bool push( value_type const& val ) + { + return enqueue( val ); + } + + /// Synonym for \p enqueue( value_type&& ) member function + bool push( value_type&& val ) + { + return enqueue( std::move( val )); + } + + /// Synonym for \p enqueue_with() member function + template + bool push_with( Func f ) + { + return enqueue_with( f ); + } + + /// Enqueues data of type \ref value_type constructed with std::forward(args)... + template + bool emplace( Args&&... args ) + { + scoped_node_ptr p( alloc_node_move( std::forward(args)... )); + if ( base_class::enqueue( *p )) { + p.release(); + return true; + } + return false; + } + + /// Dequeues a value from the queue + /** + If queue is not empty, the function returns \p true, \p dest contains copy of + dequeued value. The assignment operator for type \ref value_type is invoked. + If queue is empty, the function returns \p false, \p dest is unchanged. + */ + bool dequeue( value_type& dest ) + { + return dequeue_with( [&dest]( value_type& src ) { dest = std::move( src );}); + } + + /// Dequeues a value using a functor + /** + \p Func is a functor called to copy dequeued value. + The functor takes one argument - a reference to removed node: + \code + cds:container::MSQueue< cds::gc::HP, Foo > myQueue; + Bar bar; + myQueue.dequeue_with( [&bar]( Foo& src ) { bar = std::move( src );}); + \endcode + The functor is called only if the queue is not empty. 
+ */ + template + bool dequeue_with( Func f ) + { + value_type * p = base_class::dequeue(); + if ( p ) { + f( *p ); + gc::template retire< typename maker::node_disposer >( p ); + return true; + } + return false; + } + + /// Synonym for \p dequeue_with() function + template + bool pop_with( Func f ) + { + return dequeue_with( f ); + } + + /// Synonym for \p dequeue() function + bool pop( value_type& dest ) + { + return dequeue( dest ); + } + + /// Checks if the queue is empty + /** + The original segmented queue algorithm does not allow to check emptiness accurately + because \p empty() is unlinearizable. + This function tests queue's emptiness checking size() == 0, + so, the item counting feature is an essential part of queue's algorithm. + */ + bool empty() const + { + return base_class::empty(); + } + + /// Clear the queue + /** + The function repeatedly calls \p dequeue() until it returns \p nullptr. + The disposer specified in \p Traits template argument is called for each removed item. + */ + void clear() + { + base_class::clear(); + } + + /// Returns queue's item count + size_t size() const + { + return base_class::size(); + } + + /// Returns reference to internal statistics + /** + The type of internal statistics is specified by \p Traits template argument. 
+ */ + const stat& statistics() const + { + return base_class::statistics(); + } + + /// Returns quasi factor, a power-of-two number + size_t quasi_factor() const + { + return base_class::quasi_factor(); + } + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_SEGMENTED_QUEUE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_map_dhp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_map_dhp.h new file mode 100644 index 0000000..88a7b3d --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_map_dhp.h @@ -0,0 +1,39 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_SKIP_LIST_SET_DHP_H +#define CDSLIB_CONTAINER_SKIP_LIST_SET_DHP_H + +#include +#include +#include +#include + +#endif // #ifndef CDSLIB_CONTAINER_SKIP_LIST_SET_DHP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_map_hp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_map_hp.h new file mode 100644 index 0000000..95f5323 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_map_hp.h @@ -0,0 +1,39 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_SKIP_LIST_MAP_HP_H +#define CDSLIB_CONTAINER_SKIP_LIST_MAP_HP_H + +#include +#include +#include +#include + +#endif // #ifndef CDSLIB_CONTAINER_SKIP_LIST_MAP_HP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_map_nogc.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_map_nogc.h new file mode 100644 index 0000000..dcd1425 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_map_nogc.h @@ -0,0 +1,394 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_SKIP_LIST_MAP_NOGC_H +#define CDSLIB_CONTAINER_SKIP_LIST_MAP_NOGC_H + +#include + +namespace cds { namespace container { + //@cond + namespace skip_list { namespace details { + struct map_key_accessor + { + template + typename NodeType::stored_value_type::first_type const& operator()( NodeType const& node ) const + { + return node.m_Value.first; + } + }; + }} // namespace skip_list::details + //@endcond + + /// Lock-free skip-list map (template specialization for gc::nogc) + /** @ingroup cds_nonintrusive_map + \anchor cds_nonintrusive_SkipListMap_nogc + + This specialization is intended for so-called persistent usage when no item + reclamation may be performed. The class does not support deleting of map item. + See \ref cds_nonintrusive_SkipListMap_hp "SkipListMap" for detailed description. + + Template arguments: + - \p K - type of a key to be stored in the map. + - \p T - type of a value to be stored in the map. 
+ - \p Traits - map traits, default is \p skip_list::traits + It is possible to declare option-based list with \p cds::container::skip_list::make_traits + metafunction istead of \p Traits template argument. + */ + template < + typename Key, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = skip_list::traits +#else + typename Traits +#endif + > + class SkipListMap< cds::gc::nogc, Key, T, Traits >: +#ifdef CDS_DOXYGEN_INVOKED + protected SkipListSet< cds::gc::nogc, std::pair< Key const, T >, Traits > +#else + protected SkipListSet< + cds::gc::nogc + ,std::pair< Key const, T > + ,typename cds::opt::replace_key_accessor< Traits, skip_list::details::map_key_accessor >::type + > +#endif + { + //@cond + typedef SkipListSet< + cds::gc::nogc + ,std::pair< Key const, T > + ,typename cds::opt::replace_key_accessor< Traits, skip_list::details::map_key_accessor >::type + > base_class; + //@endcond + + public: + typedef cds::gc::nogc gc; ///< Garbage collector + typedef Key key_type; ///< Key type + typedef T mapped_type; ///< Mapped type + typedef std::pair< key_type const, mapped_type> value_type; ///< Key-value pair stored in the map + typedef Traits traits; ///< Options specified + + typedef typename base_class::back_off back_off; ///< Back-off strategy + typedef typename base_class::allocator_type allocator_type; ///< Allocator type used for allocate/deallocate the skip-list nodes + typedef typename base_class::item_counter item_counter; ///< Item counting policy + typedef typename base_class::key_comparator key_comparator; ///< key compare functor + typedef typename base_class::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option + typedef typename base_class::stat stat; ///< internal statistics type + typedef typename base_class::random_level_generator random_level_generator; ///< random level generator + + protected: + //@cond + typedef typename base_class::node_type node_type; + typedef typename base_class::node_allocator 
node_allocator; + //@endcond + + public: + /// Default constructor + SkipListMap() + : base_class() + {} + + /// Destructor clears the map + ~SkipListMap() + {} + + public: + ///@name Forward ordered iterators + //@{ + /// Forward iterator + /** + The forward iterator for a split-list has some features: + - it has no post-increment operator + - it depends on iterator of underlying \p OrderedList + */ + typedef typename base_class::iterator iterator; + + /// Const forward iterator + typedef typename base_class::const_iterator const_iterator; + + /// Returns a forward iterator addressing the first element in a map + /** + For empty set \code begin() == end() \endcode + */ + iterator begin() + { + return base_class::begin(); + } + + /// Returns an iterator that addresses the location succeeding the last element in a map + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the set. + For empty set \code begin() == end() \endcode + */ + iterator end() + { + return base_class::end(); + } + + /// Returns a forward const iterator addressing the first element in a map + const_iterator begin() const + { + return base_class::begin(); + } + + /// Returns a forward const iterator addressing the first element in a map + const_iterator cbegin() const + { + return base_class::cbegin(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a map + const_iterator end() const + { + return base_class::end(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a map + const_iterator cend() const + { + return base_class::cend(); + } + //@} + + public: + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from value of type \p K. 
+ In trivial case, \p K is equal to \ref key_type. + - The \ref mapped_type should be default-constructible. + + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert( K const& key ) + { + //TODO: pass arguments by reference (make_pair makes copy) + return base_class::insert( std::make_pair( key_type( key ), mapped_type())); + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from \p key of type \p K. + - The \ref mapped_type should be constructible from \p val of type \p V. + + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert( K const& key, V const& val ) + { + //TODO: pass arguments by reference (make_pair makes copy) + return base_class::insert( std::make_pair( key_type( key ), mapped_type( val ))); + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the map's item inserted. item.second is a reference to item's value that may be changed. + User-defined functor \p func should guarantee that during changing item's value no any other changes + could be made on this map's item by concurrent threads. + + The key_type should be constructible from value of type \p K. 
+ + The function allows to split creating of new item into three part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p f functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert_with( K const& key, Func func ) + { + iterator it = insert( key ); + if ( it != end()) + func( (*it)); + return it; + } + + /// For key \p key inserts data of type \p mapped_type created in-place from \p args + /** + \p key_type should be constructible from type \p K + + Returns \p true if inserting successful, \p false otherwise. + */ + template + iterator emplace( K&& key, Args&&... args ) + { + return base_class::emplace( key_type( std::forward( key )), mapped_type( std::forward(args)... )); + } + + /// UPdates data by \p key + /** + The operation inserts new item if \p key is not found in the map and \p bInsert is \p true. + Otherwise, if \p key is found, the function returns an iterator that points to item found. + + Returns std::pair where \p first is an iterator pointing to + item found or inserted or \p end() if \p key is not found and insertion is not allowed (\p bInsert is \p false), + \p second is \p true if new item has been added or \p false if the item already exists. 
+ */ + template + std::pair update( K const& key, bool bInsert = true ) + { + //TODO: pass arguments by reference (make_pair makes copy) + return base_class::update( std::make_pair( key_type( key ), mapped_type()), bInsert ); + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( K const& key ) + { + return update( key, true ); + } + //@endcond + + /// Checks whether the map contains \p key + /** + The function searches the item with key equal to \p key + and returns an iterator pointed to item found if the key is found, + and \ref end() otherwise + */ + template + iterator contains( K const& key ) + { + return base_class::contains( key ); + } + //@cond + + template + CDS_DEPRECATED("deprecated, use contains()") + iterator find( K const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the map contains \p key using \p pred predicate for searching + /** + The function is similar to contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + iterator contains( K const& key, Less pred ) const + { + return base_class::contains( key, pred ); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + iterator find_with( K const& key, Less pred ) const + { + return contains( key, pred ); + } + //@endcond + + /// Gets minimum key from the map + /** + If the map is empty the function returns \p nullptr + */ + value_type * get_min() const + { + return base_class::get_min(); + } + + /// Gets maximum key from the map + /** + The function returns \p nullptr if the map is empty + */ + value_type * get_max() const + { + return base_class::get_max(); + } + + /// Clears the map (not atomic) + /** + Finding and/or inserting is prohibited while clearing. + Otherwise an unpredictable result may be encountered. 
+ Thus, \p clear() may be used only for debugging purposes. + */ + void clear() + { + base_class::clear(); + } + + /// Checks if the map is empty + /** + Emptiness is checked by item counting: if item count is zero then the map is empty. + Thus, the correct item counting feature is an important part of Michael's map implementation. + */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the map + size_t size() const + { + return base_class::size(); + } + + /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32. + static constexpr unsigned int max_height() noexcept + { + return base_class::max_height(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + }; + +}} // namespace cds::container + + +#endif // #ifndef CDSLIB_CONTAINER_SKIP_LIST_MAP_NOGC_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_map_rcu.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_map_rcu.h new file mode 100644 index 0000000..0f61951 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_map_rcu.h @@ -0,0 +1,706 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_SKIP_LIST_MAP_RCU_H +#define CDSLIB_CONTAINER_SKIP_LIST_MAP_RCU_H + +#include +#include +#include + +namespace cds { namespace container { + + /// Lock-free skip-list map (template specialization for \ref cds_urcu_desc "RCU") + /** @ingroup cds_nonintrusive_map + \anchor cds_nonintrusive_SkipListMap_rcu + + The implementation of well-known probabilistic data structure called skip-list + invented by W.Pugh in his papers: + - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees + - [1990] W.Pugh A Skip List Cookbook + + A skip-list is a probabilistic data structure that provides expected logarithmic + time search without the need of rebalance. The skip-list is a collection of sorted + linked list. Nodes are ordered by key. Each node is linked into a subset of the lists. + Each list has a level, ranging from 0 to 32. The bottom-level list contains + all the nodes, and each higher-level list is a sublist of the lower-level lists. 
+ Each node is created with a random top level (with a random height), and belongs + to all lists up to that level. The probability that a node has the height 1 is 1/2. + The probability that a node has the height N is 1/2 ** N (more precisely, + the distribution depends on an random generator provided, but our generators + have this property). + + The lock-free variant of skip-list is implemented according to book + - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming", + chapter 14.4 "A Lock-Free Concurrent Skiplist" + + Template arguments: + - \p RCU - one of \ref cds_urcu_gc "RCU type". + - \p K - type of a key to be stored in the list. + - \p T - type of a value to be stored in the list. + - \p Traits - map traits, default is \p skip_list::traits. + It is possible to declare option-based list with \p cds::container::skip_list::make_traits metafunction + instead of \p Traits template argument. + + Like STL map class, \p %SkipListMap stores its key-value pair as std:pair< K const, T>. + + @note Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. + + Iterators + + The class supports a forward iterator (\ref iterator and \ref const_iterator). + The iteration is ordered. + You may iterate over skip-list set items only under RCU lock. + Only in this case the iterator is thread-safe since + while RCU is locked any set's item cannot be reclaimed. + + The requirement of RCU lock during iterating means that deletion of the elements (i.e. \ref erase) + is not possible. 
+ + @warning The iterator object cannot be passed between threads + + The iterator class supports the following minimalistic interface: + \code + struct iterator { + // Default ctor + iterator(); + + // Copy ctor + iterator( iterator const& s); + + value_type * operator ->() const; + value_type& operator *() const; + + // Pre-increment + iterator& operator ++(); + + // Copy assignment + iterator& operator = (const iterator& src); + + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced. + + */ + template < + typename RCU, + typename Key, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = skip_list::traits +#else + typename Traits +#endif + > + class SkipListMap< cds::urcu::gc< RCU >, Key, T, Traits >: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::SkipListSet< cds::urcu::gc< RCU >, std::pair, Traits > +#else + protected details::make_skip_list_map< cds::urcu::gc< RCU >, Key, T, Traits >::type +#endif + { + //@cond + typedef details::make_skip_list_map< cds::urcu::gc< RCU >, Key, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + public: + typedef cds::urcu::gc< RCU > gc; ///< Garbage collector used + typedef Key key_type; ///< Key type + typedef T mapped_type; ///< Mapped type +# ifdef CDS_DOXYGEN_INVOKED + typedef std::pair< K const, T> value_type; ///< Value type stored in the map +# else + typedef typename maker::value_type value_type; +# endif + typedef Traits traits; ///< Map traits + + typedef typename base_class::back_off back_off; ///< Back-off strategy used + typedef typename traits::allocator allocator_type; ///< Allocator type used for allocate/deallocate the skip-list nodes + typedef typename base_class::item_counter item_counter; ///< Item counting policy used + typedef typename maker::key_comparator key_comparator; ///< key comparison 
functor + typedef typename base_class::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option + typedef typename traits::random_level_generator random_level_generator; ///< random level generator + typedef typename traits::stat stat; ///< internal statistics type + + protected: + //@cond + typedef typename maker::node_type node_type; + typedef typename maker::node_allocator node_allocator; + + typedef std::unique_ptr< node_type, typename maker::node_deallocator > scoped_node_ptr; + //@endcond + + public: + typedef typename base_class::rcu_lock rcu_lock; ///< RCU scoped lock + /// Group of \p extract_xxx functions do not require external locking + static constexpr const bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; + + /// pointer to extracted node + using exempt_ptr = cds::urcu::exempt_ptr< gc, node_type, value_type, typename maker::intrusive_type_traits::disposer >; + + private: + //@cond + struct raw_ptr_converter + { + value_type * operator()( node_type * p ) const + { + return p ? 
&p->m_Value : nullptr; + } + + value_type& operator()( node_type& n ) const + { + return n.m_Value; + } + + value_type const& operator()( node_type const& n ) const + { + return n.m_Value; + } + }; + //@endcond + + public: + /// Result of \p get(), \p get_with() functions - pointer to the node found + typedef cds::urcu::raw_ptr_adaptor< value_type, typename base_class::raw_ptr, raw_ptr_converter > raw_ptr; + + protected: + //@cond + unsigned int random_level() + { + return base_class::random_level(); + } + //@endcond + + public: + /// Default ctor + SkipListMap() + : base_class() + {} + + /// Destructor destroys the set object + ~SkipListMap() + {} + + public: + ///@name Forward ordered iterators (thread-safe under RCU lock) + //@{ + /// Forward iterator + /** + The forward iterator has some features: + - it has no post-increment operator + - it depends on iterator of underlying \p OrderedList + + You may safely use iterators in multi-threaded environment only under RCU lock. + Otherwise, a crash is possible if another thread deletes the element the iterator points to. + */ + typedef skip_list::details::iterator< typename base_class::iterator > iterator; + + /// Const iterator type + typedef skip_list::details::iterator< typename base_class::const_iterator > const_iterator; + + /// Returns a forward iterator addressing the first element in a map + iterator begin() + { + return iterator( base_class::begin()); + } + + /// Returns a forward const iterator addressing the first element in a map + const_iterator begin() const + { + return cbegin(); + } + /// Returns a forward const iterator addressing the first element in a map + const_iterator cbegin() const + { + return const_iterator( base_class::cbegin()); + } + + /// Returns a forward iterator that addresses the location succeeding the last element in a map. 
+ iterator end() + { + return iterator( base_class::end()); + } + + /// Returns a forward const iterator that addresses the location succeeding the last element in a map. + const_iterator end() const + { + return cend(); + } + /// Returns a forward const iterator that addresses the location succeeding the last element in a map. + const_iterator cend() const + { + return const_iterator( base_class::cend()); + } + //@} + + public: + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the map. + + Preconditions: + - The \p key_type should be constructible from a value of type \p K. + In trivial case, \p K is equal to \p key_type. + - The \p mapped_type should be default-constructible. + + RCU \p synchronize method can be called. RCU should not be locked. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( K const& key ) + { + return insert_with( key, [](value_type&){} ); + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the map. + + Preconditions: + - The \p key_type should be constructible from \p key of type \p K. + - The \p value_type should be constructible from \p val of type \p V. + + RCU \p synchronize method can be called. RCU should not be locked. + + Returns \p true if \p val is inserted into the set, \p false otherwise. 
+ */ + template + bool insert( K const& key, V const& val ) + { + scoped_node_ptr pNode( node_allocator().New( random_level(), key, val )); + if ( base_class::insert( *pNode )) + { + pNode.release(); + return true; + } + return false; + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the map's item inserted: + - item.first is a const reference to item's key that cannot be changed. + - item.second is a reference to item's value that may be changed. + + The function allows to split creating of new item into three part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p value_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + RCU \p synchronize method can be called. RCU should not be locked. + */ + template + bool insert_with( const K& key, Func func ) + { + scoped_node_ptr pNode( node_allocator().New( random_level(), key )); + if ( base_class::insert( *pNode, [&func]( node_type& item ) { func( item.m_Value ); } )) { + pNode.release(); + return true; + } + return false; + } + + /// For key \p key inserts data of type \p value_type created in-place from \p args + /** + Returns \p true if inserting successful, \p false otherwise. + + RCU \p synchronize() method can be called. RCU should not be locked. + */ + template + bool emplace( K&& key, Args&&... args ) + { + scoped_node_ptr pNode( node_allocator().New( random_level(), std::forward(key), std::forward(args)... 
)); + if ( base_class::insert( *pNode )) { + pNode.release(); + return true; + } + return false; + } + + /// Updates data by \p key + /** + The operation performs inserting or changing data with lock-free manner. + + If the \p key not found in the map, then the new item created from \p key + is inserted into the map iff \p bInsert is \p true. + Otherwise, if \p key found, the functor \p func is called with item found. + The functor \p Func interface is: + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + where: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the map + + The functor may change any fields of \p item.second. + + RCU \p synchronize() method can be called. RCU should not be locked. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if new item has been added or \p false if the item with \p key + already exists. + + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + std::pair update( K const& key, Func func, bool bInsert = true ) + { + scoped_node_ptr pNode( node_allocator().New( random_level(), key )); + std::pair res = base_class::update( *pNode, + [&func](bool bNew, node_type& item, node_type const& ){ func( bNew, item.m_Value );}, + bInsert + ); + if ( res.first && res.second ) + pNode.release(); + return res; + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( K const& key, Func func ) + { + return update( key, func, true ); + } + //@endcond + + /// Delete \p key from the map + /**\anchor cds_nonintrusive_SkipListMap_rcu_erase_val + + RCU \p synchronize method can be called. RCU should not be locked. 
+ + Return \p true if \p key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + return base_class::erase(key); + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListMap_rcu_erase_val "erase(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::erase_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >()); + } + + /// Delete \p key from the map + /** \anchor cds_nonintrusive_SkipListMap_rcu_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type& item) { ... } + }; + \endcode + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key, Func f ) + { + return base_class::erase( key, [&f]( node_type& node) { f( node.m_Value ); } ); + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListMap_rcu_erase_func "erase(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. 
+ */ + template + bool erase_with( K const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::erase_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >(), + [&f]( node_type& node) { f( node.m_Value ); } ); + } + + /// Extracts the item from the map with specified \p key + /** \anchor cds_nonintrusive_SkipListMap_rcu_extract + The function searches an item with key equal to \p key in the map, + unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. + If the item is not found the function returns an empty \p exempt_ptr + + Note the compare functor from \p Traits class' template argument + should accept a parameter of type \p K that can be not the same as \p key_type. + + RCU \p synchronize() method can be called. RCU should NOT be locked. + + The function does not free the item found. + The item will be implicitly freed when the returned object is destroyed or when + its \p release() member function is called. + */ + template + exempt_ptr extract( K const& key ) + { + return exempt_ptr( base_class::do_extract( key )); + } + + /// Extracts the item from the map with comparing functor \p pred + /** + The function is an analog of \p extract(K const&) but \p pred predicate is used for key comparing. + \p Less has the semantics like \p std::less. + \p pred must imply the same element order as the comparator used for building the map. + */ + template + exempt_ptr extract_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return exempt_ptr( base_class::do_extract_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >())); + } + + /// Extracts an item with minimal key from the map + /** + The function searches an item with minimal key, unlinks it, + and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item. + If the skip-list is empty the function returns an empty \p exempt_ptr. 
+ + RCU \p synchronize method can be called. RCU should NOT be locked. + + The function does not free the item found. + The item will be implicitly freed when the returned object is destroyed or when + its \p release() member function is called. + */ + exempt_ptr extract_min() + { + return exempt_ptr( base_class::do_extract_min()); + } + + /// Extracts an item with maximal key from the map + /** + The function searches an item with maximal key, unlinks it from the set, + and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item. + If the skip-list is empty the function returns an empty \p exempt_ptr. + + RCU \p synchronize method can be called. RCU should NOT be locked. + + The function does not free the item found. + The item will be implicitly freed when the returned object is destroyed or when + its \p release() member function is called. + */ + exempt_ptr extract_max() + { + return exempt_ptr( base_class::do_extract_max()); + } + + /// Find the key \p key + /** \anchor cds_nonintrusive_SkipListMap_rcu_find_cfunc + + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + The functor may change \p item.second. + + The function applies RCU lock internally. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( K const& key, Func f ) + { + return base_class::find( key, [&f](node_type& item, K const& ) { f( item.m_Value );}); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListMap_rcu_find_cfunc "find(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. 
+ */ + template + bool find_with( K const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::find_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >(), + [&f](node_type& item, K const& ) { f( item.m_Value );}); + } + + /// Checks whether the map contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + The function applies RCU lock internally. + */ + template + bool contains( K const& key ) + { + return base_class::contains( key ); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( K const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the map contains \p key using \p pred predicate for searching + /** + The function is similar to contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool contains( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::contains( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( K const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Finds the key \p key and return the item found + /** \anchor cds_nonintrusive_SkipListMap_rcu_get + The function searches the item with key equal to \p key and returns a \p raw_ptr object pointing to an item found. + If \p key is not found it returns empty \p raw_ptr. + + Note the compare functor in \p Traits class' template argument + should accept a parameter of type \p K that can be not the same as \p key_type. + + RCU should be locked before call of this function. 
+ Returned item is valid only while RCU is locked: + \code + typedef cds::container::SkipListMap< cds::urcu::gc< cds::urcu::general_buffered<> >, int, foo, my_traits > skip_list; + skip_list theList; + // ... + typename skip_list::raw_ptr pVal; + { + // Lock RCU + skip_list::rcu_lock lock; + + pVal = theList.get( 5 ); + if ( pVal ) { + // Deal with pVal + //... + } + } + // You can manually release pVal after RCU-locked section + pVal.release(); + \endcode + */ + template + raw_ptr get( K const& key ) + { + return raw_ptr( base_class::get( key )); + } + + /// Finds the key \p key and return the item found + /** + The function is an analog of \ref cds_nonintrusive_SkipListMap_rcu_get "get(K const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref key_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the map. + */ + template + raw_ptr get_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return raw_ptr( base_class::get_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::key_accessor >())); + } + + /// Clears the map (not atomic) + void clear() + { + base_class::clear(); + } + + /// Checks if the map is empty + /** + Emptiness is checked by item counting: if item count is zero then the map is empty. 
+ */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the map + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + }; +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_SKIP_LIST_MAP_RCU_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_set_dhp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_set_dhp.h new file mode 100644 index 0000000..84512a7 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_set_dhp.h @@ -0,0 +1,39 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_SKIP_LIST_SET_DHP_H +#define CDSLIB_CONTAINER_SKIP_LIST_SET_DHP_H + +#include <cds/container/details/skip_list_base.h> +#include <cds/intrusive/skip_list_dhp.h> +#include <cds/container/details/make_skip_list_set.h> +#include <cds/container/impl/skip_list_set.h> + +#endif // #ifndef CDSLIB_CONTAINER_SKIP_LIST_SET_DHP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_set_hp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_set_hp.h new file mode 100644 index 0000000..23cdbc1 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_set_hp.h @@ -0,0 +1,39 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution.
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_SKIP_LIST_SET_HP_H +#define CDSLIB_CONTAINER_SKIP_LIST_SET_HP_H + +#include <cds/container/details/skip_list_base.h> +#include <cds/intrusive/skip_list_hp.h> +#include <cds/container/details/make_skip_list_set.h> +#include <cds/container/impl/skip_list_set.h> + +#endif // #ifndef CDSLIB_CONTAINER_SKIP_LIST_SET_HP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_set_nogc.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_set_nogc.h new file mode 100644 index 0000000..da898ef --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_set_nogc.h @@ -0,0 +1,444 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer.
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_SKIP_LIST_SET_NOGC_H +#define CDSLIB_CONTAINER_SKIP_LIST_SET_NOGC_H + +#include +#include +#include +#include +#include + +namespace cds { namespace container { + //@cond + namespace skip_list { namespace details { + struct set_key_accessor + { + template + typename NodeType::stored_value_type const& operator()( NodeType const& node ) const + { + return node.m_Value; + } + }; + }} // namespace skip_list::details + + namespace details { + template + struct make_skip_list_set_nogc + { + typedef cds::gc::nogc gc; + typedef T value_type; + typedef Traits traits; + + typedef cds::intrusive::skip_list::node< gc > intrusive_node_type; + struct node_type: public intrusive_node_type + { + typedef intrusive_node_type base_class; + typedef typename base_class::atomic_ptr atomic_ptr; + typedef atomic_ptr tower_item_type; + typedef value_type stored_value_type; + + value_type m_Value; + //atomic_ptr m_arrTower[] ; // allocated together with node_type in single memory block + 
+ template + node_type( unsigned int nHeight, atomic_ptr * pTower, Q const& v ) + : m_Value(v) + { + if ( nHeight > 1 ) { + new (pTower) atomic_ptr[ nHeight - 1 ]; + base_class::make_tower( nHeight, pTower ); + } + } + + template + node_type( unsigned int nHeight, atomic_ptr * pTower, Q&& q, Args&&... args ) + : m_Value( std::forward(q), std::forward(args)... ) + { + if ( nHeight > 1 ) { + new (pTower) atomic_ptr[ nHeight - 1 ]; + base_class::make_tower( nHeight, pTower ); + } + } + + node_type() = delete; // no default ctor + }; + + typedef skip_list::details::node_allocator< node_type, traits> node_allocator; + + struct node_deallocator { + void operator ()( node_type * pNode ) + { + node_allocator().Delete( pNode ); + } + }; + + typedef skip_list::details::dummy_node_builder dummy_node_builder; + + typedef typename traits::key_accessor key_accessor; + typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; + + /* + template + struct less_wrapper { + typedef compare_wrapper< node_type, cds::opt::details::make_comparator_from_less, key_accessor > type; + }; + */ + + typedef typename cds::intrusive::skip_list::make_traits< + cds::opt::type_traits< traits > + ,cds::intrusive::opt::hook< intrusive::skip_list::base_hook< cds::opt::gc< gc > > > + ,cds::intrusive::opt::disposer< node_deallocator > + ,cds::intrusive::skip_list::internal_node_builder< dummy_node_builder > + ,cds::opt::compare< cds::details::compare_wrapper< node_type, key_comparator, key_accessor > > + >::type intrusive_type_traits; + + typedef cds::intrusive::SkipListSet< gc, node_type, intrusive_type_traits> type; + }; + } // namespace details + //@endcond + + /// Lock-free skip-list set (template specialization for gc::nogc) + /** @ingroup cds_nonintrusive_set + \anchor cds_nonintrusive_SkipListSet_nogc + + This specialization is intended for so-called persistent usage when no item + reclamation may be performed. The class does not support deleting of list item. 
+ See \ref cds_nonintrusive_SkipListSet_hp "SkipListSet" for detailed description. + + Template arguments: + - \p T - type to be stored in the list. + - \p Traits - type traits. See skip_list::traits for explanation. + + It is possible to declare option-based list with cds::container::skip_list::make_traits metafunction instead of \p Traits template + argument. \p Options template arguments of cds::container::skip_list::make_traits metafunction are: + - opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the opt::less is used. + - opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that is no item counting. + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consistent memory model). + - skip_list::random_level_generator - random level generator. Can be \p skip_list::xor_shift, \p skip_list::turbo or + user-provided one. See skip_list::random_level_generator option description for explanation. + Default is \p skip_list::turbo32. + - opt::allocator - allocator for skip-list node. Default is \ref CDS_DEFAULT_ALLOCATOR. + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::Default is used. + - opt::stat - internal statistics.
Available types: skip_list::stat, skip_list::empty_stat (the default) + */ + template < + typename T, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = skip_list::traits +#else + class Traits +#endif + > + class SkipListSet< gc::nogc, T, Traits >: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::SkipListSet< cds::gc::nogc, T, Traits > +#else + protected details::make_skip_list_set_nogc< T, typename cds::opt::replace_key_accessor< Traits, skip_list::details::set_key_accessor >::type >::type +#endif + { + //@cond + typedef details::make_skip_list_set_nogc< T, typename cds::opt::replace_key_accessor< Traits, skip_list::details::set_key_accessor >::type > maker; + typedef typename maker::type base_class; + //@endcond + public: + typedef typename base_class::gc gc ; ///< Garbage collector used + typedef T value_type ; ///< Value type stored in the set + typedef Traits options ; ///< Options specified + + typedef typename base_class::back_off back_off ; ///< Back-off strategy used + typedef typename options::allocator allocator_type ; ///< Allocator type used for allocate/deallocate the skip-list nodes + typedef typename base_class::item_counter item_counter ; ///< Item counting policy used + typedef typename maker::key_comparator key_comparator ; ///< key compare functor + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename options::stat stat ; ///< internal statistics type + typedef typename base_class::random_level_generator random_level_generator ; ///< random level generator + + protected: + //@cond + typedef typename maker::node_type node_type; + typedef typename maker::node_allocator node_allocator; + typedef typename std::conditional< + std::is_same< typename options::key_accessor, opt::none >::value, + skip_list::details::set_key_accessor, + typename options::key_accessor + >::type key_accessor; + + typedef std::unique_ptr< node_type, typename maker::node_deallocator > scoped_node_ptr; + //@endcond + + public: + ///@name Forward iterators + //@{ + /// Forward ordered iterator + /** + The forward iterator for a split-list has some features: + - it has no post-increment operator + - it depends on iterator of underlying \p OrderedList + */ + typedef skip_list::details::iterator< typename base_class::iterator > iterator; + + /// Const iterator type + typedef skip_list::details::iterator< typename base_class::const_iterator > const_iterator; + + /// Returns a forward iterator addressing the first element in a set + iterator begin() + { + return iterator( base_class::begin()); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator begin() const + { + return const_iterator( base_class::begin()); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator cbegin() const + { + return const_iterator( base_class::cbegin()); + } + + /// Returns a forward iterator that addresses the location succeeding the last element in a set. + iterator end() + { + return iterator( base_class::end()); + } + + /// Returns a forward const iterator that addresses the location succeeding the last element in a set. 
+ const_iterator end() const + { + return const_iterator( base_class::end()); + } + /// Returns a forward const iterator that addresses the location succeeding the last element in a set. + const_iterator cend() const + { + return const_iterator( base_class::cend()); + } + //@} + + protected: + //@cond + static iterator node_to_iterator( node_type * pNode ) + { + assert( pNode ); + return iterator( base_class::iterator::from_node( pNode )); + } + //@endcond + + public: + /// Default ctor + SkipListSet() + : base_class() + {} + + /// Destructor destroys the set object + ~SkipListSet() + {} + + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. + + Return an iterator pointing to inserted item if success, otherwise \ref end() + */ + template + iterator insert( const Q& val ) + { + scoped_node_ptr sp( node_allocator().New( base_class::random_level(), val )); + if ( base_class::insert( *sp.get())) { + return node_to_iterator( sp.release()); + } + return end(); + } + + /// Inserts data of type \ref value_type constructed with std::forward(args)... + /** + Return an iterator pointing to inserted item if success \ref end() otherwise + */ + template + iterator emplace( Args&&... args ) + { + scoped_node_ptr sp( node_allocator().New( base_class::random_level(), std::forward(args)... )); + if ( base_class::insert( *sp.get())) { + return node_to_iterator( sp.release()); + } + return end(); + } + + /// Updates the item + /** + The operation inserts new item if \p val is not found in the set and \p bInsert is \p true. + Otherwise, if that key exists, the function returns an iterator that points to item found. + + Returns std::pair where \p first is an iterator pointing to + item found or inserted or \p end() if \p val is not found and \p bInsert is \p false, + \p second is \p true if new item has been added or \p false if the item + already is in the set. 
+ */ + template <typename Q> + std::pair<iterator, bool> update( const Q& val, bool bInsert = true ) + { + scoped_node_ptr sp( node_allocator().New( base_class::random_level(), val )); + node_type * pNode; + std::pair<bool, bool> bRes = base_class::update( *sp, [&pNode](bool, node_type& item, node_type&) { pNode = &item; }, bInsert ); + if ( bRes.first && bRes.second ) + sp.release(); + else if ( !bRes.first ) + return std::make_pair( end(), false ); + assert( pNode ); + return std::make_pair( node_to_iterator( pNode ), bRes.second ); + } + //@cond + template <typename Q> + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair<iterator, bool> ensure( const Q& val ) + { + return update( val, true ); + } + //@endcond + + /// Checks whether the set contains \p key + /** + The function searches the item with key equal to \p key + and returns an iterator to item found or \p end() if the key is not found + */ + template <typename Q> + iterator contains( Q const& key ) const + { + node_type * pNode = base_class::contains( key ); + if ( pNode ) + return node_to_iterator( pNode ); + return base_class::nonconst_end(); + } + //@cond + template <typename Q> + CDS_DEPRECATED("deprecated, use contains()") + iterator find( Q const& key ) const + { + return contains( key ); + } + //@endcond + + /// Checks whether the set contains \p key using \p pred predicate for searching + /** + The function is similar to contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set.
+ */ + template + iterator contains( Q const& key, Less pred ) const + { + CDS_UNUSED( pred ); + node_type * pNode = base_class::contains( key, cds::details::predicate_wrapper< node_type, Less, key_accessor>()); + if ( pNode ) + return node_to_iterator( pNode ); + return base_class::nonconst_end(); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + iterator find_with( Q const& key, Less pred ) const + { + return contains( key, pred ); + } + //@endcond + + /// Gets minimum key from the set + /** + If the set is empty the function returns \p nullptr + */ + value_type * get_min() const + { + node_type * pNode = base_class::get_min(); + return pNode ? &pNode->m_Value : nullptr; + } + + /// Gets maximum key from the set + /** + The function returns \p nullptr if the set is empty + */ + value_type * get_max() const + { + node_type * pNode = base_class::get_max(); + return pNode ? &pNode->m_Value : nullptr; + } + + /// Clears the set (non-atomic) + /** + The function is not atomic. + Finding and/or inserting is prohibited while clearing. + Otherwise an unpredictable result may be encountered. + Thus, \p clear() may be used only for debugging purposes. + */ + void clear() + { + base_class::clear(); + } + + /// Checks if the set is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the set + /** + The value returned depends on item counter type provided by \p Traits template parameter. + If it is atomicity::empty_item_counter this function always returns 0. + The function is not suitable for checking the set emptiness, use \ref empty + member function for this purpose. + */ + size_t size() const + { + return base_class::size(); + } + + /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32. 
+ static constexpr unsigned int max_height() noexcept + { + return base_class::max_height(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + }; + +}} // cds::container + +#endif // ifndef CDSLIB_CONTAINER_SKIP_LIST_SET_NOGC_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_set_rcu.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_set_rcu.h new file mode 100644 index 0000000..becd683 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/skip_list_set_rcu.h @@ -0,0 +1,777 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_SKIP_LIST_SET_RCU_H +#define CDSLIB_CONTAINER_SKIP_LIST_SET_RCU_H + +#include +#include + +namespace cds { namespace container { + + /// Lock-free skip-list set (template specialization for \ref cds_urcu_desc "RCU") + /** @ingroup cds_nonintrusive_set + \anchor cds_nonintrusive_SkipListSet_rcu + + The implementation of well-known probabilistic data structure called skip-list + invented by W.Pugh in his papers: + - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees + - [1990] W.Pugh A Skip List Cookbook + + A skip-list is a probabilistic data structure that provides expected logarithmic + time search without the need of rebalance. The skip-list is a collection of sorted + linked list. Nodes are ordered by key. Each node is linked into a subset of the lists. + Each list has a level, ranging from 0 to 32. The bottom-level list contains + all the nodes, and each higher-level list is a sublist of the lower-level lists. + Each node is created with a random top level (with a random height), and belongs + to all lists up to that level. The probability that a node has the height 1 is 1/2. + The probability that a node has the height N is 1/2 ** N (more precisely, + the distribution depends on an random generator provided, but our generators + have this property). 
+ + The lock-free variant of skip-list is implemented according to book + - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming", + chapter 14.4 "A Lock-Free Concurrent Skiplist" + + Template arguments: + - \p RCU - one of \ref cds_urcu_gc "RCU type". + - \p T - type to be stored in the list. + - \p Traits - set traits, default is skip_list::traits for explanation. + + It is possible to declare option-based list with cds::container::skip_list::make_traits metafunction instead of \p Traits template + argument. + Template argument list \p Options of cds::container::skip_list::make_traits metafunction are: + - opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the opt::less is used. + - opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - opt::item_counter - the type of item counting feature. Default is \ref atomicity::empty_item_counter that is no item counting. + - opt::memory_model - C++ memory ordering model. Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consistent memory model). + - skip_list::random_level_generator - random level generator. Can be \p skip_list::xor_shift, \p skip_list::turbo or + user-provided one. See \p skip_list::random_level_generator option description for explanation. + Default is \p skip_list::turbo32. + - opt::allocator - allocator for skip-list node. Default is \ref CDS_DEFAULT_ALLOCATOR. + - opt::back_off - back-off strategy used. If the option is not specified, the cds::backoff::Default is used. + - opt::stat - internal statistics. Available types: skip_list::stat, skip_list::empty_stat (the default) + - opt::rcu_check_deadlock - a deadlock checking policy.
Default is opt::v::rcu_throw_deadlock + + @note Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. + + Iterators + + The class supports a forward iterator (\ref iterator and \ref const_iterator). + The iteration is ordered. + + You may iterate over skip-list set items only under RCU lock. + Only in this case the iterator is thread-safe since + while RCU is locked any set's item cannot be reclaimed. + + The requirement of RCU lock during iterating means that deletion of the elements (i.e. \ref erase) + is not possible. + + @warning The iterator object cannot be passed between threads + + Example how to use skip-list set iterators: + \code + // First, you should include the header for RCU type you have chosen + #include <cds/urcu/general_buffered.h> + #include <cds/container/skip_list_set_rcu.h> + + typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type; + + struct Foo { + // ... + }; + + // Traits for your skip-list. + // At least, you should define cds::opt::less or cds::opt::compare for Foo struct + struct my_traits: public cds::container::skip_list::traits + { + // ... + }; + typedef cds::container::SkipListSet< rcu_type, Foo, my_traits > my_skiplist_set; + + my_skiplist_set theSet; + + // ... + + // Begin iteration + { + // Apply RCU locking manually + typename rcu_type::scoped_lock sl; + + for ( auto it = theSet.begin(); it != theSet.end(); ++it ) { + // ... + } + + // rcu_type::scoped_lock destructor releases RCU lock implicitly + } + \endcode + + \warning Due to concurrent nature of skip-list set it is not guaranteed that you can iterate + all elements in the set: any concurrent deletion can exclude the element + pointed by the iterator from the set, and your iteration can be terminated + before end of the set.
Therefore, such iteration is more suitable for debugging purposes + + The iterator class supports the following minimalistic interface: + \code + struct iterator { + // Default ctor + iterator(); + + // Copy ctor + iterator( iterator const& s); + + value_type * operator ->() const; + value_type& operator *() const; + + // Pre-increment + iterator& operator ++(); + + // Copy assignment + iterator& operator = (const iterator& src); + + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced. + */ + template < + typename RCU, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + typename Traits = skip_list::traits +#else + typename Traits +#endif + > + class SkipListSet< cds::urcu::gc< RCU >, T, Traits >: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::SkipListSet< cds::urcu::gc< RCU >, T, Traits > +#else + protected details::make_skip_list_set< cds::urcu::gc< RCU >, T, Traits >::type +#endif + { + //@cond + typedef details::make_skip_list_set< cds::urcu::gc< RCU >, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + public: + typedef typename base_class::gc gc ; ///< Garbage collector used + typedef T value_type ; ///< Value type stored in the set + typedef Traits traits ; ///< Options specified + + typedef typename base_class::back_off back_off ; ///< Back-off strategy used + typedef typename traits::allocator allocator_type ; ///< Allocator type used for allocate/deallocate the skip-list nodes + typedef typename base_class::item_counter item_counter ; ///< Item counting policy used + typedef typename maker::key_comparator key_comparator ; ///< key compare functor + typedef typename base_class::memory_model memory_model ; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename traits::random_level_generator random_level_generator ; ///< random level generator + typedef typename traits::stat stat ; ///< internal statistics type + typedef typename traits::rcu_check_deadlock rcu_check_deadlock ; ///< Deadlock checking policy + + protected: + //@cond + typedef typename maker::node_type node_type; + typedef typename maker::node_allocator node_allocator; + + typedef std::unique_ptr< node_type, typename maker::node_deallocator > scoped_node_ptr; + //@endcond + + public: + typedef typename base_class::rcu_lock rcu_lock; ///< RCU scoped lock + /// Group of \p extract_xxx functions do not require external locking + static constexpr const bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; + + /// pointer to extracted node + using exempt_ptr = cds::urcu::exempt_ptr< gc, node_type, value_type, typename maker::intrusive_traits::disposer >; + + private: + //@cond + struct raw_ptr_converter + { + value_type * operator()( node_type * p ) const + { + return p ? 
&p->m_Value : nullptr; + } + + value_type& operator()( node_type& n ) const + { + return n.m_Value; + } + + value_type const& operator()( node_type const& n ) const + { + return n.m_Value; + } + }; + //@endcond + + public: + /// Result of \p get(), \p get_with() functions - pointer to the node found + typedef cds::urcu::raw_ptr_adaptor< value_type, typename base_class::raw_ptr, raw_ptr_converter > raw_ptr; + + protected: + //@cond + unsigned int random_level() + { + return base_class::random_level(); + } + //@endcond + + public: + /// Default ctor + SkipListSet() + : base_class() + {} + + /// Destructor destroys the set object + ~SkipListSet() + {} + + public: + ///@name Forward ordered iterators (thread-safe under RCU lock) + //@{ + /// Forward iterator + /** + The forward iterator has some features: + - it has no post-increment operator + - it depends on iterator of underlying \p OrderedList + + You may safely use iterators in multi-threaded environment only under RCU lock. + Otherwise, a crash is possible if another thread deletes the element the iterator points to. + */ + typedef skip_list::details::iterator< typename base_class::iterator > iterator; + + /// Const iterator type + typedef skip_list::details::iterator< typename base_class::const_iterator > const_iterator; + + /// Returns a forward iterator addressing the first element in a set + iterator begin() + { + return iterator( base_class::begin()); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator begin() const + { + return const_iterator( base_class::begin()); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator cbegin() const + { + return const_iterator( base_class::cbegin()); + } + + /// Returns a forward iterator that addresses the location succeeding the last element in a set. 
+ iterator end() + { + return iterator( base_class::end()); + } + + /// Returns a forward const iterator that addresses the location succeeding the last element in a set. + const_iterator end() const + { + return const_iterator( base_class::end()); + } + + /// Returns a forward const iterator that addresses the location succeeding the last element in a set. + const_iterator cend() const + { + return const_iterator( base_class::cend()); + } + //@} + + public: + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the set. + + The type \p Q should contain as minimum the complete key for the node. + The object of \ref value_type should be constructible from a value of type \p Q. + In trivial case, \p Q is equal to \ref value_type. + + RCU \p synchronize method can be called. RCU should not be locked. + + Returns \p true if \p val is inserted into the set, \p false otherwise. + */ + template + bool insert( Q const& val ) + { + scoped_node_ptr sp( node_allocator().New( random_level(), val )); + if ( base_class::insert( *sp.get())) { + sp.release(); + return true; + } + return false; + } + + /// Inserts new node + /** + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-fields of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this set's item by concurrent threads. + The user-defined functor is called only if the inserting is success. + + RCU \p synchronize method can be called. RCU should not be locked. 
+ */ + template + bool insert( Q const& val, Func f ) + { + scoped_node_ptr sp( node_allocator().New( random_level(), val )); + if ( base_class::insert( *sp.get(), [&f]( node_type& v ) { f( v.m_Value ); } )) { + sp.release(); + return true; + } + return false; + } + + /// Updates the item + /** + The operation performs inserting or changing data with lock-free manner. + + If \p val not found in the set, then the new item created from \p val + is inserted into the set iff \p bInsert is \p true. + Otherwise, the functor \p func is called with the item found. + The functor \p Func signature: + \code + struct my_functor { + void operator()( bool bNew, value_type& item, const Q& val ); + }; + \endcode + where: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - an item of the set + - \p val - argument \p val passed into the \p %update() function + + The functor may change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + RCU \p synchronize method can be called. RCU should not be locked. + + Returns std::pair where \p first is true if operation is successful, + \p second is true if new item has been added or \p false if the item with \p key + already exists. + */ + template + std::pair update( const Q& val, Func func, bool bInsert = true ) + { + scoped_node_ptr sp( node_allocator().New( random_level(), val )); + std::pair bRes = base_class::update( *sp, + [&func, &val](bool bNew, node_type& node, node_type&){ func( bNew, node.m_Value, val );}, bInsert ); + if ( bRes.first && bRes.second ) + sp.release(); + return bRes; + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( const Q& val, Func func ) + { + return update( val, func, true ); + } + //@endcond + + /// Inserts data of type \ref value_type constructed with std::forward(args)... 
+ /** + Returns \p true if inserting successful, \p false otherwise. + + RCU \p synchronize method can be called. RCU should not be locked. + */ + template + bool emplace( Args&&... args ) + { + scoped_node_ptr sp( node_allocator().New( random_level(), std::forward(args)... )); + if ( base_class::insert( *sp.get())) { + sp.release(); + return true; + } + return false; + } + + /// Delete \p key from the set + /** \anchor cds_nonintrusive_SkipListSet_rcu_erase_val + + The item comparator should be able to compare the type \p value_type + and the type \p Q. + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key ) + { + return base_class::erase( key ); + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListSet_rcu_erase_val "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::erase_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >()); + } + + /// Delete \p key from the set + /** \anchor cds_nonintrusive_SkipListSet_rcu_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type const& val); + }; + \endcode + + Since the key of MichaelHashSet's \p value_type is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The list item comparator should be able to compare the type \p T of list item + and the type \p Q. 
+ + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + + See also: \ref erase + */ + template + bool erase( Q const& key, Func f ) + { + return base_class::erase( key, [&f]( node_type const& node) { f( node.m_Value ); } ); + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListSet_rcu_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::erase_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >(), + [&f]( node_type const& node) { f( node.m_Value ); } ); + } + + /// Extracts the item from the set with specified \p key + /** \anchor cds_nonintrusive_SkipListSet_rcu_extract + The function searches an item with key equal to \p key in the set, + unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. + If the item is not found the function returns an empty \p exempt_ptr + + Note the compare functor from \p Traits class' template argument + should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU \p synchronize method can be called. RCU should NOT be locked. + + The function does not free the item found. + The item will be implicitly freed when the returned object is destroyed or when + its \p release() member function is called. 
+ */ + template + exempt_ptr extract( Q const& key ) + { + return exempt_ptr( base_class::do_extract( key )); + } + + /// Extracts the item from the set with comparing functor \p pred + /** + The function is an analog of \p extract(Q const&) but \p pred predicate is used for key comparing. + \p Less has the semantics like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + exempt_ptr extract_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return exempt_ptr( base_class::do_extract_with( key, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >())); + } + + /// Extracts an item with minimal key from the set + /** + The function searches an item with minimal key, unlinks it, + and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item. + If the skip-list is empty the function returns an empty \p exempt_ptr. + + RCU \p synchronize method can be called. RCU should NOT be locked. + + The function does not free the item found. + The item will be implicitly freed when the returned object is destroyed or when + its \p release() member function is called. + */ + exempt_ptr extract_min() + { + return exempt_ptr( base_class::do_extract_min()); + } + + /// Extracts an item with maximal key from the set + /** + The function searches an item with maximal key, unlinks it from the set, + and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item. + If the skip-list is empty the function returns an empty \p exempt_ptr. + + RCU \p synchronize method can be called. RCU should NOT be locked. + + The function does not free the item found. + The item will be implicitly freed when the returned object is destroyed or when + its \p release() member function is called. 
+ */ + exempt_ptr extract_max() + { + return exempt_ptr( base_class::do_extract_max()); + } + + /// Find the key \p val + /** + @anchor cds_nonintrusive_SkipListSet_rcu_find_func + + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that may be not the same as \p value_type. + + The function applies RCU lock internally. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) + { + return base_class::find( val, [&f]( node_type& node, Q& v ) { f( node.m_Value, v ); }); + } + //@cond + template + bool find( Q const& val, Func f ) + { + return base_class::find( val, [&f]( node_type& node, Q& v ) { f( node.m_Value, v ); } ); + } + //@endcond + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SkipListSet_rcu_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. 
+ */ + template + bool find_with( Q& val, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::find_with( val, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >(), + [&f]( node_type& node, Q& v ) { f( node.m_Value, v ); } ); + } + //@cond + template + bool find_with( Q const& val, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::find_with( val, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >(), + [&f]( node_type& node, Q const& v ) { f( node.m_Value, v ); } ); + } + //@endcond + + /// Checks whether the set contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + The function applies RCU lock internally. + */ + template + bool contains( Q const & key ) + { + return base_class::contains( key ); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( Q const & key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the set contains \p key using \p pred predicate for searching + /** + The function is similar to contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool contains( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::contains( key, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Finds \p key and return the item found + /** \anchor cds_nonintrusive_SkipListSet_rcu_get + The function searches the item with key equal to \p key and returns a \p raw_ptr object pointed to item found. 
+ If \p key is not found it returns empty \p raw_ptr. + + Note the compare functor in \p Traits class' template argument + should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU should be locked before call of this function. + Returned item is valid only while RCU is locked: + \code + typedef cds::container::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; + skip_list theList; + // ... + typename skip_list::raw_ptr pVal; + { + // Lock RCU + skip_list::rcu_lock lock; + + pVal = theList.get( 5 ); + if ( pVal ) { + // Deal with pVal + //... + } + } + // You can manually release pVal after RCU-locked section + pVal.release(); + \endcode + */ + template + raw_ptr get( Q const& key ) + { + return raw_ptr( base_class::get( key )); + } + + /// Finds the key \p val and return the item found + /** + The function is an analog of \ref cds_nonintrusive_SkipListSet_rcu_get "get(Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + raw_ptr get_with( Q const& val, Less pred ) + { + CDS_UNUSED( pred ); + return raw_ptr( base_class::get_with( val, cds::details::predicate_wrapper< node_type, Less, typename maker::value_accessor >())); + } + + /// Clears the set (non-atomic). + /** + The function deletes all items from the set. + The function is not atomic, thus, in multi-threaded environment with parallel insertions + this sequence + \code + set.clear(); + assert( set.empty()); + \endcode + the assertion could be raised. + + For each item the \ref disposer provided by \p Traits template parameter will be called. 
+ */ + void clear() + { + base_class::clear(); + } + + /// Checks if the set is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the set + /** + The value returned depends on item counter type provided by \p Traits template parameter. + If it is atomicity::empty_item_counter this function always returns 0. + Therefore, the function is not suitable for checking the set emptiness, use \ref empty + member function for this purpose. + */ + size_t size() const + { + return base_class::size(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_SKIP_LIST_SET_RCU_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/split_list_map.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/split_list_map.h new file mode 100644 index 0000000..30972cf --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/split_list_map.h @@ -0,0 +1,807 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_SPLIT_LIST_MAP_H +#define CDSLIB_CONTAINER_SPLIT_LIST_MAP_H + +#include +#include + +namespace cds { namespace container { + + /// Split-ordered list map + /** @ingroup cds_nonintrusive_map + \anchor cds_nonintrusive_SplitListMap_hp + + Hash table implementation based on split-ordered list algorithm discovered by Ori Shalev and Nir Shavit, see + - [2003] Ori Shalev, Nir Shavit "Split-Ordered Lists - Lock-free Resizable Hash Tables" + - [2008] Nir Shavit "The Art of Multiprocessor Programming" + + See intrusive::SplitListSet for a brief description of the split-list algorithm. + + Template parameters: + - \p GC - Garbage collector used like \p cds::gc::HP or \p cds::gc::DHP + - \p Key - key type of an item stored in the map. It should be copy-constructible + - \p Value - value type stored in the map + - \p Traits - map traits, default is \p split_list::traits. Instead of declaring \p %split_list::traits -based + struct you may apply option-based notation with \p split_list::make_traits metafunction. 
+ + There are the specializations: + - for \ref cds_urcu_desc "RCU" - declared in cd/container/split_list_map_rcu.h, + see \ref cds_nonintrusive_SplitListMap_rcu "SplitListMap". + - for \ref cds::gc::nogc declared in cds/container/split_list_map_nogc.h, + see \ref cds_nonintrusive_SplitListMap_nogc "SplitListMap". + + \par Usage + + You should decide what garbage collector you want, and what ordered list you want to use. Split-ordered list + is original data structure based on an ordered list. Suppose, you want construct split-list map based on \p gc::HP GC + and \p MichaelList as ordered list implementation. Your map should map \p int key to \p std::string value. + So, you beginning your code with the following: + \code + #include + #include + + namespace cc = cds::container; + \endcode + The inclusion order is important: first, include file for ordered-list implementation (for this example, cds/container/michael_list_hp.h), + then the header for split-list map cds/container/split_list_map.h. + + Now, you should declare traits for split-list map. The main parts of traits are a hash functor and a comparing functor for the ordered list. + We use std::hash as hash functor and std::less predicate as comparing functor. + + The second attention: instead of using \p %MichaelList in \p %SplitListMap traits we use a tag \p cds::contaner::michael_list_tag for the Michael's list. + The split-list requires significant support from underlying ordered list class and it is not good idea to dive you + into deep implementation details of split-list and ordered list interrelations. The tag paradigm simplifies split-list interface. 
+ + \code + // SplitListMap traits + struct foo_set_traits: public cc::split_list::traits + { + typedef cc::michael_list_tag ordered_list ; // what type of ordered list we want to use + typedef std::hash hash ; // hash functor for the key stored in split-list map + + // Type traits for our MichaelList class + struct ordered_list_traits: public cc::michael_list::traits + { + typedef std::less less ; // use our std::less predicate as comparator to order list nodes + }; + }; + \endcode + + Now you are ready to declare our map class based on \p %SplitListMap: + \code + typedef cc::SplitListMap< cds::gc::DHP, int, std::string, foo_set_traits > int_string_map; + \endcode + + You may use the modern option-based declaration instead of classic type-traits-based one: + \code + typedef cc::SplitListMap< + cs::gc::DHP // GC used + ,int // key type + ,std::string // value type + ,cc::split_list::make_traits< // metafunction to build split-list traits + cc::split_list::ordered_list // tag for underlying ordered list implementation + ,cc::opt::hash< std::hash > // hash functor + ,cc::split_list::ordered_list_traits< // ordered list traits desired + cc::michael_list::make_traits< // metafunction to build lazy list traits + cc::opt::less< std::less > // less-based compare functor + >::type + > + >::type + > int_string_map; + \endcode + In case of option-based declaration with \p split_list::make_traits metafunction the struct \p foo_set_traits is not required. + + Now, the map of type \p int_string_map is ready to use in your program. + + Note that in this example we show only mandatory \p traits parts, optional ones is the default and they are inherited + from \p container::split_list::traits. There are many other options for deep tuning of the split-list and + ordered-list containers. 
+ */ + template < + class GC, + typename Key, + typename Value, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = split_list::traits +#else + class Traits +#endif + > + class SplitListMap: + protected container::SplitListSet< + GC, + std::pair, + split_list::details::wrap_map_traits + > + { + //@cond + typedef container::SplitListSet< + GC, + std::pair, + split_list::details::wrap_map_traits + > base_class; + //@endcond + + public: + typedef GC gc; ///< Garbage collector + typedef Key key_type; ///< key type + typedef Value mapped_type; ///< type of value to be stored in the map + typedef Traits traits; ///< Map traits + + typedef std::pair value_type ; ///< key-value pair type + typedef typename base_class::ordered_list ordered_list; ///< Underlying ordered list class + typedef typename base_class::key_comparator key_comparator; ///< key compare functor + + typedef typename base_class::hash hash; ///< Hash functor for \ref key_type + typedef typename base_class::item_counter item_counter; ///< Item counter type + typedef typename base_class::stat stat; ///< Internal statistics + + /// Count of hazard pointer required + static constexpr const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; + + protected: + //@cond + typedef typename base_class::maker::traits::key_accessor key_accessor; + typedef typename base_class::node_type node_type; + //@endcond + + public: + /// Guarded pointer + typedef typename gc::template guarded_ptr< node_type, value_type, details::guarded_ptr_cast_set > guarded_ptr; + + public: + ///@name Forward iterators (only for debugging purpose) + //@{ + /// Forward iterator + /** + The forward iterator for a split-list has the following features: + - it has no post-increment operator + - it depends on underlying ordered list iterator + - The iterator object cannot be moved across thread boundary because it contains GC's guard that is thread-private GC data. 
+ - Iterator ensures thread-safety even if you delete the item that iterator points to. However, in case of concurrent + deleting operations it is no guarantee that you iterate all item in the split-list. + Moreover, a crash is possible when you try to iterate the next element that has been deleted by concurrent thread. + + @warning Use this iterator on the concurrent container for debugging purpose only. + + The iterator interface: + \code + class iterator { + public: + // Default constructor + iterator(); + + // Copy construtor + iterator( iterator const& src ); + + // Dereference operator + value_type * operator ->() const; + + // Dereference operator + value_type& operator *() const; + + // Preincrement operator + iterator& operator ++(); + + // Assignment operator + iterator& operator = (iterator const& src); + + // Equality operators + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + */ + typedef typename base_class::iterator iterator; + + /// Const forward iterator + typedef typename base_class::const_iterator const_iterator; + + /// Returns a forward iterator addressing the first element in a map + /** + For empty map \code begin() == end() \endcode + */ + iterator begin() + { + return base_class::begin(); + } + + /// Returns an iterator that addresses the location succeeding the last element in a map + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the map. 
+ For empty map \code begin() == end() \endcode + */ + iterator end() + { + return base_class::end(); + } + + /// Returns a forward const iterator addressing the first element in a map + const_iterator begin() const + { + return base_class::begin(); + } + + /// Returns a forward const iterator addressing the first element in a map + const_iterator cbegin() const + { + return base_class::cbegin(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a map + const_iterator end() const + { + return base_class::end(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a map + const_iterator cend() const + { + return base_class::cend(); + } + //@} + + public: + /// Initializes split-ordered map of default capacity + /** + The default capacity is defined in bucket table constructor. + See \p intrusive::split_list::expandable_bucket_table, \p intrusive::split_list::static_bucket_table + which selects by \p intrusive::split_list::traits::dynamic_bucket_table. + */ + SplitListMap() + : base_class() + {} + + /// Initializes split-ordered map + SplitListMap( + size_t nItemCount ///< estimated average item count + , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 10, default is 1. + ) + : base_class( nItemCount, nLoadFactor ) + {} + + public: + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from value of type \p K. + In trivial case, \p K is equal to \ref key_type. + - The \ref mapped_type should be default-constructible. + + Returns \p true if inserting successful, \p false otherwise. 
+ */ + template + bool insert( K&& key ) + { + return base_class::emplace( key_type( std::forward( key )), mapped_type()); + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the map. + + Preconditions: + - The \ref key_type should be constructible from \p key of type \p K. + - The \ref mapped_type should be constructible from \p val of type \p V. + + Returns \p true if \p val is inserted into the map, \p false otherwise. + */ + template + bool insert( K&& key, V&& val ) + { + return base_class::emplace( key_type( std::forward( key )), mapped_type( std::forward( val ))); + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the map's item inserted: + - item.first is a const reference to item's key that cannot be changed. + - item.second is a reference to item's value that may be changed. + + It should be keep in mind that concurrent modifications of \p item.second may be possible. + + The key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + @warning For \ref cds_nonintrusive_MichaelKVList_gc "MichaelKVList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". 
+ \ref cds_nonintrusive_LazyKVList_gc "LazyKVList" provides exclusive access to inserted item and does not require any node-level + synchronization. + */ + template + bool insert_with( K&& key, Func func ) + { + //TODO: pass arguments by reference (make_pair makes copy) + return base_class::insert( std::make_pair( key_type( std::forward( key )), mapped_type()), func ); + } + + /// For key \p key inserts data of type \p mapped_type created from \p args + /** + \p key_type should be constructible from type \p K + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool emplace( K&& key, Args&&... args ) + { + return base_class::emplace( key_type( std::forward(key)), mapped_type( std::forward(args)...)); + } + + /// Updates the node + /** + The operation performs inserting or changing data with lock-free manner. + + If \p key is not found in the map, then \p key is inserted iff \p bAllowInsert is \p true. + Otherwise, the functor \p func is called with item found. + + The functor \p func signature depends on ordered list: + + for \p MichaelKVList, \p LazyKVList + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - the item found or inserted + + The functor may change any fields of the \p item.second that is \p mapped_type. + + for \p IterableKVList + \code + void func( value_type& val, value_type * old ); + \endcode + where + - \p val - a new data constructed from \p key + - \p old - old value that will be retired. If new item has been inserted then \p old is \p nullptr. + + Returns std::pair where \p first is true if operation is successful, + \p second is true if new item has been added or \p false if the item with \p key + already is in the map. 
+ + @warning For \ref cds_nonintrusive_MichaelKVList_gc "MichaelKVList" and \ref cds_nonintrusive_IterableKVList_gc "IterableKVList" + as the ordered list see \ref cds_intrusive_item_creating "insert item troubleshooting". + \ref cds_nonintrusive_LazyKVList_gc "LazyKVList" provides exclusive access to inserted item and does not require any node-level + synchronization. + */ + template +#ifdef CDS_DOXYGE_INVOKED + std::pair +#else + typename std::enable_if< + std::is_same::value && !is_iterable_list< ordered_list >::value, + std::pair + >::type +#endif + update( K&& key, Func func, bool bAllowInsert = true ) + { + typedef decltype( std::make_pair( key_type( std::forward( key )), mapped_type())) arg_pair_type; + + return base_class::update( std::make_pair( key_type( key ), mapped_type()), + [&func]( bool bNew, value_type& item, arg_pair_type const& /*val*/ ) { + func( bNew, item ); + }, + bAllowInsert ); + } + //@cond + template +#ifdef CDS_DOXYGE_INVOKED + std::pair +#else + typename std::enable_if< + std::is_same::value && is_iterable_list< ordered_list >::value, + std::pair + >::type +#endif + update( K&& key, Func func, bool bAllowInsert = true ) + { + return base_class::update( std::make_pair( key_type( std::forward( key )), mapped_type()), func, bAllowInsert ); + } + //@endcond + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( K const& key, Func func ) + { + return update( key, func, true ); + } + //@endcond + + /// Inserts or updates the node (only for \p IterableKVList) + /** + The operation performs inserting or changing data with lock-free manner. + + If \p key is not found in the map, then \p key is inserted iff \p bAllowInsert is \p true. + Otherwise, the current element is changed to \p val, the old element will be retired later. 
+ + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if \p val has been added or \p false if the item with that key + already in the map. + */ + template +#ifdef CDS_DOXYGEN_INVOKED + std::pair +#else + typename std::enable_if< + std::is_same< Q, Q>::value && is_iterable_list< ordered_list >::value, + std::pair + >::type +#endif + upsert( Q&& key, V&& val, bool bAllowInsert = true ) + { + return base_class::upsert( std::make_pair( key_type( std::forward( key )), mapped_type( std::forward( val ))), bAllowInsert ); + } + + + /// Deletes \p key from the map + /** \anchor cds_nonintrusive_SplitListMap_erase_val + + Return \p true if \p key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + return base_class::erase( key ); + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListMap_erase_val "erase(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::erase_with( key, cds::details::predicate_wrapper()); + } + + /// Deletes \p key from the map + /** \anchor cds_nonintrusive_SplitListMap_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface is: + \code + struct extractor { + void operator()(value_type& item) { ... 
} + }; + \endcode + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key, Func f ) + { + return base_class::erase( key, f ); + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListMap_erase_func "erase(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( K const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::erase_with( key, cds::details::predicate_wrapper(), f ); + } + + /// Deletes the item pointed by iterator \p iter (only for \p IterableList based map) + /** + Returns \p true if the operation is successful, \p false otherwise. + The function can return \p false if the node the iterator points to has already been deleted + by other thread. + + The function does not invalidate the iterator, it remains valid and can be used for further traversing. + + @note \p %erase_at() is supported only for \p %SplitListMap based on \p IterableList. + */ +#ifdef CDS_DOXYGEN_INVOKED + bool erase_at( iterator const& iter ) +#else + template + typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, bool >::type + erase_at( Iterator const& iter ) +#endif + { + return base_class::erase_at( iter ); + } + + /// Extracts the item with specified \p key + /** \anchor cds_nonintrusive_SplitListMap_hp_extract + The function searches an item with key equal to \p key, + unlinks it from the map, and returns it as \p guarded_ptr. + If \p key is not found the function returns an empty guarded pointer. + + Note the compare functor should accept a parameter of type \p K that may be not the same as \p value_type. 
+ + The extracted item is freed automatically when returned \p guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::container::SplitListMap< your_template_args > splitlist_map; + splitlist_map theMap; + // ... + { + splitlist_map::guarded_ptr gp(theMap.extract( 5 )); + if ( gp ) { + // Deal with gp + // ... + } + // Destructor of gp releases internal HP guard + } + \endcode + */ + template + guarded_ptr extract( K const& key ) + { + return base_class::extract_( key ); + } + + /// Extracts the item using compare functor \p pred + /** + The function is an analog of \ref cds_nonintrusive_SplitListMap_hp_extract "extract(K const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the map. + */ + template + guarded_ptr extract_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::extract_with_( key, cds::details::predicate_wrapper()); + } + + /// Finds the key \p key + /** \anchor cds_nonintrusive_SplitListMap_find_cfunc + + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + The functor may change \p item.second. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the map's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The function returns \p true if \p key is found, \p false otherwise. 
+ */ + template + bool find( K const& key, Func f ) + { + return base_class::find( key, [&f](value_type& pair, K const&){ f( pair ); } ); + } + + /// Finds \p key and returns iterator pointed to the item found (only for \p IterableList) + /** + If \p key is not found the function returns \p end(). + + @note This function is supported only for map based on \p IterableList + */ + template +#ifdef CDS_DOXYGEN_INVOKED + iterator +#else + typename std::enable_if< std::is_same::value && is_iterable_list::value, iterator >::type +#endif + find( K const& key ) + { + return base_class::find( key ); + } + + /// Finds the key \p val using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListMap_find_cfunc "find(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool find_with( K const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::find_with( key, + cds::details::predicate_wrapper(), + [&f](value_type& pair, K const&){ f( pair ); } ); + } + + /// Finds \p key using \p pred predicate and returns iterator pointed to the item found (only for \p IterableList) + /** + The function is an analog of \p find(K&) but \p pred is used for key comparing. + \p Less functor has interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the map. + + If \p key is not found the function returns \p end(). 
+ + @note This function is supported only for map based on \p IterableList + */ + template +#ifdef CDS_DOXYGEN_INVOKED + iterator +#else + typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type +#endif + find_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::find_with( key, cds::details::predicate_wrapper()); + } + + /// Checks whether the map contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + Otherwise, you may use \p contains( Q const&, Less pred ) functions with explicit predicate for key comparing. + */ + template + bool contains( K const& key ) + { + return base_class::contains( key ); + } + + /// Checks whether the map contains \p key using \p pred predicate for searching + /** + The function is similar to contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool contains( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::contains( key, cds::details::predicate_wrapper()); + } + + /// Finds \p key and return the item found + /** \anchor cds_nonintrusive_SplitListMap_hp_get + The function searches the item with key equal to \p key + and returns the item found as a guarded pointer. + If \p key is not found the function returns an empty guarded pointer. + + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::container::SplitListMap< your_template_params > splitlist_map; + splitlist_map theMap; + // ... 
+ { + splitlist_map::guarded_ptr gp(theMap.get( 5 )); + if ( gp ) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + + Note the compare functor specified for split-list map + should accept a parameter of type \p K that can be not the same as \p value_type. + */ + template + guarded_ptr get( K const& key ) + { + return base_class::get_( key ); + } + + /// Finds \p key and return the item found + /** + The function is an analog of \ref cds_nonintrusive_SplitListMap_hp_get "get( K const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the map. + */ + template + guarded_ptr get_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::get_with_( key, cds::details::predicate_wrapper()); + } + + /// Clears the map (not atomic) + void clear() + { + base_class::clear(); + } + + /// Checks if the map is empty + /** + Emptiness is checked by item counting: if item count is zero then the map is empty. + Thus, the correct item counting is an important part of the map implementation. 
+ */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the map + size_t size() const + { + return base_class::size(); + } + + /// Returns internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Returns internal statistics for \p ordered_list + typename ordered_list::stat const& list_statistics() const + { + return base_class::list_statistics(); + } + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_SPLIT_LIST_MAP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/split_list_map_nogc.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/split_list_map_nogc.h new file mode 100644 index 0000000..3ae82f2 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/split_list_map_nogc.h @@ -0,0 +1,389 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_SPLIT_LIST_MAP_NOGC_H +#define CDSLIB_CONTAINER_SPLIT_LIST_MAP_NOGC_H + +#include +#include + +namespace cds { namespace container { + + /// Split-ordered list map (template specialization for gc::nogc) + /** @ingroup cds_nonintrusive_map + \anchor cds_nonintrusive_SplitListMap_nogc + + This specialization is so-called append-only. + The map does not support the removal of list item. + + See \ref cds_nonintrusive_SplitListMap_hp "SplitListMap" for description of template parameters. + + @warning Many member functions return an iterator pointing to an item. + The iterator can be used to set up field of the item, + but you should provide an exclusive access to it, + see \ref cds_intrusive_item_creating "insert item troubleshooting". 
+ */ + template < + typename Key, + typename Value, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = split_list::traits +#else + class Traits +#endif + > + class SplitListMap: + protected container::SplitListSet< + cds::gc::nogc, + std::pair, + split_list::details::wrap_map_traits + > + { + //@cond + typedef container::SplitListSet< + cds::gc::nogc, + std::pair, + split_list::details::wrap_map_traits + > base_class; + //@endcond + public: + typedef cds::gc::nogc gc; ///< Garbage collector + typedef Key key_type; ///< key type + typedef Value mapped_type; ///< type of value stored in the map + + typedef std::pair value_type ; ///< Pair type + typedef typename base_class::ordered_list ordered_list; ///< Underlying ordered list class + typedef typename base_class::key_comparator key_comparator; ///< key comparison functor + + typedef typename base_class::hash hash; ///< Hash functor for \ref key_type + typedef typename base_class::item_counter item_counter; ///< Item counter type + typedef typename base_class::stat stat; ///< Internal statistics + + protected: + //@cond + typedef typename base_class::traits::key_accessor key_accessor; + //@endcond + + public: + ///@name Forward iterators + //@{ + /// Forward iterator + /** + The forward iterator for split-list is based on \p OrderedList forward iterator and has some features: + - it has no post-increment operator + - it iterates items in unordered fashion + + The iterator interface: + \code + class iterator { + public: + // Default constructor + iterator(); + + // Copy construtor + iterator( iterator const& src ); + + // Dereference operator + value_type * operator ->() const; + + // Dereference operator + value_type& operator *() const; + + // Preincrement operator + iterator& operator ++(); + + // Assignment operator + iterator& operator = (iterator const& src); + + // Equality operators + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + */ + typedef typename 
base_class::iterator iterator; + + /// Const forward iterator + typedef typename base_class::const_iterator const_iterator; + + /// Returns a forward iterator addressing the first element in a map + /** + For empty set \code begin() == end() \endcode + */ + iterator begin() + { + return base_class::begin(); + } + + /// Returns an iterator that addresses the location succeeding the last element in a map + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the set. + For empty set \code begin() == end() \endcode + */ + iterator end() + { + return base_class::end(); + } + + /// Returns a forward const iterator addressing the first element in a map + const_iterator begin() const + { + return base_class::begin(); + } + + /// Returns a forward const iterator addressing the first element in a map + const_iterator cbegin() const + { + return base_class::cbegin(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a map + const_iterator end() const + { + return base_class::end(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a map + const_iterator cend() const + { + return base_class::cend(); + } + //@} + + public: + /// Initialize split-ordered map of default capacity + /** + The default capacity is defined in bucket table constructor. + See \p intrusive::split_list::expandable_bucket_table, \p intrusive::split_list::static_ducket_table + which selects by \p intrusive::split_list::traits::dynamic_bucket_table. + */ + SplitListMap() + : base_class() + {} + + /// Initialize split-ordered map + SplitListMap( + size_t nItemCount ///< estimated average item count + , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 10, default is 1. 
+ ) + : base_class( nItemCount, nLoadFactor ) + {} + + public: + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the map. + + Preconditions: + - The \p key_type should be constructible from value of type \p K. + In trivial case, \p K is equal to \ref key_type. + - The \p mapped_type should be default-constructible. + + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert( K const& key ) + { + //TODO: pass arguments by reference (make_pair makes copy) + return base_class::emplace( key_type( key ), mapped_type()); + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the map. + + Preconditions: + - The \p key_type should be constructible from \p key of type \p K. + - The \p mapped_type should be constructible from \p val of type \p V. + + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert( K const& key, V const& val ) + { + return base_class::emplace( key_type( key ), mapped_type( val )); + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the map's item inserted. \p item.second is a reference to item's value that may be changed. + User-defined functor \p func should guarantee that during changing item's value no any other changes + could be made on this map's item by concurrent threads. + The user-defined functor is called only if the inserting is successful. + + The \p key_type should be constructible from value of type \p K. 
+ + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p f functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + Returns an iterator pointed to inserted value, or \p end() if inserting is failed + */ + template + iterator insert_with( const K& key, Func func ) + { + iterator it = insert( key ); + if ( it != end()) + func( (*it)); + return it; + } + + /// For key \p key inserts data of type \p mapped_type created in-place from \p args + /** + \p key_type should be constructible from type \p K + + Returns \p true if inserting successful, \p false otherwise. + */ + template + iterator emplace( K&& key, Args&&... args ) + { + return base_class::emplace( key_type( std::forward( key )), mapped_type( std::forward( args )...)); + } + + /// Updates the item + /** + If \p key is not in the map and \p bAllowInsert is \p true, the function inserts a new item. + Otherwise, the function returns an iterator pointing to the item found. + + Returns std::pair where \p first is an iterator pointing to + item found or inserted (if inserting is not allowed and \p key is not found, the iterator will be \p end()), + + \p second is true if new item has been added or \p false if the item + already is in the map. 
+ */ + template + std::pair update( K const& key, bool bAllowInsert = true ) + { + //TODO: pass arguments by reference (make_pair makes copy) + return base_class::update( std::make_pair( key_type( key ), mapped_type()), bAllowInsert ); + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( K const& key ) + { + return update( key, true ); + } + //@endcond + + /// Checks whether the map contains \p key + /** + The function searches the item with key equal to \p key + and returns an iterator pointed to item found and \ref end() otherwise + */ + template + iterator contains( K const& key ) + { + return base_class::contains( key ); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + iterator find( K const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the map contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the map. + */ + template + iterator contains( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::contains( key, cds::details::predicate_wrapper()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + iterator find_with( K const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + + /// Clears the set (not atomic, for debugging purposes only) + void clear() + { + base_class::clear(); + } + + /// Checks if the map is empty + /** + Emptiness is checked by item counting: if item count is zero then the map is empty. + Thus, the correct item counting feature is an important part of Michael's map implementation. 
+ */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the map + size_t size() const + { + return base_class::size(); + } + + /// Returns internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Returns internal statistics for \p ordered_list + typename ordered_list::stat const& list_statistics() const + { + return base_class::list_statistics(); + } + }; +}} // namespace cds::container + + +#endif // #ifndef CDSLIB_CONTAINER_SPLIT_LIST_MAP_NOGC_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/split_list_map_rcu.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/split_list_map_rcu.h new file mode 100644 index 0000000..1ca9d91 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/split_list_map_rcu.h @@ -0,0 +1,720 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_SPLIT_LIST_MAP_RCU_H +#define CDSLIB_CONTAINER_SPLIT_LIST_MAP_RCU_H + +#include +#include + +namespace cds { namespace container { + + /// Split-ordered list map (template specialization for \ref cds_urcu_desc "RCU") + /** @ingroup cds_nonintrusive_map + \anchor cds_nonintrusive_SplitListMap_rcu + + Hash table implementation based on split-ordered list algorithm discovered by Ori Shalev and Nir Shavit, see + - [2003] Ori Shalev, Nir Shavit "Split-Ordered Lists - Lock-free Resizable Hash Tables" + - [2008] Nir Shavit "The Art of Multiprocessor Programming" + + See intrusive::SplitListSet for a brief description of the split-list algorithm. + + Template parameters: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p Key - key type to be stored in the map + - \p Value - value type to be stored in the map + - \p Traits - type traits, default is \p split_list::traits. Instead of declaring \p %split_list::traits -based + struct you may apply option-based notation with \p split_list::make_traits metafunction. + + Iterators + + The class supports a forward unordered iterator (\ref iterator and \ref const_iterator). + You may iterate over split-list map items only under RCU lock. + Only in this case the iterator is thread-safe since + while RCU is locked any map's item cannot be reclaimed. + The requirement of RCU lock during iterating means that deletion of the elements + is not possible. 
+ + @warning The iterator object cannot be passed between threads. + Due to concurrent nature of split-list map it is not guarantee that you can iterate + all elements in the map: any concurrent deletion can exclude the element + pointed by the iterator from the map, and your iteration can be terminated + before end of the map. Therefore, such iteration is more suitable for debugging purposes + + The iterator class supports the following minimalistic interface: + \code + struct iterator { + // Default ctor + iterator(); + + // Copy ctor + iterator( iterator const& s); + + value_type * operator ->() const; + value_type& operator *() const; + + // Pre-increment + iterator& operator ++(); + + // Copy assignment + iterator& operator = (const iterator& src); + + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced. + + \par Usage + + You should decide what garbage collector you want, and what ordered list you want to use. Split-ordered list + is original data structure based on an ordered list. Suppose, you want construct split-list map based on \p cds::urcu::general_buffered<> GC + and \p MichaelList as ordered list implementation. Your map should map \p int key to \p std::string value. + So, you beginning your program with following include: + \code + #include + #include + #include + + namespace cc = cds::container; + \endcode + The inclusion order is important: + - first, include one of \ref cds_urcu_gc "RCU implementation" (cds/urcu/general_buffered.h in our case) + - second, include the header of ordered-list implementation (for this example, cds/container/michael_list_rcu.h), + - then, the header for RCU-based split-list map cds/container/split_list_map_rcu.h. + + Now, you should declare traits for split-list map. 
The main parts of traits are a hash functor for the map key and a comparing functor for ordered list. + We use \p std::hash and \p std::less. + + The second attention: instead of using \p %MichaelList in \p %SplitListMap traits we use a tag \p ds::contaner::michael_list_tag + for the Michael's list. + The split-list requires significant support from underlying ordered list class and it is not good idea to dive you + into deep implementation details of split-list and ordered list interrelations. The tag paradigm simplifies split-list interface. + + \code + // SplitListMap traits + struct foo_set_traits: public cc::split_list::traits + { + typedef cc::michael_list_tag ordered_list ; // what type of ordered list we want to use + typedef std::hash hash ; // hash functor for the key stored in split-list map + + // Type traits for our MichaelList class + struct ordered_list_traits: public cc::michael_list::traits + { + typedef std::less less ; // use our std::less predicate as comparator to order list nodes + }; + }; + \endcode + + Now you are ready to declare our map class based on \p %SplitListMap: + \code + typedef cc::SplitListMap< cds::urcu::gc >, int, std::string, foo_set_traits > int_string_map; + \endcode + + You may use the modern option-based declaration instead of classic traits-based one: + \code + typedef cc::SplitListMap< + cds::urcu::gc > // RCU type + ,int // key type + ,std::string // value type + ,cc::split_list::make_traits< // metafunction to build split-list traits + cc::split_list::ordered_list // tag for underlying ordered list implementation + ,cc::opt::hash< std::hash > // hash functor + ,cc::split_list::ordered_list_traits< // ordered list traits desired + cc::michael_list::make_traits< // metafunction to build lazy list traits + cc::opt::less< std::less > // less-based compare functor + >::type + > + >::type + > int_string_map; + \endcode + In case of option-based declaration using \p split_list::make_traits metafunction the struct \p 
foo_set_traits is not required. + + Now, the map of type \p int_string_map is ready to use in your program. + + Note that in this example we show only mandatory \p traits parts, optional ones is the default and they are inherited + from cds::container::split_list::traits. + There are many other useful options for deep tuning the split-list and ordered-list containers. + */ + template < + class RCU, + typename Key, + typename Value, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = split_list::traits +#else + class Traits +#endif + > + class SplitListMap< cds::urcu::gc< RCU >, Key, Value, Traits >: + protected container::SplitListSet< + cds::urcu::gc< RCU >, + std::pair, + split_list::details::wrap_map_traits + > + { + //@cond + typedef container::SplitListSet< + cds::urcu::gc< RCU >, + std::pair, + split_list::details::wrap_map_traits + > base_class; + //@endcond + + public: + typedef cds::urcu::gc< RCU > gc; ///< Garbage collector + typedef Key key_type; ///< key type + typedef Value mapped_type; ///< type of value to be stored in the map + typedef Traits traits; ///< Map traits + + typedef std::pair value_type; ///< key-value pair type + typedef typename base_class::ordered_list ordered_list; ///< Underlying ordered list class + typedef typename base_class::key_comparator key_comparator; ///< key comparison functor + + typedef typename base_class::hash hash; ///< Hash functor for \ref key_type + typedef typename base_class::item_counter item_counter; ///< Item counter type + typedef typename base_class::stat stat; ///< Internal statistics + + typedef typename base_class::rcu_lock rcu_lock; ///< RCU scoped lock + typedef typename base_class::exempt_ptr exempt_ptr; ///< pointer to extracted node + /// Group of \p extract_xxx functions require external locking if underlying ordered list requires that + static constexpr const bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; + typedef typename base_class::raw_ptr raw_ptr; ///< type of \p get() return value 
+ + protected: + //@cond + typedef typename base_class::maker::traits::key_accessor key_accessor; + //@endcond + + public: + /// Forward iterator + typedef typename base_class::iterator iterator; + + /// Const forward iterator + typedef typename base_class::const_iterator const_iterator; + + /// Returns a forward iterator addressing the first element in a map + /** + For empty map \code begin() == end() \endcode + */ + iterator begin() + { + return base_class::begin(); + } + + /// Returns an iterator that addresses the location succeeding the last element in a map + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the map. + For empty map \code begin() == end() \endcode + */ + iterator end() + { + return base_class::end(); + } + + /// Returns a forward const iterator addressing the first element in a map + //@{ + const_iterator begin() const + { + return base_class::begin(); + } + const_iterator cbegin() const + { + return base_class::cbegin(); + } + //@} + + /// Returns an const iterator that addresses the location succeeding the last element in a map + //@{ + const_iterator end() const + { + return base_class::end(); + } + const_iterator cend() const + { + return base_class::cend(); + } + //@} + + public: + /// Initializes split-ordered map of default capacity + /** + The default capacity is defined in bucket table constructor. + See \p intrusive::split_list::expandable_bucket_table, \p intrusive::split_list::static_bucket_table + which selects by \p split_list::dynamic_bucket_table option. + */ + SplitListMap() + : base_class() + {} + + /// Initializes split-ordered map + SplitListMap( + size_t nItemCount ///< estimated average item count + , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 10, default is 1. 
+ ) + : base_class( nItemCount, nLoadFactor ) + {} + + public: + /// Inserts new node with key and default value + /** + The function creates a node with \p key and the default value, and then inserts the node created into the map. + + Preconditions: + - The \p key_type should be constructible from value of type \p K. + - The \p mapped_type should be default-constructible. + + The function applies RCU lock internally. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( K const& key ) + { + return base_class::emplace( key_type( key ), mapped_type()); + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node into the map. + + Preconditions: + - The \p key_type should be constructible from \p key of type \p K. + - The \p mapped_type should be constructible from \p val of type \p V. + + The function applies RCU lock internally. + + Returns \p true if \p val is inserted into the map, \p false otherwise. + */ + template + bool insert( K const& key, V const& val ) + { + //TODO: pass arguments by reference (make_pair makes copy) + return base_class::emplace( key_type( key ), mapped_type( val )); + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the map's item inserted: + - item.first is a const reference to item's key that cannot be changed. + - item.second is a reference to item's value that may be changed. + + It should be keep in mind that concurrent modifications of \p item.second in \p func body + should be careful. You shouldf guarantee that during changing item's value in \p func no any other changes + could be made on this \p item by concurrent threads. 
+ + \p func is called only if inserting is successful. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + + The function applies RCU lock internally. + */ + template + bool insert_with( K const& key, Func func ) + { + //TODO: pass arguments by reference (make_pair makes copy) + return base_class::insert( std::make_pair( key_type( key ), mapped_type()), func ); + } + + /// For key \p key inserts data of type \p mapped_type created in-place from \p args + /** + \p key_type should be constructible from type \p K + + The function applies RCU lock internally. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool emplace( K&& key, Args&&... args ) + { + return base_class::emplace( key_type( std::forward( key )), mapped_type( std::forward(args)... )); + } + + /// Updates data by \p key + /** + The operation performs inserting or replacing the element with lock-free manner. + + If the \p key not found in the map, then the new item created from \p key + will be inserted into the map iff \p bAllowInsert is \p true. + (note that in this case the \ref key_type should be constructible from type \p K). + Otherwise, if \p key is found, the functor \p func is called with item found. + + The functor \p Func signature is: + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - the item found or inserted + + The functor may change any fields of the \p item.second that is \p mapped_type. + + The function applies RCU lock internally. 
+ + Returns std::pair where \p first is true if operation is successful, + \p second is true if new item has been added or \p false if the item with \p key + already exists. + + @warning For \ref cds_nonintrusive_MichaelKVList_gc "MichaelKVList" as the ordered list see \ref cds_intrusive_item_creating "insert item troubleshooting". + \ref cds_nonintrusive_LazyKVList_gc "LazyKVList" provides exclusive access to inserted item and does not require any node-level + synchronization. + */ + template + std::pair update( K const& key, Func func, bool bAllowInsert = true ) + { + //TODO: pass arguments by reference (make_pair makes copy) + typedef decltype( std::make_pair( key_type( key ), mapped_type())) arg_pair_type; + + return base_class::update( std::make_pair( key_type( key ), mapped_type()), + [&func]( bool bNew, value_type& item, arg_pair_type const& /*val*/ ) { + func( bNew, item ); + }, + bAllowInsert ); + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( K const& key, Func func ) + { + return update( key, func, true ); + } + //@endcond + + /// Deletes \p key from the map + /** \anchor cds_nonintrusive_SplitListMap_rcu_erase_val + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if \p key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + return base_class::erase( key ); + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListMap_rcu_erase_val "erase(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. 
+ */ + template + bool erase_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::erase_with( key, cds::details::predicate_wrapper()); + } + + /// Deletes \p key from the map + /** \anchor cds_nonintrusive_SplitListMap_rcu_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface is: + \code + struct extractor { + void operator()(value_type& item) { ... } + }; + \endcode + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key, Func f ) + { + return base_class::erase( key, f ); + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListMap_rcu_erase_func "erase(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool erase_with( K const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::erase_with( key, cds::details::predicate_wrapper(), f ); + } + + /// Extracts an item from the map + /** \anchor cds_nonintrusive_SplitListMap_rcu_extract + The function searches an item with key equal to \p key in the map, + unlinks it from the map, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. + If the item with the key equal to \p key is not found the function returns an empty \p exempt_ptr. 
+ + Depends on ordered list you should or should not lock RCU before calling of this function: + - for the set based on \ref cds_intrusive_MichaelList_rcu "MichaelList" RCU should not be locked + - for the set based on \ref cds_intrusive_LazyList_rcu "LazyList" RCU should be locked + See ordered list implementation for details. + + \code + typedef cds::urcu::gc< general_buffered<> > rcu; + + // Split-list set based on MichaelList by default + typedef cds::container::SplitListMap< rcu, int, Foo > splitlist_map; + + splitlist_map theMap; + // ... + + typename splitlist_map::exempt_ptr p; + + // For MichaelList we should not lock RCU + + // Now, you can apply extract function + p = theMap.extract( 10 ) + if ( p ) { + // do something with p + ... + } + + // We may safely release p here + // release() passes the pointer to RCU reclamation cycle + p.release(); + \endcode + */ + template + exempt_ptr extract( K const& key ) + { + return base_class::extract( key ); + } + + /// Extracts an item from the map using \p pred predicate for searching + /** + The function is an analog of \p extract(K const&) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the map. + */ + template + exempt_ptr extract_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::extract_with( key, cds::details::predicate_wrapper()); + } + + /// Finds the key \p key + /** \anchor cds_nonintrusive_SplitListMap_rcu_find_cfunc + + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + The functor may change \p item.second. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. 
+ The functor does not serialize simultaneous access to the map's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The function applies RCU lock internally. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( K const& key, Func f ) + { + return base_class::find( key, [&f](value_type& pair, K const&){ f( pair ); } ); + } + + /// Finds the key \p key using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListMap_rcu_find_cfunc "find(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool find_with( K const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::find_with( key, + cds::details::predicate_wrapper(), + [&f](value_type& pair, K const&){ f( pair ); } ); + } + + /// Checks whether the map contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + The function applies RCU lock internally. + */ + template + bool contains( K const& key ) + { + return base_class::contains( key ); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( K const& key ) + { + return base_class::find( key ); + } + //@endcond + + /// Checks whether the map contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. 
+ */ + template + bool contains( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::contains( key, cds::details::predicate_wrapper()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( K const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Finds \p key and return the item found + /** \anchor cds_intrusive_SplitListMap_rcu_get + The function searches the item with key equal to \p key and returns the pointer to item found. + If \p key is not found it returns empty \p raw_ptr. + + Note the compare functor should accept a parameter of type \p K that can be not the same as \p value_type. + + RCU should be locked before call of this function. + Returned item is valid only while RCU is locked: + \code + typedef cds::urcu::gc< general_buffered<> > rcu; + typedef cds::container::SplitListMap< rcu, int, Foo > splitlist_map; + splitlist_map theMap; + // ... + { + // Lock RCU + typename splitlist_map::rcu_lock lock; + + typename splitlist_map::raw_ptr pVal = theMap.get( 5 ); + if ( pVal ) { + // Deal with pVal + //... + } + // Unlock RCU by rcu_lock destructor + // pVal can be retired by disposer at any time after RCU has been unlocked + } + \endcode + */ + template + raw_ptr get( K const& key ) + { + return base_class::get( key ); + } + + /// Finds \p key with predicate specified and return the item found + /** + The function is an analog of \ref cds_intrusive_SplitListMap_rcu_get "get(K const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p K + in any order. + \p pred must imply the same element order as the comparator used for building the map. 
+ */ + template + raw_ptr get_with( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::get_with( key, cds::details::predicate_wrapper()); + } + + /// Clears the map (not atomic) + void clear() + { + base_class::clear(); + } + + /// Checks if the map is empty + /** + Emptiness is checked by item counting: if item count is zero then the map is empty. + Thus, the correct item counting is an important part of the map implementation. + */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the map + size_t size() const + { + return base_class::size(); + } + + /// Returns internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Returns internal statistics for \p ordered_list + typename ordered_list::stat const& list_statistics() const + { + return base_class::list_statistics(); + } + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_SPLIT_LIST_MAP_RCU_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/split_list_set.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/split_list_set.h new file mode 100644 index 0000000..3c60d30 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/split_list_set.h @@ -0,0 +1,1004 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_SPLIT_LIST_SET_H +#define CDSLIB_CONTAINER_SPLIT_LIST_SET_H + +#include +#include +#include + +namespace cds { namespace container { + + /// Split-ordered list set + /** @ingroup cds_nonintrusive_set + \anchor cds_nonintrusive_SplitListSet_hp + + Hash table implementation based on split-ordered list algorithm discovered by Ori Shalev and Nir Shavit, see + - [2003] Ori Shalev, Nir Shavit "Split-Ordered Lists - Lock-free Resizable Hash Tables" + - [2008] Nir Shavit "The Art of Multiprocessor Programming" + + See \p intrusive::SplitListSet for a brief description of the split-list algorithm. + + Template parameters: + - \p GC - Garbage collector used + - \p T - type to be stored in the split-list. + - \p Traits - type traits, default is \p split_list::traits. Instead of declaring \p split_list::traits -based + struct you may apply option-based notation with \p split_list::make_traits metafunction. 
+
+ There are the specializations:
+ - for \ref cds_urcu_desc "RCU" - declared in cds/container/split_list_set_rcu.h,
+ see \ref cds_nonintrusive_SplitListSet_rcu "SplitListSet".
+ - for \ref cds::gc::nogc declared in cds/container/split_list_set_nogc.h,
+ see \ref cds_nonintrusive_SplitListSet_nogc "SplitListSet".
+
+ \par Usage
+
+ You should decide what garbage collector you want, and what ordered list you want to use as a base. Split-ordered list
+ is an original data structure based on an ordered list.
+
+ Suppose, you want to construct a split-list set based on \p gc::DHP GC
+ and \p LazyList as the ordered list implementation. So, you begin your program with the following includes:
+ \code
+ #include <cds/container/lazy_list_dhp.h>
+ #include <cds/container/split_list_set.h>
+
+ namespace cc = cds::container;
+
+ // The data belonging to the split-ordered list
+ struct foo {
+ int nKey; // key field
+ std::string strValue ; // value field
+ };
+ \endcode
+ The inclusion order is important: first, include the header for the ordered-list implementation (for this example, cds/container/lazy_list_dhp.h),
+ then the header for the split-list set cds/container/split_list_set.h.
+
+ Now, you should declare traits for the split-list set. The main parts of the traits are a hash functor for the set and a comparing functor for the ordered list.
+ Note that we define several functions in the foo_hash and foo_less functors for different argument types since we want to call our \p %SplitListSet
+ object by the key of type int and by the value of type foo.
+
+ The second point to note: instead of using \p %LazyList in the \p %SplitListSet traits we use the tag \p cds::container::lazy_list_tag for the lazy list.
+ The split-list requires significant support from the underlying ordered list class and it is not a good idea to dive
+ into the deep implementation details of split-list and ordered list interrelations. The tag paradigm simplifies the split-list interface.
+
+ \code
+ // foo hash functor
+ struct foo_hash {
+ size_t operator()( int key ) const { return std::hash<int>()( key ) ; }
+ size_t operator()( foo const& item ) const { return std::hash<int>()( item.nKey ) ; }
+ };
+
+ // foo comparator
+ struct foo_less {
+ bool operator()(int i, foo const& f ) const { return i < f.nKey ; }
+ bool operator()(foo const& f, int i ) const { return f.nKey < i ; }
+ bool operator()(foo const& f1, foo const& f2) const { return f1.nKey < f2.nKey; }
+ };
+
+ // SplitListSet traits
+ struct foo_set_traits: public cc::split_list::traits
+ {
+ typedef cc::lazy_list_tag ordered_list; // what type of ordered list we want to use
+ typedef foo_hash hash; // hash functor for our data stored in split-list set
+
+ // Type traits for our LazyList class
+ struct ordered_list_traits: public cc::lazy_list::traits
+ {
+ typedef foo_less less ; // use our foo_less as comparator to order list nodes
+ };
+ };
+ \endcode
+
+ Now you are ready to declare our set class based on \p %SplitListSet:
+ \code
+ typedef cc::SplitListSet< cds::gc::DHP, foo, foo_set_traits > foo_set;
+ \endcode
+
+ You may use the modern option-based declaration instead of the classic traits-based one:
+ \code
+ typedef cc::SplitListSet<
+ cds::gc::DHP // GC used
+ ,foo // type of data stored
+ ,cc::split_list::make_traits< // metafunction to build split-list traits
+ cc::split_list::ordered_list< cc::lazy_list_tag > // tag for underlying ordered list implementation
+ ,cc::opt::hash< foo_hash > // hash functor
+ ,cc::split_list::ordered_list_traits< // ordered list traits desired
+ cc::lazy_list::make_traits< // metafunction to build lazy list traits
+ cc::opt::less< foo_less > // less-based compare functor
+ >::type
+ >
+ >::type
+ > foo_set;
+ \endcode
+ In the case of option-based declaration using the split_list::make_traits metafunction
+ the struct \p foo_set_traits is not required.
+
+ Now, the set of type \p foo_set is ready to use in your program.
+ + Note that in this example we show only mandatory \p traits parts, optional ones is the default and they are inherited + from \p cds::container::split_list::traits. + There are many other options for deep tuning the split-list and ordered-list containers. + */ + template < + class GC, + class T, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = split_list::traits +#else + class Traits +#endif + > + class SplitListSet: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::SplitListSet +#else + protected details::make_split_list_set< GC, T, typename Traits::ordered_list, split_list::details::wrap_set_traits >::type +#endif + { + protected: + //@cond + typedef details::make_split_list_set< GC, T, typename Traits::ordered_list, split_list::details::wrap_set_traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + typedef GC gc; ///< Garbage collector + typedef T value_type; ///< Type of vlue to be stored in split-list + typedef Traits traits; ///< \p Traits template argument + typedef typename maker::ordered_list ordered_list; ///< Underlying ordered list class + typedef typename base_class::key_comparator key_comparator; ///< key compare functor + + /// Hash functor for \p %value_type and all its derivatives that you use + typedef typename base_class::hash hash; + typedef typename base_class::item_counter item_counter; ///< Item counter type + typedef typename base_class::stat stat; ///< Internal statistics + + /// Count of hazard pointer required + static constexpr const size_t c_nHazardPtrCount = base_class::c_nHazardPtrCount; + + protected: + //@cond + typedef typename maker::cxx_node_allocator cxx_node_allocator; + typedef typename maker::node_type node_type; + //@endcond + + public: + /// Guarded pointer + typedef typename gc::template guarded_ptr< node_type, value_type, details::guarded_ptr_cast_set > guarded_ptr; + + protected: + //@cond + template + class iterator_type: protected base_class::template iterator_type + { + typedef typename 
base_class::template iterator_type iterator_base_class; + friend class SplitListSet; + + public: + /// Value pointer type (const for const iterator) + typedef typename cds::details::make_const_type::pointer value_ptr; + /// Value reference type (const for const iterator) + typedef typename cds::details::make_const_type::reference value_ref; + + public: + /// Default ctor + iterator_type() + {} + + /// Copy ctor + iterator_type( iterator_type const& src ) + : iterator_base_class( src ) + {} + + protected: + explicit iterator_type( iterator_base_class const& src ) + : iterator_base_class( src ) + {} + + public: + /// Dereference operator + value_ptr operator ->() const + { + return &(iterator_base_class::operator->()->m_Value); + } + + /// Dereference operator + value_ref operator *() const + { + return iterator_base_class::operator*().m_Value; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base_class::operator++(); + return *this; + } + + /// Assignment operator + iterator_type& operator = (iterator_type const& src) + { + iterator_base_class::operator=(src); + return *this; + } + + /// Equality operator + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base_class::operator==(i); + } + + /// Equality operator + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base_class::operator!=(i); + } + }; + //@endcond + + public: + /// Initializes split-ordered list of default capacity + /** + The default capacity is defined in bucket table constructor. + See \p intrusive::split_list::expandable_bucket_table, \p intrusive::split_list::static_bucket_table + which selects by \p split_list::dynamic_bucket_table option. + */ + SplitListSet() + : base_class() + {} + + /// Initializes split-ordered list + SplitListSet( + size_t nItemCount ///< estimated average of item count + , size_t nLoadFactor = 1 ///< the load factor - average item count per bucket. Small integer up to 8, default is 1. 
+ ) + : base_class( nItemCount, nLoadFactor ) + {} + + public: + ///@name Forward iterators (only for debugging purpose) + //@{ + /// Forward iterator + /** + The forward iterator for a split-list has the following features: + - it has no post-increment operator + - it depends on underlying ordered list iterator + - The iterator object cannot be moved across thread boundary because it contains GC's guard that is thread-private GC data. + - Iterator ensures thread-safety even if you delete the item that iterator points to. However, in case of concurrent + deleting operations it is no guarantee that you iterate all item in the split-list. + Moreover, a crash is possible when you try to iterate the next element that has been deleted by concurrent thread. + + @warning Use this iterator on the concurrent container for debugging purpose only. + + The iterator interface: + \code + class iterator { + public: + // Default constructor + iterator(); + + // Copy construtor + iterator( iterator const& src ); + + // Dereference operator + value_type * operator ->() const; + + // Dereference operator + value_type& operator *() const; + + // Preincrement operator + iterator& operator ++(); + + // Assignment operator + iterator& operator = (iterator const& src); + + // Equality operators + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + */ + typedef iterator_type iterator; + + /// Const forward iterator + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a set + /** + For empty set \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( base_class::begin()); + } + + /// Returns an iterator that addresses the location succeeding the last element in a set + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the set. 
+ For empty set \code begin() == end() \endcode + */ + iterator end() + { + return iterator( base_class::end()); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator begin() const + { + return cbegin(); + } + /// Returns a forward const iterator addressing the first element in a set + const_iterator cbegin() const + { + return const_iterator( base_class::cbegin()); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a set + const_iterator end() const + { + return cend(); + } + /// Returns an const iterator that addresses the location succeeding the last element in a set + const_iterator cend() const + { + return const_iterator( base_class::cend()); + } + //@} + + public: + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the set. + + The type \p Q should contain as minimum the complete key for the node. + The object of \ref value_type should be constructible from a value of type \p Q. + In trivial case, \p Q is equal to \ref value_type. + + Returns \p true if \p val is inserted into the set, \p false otherwise. + */ + template + bool insert( Q&& val ) + { + return insert_node( alloc_node( std::forward( val ))); + } + + /// Inserts new node + /** + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. + + The user-defined functor is called only if the inserting is success. + + @warning For \ref cds_intrusive_MichaelList_hp "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". 
+ \ref cds_intrusive_LazyList_hp "LazyList" provides exclusive access to inserted item and does not require any node-level + synchronization. + */ + template + bool insert( Q&& val, Func f ) + { + scoped_node_ptr pNode( alloc_node( std::forward( val ))); + + if ( base_class::insert( *pNode, [&f](node_type& node) { f( node.m_Value ) ; } )) { + pNode.release(); + return true; + } + return false; + } + + /// Inserts data of type \p value_type created from \p args + /** + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool emplace( Args&&... args ) + { + return insert_node( alloc_node( std::forward(args)...)); + } + + /// Inserts or updates the node (only for \p IterableList -based set) + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val is not found in the set, then \p val is inserted iff \p bAllowInsert is \p true. + Otherwise, the current element is changed to \p val, the old element will be retired later. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if \p val has been added or \p false if the item with that key + already in the set. + */ + template +#ifdef CDS_DOXYGEN_INVOKED + std::pair +#else + typename std::enable_if< + std::is_same< Q, Q>::value && is_iterable_list< ordered_list >::value, + std::pair + >::type +#endif + upsert( Q&& val, bool bAllowInsert = true ) + { + scoped_node_ptr pNode( alloc_node( std::forward( val ))); + + auto bRet = base_class::upsert( *pNode, bAllowInsert ); + + if ( bRet.first ) + pNode.release(); + return bRet; + } + + /// Updates the node + /** + The operation performs inserting or changing data with lock-free manner. + + If \p key is not found in the set, then \p key is inserted iff \p bAllowInsert is \p true. + Otherwise, the functor \p func is called with item found. 
+ + The functor \p func signature depends of ordered list: + + for \p MichaelList, \p LazyList + \code + struct functor { + void operator()( bool bNew, value_type& item, Q const& val ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p %update() function + + The functor may change non-key fields of the \p item. + + for \p IterableList + \code + void func( value_type& val, value_type * old ); + \endcode + where + - \p val - a new data constructed from \p key + - \p old - old value that will be retired. If new item has been inserted then \p old is \p nullptr. + + Returns std::pair where \p first is true if operation is successful, + \p second is true if new item has been added or \p false if the item with \p key + already is in the set. + + @warning For \ref cds_intrusive_MichaelList_hp "MichaelList" and \ref cds_nonintrusive_IterableList_gc "IterableList" + as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". + \ref cds_intrusive_LazyList_hp "LazyList" provides exclusive access to inserted item and does not require any node-level + synchronization. 
+ */ + template +#ifdef CDS_DOXYGEN_INVOKED + std::pair +#else + typename std::enable_if< + std::is_same::value && !is_iterable_list::value, + std::pair + >::type +#endif + update( Q&& val, Func func, bool bAllowInsert = true ) + { + scoped_node_ptr pNode( alloc_node( std::forward( val ))); + + auto bRet = base_class::update( *pNode, + [&func, &val]( bool bNew, node_type& item, node_type const& /*val*/ ) { + func( bNew, item.m_Value, val ); + }, bAllowInsert ); + + if ( bRet.first && bRet.second ) + pNode.release(); + return bRet; + } + //@cond + template + typename std::enable_if< + std::is_same::value && is_iterable_list::value, + std::pair + >::type + update( Q&& val, Func func, bool bAllowInsert = true ) + { + scoped_node_ptr pNode( alloc_node( std::forward( val ))); + + auto bRet = base_class::update( *pNode, + [&func]( node_type& item, node_type* old ) { + func( item.m_Value, old ? &old->m_Value : nullptr ); + }, bAllowInsert ); + + if ( bRet.first ) + pNode.release(); + return bRet; + } + //@endcond + + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( Q const& val, Func func ) + { + return update( val, func, true ); + } + //@endcond + + /// Deletes \p key from the set + /** \anchor cds_nonintrusive_SplitListSet_erase_val + + The item comparator should be able to compare the values of type \p value_type + and the type \p Q. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key ) + { + return base_class::erase( key ); + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListSet_erase_val "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. 
+ */ + template + bool erase_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::erase_with( key, typename maker::template predicate_wrapper::type()); + } + + /// Deletes \p key from the set + /** \anchor cds_nonintrusive_SplitListSet_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type const& val); + }; + \endcode + + Since the key of split-list \p value_type is not explicitly specified, + template parameter \p Q defines the key type searching in the list. + The list item comparator should be able to compare the values of the type \p value_type + and the type \p Q. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key, Func f ) + { + return base_class::erase( key, [&f](node_type& node) { f( node.m_Value ); } ); + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListSet_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::erase_with( key, typename maker::template predicate_wrapper::type(), + [&f](node_type& node) { f( node.m_Value ); } ); + } + + /// Deletes the item pointed by iterator \p iter (only for \p IterableList based set) + /** + Returns \p true if the operation is successful, \p false otherwise. + The function can return \p false if the node the iterator points to has already been deleted + by other thread. + + The function does not invalidate the iterator, it remains valid and can be used for further traversing. 
+ + @note \p %erase_at() is supported only for \p %SplitListSet based on \p IterableList. + */ +#ifdef CDS_DOXYGEN_INVOKED + bool erase_at( iterator const& iter ) +#else + template + typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, bool >::type + erase_at( Iterator const& iter ) +#endif + { + return base_class::erase_at( static_cast( iter )); + } + + + /// Extracts the item with specified \p key + /** \anchor cds_nonintrusive_SplitListSet_hp_extract + The function searches an item with key equal to \p key, + unlinks it from the set, and returns it as \p guarded_ptr. + If \p key is not found the function returns an empty guarded pointer. + + Note the compare functor should accept a parameter of type \p Q that may be not the same as \p value_type. + + The extracted item is freed automatically when returned \p guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::container::SplitListSet< your_template_args > splitlist_set; + splitlist_set theSet; + // ... + { + splitlist_set::guarded_ptr gp(theSet.extract( 5 )); + if ( gp ) { + // Deal with gp + // ... + } + // Destructor of gp releases internal HP guard + } + \endcode + */ + template + guarded_ptr extract( Q const& key ) + { + return extract_( key ); + } + + /// Extracts the item using compare functor \p pred + /** + The function is an analog of \ref cds_nonintrusive_SplitListSet_hp_extract "extract(Q const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. 
+ */ + template + guarded_ptr extract_with( Q const& key, Less pred ) + { + return extract_with_( key, pred ); + } + + /// Finds the key \p key + /** \anchor cds_nonintrusive_SplitListSet_find_func + + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. + + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor + may modify both arguments. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( Q& key, Func f ) + { + return find_( key, f ); + } + //@cond + template + bool find( Q const& key, Func f ) + { + return find_( key, f ); + } + //@endcond + + /// Finds \p key and returns iterator pointed to the item found (only for \p IterableList -based set) + /** + If \p key is not found the function returns \p end(). 
+ + @note This function is supported only for the set based on \p IterableList + */ + template +#ifdef CDS_DOXYGEN_INVOKED + iterator +#else + typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type +#endif + find( Q& key ) + { + return find_iterator_( key ); + } + //@cond + template + typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type + find( Q const& key ) + { + return find_iterator_( key ); + } + //@endcond + + + /// Finds the key \p key using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListSet_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q& key, Less pred, Func f ) + { + return find_with_( key, pred, f ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) + { + return find_with_( key, pred, f ); + } + //@endcond + + /// Finds \p key using \p pred predicate and returns iterator pointed to the item found (only for \p IterableList -based set) + /** + The function is an analog of \p find(Q&) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + + If \p key is not found the function returns \p end(). 
+ + @note This function is supported only for the set based on \p IterableList + */ + template +#ifdef CDS_DOXYGEN_INVOKED + iterator +#else + typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type +#endif + find_with( Q& key, Less pred ) + { + return find_iterator_with_( key, pred ); + } + //@cond + template + typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type + find_with( Q const& key, Less pred ) + { + return find_iterator_with_( key, pred ); + } + //@endcond + + /// Checks whether the set contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + Otherwise, you may use \p contains( Q const&, Less pred ) functions with explicit predicate for key comparing. + */ + template + bool contains( Q const& key ) + { + return base_class::contains( key ); + } + + /// Checks whether the map contains \p key using \p pred predicate for searching + /** + The function is similar to contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool contains( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::contains( key, typename maker::template predicate_wrapper::type()); + } + + /// Finds the key \p key and return the item found + /** \anchor cds_nonintrusive_SplitListSet_hp_get + The function searches the item with key equal to \p key + and returns the item found as \p guarded_ptr. + If \p key is not found the function returns an empty guarded pointer. + + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. 
+ + Usage: + \code + typedef cds::container::SplitListSet< your_template_params > splitlist_set; + splitlist_set theSet; + // ... + { + splitlist_set::guarded_ptr gp(theSet.get( 5 )); + if ( gp ) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + + Note the compare functor specified for split-list set + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + guarded_ptr get( Q const& key ) + { + return get_( key ); + } + + /// Finds \p key and return the item found + /** + The function is an analog of \ref cds_nonintrusive_SplitListSet_hp_get "get( Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + guarded_ptr get_with( Q const& key, Less pred ) + { + return get_with_( key, pred ); + } + + /// Clears the set (not atomic) + void clear() + { + base_class::clear(); + } + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then assume that the set is empty. + Thus, the correct item counting feature is an important part of split-list set implementation. + */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the set + size_t size() const + { + return base_class::size(); + } + + /// Returns internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Returns internal statistics for \p ordered_list + typename ordered_list::stat const& list_statistics() const + { + return base_class::list_statistics(); + } + + protected: + //@cond + using base_class::extract_; + using base_class::get_; + + template + static node_type * alloc_node( Args&&... 
args ) + { + return cxx_node_allocator().MoveNew( std::forward( args )... ); + } + + static void free_node( node_type * pNode ) + { + cxx_node_allocator().Delete( pNode ); + } + + template + bool find_( Q& val, Func f ) + { + return base_class::find( val, [&f]( node_type& item, Q& v ) { f( item.m_Value, v ); } ); + } + + template + typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator>::type + find_iterator_( Q& val ) + { + return iterator( base_class::find( val )); + } + + template + bool find_with_( Q& val, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::find_with( val, typename maker::template predicate_wrapper::type(), + [&f]( node_type& item, Q& v ) { f( item.m_Value, v ); } ); + } + + template + typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator>::type + find_iterator_with_( Q& val, Less pred ) + { + CDS_UNUSED( pred ); + return iterator( base_class::find_with( val, typename maker::template predicate_wrapper::type())); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + + bool insert_node( node_type * pNode ) + { + assert( pNode != nullptr ); + scoped_node_ptr p( pNode ); + + if ( base_class::insert( *pNode )) { + p.release(); + return true; + } + return false; + } + + template + guarded_ptr extract_with_( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::extract_with_( key, typename maker::template predicate_wrapper::type()); + } + + template + guarded_ptr get_with_( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::get_with_( key, typename maker::template predicate_wrapper::type()); + } + + //@endcond + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_SPLIT_LIST_SET_H diff --git 
a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/split_list_set_nogc.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/split_list_set_nogc.h new file mode 100644 index 0000000..beca1d3 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/split_list_set_nogc.h @@ -0,0 +1,458 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_CONTAINER_SPLIT_LIST_SET_NOGC_H +#define CDSLIB_CONTAINER_SPLIT_LIST_SET_NOGC_H + +#include +#include +#include +#include + +namespace cds { namespace container { + + /// Split-ordered list set (template specialization for \p gc::nogc) + /** @ingroup cds_nonintrusive_set + \anchor cds_nonintrusive_SplitListSet_nogc + + This specialization is so-called append-only container when no item + reclamation may be performed. The class does not support deleting of list item. + + See \ref cds_nonintrusive_SplitListSet_hp "SplitListSet" for description of template parameters. + + @warning Many member functions return an iterator pointing to an item. + The iterator can be used to set up field of the item, + but you should provide an exclusive access to it, + see \ref cds_intrusive_item_creating "insert item troubleshooting". + */ + template < + class T, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = split_list::traits +#else + class Traits +#endif + > + class SplitListSet< cds::gc::nogc, T, Traits> +#ifdef CDS_DOXYGEN_INVOKED + :protected intrusive::SplitListSet +#else + :protected details::make_split_list_set< cds::gc::nogc, T, typename Traits::ordered_list, split_list::details::wrap_set_traits >::type +#endif + { + protected: + //@cond + typedef details::make_split_list_set< cds::gc::nogc, T, typename Traits::ordered_list, split_list::details::wrap_set_traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + typedef cds::gc::nogc gc; ///< Garbage collector + typedef T value_type; ///< type of value to be stored in the list + typedef Traits traits; ///< List traits + + typedef typename maker::ordered_list ordered_list; ///< Underlying ordered list class + typedef typename base_class::key_comparator key_comparator; ///< key comparison functor + + /// Hash functor for \ref value_type and all its derivatives that you use + typedef typename base_class::hash hash; + typedef typename base_class::item_counter item_counter; ///< Item 
counter type + typedef typename base_class::stat stat; ///< Internal statistics + + protected: + //@cond + typedef typename maker::cxx_node_allocator cxx_node_allocator; + typedef typename maker::node_type node_type; + + template + static node_type * alloc_node(Q const& v ) + { + return cxx_node_allocator().New( v ); + } + + template + static node_type * alloc_node( Args&&... args ) + { + return cxx_node_allocator().MoveNew( std::forward(args)...); + } + + static void free_node( node_type * pNode ) + { + cxx_node_allocator().Delete( pNode ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + //@endcond + + public: + /// Initialize split-ordered list of default capacity + /** + The default capacity is defined in bucket table constructor. + See \p intrusive::split_list::expandable_bucket_table, \p intrusive::split_list::static_bucket_table + which selects by \p split_list::dynamic_bucket_table option. + */ + SplitListSet() + : base_class() + {} + + /// Initialize split-ordered list + SplitListSet( + size_t nItemCount ///< estimated average of item count + , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 10, default is 1. 
+ ) + : base_class( nItemCount, nLoadFactor ) + {} + + protected: + //@cond + template + class iterator_type: protected base_class::template iterator_type + { + typedef typename base_class::template iterator_type iterator_base_class; + friend class SplitListSet; + + public: + /// Value pointer type (const for const iterator) + typedef typename cds::details::make_const_type::pointer value_ptr; + /// Value reference type (const for const iterator) + typedef typename cds::details::make_const_type::reference value_ref; + + public: + /// Default ctor + iterator_type() + {} + + /// Copy ctor + iterator_type( iterator_type const& src ) + : iterator_base_class( src ) + {} + + protected: + explicit iterator_type( iterator_base_class const& src ) + : iterator_base_class( src ) + {} + + public: + /// Dereference operator + value_ptr operator ->() const + { + return &(iterator_base_class::operator->()->m_Value); + } + + /// Dereference operator + value_ref operator *() const + { + return iterator_base_class::operator*().m_Value; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base_class::operator++(); + return *this; + } + + /// Assignment operator + iterator_type& operator = (iterator_type const& src) + { + iterator_base_class::operator=(src); + return *this; + } + + /// Equality operator + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base_class::operator==(i); + } + + /// Equality operator + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base_class::operator!=(i); + } + }; + //@endcond + + public: + ///@name Forward iterators + //@{ + /// Forward iterator + /** + The forward iterator for split-list is based on \p OrderedList forward iterator and has some features: + - it has no post-increment operator + - it iterates items in unordered fashion + + The iterator interface: + \code + class iterator { + public: + // Default constructor + iterator(); + + // Copy construtor + iterator( 
iterator const& src ); + + // Dereference operator + value_type * operator ->() const; + + // Dereference operator + value_type& operator *() const; + + // Preincrement operator + iterator& operator ++(); + + // Assignment operator + iterator& operator = (iterator const& src); + + // Equality operators + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + */ + typedef iterator_type iterator; + + /// Const forward iterator + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a set + /** + For empty set \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( base_class::begin()); + } + + /// Returns an iterator that addresses the location succeeding the last element in a set + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the set. + For empty set \code begin() == end() \endcode + */ + iterator end() + { + return iterator( base_class::end()); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator begin() const + { + return cbegin(); + } + /// Returns a forward const iterator addressing the first element in a set + const_iterator cbegin() const + { + return const_iterator( base_class::cbegin()); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a set + const_iterator end() const + { + return cend(); + } + /// Returns an const iterator that addresses the location succeeding the last element in a set + const_iterator cend() const + { + return const_iterator( base_class::cend()); + } + //@} + + protected: + //@cond + iterator insert_node( node_type * pNode ) + { + assert( pNode != nullptr ); + scoped_node_ptr p(pNode); + + iterator it( base_class::insert_( *pNode )); + if ( it != end()) { + p.release(); + return it; + } + + return end(); + } + //@endcond + + 
public: + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. + The \p value_type should be constructible from a value of type \p Q. + + Return an iterator pointing to inserted item if success \p end() otherwise + */ + template + iterator insert( const Q& val ) + { + return insert_node( alloc_node( val )); + } + + /// Inserts data of type \p value_type created from \p args + /** + Return an iterator pointing to inserted item if success \p end() otherwise + */ + template + iterator emplace( Args&&... args ) + { + return insert_node( alloc_node( std::forward(args)... )); + } + + /// Updates the item + /** + If \p key is not in the set and \p bAllowInsert is \p true, the function inserts a new item. + Otherwise, the function returns an iterator pointing to the item found. + + Returns std::pair where \p first is an iterator pointing to + item found or inserted (if inserting is not allowed and \p key is not found, the iterator will be \p end()), + + \p second is true if new item has been added or \p false if the item + already is in the set. + + @warning If the set is based on \ref cds_nonintrusive_MichaelList_nogc "MichaelList", + + see \ref cds_intrusive_item_creating "insert item troubleshooting". + \ref cds_nonintrusive_LazyList_nogc "LazyList" as the base provides exclusive access to inserted item + + and does not require any node-level synchronization. 
+ */ + template + std::pair update( Q const& key, bool bAllowInsert = true ) + { + scoped_node_ptr pNode( alloc_node( key )); + + std::pair ret = base_class::update_( *pNode, + + [](bool /*bNew*/, node_type& /*item*/, node_type& /*val*/){}, + bAllowInsert ); + if ( ret.first != base_class::end() && ret.second ) { + pNode.release(); + return std::make_pair( iterator(ret.first), ret.second ); + } + + return std::make_pair( iterator(ret.first), ret.second ); + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( const Q& val ) + { + return update( val, true ); + } + //@endcond + + /// Checks whether the set contains \p key + /** + The function searches the item with key equal to \p key + and returns an iterator pointed to item found and \ref end() otherwise + */ + template + iterator contains( Q const& key ) + { + return iterator( base_class::find_( key )); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + iterator find( Q const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the set contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + iterator contains( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return iterator( base_class::find_with_( key, typename maker::template predicate_wrapper::type())); + } + //@cond + // eprecated, use contains() + template + iterator find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Clears the set (not atomic, for debugging purposes only) + void clear() + { + base_class::clear(); + } + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. 
+ Thus, the correct item counting feature is an important part of split-list set implementation. + */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the set + size_t size() const + { + return base_class::size(); + } + + /// Returns internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Returns internal statistics for \p ordered_list + typename ordered_list::stat const& list_statistics() const + { + return base_class::list_statistics(); + } + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_SPLIT_LIST_SET_NOGC_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/split_list_set_rcu.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/split_list_set_rcu.h new file mode 100644 index 0000000..da65701 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/split_list_set_rcu.h @@ -0,0 +1,1005 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_SPLIT_LIST_SET_RCU_H +#define CDSLIB_CONTAINER_SPLIT_LIST_SET_RCU_H + +#include +#include +#include + +namespace cds { namespace container { + + //@cond + namespace split_list { namespace details { + + template < + typename T, + class OrdList, + typename OrdListTag + > + class make_raw_ptr; + +#ifdef CDSLIB_CONTAINER_DETAILS_MICHAEL_LIST_BASE_H + template + class make_raw_ptr< T, RawPtr, cds::container::michael_list_tag > + { + typedef RawPtr intrusive_raw_ptr; + typedef typename intrusive_raw_ptr::value_type node_type; + typedef T value_type; + + struct raw_ptr_converter + { + value_type * operator()( node_type * p ) const + { + return p ? &p->m_Value : nullptr; + } + + value_type& operator()( node_type& n ) const + { + return n.m_Value; + } + + value_type const& operator()( node_type const& n ) const + { + return n.m_Value; + } + }; + public: + typedef cds::urcu::raw_ptr_adaptor< value_type, intrusive_raw_ptr, raw_ptr_converter > raw_ptr; + + static raw_ptr make( intrusive_raw_ptr&& p ) + { + return raw_ptr(std::move( p )); + } + }; +#endif + +#ifdef CDSLIB_CONTAINER_DETAILS_LAZY_LIST_BASE_H + template + class make_raw_ptr< T, RawPtr, cds::container::lazy_list_tag > + { + typedef RawPtr node_type_pointer; + typedef T value_type; + + public: + typedef value_type * raw_ptr; + + static raw_ptr make( node_type_pointer p ) + { + return p ? 
&p->m_Value : nullptr; + } + }; +#endif + }} //namespace split_list::details + //@endcond + + /// Split-ordered list set (template specialization for \ref cds_urcu_desc "RCU") + /** @ingroup cds_nonintrusive_set + \anchor cds_nonintrusive_SplitListSet_rcu + + Hash table implementation based on split-ordered list algorithm discovered by Ori Shalev and Nir Shavit, see + - [2003] Ori Shalev, Nir Shavit "Split-Ordered Lists - Lock-free Resizable Hash Tables" + - [2008] Nir Shavit "The Art of Multiprocessor Programming" + + See \p intrusive::SplitListSet for a brief description of the split-list algorithm. + + Template parameters: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p T - type of the value to be stored in the split-list. + - \p Traits - type traits, default is \p split_list::traits. Instead of declaring \p split_list::traits -based + struct you can apply option-based notation with \p split_list::make_traits metafunction. + + Iterators + + The class supports a forward iterator (\ref iterator and \ref const_iterator). + The iteration is unordered. + + You may iterate over split-list set items only under RCU lock. + Only in this case the iterator is thread-safe since + while RCU is locked any set's item cannot be reclaimed. + + @warning The iterator object cannot be passed between threads + + \warning Due to concurrent nature of skip-list set it is not guarantee that you can iterate + all elements in the set: any concurrent deletion can exclude the element + pointed by the iterator from the set, and your iteration can be terminated + before end of the set. 
Therefore, such iteration is more suitable for debugging purposes + + The iterator class supports the following minimalistic interface: + \code + struct iterator { + // Default ctor + iterator(); + + // Copy ctor + iterator( iterator const& s); + + value_type * operator ->() const; + value_type& operator *() const; + + // Pre-increment + iterator& operator ++(); + + // Copy assignment + iterator& operator = (const iterator& src); + + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + Note, the iterator object returned by \p end(), \p cend() member functions points to \p nullptr and should not be dereferenced. + + \par Usage + + You should decide what garbage collector you want, and what ordered list you want to use. Split-ordered list + is an original data structure based on an ordered list. Suppose, you want construct split-list set based on \p cds::urcu::general_buffered<> GC + and \p LazyList as ordered list implementation. So, you beginning your program with following include: + \code + #include + #include + #include + + namespace cc = cds::container; + + // The data belonged to split-ordered list + sturuct foo { + int nKey; // key field + std::string strValue ; // value field + }; + \endcode + The inclusion order is important: + - first, include one of \ref cds_urcu_gc "RCU implementation" (cds/urcu/general_buffered.h in our case) + - second, include file for ordered-list implementation (for this example, cds/container/lazy_list_rcu.h), + - then, the header for RCU-based split-list set cds/container/split_list_set_rcu.h. + + Now, you should declare traits for split-list set. The main parts of traits are a hash functor for the set and a comparing functor for ordered list. + Note that we define several function in \p foo_hash and \p foo_less functors for different argument types since we want call our \p %SplitListSet + object by the key of type \p int and by the value of type \p foo. 
+ + The second attention: instead of using \p %LazyList in \p %SplitListSet traits we use \p cds::contaner::lazy_list_tag tag for the lazy list. + The split-list requires significant support from underlying ordered list class and it is not good idea to dive you + into deep implementation details of split-list and ordered list interrelations. The tag paradigm simplifies split-list interface. + + \code + // foo hash functor + struct foo_hash { + size_t operator()( int key ) const { return std::hash( key ) ; } + size_t operator()( foo const& item ) const { return std::hash( item.nKey ) ; } + }; + + // foo comparator + struct foo_less { + bool operator()(int i, foo const& f ) const { return i < f.nKey ; } + bool operator()(foo const& f, int i ) const { return f.nKey < i ; } + bool operator()(foo const& f1, foo const& f2) const { return f1.nKey < f2.nKey; } + }; + + // SplitListSet traits + struct foo_set_traits: public cc::split_list::traits + { + typedef cc::lazy_list_tag ordered_list ; // what type of ordered list we want to use + typedef foo_hash hash ; // hash functor for our data stored in split-list set + + // Type traits for our LazyList class + struct ordered_list_traits: public cc::lazy_list::traits + { + typedef foo_less less ; // use our foo_less as comparator to order list nodes + }; + }; + \endcode + + Now you are ready to declare our set class based on \p %SplitListSet: + \code + typedef cc::SplitListSet< cds::urcu::gc >, foo, foo_set_traits > foo_set; + \endcode + + You may use the modern option-based declaration instead of classic type-traits-based one: + \code + typedef cc::SplitListSet< + cds::urcu::gc > // RCU type used + ,foo // type of data stored + ,cc::split_list::make_traits< // metafunction to build split-list traits + cc::split_list::ordered_list // tag for underlying ordered list implementation + ,cc::opt::hash< foo_hash > // hash functor + ,cc::split_list::ordered_list_traits< // ordered list traits + cc::lazy_list::make_traits< // 
metafunction to build lazy list traits + cc::opt::less< foo_less > // less-based compare functor + >::type + > + >::type + > foo_set; + \endcode + In case of option-based declaration using \p split_list::make_traits metafunction + the struct \p foo_set_traits is not required. + + Now, the set of type \p foo_set is ready to use in your program. + + Note that in this example we show only mandatory \p traits parts, optional ones is the default and they are inherited + from \p container::split_list::traits. + There are many other options for deep tuning of the split-list and ordered-list containers. + */ + template < + class RCU, + class T, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = split_list::traits +#else + class Traits +#endif + > + class SplitListSet< cds::urcu::gc< RCU >, T, Traits >: +#ifdef CDS_DOXYGEN_INVOKED + protected intrusive::SplitListSet< cds::urcu::gc< RCU >, T, typename Traits::ordered_list, Traits > +#else + protected details::make_split_list_set< cds::urcu::gc< RCU >, T, typename Traits::ordered_list, split_list::details::wrap_set_traits >::type +#endif + { + protected: + //@cond + typedef details::make_split_list_set< cds::urcu::gc< RCU >, T, typename Traits::ordered_list, split_list::details::wrap_set_traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + typedef cds::urcu::gc< RCU > gc; ///< RCU-based garbage collector + typedef T value_type; ///< Type of value to be storedin the set + typedef Traits traits; ///< \p Traits template argument + + // Note: ordered_list is not real ordered list type. 
Actual type is base_class::ordered_list + typedef typename maker::ordered_list ordered_list; ///< Underlying ordered list class + typedef typename base_class::key_comparator key_comparator; ///< key compare functor + + /// Hash functor for \ref value_type and all its derivatives that you use + typedef typename base_class::hash hash; + typedef typename base_class::item_counter item_counter; ///< Item counter type + typedef typename base_class::stat stat; ///< Internal statistics + + typedef typename base_class::rcu_lock rcu_lock ; ///< RCU scoped lock + /// Group of \p extract_xxx functions require external locking if underlying ordered list requires that + static constexpr const bool c_bExtractLockExternal = base_class::c_bExtractLockExternal; + + protected: + //@cond + typedef typename maker::cxx_node_allocator cxx_node_allocator; + typedef typename maker::node_type node_type; + //@endcond + + public: + /// pointer to extracted node + using exempt_ptr = cds::urcu::exempt_ptr< gc, node_type, value_type, typename maker::ordered_list_traits::disposer >; +# ifdef CDS_DOXYGEN_INVOKED + /// pointer to the node for \p get() function + /** + For \p LazyList, \p %raw_ptr is just pointer to \p value_type. + + For \p MichaelList, \p %raw_ptr is \p cds::urcu::raw_ptr object giving access to \p value_type. 
+ */ + typedef implementation_defined raw_ptr; +# else + private: + typedef split_list::details::make_raw_ptr< value_type, typename base_class::ordered_list::raw_ptr, typename traits::ordered_list > raw_ptr_maker; + public: + typedef typename raw_ptr_maker::raw_ptr raw_ptr; +#endif + + protected: + //@cond + template + bool find_( Q& val, Func f ) + { + return base_class::find( val, [&f]( node_type& item, Q& v ) { f(item.m_Value, v) ; } ); + } + + template + bool find_with_( Q& val, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::find_with( val, typename maker::template predicate_wrapper::type(), + [&f]( node_type& item, Q& v ) { f(item.m_Value, v) ; } ); + } + + template + static node_type * alloc_node( Q const& v ) + { + return cxx_node_allocator().New( v ); + } + + template + static node_type * alloc_node( Args&&... args ) + { + return cxx_node_allocator().MoveNew( std::forward(args)...); + } + + static void free_node( node_type * pNode ) + { + cxx_node_allocator().Delete( pNode ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + + bool insert_node( node_type * pNode ) + { + assert( pNode != nullptr ); + scoped_node_ptr p(pNode); + + if ( base_class::insert( *pNode )) { + p.release(); + return true; + } + + return false; + } + //@endcond + + protected: + //@cond + template + class iterator_type: protected base_class::template iterator_type + { + typedef typename base_class::template iterator_type iterator_base_class; + friend class SplitListSet; + + public: + /// Value pointer type (const for const iterator) + typedef typename cds::details::make_const_type::pointer value_ptr; + /// Value reference type (const for const iterator) + typedef typename cds::details::make_const_type::reference value_ref; + + public: + /// Default ctor + iterator_type() + {} + + /// Copy ctor + iterator_type( iterator_type const& src ) + : 
iterator_base_class( src ) + {} + + protected: + explicit iterator_type( iterator_base_class const& src ) + : iterator_base_class( src ) + {} + + public: + /// Dereference operator + value_ptr operator ->() const + { + return &(iterator_base_class::operator->()->m_Value); + } + + /// Dereference operator + value_ref operator *() const + { + return iterator_base_class::operator*().m_Value; + } + + /// Pre-increment + iterator_type& operator ++() + { + iterator_base_class::operator++(); + return *this; + } + + /// Assignment operator + iterator_type& operator = (iterator_type const& src) + { + iterator_base_class::operator=(src); + return *this; + } + + /// Equality operator + template + bool operator ==(iterator_type const& i ) const + { + return iterator_base_class::operator==(i); + } + + /// Equality operator + template + bool operator !=(iterator_type const& i ) const + { + return iterator_base_class::operator!=(i); + } + }; + //@endcond + + public: + /// Initializes split-ordered list of default capacity + /** + The default capacity is defined in bucket table constructor. + See \p intrusive::split_list::expandable_bucket_table, \p intrusive::split_list::static_bucket_table + which selects by \p container::split_list::dynamic_bucket_table option. + */ + SplitListSet() + : base_class() + {} + + /// Initializes split-ordered list + SplitListSet( + size_t nItemCount ///< estimated average of item count + , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 8, default is 1. + ) + : base_class( nItemCount, nLoadFactor ) + {} + + public: + ///@name Forward iterators (thread-safe under RCU lock) + //@{ + /// Forward iterator + /** + The forward iterator for Michael's set is based on \p OrderedList forward iterator and has some features: + - it has no post-increment operator + - it iterates items in unordered fashion + + You may safely use iterators in multi-threaded environment only under RCU lock. 
+ Otherwise, a crash is possible if another thread deletes the element the iterator points to. + + The iterator interface: + \code + class iterator { + public: + // Default constructor + iterator(); + + // Copy construtor + iterator( iterator const& src ); + + // Dereference operator + value_type * operator ->() const; + + // Dereference operator + value_type& operator *() const; + + // Preincrement operator + iterator& operator ++(); + + // Assignment operator + iterator& operator = (iterator const& src); + + // Equality operators + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + */ + typedef iterator_type iterator; + + /// Forward const iterator + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a set + /** + For empty set \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( base_class::begin()); + } + + /// Returns an iterator that addresses the location succeeding the last element in a set + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the set. 
+ For empty set \code begin() == end() \endcode + */ + iterator end() + { + return iterator( base_class::end()); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator begin() const + { + return cbegin(); + } + /// Returns a forward const iterator addressing the first element in a set + const_iterator cbegin() const + { + return const_iterator( base_class::cbegin()); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a set + const_iterator end() const + { + return cend(); + } + /// Returns an const iterator that addresses the location succeeding the last element in a set + const_iterator cend() const + { + return const_iterator( base_class::cend()); + } + //@} + + public: + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the set. + + The type \p Q should contain as minimum the complete key for the node. + The object of \p value_type should be constructible from a value of type \p Q. + In trivial case, \p Q is equal to \p value_type. + + The function applies RCU lock internally. + + Returns \p true if \p val is inserted into the set, \p false otherwise. + */ + template + bool insert( Q const& val ) + { + return insert_node( alloc_node( val )); + } + + /// Inserts new node + /** + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this set's item by concurrent threads. + The user-defined functor is called only if the inserting is success. + + The function applies RCU lock internally. 
+ */ + template + bool insert( Q const& key, Func f ) + { + scoped_node_ptr pNode( alloc_node( key )); + + if ( base_class::insert( *pNode, [&f](node_type& node) { f( node.m_Value ) ; } )) { + pNode.release(); + return true; + } + return false; + } + + /// Inserts data of type \p value_type created from \p args + /** + Returns \p true if inserting successful, \p false otherwise. + + The function applies RCU lock internally. + */ + template + bool emplace( Args&&... args ) + { + return insert_node( alloc_node( std::forward(args)...)); + } + + /// Updates an element with given \p val + /** + The operation performs inserting or changing data with lock-free manner. + + If the \p val key not found in the set, then the new item created from \p val + is inserted into the set. Otherwise, the functor \p func is called with the item found. + The functor \p Func signature is: + \code + struct my_functor { + void operator()( bool bNew, value_type& item, const Q& val ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p %ensure() function + + The functor may change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + The function applies RCU lock internally. + + Returns std::pair where \p first is true if operation is successful, + \p second is true if new item has been added or \p false if the item with \p key + already is in the set. + */ + /// Updates the node + /** + The operation performs inserting or changing data with lock-free manner. + + If \p key is not found in the set, then \p key is inserted iff \p bAllowInsert is \p true. + Otherwise, the functor \p func is called with item found. 
+ + The functor signature is: + \code + struct my_functor { + void operator()( bool bNew, value_type& item, const Q& val ); + }; + \endcode + + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p %update() function + + The functor may change non-key fields of the \p item. + + The function applies RCU lock internally. + + Returns std::pair where \p first is true if operation is successful, + \p second is true if new item has been added or \p false if the item with \p key + already is in the map. + + @warning For \ref cds_intrusive_MichaelList_rcu "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". + \ref cds_intrusive_LazyList_rcu "LazyList" provides exclusive access to inserted item and does not require any node-level + synchronization. + */ + template + std::pair update( Q const& val, Func func, bool bAllowInsert = true ) + { + scoped_node_ptr pNode( alloc_node( val )); + + std::pair bRet = base_class::update( *pNode, + [&func, &val]( bool bNew, node_type& item, node_type const& /*val*/ ) { + func( bNew, item.m_Value, val ); + }, bAllowInsert ); + if ( bRet.first && bRet.second ) + pNode.release(); + return bRet; + } + //@cond + // Dprecated, use update() + template + std::pair ensure( Q const& val, Func func ) + { + return update( val, func, true ); + } + //@endcond + + /// Deletes \p key from the set + /** \anchor cds_nonintrusive_SplitListSet_rcu_erase_val + + Template parameter of type \p Q defines the key type searching in the list. + The set item comparator should be able to compare the values of type \p value_type + and the type \p Q. + + RCU \p synchronize method can be called. RCU should not be locked. 
+ + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key ) + { + return base_class::erase( key ); + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListSet_rcu_erase_val "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::erase_with( key, typename maker::template predicate_wrapper::type()); + } + + /// Deletes \p key from the set + /** \anchor cds_nonintrusive_SplitListSet_rcu_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type const& val); + }; + \endcode + + Template parameter of type \p Q defines the key type searching in the list. + The list item comparator should be able to compare the values of the type \p value_type + and the type \p Q. + + RCU \p synchronize method can be called. RCU should not be locked. + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key, Func f ) + { + return base_class::erase( key, [&f](node_type& node) { f( node.m_Value ); } ); + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListSet_rcu_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. 
+ */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::erase_with( key, typename maker::template predicate_wrapper::type(), + [&f](node_type& node) { f( node.m_Value ); } ); + } + + /// Extracts an item from the set + /** \anchor cds_nonintrusive_SplitListSet_rcu_extract + The function searches an item with key equal to \p key in the set, + unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. + If the item with the key equal to \p key is not found the function returns an empty \p exempt_ptr. + + Depends on \p bucket_type you should or should not lock RCU before calling of this function: + - for the set based on \ref cds_intrusive_MichaelList_rcu "MichaelList" RCU should not be locked + - for the set based on \ref cds_intrusive_LazyList_rcu "LazyList" RCU should be locked + See ordered list implementation for details. + + \code + typedef cds::urcu::gc< general_buffered<> > rcu; + + // Split-list set based on MichaelList by default + typedef cds::container::SplitListSet< rcu, Foo > splitlist_set; + + splitlist_set theSet; + // ... + + splitlist_set::exempt_ptr p; + + // For MichaelList we should not lock RCU + + // Now, you can apply extract function + p = theSet.extract( 10 ); + if ( p ) { + // do something with p + ... + } + + // We may safely release p here + // release() passes the pointer to RCU reclamation cycle + p.release(); + \endcode + */ + template + exempt_ptr extract( Q const& key ) + { + return exempt_ptr( base_class::extract_( key, key_comparator())); + } + + /// Extracts an item from the set using \p pred predicate for searching + /** + The function is an analog of \p extract(Q const&) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. 
+ */ + template + exempt_ptr extract_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return exempt_ptr( base_class::extract_with_( key, typename maker::template predicate_wrapper::type())); + } + + /// Finds the key \p key + /** \anchor cds_nonintrusive_SplitListSet_rcu_find_func + + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. + + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set's \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function makes RCU lock internally. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( Q& key, Func f ) + { + return find_( key, f ); + } + //@cond + template + bool find( Q const& key, Func f ) + { + return find_( key, f ); + } + //@endcond + + /// Finds the key \p key using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_SplitListSet_rcu_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. 
+ */ + template + bool find_with( Q& key, Less pred, Func f ) + { + return find_with_( key, pred, f ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) + { + return find_with_( key, pred, f ); + } + //@endcond + + /// Checks whether the set contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + Otherwise, you may use \p contains( Q const&, Less pred ) functions with explicit predicate for key comparing. + + The function applies RCU lock internally. + */ + template + bool contains( Q const& key ) + { + return base_class::contains( key ); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( Q const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the map contains \p key using \p pred predicate for searching + /** + The function is similar to contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template + bool contains( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::contains( key, typename maker::template predicate_wrapper::type()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Finds the key \p key and return the item found + /** \anchor cds_nonintrusive_SplitListSet_rcu_get + The function searches the item with key equal to \p key and returns the pointer to item found. + If \p key is not found it returns \p nullptr. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. 
+ + RCU should be locked before call of this function. + Returned item is valid only while RCU is locked: + \code + typedef cds::urcu::gc< general_buffered<> > rcu; + typedef cds::container::SplitListSet< rcu, Foo > splitlist_set; + splitlist_set theSet; + // ... + { + // Lock RCU + splitlist_set::rcu_lock lock; + + foo * pVal = theSet.get( 5 ); + if ( pVal ) { + // Deal with pVal + //... + } + // Unlock RCU by rcu_lock destructor + // pVal can be retired by disposer at any time after RCU has been unlocked + } + \endcode + */ + template + raw_ptr get( Q const& key ) + { + return raw_ptr_maker::make( base_class::get( key )); + } + + /// Finds the key \p key and return the item found + /** + The function is an analog of \ref cds_nonintrusive_SplitListSet_rcu_get "get(Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + raw_ptr get_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return raw_ptr_maker::make( base_class::get_with( key, typename maker::template predicate_wrapper::type())); + } + + /// Clears the set (not atomic) + void clear() + { + base_class::clear(); + } + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then assume that the set is empty. + Thus, the correct item counting feature is an important part of split-list set implementation. 
+ */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the set + size_t size() const + { + return base_class::size(); + } + + /// Returns internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + + /// Returns internal statistics for \p ordered_list + typename ordered_list::stat const& list_statistics() const + { + return base_class::list_statistics(); + } + }; +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_SPLIT_LIST_SET_RCU_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map.h new file mode 100644 index 0000000..e2e4126 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map.h @@ -0,0 +1,927 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_STRIPED_MAP_H +#define CDSLIB_CONTAINER_STRIPED_MAP_H + +#include +#include +#include +#include + +namespace cds { namespace container { + + //@cond + namespace details { + template + class make_striped_map + { + typedef StripedSet< Container, Options...> billet; + typedef typename billet::options billet_options; + typedef typename billet_options::hash billet_hash; + + typedef typename Container::value_type pair_type; + typedef typename pair_type::first_type key_type; + + struct options: public billet_options { + struct hash: public billet_hash { + size_t operator()( pair_type const& v ) const + { + return billet_hash::operator()( v.first ); + } + + template + size_t operator()( Q const& v ) const + { + return billet_hash::operator()( v ); + } + }; + }; + + public: + typedef StripedSet< Container, cds::opt::type_traits< options > > type ; ///< metafunction result + }; + } + //@endcond + + /// Striped hash map + /** @ingroup cds_nonintrusive_map + + Source + - [2008] Maurice Herlihy, Nir Shavit "The Art of Multiprocessor Programming" + + Lock striping is very simple technique. + The map consists of the bucket table and the array of locks. + Initially, the capacity of lock array and bucket table is the same. + When the map is resized, bucket table capacity will be doubled but lock array will not. + The lock \p i protects each bucket \p j, where j = i mod L , + where \p L - the size of lock array. 
+ + Template arguments: + - \p Container - the container class that is used as bucket entry. The \p Container class should support + an uniform interface described below. + - \p Options - options + + The \p %StripedMap class does not exactly specify the type of container that should be used as a \p Container bucket. + Instead, the class supports different container type for the bucket, for exampe, \p std::list, \p std::map and others. + + Remember that \p %StripedMap class algorithm ensures sequential blocking access to its bucket through the mutex type you specify + among \p Options template arguments. + + The \p Options are: + - \p cds::opt::mutex_policy - concurrent access policy. + Available policies: \p striped_set::striping, \p striped_set::refinable. + Default is \p %striped_set::striping. + - \p cds::opt::hash - hash functor. Default option value see opt::v::hash_selector + which selects default hash functor for your compiler. + - \p cds::opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the \p %opt::less is used. + - \p cds::opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - \p cds::opt::item_counter - item counter type. Default is \p atomicity::item_counter since some operation on the counter is performed + without locks. Note that item counting is an essential part of the map algorithm, so dummy counter + like as \p atomicity::empty_item_counter is not suitable. + - \p cds::opt::allocator - the allocator type using for memory allocation of bucket table and lock array. Default is \ref CDS_DEFAULT_ALLOCATOR. + - \p cds::opt::resizing_policy - the resizing policy that is a functor that decides when to resize the hash map. 
+ Default option value depends on bucket container type: + for sequential containers like \p std::list, \p std::vector the resizing policy is striped_set::load_factor_resizing<4> ; + for other type of containers like \p std::map, \p std::unordered_map the resizing policy is \p striped_set::no_resizing. + See \ref cds_striped_resizing_policy "available resizing policy". + Note that the choose of resizing policy depends of \p Container type: + for sequential containers like \p std::list, \p std::vector and so on, right choosing of the policy can + significantly improve performance. + For other, non-sequential types of \p Container (like a \p std::map) + the resizing policy is not so important. + - \p cds::opt::copy_policy - the copy policy which is used to copy items from the old map to the new one when resizing. + The policy can be optionally used in adapted bucket container for performance reasons of resizing. + The detail of copy algorithm depends on type of bucket container and explains below. + + \p %opt::compare or \p %opt::less options are used only in some \p Container class for searching an item. + \p %opt::compare option has the highest priority: if \p %opt::compare is specified, \p %opt::less is not used. + + You can pass other option that would be passed to adapt metafunction, see below. + + Internal details + + The \p %StripedMap class cannot utilize the \p Container container specified directly, but only its adapted variant which + supports an unified interface. Internally, the adaptation is made via \p striped_set::adapt metafunction that wraps bucket container + and provides the unified bucket interface suitable for \p %StripedMap. Such adaptation is completely transparent for you - + you don't need to call \p adapt metafunction directly, \p %StripedMap class's internal machinery itself invokes appropriate + \p adapt metafunction to adjust your \p Container container class to \p %StripedMap bucket's internal interface. 
+ All you need is to include a right header before striped_hash_map.h. + + By default, striped_set::adapt metafunction does not make any wrapping to \p AnyContainer, + so, the result striped_set::adapt::type is the same as \p AnyContainer. + However, there are a lot of specializations of \p adapt for well-known containers, see table below. + Any of this specialization wraps corresponding container making it suitable for the map's bucket. + Remember, you should include the proper header file for \p adapt before striped_map.h. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Container.h-file for \p adaptExampleNotes
\p std::list\code + #include + #include + typedef cds::container::StripedMap< + std::list< std::pair< const Key, V > >, + cds::opt::less< std::less > + > striped_map; + \endcode + + The type of values stored in the \p std::list must be std::pair< const Key, V > , where \p Key - key type, and \p V - value type + The list is ordered by key \p Key. + Template argument pack \p Options must contain \p cds::opt::less or \p cds::opt::compare for type \p Key stored in the list. +
\p std::map\code + #include + #include + typedef cds::container::StripedMap< + std::map< Key, T, std::less > + > striped_map; + \endcode + +
\p std::unordered_map\code + #include + #include + typedef cds::container::StripedMap< + std::unordered_map< + Key, T, + std::hash, + std::equal_to + > + > striped_map; + \endcode + + You should provide two different hash function \p h1 and \p h2 - one for std::unordered_map and other for \p %StripedMap. + For the best result, \p h1 and \p h2 must be orthogonal i.e. h1(X) != h2(X) for any value \p X of type \p Key. +
\p boost::container::slist\code + #include + #include + typedef cds::container::StripedMap< + boost::container::slist< std::pair< const Key, T > > + > striped_map; + \endcode + + The type of values stored in the \p boost::container::slist must be std::pair< const Key, T > , + where \p Key - key type, and \p T - value type. The list is ordered. + \p Options must contain \p cds::opt::less or \p cds::opt::compare. +
\p boost::container::list\code + #include + #include + typedef cds::container::StripedMap< + boost::container::list< std::pair< const Key, T > > + > striped_map; + \endcode + + The type of values stored in the \p boost::container::list must be std::pair< const Key, T > , + where \p Key - key type, and \p T - value type. The list is ordered. + \p Options must contain \p cds::opt::less or \p cds::opt::compare. +
\p boost::container::map\code + #include + #include + typedef cds::container::StripedMap< + boost::container::map< Key, T, std::less > + > striped_map; + \endcode + +
\p boost::container::flat_map\code + #include + #include + typedef cds::container::StripedMap< + boost::container::flat_map< Key, T, + std::less< std::less > + > + > striped_map; + \endcode + +
\p boost::unordered_map\code + #include + #include + typedef cds::container::StripedMap< + boost::unordered_map< Key, T, boost::hash, std::equal_to > + > refinable_map; + \endcode + +
+ + + You can use another container type as map's bucket. + Suppose, you have a container class \p MyBestContainer and you want to integrate it with \p %StripedMap as bucket type. + There are two possibilities: + - either your \p MyBestContainer class has native support of bucket's interface; + in this case, you can use default striped_set::adapt metafunction; + - or your \p MyBestContainer class does not support bucket's interface; it means you should develop a specialization + cds::container::striped_set::adapt metafunction providing necessary interface. + + The striped_set::adapt< Container, Options... > metafunction has two template arguments: + - \p Container is the class that should be used as the bucket, for example, std::list< std::pair< Key, T > >. + - \p Options pack is the options from \p %StripedMap declaration. The \p adapt metafunction can use + any option from \p Options for its internal use. For example, a \p compare option can be passed to \p adapt + metafunction via \p Options argument of \p %StripedMap declaration. + + See \p striped_set::adapt metafunction for the description of interface that the bucket container must provide + to be \p %StripedMap compatible. + + Copy policy + There are three predefined copy policies: + - \p cds::container::striped_set::copy_item - copy item from old bucket to new one when resizing using copy ctor. It is the default policy for + any compiler that does not support move semantics + - \p cds::container::striped_set::move_item - move item from old bucket to new one when resizing using move semantics. It is the default policy for + any compiler that supports move semantics. If the compiler does not support move semantics, the move policy is the same as \p copy_item + - \p cds::container::striped_set::swap_item - copy item from old bucket to new one when resizing using \p std::swap. Not all containers support + this copy policy, see details in table below. + + You can define your own copy policy specifically for your case.
+ Note, right copy policy can significantly improve the performance of resizing. + + + + + + + + + + + + + + + + + +
ContainerPolicies
+ - \p std::list + - \p boost::list + \code + struct copy_item { + void operator()( + std::list< std::pair >& list, + std::list >::iterator itInsert, + std::list >::iterator itWhat ) + { + list.insert( itInsert, *itWhat ); + } + } \endcode + + \code + // The type T stored in the list must be swappable + struct swap_item { + void operator()( + std::list< std::pair >& list, + std::list >::iterator itInsert, + std::list >::iterator itWhat ) + { + std::pair newVal( itWhat->first, T()); + std::swap( list.insert( itInsert, newVal )->second, itWhat->second ); + } + } \endcode + + \code + struct move_item { + void operator()( + std::list< std::pair >& list, + std::list >::iterator itInsert, + std::list >::iterator itWhat ) + { + list.insert( itInsert, std::move( *itWhat )); + } + } \endcode +
+ - \p std::map + - \p std::unordered_map + - \p boost::container::map + - \p boost::container::flat_map + - \p boost::unordered_map + \code + struct copy_item { + void operator()( std::map< Key, T>& map, std::map::iterator itWhat ) + { + map.insert( *itWhat ); + } + } \endcode + + \code + struct swap_item { + void operator()( std::map< Key, T>& map, std::map::iterator itWhat ) + { + std::swap( + map.insert( + std::map::value_type( itWhat->first, T())).first->second + , itWhat->second + )); + } + } \endcode + \p T type must be swappable. + + \code + struct move_item { + void operator()( std::map< Key, T>& map, std::map::iterator itWhat ) + { + map.insert( std::move( *itWhat )); + } + } \endcode +
\p boost::container::slist\code + struct copy_item { + void operator()( + bc::slist< std::pair >& list, + bc::slist >::iterator itInsert, + bc::slist >::iterator itWhat ) + { + list.insert_after( itInsert, *itWhat ); + } + } \endcode + + \code + // The type T stored in the list must be swappable + struct swap_item { + void operator()( + bc::slist< std::pair >& list, + bc::slist >::iterator itInsert, + bc::slist >::iterator itWhat ) + { + std::pair newVal( itWhat->first, T()); + std::swap( list.insert( itInsert, newVal )->second, itWhat->second ); + } + } \endcode + + \code + struct move_item { + void operator()( + bc::slist< std::pair >& list, + bc::slist >::iterator itInsert, + bc::slist >::iterator itWhat ) + { + list.insert_after( itInsert, std::move( *itWhat )); + } + } \endcode +
+ + Advanced functions + + The library provides some advanced functions like \p erase_with(), \p find_with(), + that cannot be supported by all underlying containers. + The table below shows whether underlying container supports those functions + (the sign "+" means "container supports the function"): + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Container\p find_with\p erase_with
\p std::list++
\p std::map--
\p std::unordered_map--
\p boost::container::slist++
\p boost::container::list++
\p boost::container::map--
\p boost::container::flat_map--
\p boost::unordered_map--
+ + **/ +template + class StripedMap +#ifdef CDS_DOXYGEN_INVOKED + : protected StripedSet +#else + : protected details::make_striped_map< Container, Options...>::type +#endif + { + //@cond + typedef typename details::make_striped_map< Container, Options...>::type base_class; + //@endcond + + public: + //@cond + typedef typename base_class::default_options default_options; + typedef typename base_class::options options; + //@endcond + + typedef Container underlying_container_type ; ///< original intrusive container type for the bucket + typedef typename base_class::bucket_type bucket_type ; ///< container type adapted for hash set + typedef typename bucket_type::value_type value_type ; ///< pair type ( std::pair ) + typedef typename value_type::first_type key_type ; ///< key type + typedef typename value_type::second_type mapped_type ; ///< mapped type + + typedef typename base_class::hash hash ; ///< Hash functor + typedef typename base_class::item_counter item_counter ; ///< Item counter + typedef typename base_class::resizing_policy resizing_policy ; ///< Resizing policy + typedef typename base_class::allocator_type allocator_type ; ///< allocator type specified in options. + typedef typename base_class::mutex_policy mutex_policy ; ///< Mutex policy + + protected: + //@cond + typedef typename base_class::scoped_cell_lock scoped_cell_lock; + typedef typename base_class::scoped_full_lock scoped_full_lock; + typedef typename base_class::scoped_resize_lock scoped_resize_lock; + //@endcond + + private: + //@cond + struct key_accessor { + key_type const& operator()( value_type const& p ) const + { + return p.first; + } + }; + //@endcond + + public: + /// Default ctor. The initial capacity is 16. + StripedMap() + : base_class() + {} + + /// Ctor with initial capacity specified + StripedMap( + size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16. 
+ ) : base_class( nCapacity ) + {} + + /// Ctor with resizing policy (copy semantics) + /** + This constructor initializes m_ResizingPolicy member with copy of \p resizingPolicy parameter + */ + StripedMap( + size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16. + ,resizing_policy const& resizingPolicy ///< Resizing policy + ) : base_class( nCapacity, resizingPolicy ) + {} + + /// Ctor with resizing policy (move semantics) + /** + This constructor initializes m_ResizingPolicy member moving \p resizingPolicy parameter + Move semantics is used. Available only for the compilers that supports C++11 rvalue reference. + */ + StripedMap( + size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16. + ,resizing_policy&& resizingPolicy ///< Resizing policy + ) : base_class( nCapacity, std::forward(resizingPolicy)) + {} + + /// Destructor destroys internal data + ~StripedMap() + {} + + public: + /// Inserts new node with key and default value + /** + The function creates a node with \p key and default value, and then inserts the node created into the map. + + Preconditions: + - The \p key_type should be constructible from a value of type \p K. + In trivial case, \p K is equal to \p key_type. + - The \p mapped_type should be default-constructible. + + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool insert( K const& key ) + { + return insert_with( key, [](value_type&){} ); + } + + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the map. + + Preconditions: + - The \p key_type should be constructible from \p key of type \p K. + - The \p mapped_type should be constructible from \p val of type \p V. + + Returns \p true if \p val is inserted into the set, \p false otherwise. 
+ */ + template + bool insert( K const& key, V const& val ) + { + return insert_with( key, [&val](value_type& item) { item.second = val ; } ); + } + + /// Inserts new node and initialize it by a functor + /** + This function inserts new node with key \p key and if inserting is successful then it calls + \p func functor with signature + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + The argument \p item of user-defined functor \p func is the reference + to the map's item inserted: + - item.first is a const reference to item's key that cannot be changed. + - item.second is a reference to item's value that may be changed. + + The key_type should be constructible from value of type \p K. + + The function allows to split creating of new item into two part: + - create item from \p key; + - insert new item into the map; + - if inserting is successful, initialize the value of item by calling \p func functor + + This can be useful if complete initialization of object of \p mapped_type is heavyweight and + it is preferable that the initialization should be completed only if inserting is successful. + */ + template + bool insert_with( const K& key, Func func ) + { + return base_class::insert( key, func ); + } + + /// For key \p key inserts data of type \p mapped_type created in-place from \p args + /** + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool emplace( K&& key, Args&&... 
args ) + { + bool bOk; + bool bResize; + size_t nHash = base_class::hashing( std::forward(key)); + bucket_type * pBucket; + { + scoped_cell_lock sl( base_class::m_MutexPolicy, nHash ); + pBucket = base_class::bucket( nHash ); + + bOk = pBucket->emplace( std::forward(key), std::forward(args)...); + bResize = bOk && base_class::m_ResizingPolicy( ++base_class::m_ItemCounter, *this, *pBucket ); + } + + if ( bResize ) + base_class::resize(); + + return bOk; + } + + /// Updates the node + /** + The operation performs inserting or changing data with lock-free manner. + + If \p key is not found in the map, then \p key is inserted iff \p bAllowInsert is \p true. + Otherwise, the functor \p func is called with item found. + + The functor signature is: + \code + struct my_functor { + void operator()( bool bNew, value_type& item ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the map + + Returns std::pair where \p first is true if operation is successful, + \p second is true if new item has been added or \p false if the item with \p key + already is in the map. 
+ */ + template + std::pair update( K const& key, Func func, bool bAllowInsert = true ) + { + std::pair result; + bool bResize; + size_t nHash = base_class::hashing( key ); + bucket_type * pBucket; + { + scoped_cell_lock sl( base_class::m_MutexPolicy, nHash ); + pBucket = base_class::bucket( nHash ); + + result = pBucket->update( key, func, bAllowInsert ); + bResize = result.first && result.second && base_class::m_ResizingPolicy( ++base_class::m_ItemCounter, *this, *pBucket ); + } + + if ( bResize ) + base_class::resize(); + return result; + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update() instead") + std::pair ensure( K const& key, Func func ) + { + return update( key, func, true ); + } + //@endcond + + /// Delete \p key from the map + /** \anchor cds_nonintrusive_StripedMap_erase + + Return \p true if \p key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key ) + { + return base_class::erase( key ); + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_StripedMap_erase "erase(K const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the map. + + @note This function is enabled if the compiler supports C++11 + default template arguments for function template and the underlying container + supports \p %erase_with feature. + */ + template < typename K, typename Less + ,typename Bucket = bucket_type, typename = typename std::enable_if< Bucket::has_erase_with >::type > + bool erase_with( K const& key, Less pred ) + { + return erase_with( key, pred, [](value_type const&) {} ); + } + + /// Delete \p key from the map + /** \anchor cds_nonintrusive_StripedMap_erase_func + + The function searches an item with key \p key, calls \p f functor + and deletes the item. 
If \p key is not found, the functor is not called. + + The functor \p Func interface: + \code + struct extractor { + void operator()(value_type& item) { ... } + }; + \endcode + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( K const& key, Func f ) + { + return base_class::erase( key, f ); + } + + /// Deletes the item from the map using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_StripedMap_erase_func "erase(K const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the map. + + @note This function is enabled if the compiler supports C++11 + default template arguments for function template and the underlying container + supports \p %erase_with feature. + */ + template ::type > + bool erase_with( K const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::erase_with( key, cds::details::predicate_wrapper< value_type, Less, key_accessor >(), f ); + } + + /// Find the key \p key + /** \anchor cds_nonintrusive_StripedMap_find_func + + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + The functor may change \p item.second. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( K const& key, Func f ) + { + return base_class::find( key, [&f]( value_type& pair, K const& ) mutable { f(pair); } ); + } + + /// Find the key \p val using \p pred predicate + /** + The function is an analog of \ref cds_nonintrusive_StripedMap_find_func "find(K const&, Func)" + but \p pred is used for key comparing + \p Less has the interface like \p std::less. 
+ \p pred must imply the same element order as the comparator used for building the set. + + @note This function is enabled if the compiler supports C++11 + default template arguments for function template and the underlying container + supports \p %find_with feature. + */ + template ::type > + bool find_with( K const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return base_class::find_with( key, cds::details::predicate_wrapper< value_type, Less, key_accessor >(), + [&f]( value_type& pair, K const& ) mutable { f(pair); } ); + } + + /// Checks whether the map contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + */ + template + bool contains( K const& key ) + { + return base_class::contains( key ); + } + //@cond + template + CDS_DEPRECATED("use contains()") + bool find( K const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the set contains \p key using \p pred predicate for searching + /** + The function is similar to contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + + @note This function is enabled if the compiler supports C++11 + default template arguments for function template and the underlying container + supports \p %contains() feature. 
+ */ + template ::type > + bool contains( K const& key, Less pred ) + { + CDS_UNUSED( pred ); + return base_class::contains( key, cds::details::predicate_wrapper< value_type, Less, key_accessor >()); + } + //@cond + template ::type > + CDS_DEPRECATED("use contains()") + bool find_with( K const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Clears the map + void clear() + { + base_class::clear(); + } + + /// Checks if the map is empty + /** + Emptiness is checked by item counting: if item count is zero then the map is empty. + */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the map + size_t size() const + { + return base_class::size(); + } + + /// Returns the size of hash table + /** + The hash table size is non-constant and can be increased via resizing. + */ + size_t bucket_count() const + { + return base_class::bucket_count(); + } + + /// Returns lock array size + /** + The lock array size is constant. + */ + size_t lock_count() const + { + return base_class::lock_count(); + } + + /// Returns resizing policy object + resizing_policy& get_resizing_policy() + { + return base_class::get_resizing_policy(); + } + + /// Returns resizing policy (const version) + resizing_policy const& get_resizing_policy() const + { + return base_class::get_resizing_policy(); + } + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_STRIPED_MAP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/boost_flat_map.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/boost_flat_map.h new file mode 100644 index 0000000..a3da94c --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/boost_flat_map.h @@ -0,0 +1,80 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: 
http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_CONTAINER_STRIPED_MAP_BOOST_FLAT_MAP_ADAPTER_H +#define CDSLIB_CONTAINER_STRIPED_MAP_BOOST_FLAT_MAP_ADAPTER_H + +#include +#if BOOST_VERSION < 104800 +# error "For boost::container::flat_map you must use boost 1.48 or above" +#endif + +#include +#include + + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for map + template + struct copy_item_policy< boost::container::flat_map< Key, T, Traits, Alloc > > + : public details::boost_map_copy_policies >::copy_item_policy + {}; + + // Swap item policy + template + struct swap_item_policy< boost::container::flat_map< Key, T, Traits, Alloc > > + : public details::boost_map_copy_policies >::swap_item_policy + {}; + + // Move policy for map + template + struct move_item_policy< boost::container::flat_map< Key, T, Traits, Alloc > > + : public details::boost_map_copy_policies >::move_item_policy + {}; + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + + template + class adapt< boost::container::flat_map< Key, T, Traits, Alloc>, Options... > + { + public: + typedef boost::container::flat_map< Key, T, Traits, Alloc> container_type ; ///< underlying container type + typedef cds::container::striped_set::details::boost_map_adapter< container_type, Options... 
> type; + }; +}}} // namespace cds::intrusive::striped_set + +//@endcond + +#endif // #ifndef CDSLIB_CONTAINER_STRIPED_MAP_BOOST_FLAT_MAP_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/boost_list.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/boost_list.h new file mode 100644 index 0000000..7c91fcf --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/boost_list.h @@ -0,0 +1,298 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_STRIPED_MAP_BOOST_LIST_ADAPTER_H +#define CDSLIB_CONTAINER_STRIPED_MAP_BOOST_LIST_ADAPTER_H + +#include +#if BOOST_VERSION < 104800 +# error "For boost::container::list you must use boost 1.48 or above" +#endif + +#include // ref +#include // std::lower_bound +#include // std::pair +#include +#include + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for map + template + struct copy_item_policy< boost::container::list< std::pair< K const, T >, Alloc > > + { + typedef std::pair< K const, T> pair_type; + typedef boost::container::list< pair_type, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + list.insert( itInsert, *itWhat ); + } + }; + + // Swap policy for map + template + struct swap_item_policy< boost::container::list< std::pair< K const, T >, Alloc > > + { + typedef std::pair< K const, T> pair_type; + typedef boost::container::list< pair_type, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + pair_type newVal( itWhat->first, typename pair_type::second_type()); + itInsert = list.insert( itInsert, newVal ); + std::swap( itInsert->second, itWhat->second ); + } + }; + + // Move policy for map + template + struct move_item_policy< boost::container::list< std::pair< K const, T >, 
Alloc > > + { + typedef std::pair< K const, T> pair_type; + typedef boost::container::list< pair_type, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + list.insert( itInsert, std::move( *itWhat )); + } + }; + } // namespace striped_set +}} // namespace cds:container + +namespace cds { namespace intrusive { namespace striped_set { + + /// boost::container::list adapter for hash map bucket + template + class adapt< boost::container::list< std::pair, Alloc>, Options... > + { + public: + typedef boost::container::list< std::pair, Alloc> container_type ; ///< underlying container type + + private: + /// Adapted container type + class adapted_container: public cds::container::striped_set::adapted_sequential_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename value_type::first_type key_type; + typedef typename value_type::second_type mapped_type; + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = true; + static bool const has_erase_with = true; + + private: + //@cond + typedef typename cds::opt::details::make_comparator_from_option_list< value_type, Options... >::type key_comparator; + + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , Options... 
+ >::type + >::copy_policy + , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy + , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy + , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy + >::type copy_item; + + struct find_predicate + { + bool operator()( value_type const& i1, value_type const& i2) const + { + return key_comparator()( i1.first, i2.first ) < 0; + } + + template + bool operator()( Q const& i1, value_type const& i2) const + { + return key_comparator()( i1, i2.first ) < 0; + } + + template + bool operator()( value_type const& i1, Q const& i2) const + { + return key_comparator()( i1.first, i2 ) < 0; + } + }; + //@endcond + + private: + //@cond + container_type m_List; + //@endcond + + public: + adapted_container() + {} + + template + bool insert( const Q& key, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, find_predicate()); + if ( it == m_List.end() || key_comparator()( key, it->first ) != 0 ) { + //value_type newItem( key ); + it = m_List.insert( it, value_type( key_type( key ), mapped_type())); + f( *it ); + + return true; + } + + // key already exists + return false; + } + + template + bool emplace( K&& key, Args&&... args ) + { + value_type val( key_type( std::forward( key )), mapped_type( std::forward( args )... 
)); + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val.first, find_predicate()); + if ( it == m_List.end() || key_comparator()( val.first, it->first ) != 0 ) { + m_List.emplace( it, std::move( val )); + return true; + } + return false; + } + + template + std::pair update( const Q& key, Func func, bool bAllowInsert ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, find_predicate()); + if ( it == m_List.end() || key_comparator()( key, it->first ) != 0 ) { + // insert new + if ( !bAllowInsert ) + return std::make_pair( false, false ); + + it = m_List.insert( it, value_type( key_type( key ), mapped_type())); + func( true, *it ); + + return std::make_pair( true, true ); + } + else { + // already exists + func( false, *it ); + return std::make_pair( true, false ); + } + } + + template + bool erase( Q const& key, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, find_predicate()); + if ( it == m_List.end() || key_comparator()( key, it->first ) != 0 ) + return false; + + // key exists + f( *it ); + m_List.erase( it ); + + return true; + } + + template + bool erase( Q const& key, Less pred, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, pred ); + if ( it == m_List.end() || pred( key, it->first ) || pred(it->first, key)) + return false; + + // key exists + f( *it ); + m_List.erase( it ); + + return true; + } + + template + bool find( Q& val, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate()); + if ( it == m_List.end() || key_comparator()( val, it->first ) != 0 ) + return false; + + // key exists + f( *it, val ); + return true; + } + + template + bool find( Q& val, Less pred, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, pred ); + if ( it == m_List.end() || pred( val, it->first ) || pred( it->first, val )) + return false; + + // key exists + f( *it, val ); + return true; + } + + /// 
Clears the container + void clear() + { + m_List.clear(); + } + + iterator begin() { return m_List.begin(); } + const_iterator begin() const { return m_List.begin(); } + iterator end() { return m_List.end(); } + const_iterator end() const { return m_List.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), *itWhat, find_predicate()); + assert( it == m_List.end() || key_comparator()( itWhat->first, it->first ) != 0 ); + + copy_item()( m_List, it, itWhat ); + } + + size_t size() const + { + return m_List.size(); + } + }; + + public: + typedef adapted_container type ; ///< Result of \p adapt metafunction + + }; + +}}} // namespace cds::intrusive::striped_set +//@endcond + +#endif // #ifndef CDSLIB_CONTAINER_STRIPED_MAP_BOOST_LIST_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/boost_map.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/boost_map.h new file mode 100644 index 0000000..845f73c --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/boost_map.h @@ -0,0 +1,80 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_STRIPED_MAP_BOOST_MAP_ADAPTER_H +#define CDSLIB_CONTAINER_STRIPED_MAP_BOOST_MAP_ADAPTER_H + +#include +#if BOOST_VERSION < 104800 +# error "For boost::container::map you must use boost 1.48 or above" +#endif + +#include +#include + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for map + template + struct copy_item_policy< boost::container::map< Key, T, Traits, Alloc > > + : public details::boost_map_copy_policies >::copy_item_policy + {}; + + // Swap item policy + template + struct swap_item_policy< boost::container::map< Key, T, Traits, Alloc > > + : public details::boost_map_copy_policies >::swap_item_policy + {}; + + // Move policy for map + template + struct move_item_policy< boost::container::map< Key, T, Traits, Alloc > > + : public details::boost_map_copy_policies >::move_item_policy + {}; + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + + /// std::set adapter for hash set bucket + template + class adapt< boost::container::map< Key, T, Traits, Alloc>, Options... 
> + { + public: + typedef boost::container::map< Key, T, Traits, Alloc> container_type ; ///< underlying container type + typedef cds::container::striped_set::details::boost_map_adapter< container_type, Options... > type; + }; +}}} // namespace cds::intrusive::striped_set + +//@endcond + +#endif // #ifndef CDSLIB_CONTAINER_STRIPED_MAP_BOOST_MAP_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/boost_slist.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/boost_slist.h new file mode 100644 index 0000000..64b2135 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/boost_slist.h @@ -0,0 +1,308 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_STRIPED_MAP_BOOST_SLIST_ADAPTER_H +#define CDSLIB_CONTAINER_STRIPED_MAP_BOOST_SLIST_ADAPTER_H + +#include +#if BOOST_VERSION < 104800 +# error "For boost::container::slist you must use boost 1.48 or above" +#endif + +#include // ref +#include // std::pair +#include +#include + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for map + template + struct copy_item_policy< boost::container::slist< std::pair< K const, T >, Alloc > > + { + typedef std::pair< K const, T> pair_type; + typedef boost::container::slist< pair_type, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + itInsert = list.insert_after( itInsert, *itWhat ); + } + }; + + // Swap policy for map + template + struct swap_item_policy< boost::container::slist< std::pair< K const, T >, Alloc > > + { + typedef std::pair< K const, T> pair_type; + typedef boost::container::slist< pair_type, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + pair_type newVal( itWhat->first, typename pair_type::mapped_type()); + itInsert = list.insert_after( itInsert, newVal ); + std::swap( itInsert->second, itWhat->second ); + } + }; + + // Move policy for map + template + struct move_item_policy< boost::container::slist< std::pair< K const, T 
>, Alloc > > + { + typedef std::pair< K const, T> pair_type; + typedef boost::container::slist< pair_type, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + list.insert_after( itInsert, std::move( *itWhat )); + } + }; + } // namespace striped_set +}} // namespace cds:container + +namespace cds { namespace intrusive { namespace striped_set { + + /// boost::container::slist adapter for hash map bucket + template + class adapt< boost::container::slist< std::pair, Alloc>, Options... > + { + public: + typedef boost::container::slist< std::pair, Alloc> container_type ; ///< underlying container type + + private: + /// Adapted container type + class adapted_container: public cds::container::striped_set::adapted_sequential_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename value_type::first_type key_type; + typedef typename value_type::second_type mapped_type; + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = true; + static bool const has_erase_with = true; + + private: + //@cond + typedef typename cds::opt::details::make_comparator_from_option_list< value_type, Options... >::type key_comparator; + + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , Options... 
+ >::type + >::copy_policy + , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy + , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy + , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy + >::type copy_item; + + template + std::pair< iterator, bool > find_prev_item( Q const& key ) + { + iterator itPrev = m_List.before_begin(); + iterator itEnd = m_List.end(); + for ( iterator it = m_List.begin(); it != itEnd; ++it ) { + int nCmp = key_comparator()( key, it->first ); + if ( nCmp < 0 ) + itPrev = it; + else if ( nCmp > 0 ) + break; + else + return std::make_pair( itPrev, true ); + } + return std::make_pair( itPrev, false ); + } + + template + std::pair< iterator, bool > find_prev_item( Q const& key, Less pred ) + { + iterator itPrev = m_List.before_begin(); + iterator itEnd = m_List.end(); + for ( iterator it = m_List.begin(); it != itEnd; ++it ) { + if ( pred( key, it->first )) + itPrev = it; + else if ( pred(it->first, key)) + break; + else + return std::make_pair( itPrev, true ); + } + return std::make_pair( itPrev, false ); + } + //@endcond + + private: + //@cond + container_type m_List; + //@endcond + + public: + adapted_container() + {} + + template + bool insert( const Q& key, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item( key ); + if ( !pos.second ) { + pos.first = m_List.insert_after( pos.first, value_type( key_type( key ), mapped_type())); + f( *pos.first ); + return true; + } + + // key already exists + return false; + } + + template + bool emplace( K&& key, Args&&... args ) + { + std::pair< iterator, bool > pos = find_prev_item( key ); + if ( !pos.second ) { + m_List.emplace_after( pos.first, key_type( std::forward( key )), mapped_type( std::forward( args )... 
)); + return true; + } + return false; + } + + template + std::pair update( const Q& key, Func func, bool bAllowInsert ) + { + std::pair< iterator, bool > pos = find_prev_item( key ); + if ( !pos.second ) { + // insert new + if ( !bAllowInsert ) + return std::make_pair( false, false ); + + pos.first = m_List.insert_after( pos.first, value_type( key_type( key ), mapped_type())); + func( true, *pos.first ); + return std::make_pair( true, true ); + } + else { + // already exists + func( false, *(++pos.first)); + return std::make_pair( true, false ); + } + } + + template + bool erase( Q const& key, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item( key ); + if ( !pos.second ) + return false; + + // key exists + iterator it = pos.first; + f( *(++it)); + m_List.erase_after( pos.first ); + + return true; + } + + template + bool erase( Q const& key, Less pred, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item( key, pred ); + if ( !pos.second ) + return false; + + // key exists + iterator it = pos.first; + f( *(++it)); + m_List.erase_after( pos.first ); + + return true; + } + + template + bool find( Q& val, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item( val ); + if ( !pos.second ) + return false; + + // key exists + f( *(++pos.first), val ); + return true; + } + + template + bool find( Q& val, Less pred, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item( val, pred ); + if ( !pos.second ) + return false; + + // key exists + f( *(++pos.first), val ); + return true; + } + + void clear() + { + m_List.clear(); + } + + iterator begin() { return m_List.begin(); } + const_iterator begin() const { return m_List.begin(); } + iterator end() { return m_List.end(); } + const_iterator end() const { return m_List.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + std::pair< iterator, bool > pos = find_prev_item( itWhat->first ); + assert( !pos.second ); + + copy_item()( m_List, pos.first, itWhat ); 
+ } + + size_t size() const + { + return m_List.size(); + } + }; + + public: + typedef adapted_container type ; ///< Result of \p adapt metafunction + + }; +}}} // namespace cds::intrusive::striped_set + + +//@endcond + +#endif // #ifndef CDSLIB_CONTAINER_STRIPED_MAP_BOOST_SLIST_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/boost_unordered_map.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/boost_unordered_map.h new file mode 100644 index 0000000..f73c4bf --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/boost_unordered_map.h @@ -0,0 +1,76 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_STRIPED_MAP_BOOST_UNORDERED_MAP_ADAPTER_H +#define CDSLIB_CONTAINER_STRIPED_MAP_BOOST_UNORDERED_MAP_ADAPTER_H + +#include +#include + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for map + template + struct copy_item_policy< boost::unordered_map< Key, T, Traits, Alloc > > + : public details::boost_map_copy_policies >::copy_item_policy + {}; + + // Swap policy for map + template + struct swap_item_policy< boost::unordered_map< Key, T, Traits, Alloc > > + : public details::boost_map_copy_policies >::swap_item_policy + {}; + + // Move policy for map + template + struct move_item_policy< boost::unordered_map< Key, T, Traits, Alloc > > + : public details::boost_map_copy_policies >::move_item_policy + {}; + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + + /// boost::unordered_map adapter for hash map bucket + template + class adapt< boost::unordered_map< Key, T, Hash, Pred, Alloc>, Options... > + { + public: + typedef boost::unordered_map< Key, T, Hash, Pred, Alloc> container_type ; ///< underlying container type + typedef cds::container::striped_set::details::boost_map_adapter< container_type, Options... 
> type; + }; +}}} // namespace cds::intrusive::striped_set + + +//@endcond + +#endif // #ifndef CDSLIB_CONTAINER_STRIPED_MAP_BOOST_UNORDERED_MAP_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/std_hash_map.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/std_hash_map.h new file mode 100644 index 0000000..8fd414c --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/std_hash_map.h @@ -0,0 +1,217 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_STRIPED_MAP_STD_HASH_MAP_ADAPTER_H +#define CDSLIB_CONTAINER_STRIPED_MAP_STD_HASH_MAP_ADAPTER_H + +#include +#include + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for map + template + struct copy_item_policy< std::unordered_map< Key, T, Hash, Pred, Alloc > > + { + typedef std::unordered_map< Key, T, Hash, Pred, Alloc > map_type; + typedef typename map_type::value_type pair_type; + typedef typename map_type::iterator iterator; + + void operator()( map_type& map, iterator itWhat ) + { + map.insert( *itWhat ); + } + }; + + // Swap policy for map + template + struct swap_item_policy< std::unordered_map< Key, T, Hash, Pred, Alloc > > + { + typedef std::unordered_map< Key, T, Hash, Pred, Alloc > map_type; + typedef typename map_type::value_type pair_type; + typedef typename map_type::iterator iterator; + + void operator()( map_type& map, iterator itWhat ) + { + pair_type pair( itWhat->first, typename pair_type::second_type()); + std::pair res = map.insert( pair ); + assert( res.second ); + std::swap( res.first->second, itWhat->second ); + } + }; + + // Move policy for map + template + struct move_item_policy< std::unordered_map< Key, T, Hash, Pred, Alloc > > + { + typedef std::unordered_map< Key, T, Hash, Pred, Alloc > map_type; + typedef typename map_type::value_type pair_type; + typedef typename map_type::iterator iterator; + + void operator()( map_type& map, iterator 
itWhat ) + { + map.insert( std::move( *itWhat )); + } + }; + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + + /// std::unordered_map adapter for hash map bucket + template + class adapt< std::unordered_map< Key, T, Hash, Pred, Alloc>, Options... > + { + public: + typedef std::unordered_map< Key, T, Hash, Pred, Alloc> container_type ; ///< underlying container type + + private: + /// Adapted container type + class adapted_container: public cds::container::striped_set::adapted_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::key_type key_type; + typedef typename container_type::mapped_type mapped_type; + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = false; + static bool const has_erase_with = false; + + private: + //@cond + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , Options... + >::type + >::copy_policy + , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy + , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy + , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy + >::type copy_item; + //@endcond + + private: + //@cond + container_type m_Map; + //@endcond + + public: + template + bool insert( const Q& key, Func f ) + { + std::pair res = m_Map.insert( value_type( key_type( key ), mapped_type())); + if ( res.second ) + f( const_cast(*res.first)); + return res.second; + } + + template + bool emplace( Q&& key, Args&&... 
args ) + { + std::pair res = m_Map.emplace( key_type( std::forward( key )), mapped_type( std::forward( args )...)); + return res.second; + } + + template + std::pair update( const Q& key, Func func, bool bAllowInsert ) + { + if ( bAllowInsert ) { + std::pair res = m_Map.insert( value_type( key_type( key ), mapped_type())); + func( res.second, const_cast(*res.first)); + return std::make_pair( true, res.second ); + } + else { + auto it = m_Map.find( key_type( key )); + if ( it == end()) + return std::make_pair( false, false ); + func( false, *it ); + return std::make_pair( true, false ); + } + } + + template + bool erase( const Q& key, Func f ) + { + iterator it = m_Map.find( key_type( key )); + if ( it == m_Map.end()) + return false; + f( const_cast(*it)); + m_Map.erase( it ); + return true; + } + + template + bool find( Q& key, Func f ) + { + iterator it = m_Map.find( key_type( key )); + if ( it == m_Map.end()) + return false; + f( const_cast(*it), key ); + return true; + } + + void clear() + { + m_Map.clear(); + } + + iterator begin() { return m_Map.begin(); } + const_iterator begin() const { return m_Map.begin(); } + iterator end() { return m_Map.end(); } + const_iterator end() const { return m_Map.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + assert( m_Map.find( itWhat->first ) == m_Map.end()); + copy_item()( m_Map, itWhat ); + } + + size_t size() const + { + return m_Map.size(); + } + }; + + public: + typedef adapted_container type ; ///< Result of \p adapt metafunction + + }; +}}} // namespace cds::intrusive::striped_set + + +//@endcond + +#endif // #ifndef CDSLIB_CONTAINER_STRIPED_MAP_STD_HASH_MAP_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/std_list.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/std_list.h new file mode 100644 index 0000000..4c5f9ba --- /dev/null +++ 
b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/std_list.h @@ -0,0 +1,330 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_CONTAINER_STRIPED_MAP_STD_LIST_ADAPTER_H +#define CDSLIB_CONTAINER_STRIPED_MAP_STD_LIST_ADAPTER_H + +#include +#include // ref +#include // std::lower_bound +#include // std::pair +#include + +#undef CDS_STD_LIST_SIZE_CXX11_CONFORM +#if !( defined(__GLIBCXX__ ) && (!defined(_GLIBCXX_USE_CXX11_ABI) || _GLIBCXX_USE_CXX11_ABI == 0 )) +# define CDS_STD_LIST_SIZE_CXX11_CONFORM +#endif + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for map + template + struct copy_item_policy< std::list< std::pair< K const, T >, Alloc > > + { + typedef std::pair< K const, T> pair_type; + typedef std::list< pair_type, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + list.insert( itInsert, *itWhat ); + } + }; + + // Swap policy for map + template + struct swap_item_policy< std::list< std::pair< K const, T >, Alloc > > + { + typedef std::pair< K const, T> pair_type; + typedef std::list< pair_type, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + pair_type newVal( itWhat->first, typename pair_type::second_type()); + itInsert = list.insert( itInsert, newVal ); + std::swap( itInsert->second, itWhat->second ); + } + }; + + // Move policy for map + template + struct move_item_policy< std::list< std::pair< K const, T >, Alloc > > + { + typedef std::pair< K const, T> pair_type; + typedef std::list< pair_type, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + list.insert( itInsert, std::move( *itWhat )); + } + }; + } // namespace striped_set +}} // namespace cds:container + +namespace cds { namespace intrusive { namespace striped_set { + + /// std::list adapter for hash map bucket + template + class adapt< std::list< std::pair, Alloc>, 
Options... > + { + public: + typedef std::list< std::pair, Alloc> container_type ; ///< underlying container type + + private: + /// Adapted container type + class adapted_container: public cds::container::striped_set::adapted_sequential_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename value_type::first_type key_type; + typedef typename value_type::second_type mapped_type; + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = true; + static bool const has_erase_with = true; + + private: + //@cond + typedef typename cds::opt::details::make_comparator_from_option_list< value_type, Options... >::type key_comparator; + + + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , Options... 
+ >::type + >::copy_policy + , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy + , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy + , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy + >::type copy_item; + + struct find_predicate + { + bool operator()( value_type const& i1, value_type const& i2) const + { + return key_comparator()( i1.first, i2.first ) < 0; + } + + template + bool operator()( Q const& i1, value_type const& i2) const + { + return key_comparator()( i1, i2.first ) < 0; + } + + template + bool operator()( value_type const& i1, Q const& i2) const + { + return key_comparator()( i1.first, i2 ) < 0; + } + }; + //@endcond + + private: + //@cond + container_type m_List; +# if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) + // GCC C++ lib bug: + // In GCC (at least up to 4.7.x), the complexity of std::list::size() is O(N) + // (see http://gcc.gnu.org/bugzilla/show_bug.cgi?id=49561) + // Fixed in GCC 5 + size_t m_nSize ; // list size +# endif + //@endcond + + public: + adapted_container() +# if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) + : m_nSize(0) +# endif + {} + + template + bool insert( const Q& key, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, find_predicate()); + if ( it == m_List.end() || key_comparator()( key, it->first ) != 0 ) { + it = m_List.insert( it, value_type( key_type( key ), mapped_type())); + f( *it ); + +# if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) + ++m_nSize; +# endif + return true; + } + + // key already exists + return false; + } + + template + bool emplace( K&& key, Args&&... args ) + { + value_type val( key_type( std::forward( key )), mapped_type( std::forward( args )... 
)); + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val.first, find_predicate()); + if ( it == m_List.end() || key_comparator()( val.first, it->first ) != 0 ) { + it = m_List.emplace( it, std::move( val )); + +# if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) + ++m_nSize; +# endif + return true; + } + return false; + } + + template + std::pair update( const Q& key, Func func, bool bAllowInsert ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, find_predicate()); + if ( it == m_List.end() || key_comparator()( key, it->first ) != 0 ) { + // insert new + if ( !bAllowInsert ) + return std::make_pair( false, false ); + + it = m_List.insert( it, value_type( key_type( key ), mapped_type())); + func( true, *it ); +# if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) + ++m_nSize; +# endif + return std::make_pair( true, true ); + } + else { + // already exists + func( false, *it ); + return std::make_pair( true, false ); + } + } + + template + bool erase( Q const& key, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, find_predicate()); + if ( it == m_List.end() || key_comparator()( key, it->first ) != 0 ) + return false; + + // key exists + f( *it ); + m_List.erase( it ); +# if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) + --m_nSize; +# endif + + return true; + } + + template + bool erase( Q const& key, Less pred, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, pred ); + if ( it == m_List.end() || pred( key, it->first ) || pred( it->first, key )) + return false; + + // key exists + f( *it ); + m_List.erase( it ); +# if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) + --m_nSize; +# endif + + return true; + } + + template + bool find( Q& val, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate()); + if ( it == m_List.end() || key_comparator()( val, it->first ) != 0 ) + return false; + + // key exists + f( *it, val ); + return true; + } + + 
template + bool find( Q& val, Less pred, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, pred ); + if ( it == m_List.end() || pred( val, it->first ) || pred( it->first, val )) + return false; + + // key exists + f( *it, val ); + return true; + } + + void clear() + { + m_List.clear(); + } + + iterator begin() { return m_List.begin(); } + const_iterator begin() const { return m_List.begin(); } + iterator end() { return m_List.end(); } + const_iterator end() const { return m_List.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), *itWhat, find_predicate()); + assert( it == m_List.end() || key_comparator()( itWhat->first, it->first ) != 0 ); + + copy_item()( m_List, it, itWhat ); +# if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) + ++m_nSize; +# endif + } + + size_t size() const + { +# if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) + return m_nSize; +# else + return m_List.size(); +# endif + + } + }; + + public: + typedef adapted_container type ; ///< Result of \p adapt metafunction + + }; +}}} // namespace cds::intrusive::striped_set + +//@endcond + +#endif // #ifndef CDSLIB_CONTAINER_STRIPED_MAP_STD_LIST_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/std_map.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/std_map.h new file mode 100644 index 0000000..eaebeac --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_map/std_map.h @@ -0,0 +1,216 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following 
conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_CONTAINER_STRIPED_MAP_STD_MAP_ADAPTER_H +#define CDSLIB_CONTAINER_STRIPED_MAP_STD_MAP_ADAPTER_H + +#include +#include + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for map + template + struct copy_item_policy< std::map< Key, T, Traits, Alloc > > + { + typedef std::map< Key, T, Traits, Alloc > map_type; + typedef typename map_type::value_type pair_type; + typedef typename map_type::iterator iterator; + + void operator()( map_type& map, iterator itWhat ) + { + map.insert( *itWhat ); + } + }; + + // Swap item policy + template + struct swap_item_policy< std::map< Key, T, Traits, Alloc > > + { + typedef std::map< Key, T, Traits, Alloc > map_type; + typedef typename map_type::value_type pair_type; + typedef typename map_type::iterator iterator; + + void operator()( map_type& map, iterator itWhat ) + { + std::pair< typename map_type::iterator, bool > ret = map.insert( pair_type( itWhat->first, typename pair_type::second_type())); + assert( ret.second ) ; // successful insertion + std::swap( ret.first->second, itWhat->second ); + } + }; + + // Move policy for map + template + struct move_item_policy< std::map< Key, T, Traits, Alloc > > + { + typedef std::map< Key, T, Traits, Alloc > map_type; + typedef typename map_type::value_type pair_type; + typedef typename map_type::iterator iterator; + + void operator()( map_type& map, iterator itWhat ) + { + map.insert( std::move( *itWhat )); + } + }; + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + + /// std::set adapter for hash set bucket + template + class adapt< std::map< Key, T, Traits, Alloc>, Options... 
> + { + public: + typedef std::map< Key, T, Traits, Alloc> container_type ; ///< underlying container type + + private: + /// Adapted container type + class adapted_container: public cds::container::striped_set::adapted_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::key_type key_type; + typedef typename container_type::mapped_type mapped_type; + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = false; + static bool const has_erase_with = false; + + private: + //@cond + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , Options... + >::type + >::copy_policy + , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy + , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy + , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy + >::type copy_item; + //@endcond + + private: + //@cond + container_type m_Map; + //@endcond + + public: + + template + bool insert( const Q& key, Func f ) + { + std::pair res = m_Map.insert( value_type( key_type( key ), mapped_type())); + if ( res.second ) + f( *res.first ); + return res.second; + } + + template + bool emplace( Q&& key, Args&&... 
args ) + { + std::pair res = m_Map.emplace( key_type( std::forward( key )), mapped_type( std::forward( args )...)); + return res.second; + } + + template + std::pair update( const Q& key, Func func, bool bAllowInsert ) + { + if ( bAllowInsert ) { + std::pair res = m_Map.insert( value_type( key_type( key ), mapped_type())); + func( res.second, *res.first ); + return std::make_pair( true, res.second ); + } + else { + auto it = m_Map.find( key_type( key )); + if ( it == end()) + return std::make_pair( false, false ); + func( false, *it ); + return std::make_pair( true, false ); + } + } + + template + bool erase( const Q& key, Func f ) + { + iterator it = m_Map.find( key_type( key )); + if ( it == m_Map.end()) + return false; + f( *it ); + m_Map.erase( it ); + return true; + } + + template + bool find( Q& key, Func f ) + { + iterator it = m_Map.find( key_type( key )); + if ( it == m_Map.end()) + return false; + f( *it, key ); + return true; + } + + /// Clears the container + void clear() + { + m_Map.clear(); + } + + iterator begin() { return m_Map.begin(); } + const_iterator begin() const { return m_Map.begin(); } + iterator end() { return m_Map.end(); } + const_iterator end() const { return m_Map.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + assert( m_Map.find( itWhat->first ) == m_Map.end()); + copy_item()( m_Map, itWhat ); + } + + size_t size() const + { + return m_Map.size(); + } + }; + + public: + typedef adapted_container type ; ///< Result of \p adapt metafunction + }; +}}} // namespace cds::intrusive::striped_set + +//@endcond + +#endif // #ifndef CDSLIB_CONTAINER_STRIPED_MAP_STD_MAP_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set.h new file mode 100644 index 0000000..ae24666 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set.h @@ -0,0 +1,975 @@ +/* + 
This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_STRIPED_SET_H +#define CDSLIB_CONTAINER_STRIPED_SET_H + +#include +#include + +namespace cds { namespace container { + + /// Striped hash set + /** @ingroup cds_nonintrusive_set + + Source + - [2008] Maurice Herlihy, Nir Shavit "The Art of Multiprocessor Programming" + + Lock striping is very simple technique. + The set consists of the bucket table and the array of locks. + Initially, the capacity of lock array and bucket table is the same. 
+ When set is resized, bucket table capacity will be doubled but lock array will not. + The lock \p i protects each bucket \p j, where j = i mod L , + where \p L - the size of lock array. + + Template arguments: + - \p Container - the container class that is used as bucket table entry. The \p Container class should support + an uniform interface described below. + - \p Options - options + + The \p %StripedSet class does not exactly dictate the type of container that should be used as a \p Container bucket. + Instead, the class supports different container type for the bucket, for exampe, \p std::list, \p std::set and others. + + Remember that \p %StripedSet class algorithm ensures sequential blocking access to its bucket through the mutex type you specify + among \p Options template arguments. + + The \p Options are: + - \p opt::mutex_policy - concurrent access policy. + Available policies: \p intrusive::striped_set::striping, \p intrusive::striped_set::refinable. + Default is \p %striped_set::striping. + - \p opt::hash - hash functor. Default option value see opt::v::hash_selector + which selects default hash functor for your compiler. + - \p opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the \p %opt::less is used. + - \p opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - \p opt::item_counter - item counter type. Default is \p atomicity::item_counter since some operation on the counter is performed + without locks. Note that item counting is an essential part of the set algorithm, so dummy counter + like as \p atomicity::empty_item_counter is not suitable. + - \p opt::allocator - the allocator type using for memory allocation of bucket table and lock array. Default is \ref CDS_DEFAULT_ALLOCATOR. + - \p opt::resizing_policy - the resizing policy that is a functor that decides when to resize the hash set. 
+ Default option value depends on bucket container type: + for sequential containers like \p std::list, \p std::vector the resizing policy is striped_set::load_factor_resizing<4> ; + for other type of containers like \p std::set, \p std::unordered_set the resizing policy is \p striped_set::no_resizing. + See \ref cds_striped_resizing_policy "available resizing policy". + Note that the choose of resizing policy depends of \p Container type: + for sequential containers like \p std::list, \p std::vector and so on, right choosing of the policy can + significantly improve performance. + For other, non-sequential types of \p Container (like a \p std::set) the resizing policy is not so important. + - \p opt::copy_policy - the copy policy which is used to copy items from the old set to the new one when resizing. + The policy can be optionally used in adapted bucket container for performance reasons of resizing. + The detail of copy algorithm depends on type of bucket container and explains below. + + \p %opt::compare or \p %opt::less options are used in some \p Container class for searching an item. + \p %opt::compare option has the highest priority: if \p %opt::compare is specified, \p %opt::less is not used. + + You can pass other option that would be passed to adapt metafunction, see below. + + Internal details + + The \p %StripedSet class cannot utilize the \p Container container specified directly, but only its adapted variant which + supports an unified interface. Internally, the adaptation is made via striped_set::adapt metafunction that wraps bucket container + and provides the unified bucket interface suitable for \p %StripedSet. Such adaptation is completely transparent for you - + you don't need to call \p adapt metafunction directly, \p %StripedSet class's internal machinery itself invokes appropriate + \p adapt metafunction to adjust your \p Container container class to \p %StripedSet bucket's internal interface. 
+ All you need is to include a right header before striped_hash_set.h. + + By default, striped_set::adapt metafunction does not make any wrapping to \p AnyContainer, + so, the result striped_set::adapt::type is the same as \p AnyContainer. + However, there are a lot of specializations of striped_set::adapt for well-known containers, see table below. + Any of this specialization wraps corresponding container making it suitable for the set's bucket. + Remember, you should include the proper header file for \p adapt before including striped_hash_set.h. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Container.h-file for \p adaptExampleNotes
\p std::list\code + #include + #include + typedef cds::container::StripedSet< + std::list, + cds::opt::less< std::less > + > striped_set; + \endcode + + The list is ordered. + Template argument pack \p Options must contain cds::opt::less or cds::opt::compare for type \p T stored in the list +
\p std::vector\code + #include + #include + typedef cds::container::StripedSet< + std::vector, + cds::opt::less< std::less > + > striped_set; + \endcode + + The vector is ordered. + Template argument pack \p Options must contain \p cds::opt::less or \p cds::opt::compare for type \p T stored in the list +
\p std::set\code + #include + #include + typedef cds::container::StripedSet< + std::set< T, std::less > + > striped_set; + \endcode + +
\p std::unordered_set\code + #include + #include + typedef cds::container::StripedSet< + std::unordered_set< + T, + hash, + equal + > + > striped_set; + \endcode + + You should provide two different hash function \p h1 and \p h2 - one for \p std::unordered_set and other for \p %StripedSet. + For the best result, \p h1 and \p h2 must be orthogonal i.e. h1(X) != h2(X) for any value \p X. +
\p boost::container::slist\code + #include + #include + typedef cds::container::StripedSet< + boost::container::slist + > striped_set; + \endcode + + The list is ordered. + \p Options must contain \p cds::opt::less or \p cds::opt::compare. +
\p boost::container::list\code + #include + #include + typedef cds::container::StripedSet< + boost::container::list + > striped_set; + \endcode + + The list is ordered. + \p Options must contain \p cds::opt::less or \p cds::opt::compare. +
\p boost::container::vector\code + #include + #include + typedef cds::container::StripedSet< + boost::container::vector, + cds::opt::less< std::less > + > striped_set; + \endcode + + The vector is ordered. + Template argument pack \p Options must contain \p cds::opt::less or \p cds::opt::compare for type \p T stored in the vector +
\p boost::container::stable_vector\code + #include + #include + typedef cds::container::StripedSet< + boost::container::stable_vector, + cds::opt::less< std::less > + > striped_set; + \endcode + + The vector is ordered. + Template argument pack \p Options must contain \p cds::opt::less or \p cds::opt::compare for type \p T stored in the vector +
\p boost::container::set\code + #include + #include + typedef cds::container::StripedSet< + boost::container::set< T, std::less > + > striped_set; + \endcode + +
\p boost::container::flat_set\code + #include + #include + typedef cds::container::StripedSet< + boost::container::flat_set< T, std::less > + > striped_set; + \endcode + +
\p boost::unordered_set\code + #include + #include + typedef cds::container::StripedSet< + boost::unordered_set< + T, + hash, + equal + > + > striped_set; + \endcode + + You should provide two different hash function \p h1 and \p h2 - one for \p boost::unordered_set and other for \p %StripedSet. + For the best result, \p h1 and \p h2 must be orthogonal i.e. h1(X) != h2(X) for any value \p X. +
+ + You can use another container type as set's bucket. + Suppose, you have a container class \p MyBestContainer and you want to integrate it with \p %StripedSet as bucket type. + There are two possibility: + - either your \p MyBestContainer class has native support of bucket's interface; + in this case, you can use default striped_set::adapt metafunction; + - or your \p MyBestContainer class does not support bucket's interface, which means, that you should develop a specialization + cds::container::striped_set::adapt metafunction providing necessary interface. + + The striped_set::adapt< Container, Options... > metafunction has two template argument: + - \p Container is the class that should be used as the bucket, for example, std::list< T >. + - \p Options pack is the options from \p %StripedSet declaration. The \p adapt metafunction can use + any option from \p Options for its internal use. For example, a \p compare option can be passed to \p adapt + metafunction via \p Options argument of \p %StripedSet declaration. + + See striped_set::adapt metafunction for the description of interface that the bucket container must provide + to be %StripedSet compatible. + + Copy policy + There are three predefined copy policy: + - \p cds::container::striped_set::copy_item - copy item from old bucket to new one when resizing using copy ctor. It is default policy for + any compiler that do not support move semantics + - \p cds::container::striped_set::move_item - move item from old bucket to new one when resizing using move semantics. It is default policy for + any compiler that support move semantics. If compiler does not support move semantics, the move policy is the same as \p copy_item + - \p cds::container::striped_set::swap_item - copy item from old bucket to new one when resizing using \p std::swap. Not all containers support + this copy policy, see details in table below. + + You can define your own copy policy specifically for your case. 
+ Note, right copy policy can significantly improve the performance of resizing. + + + + + + + + + + + + + + + + + +
ContainerPolicies
+ - \p std::list + - \p std::vector + - \p boost::list + - \p boost::vector + - \p boost::stable_vector + \code + struct copy_item { + void operator()( std::list& list, std::list::iterator itInsert, std::list::iterator itWhat ) + { + list.insert( itInsert, *itWhat ); + } + } \endcode + + \code + // The type T stored in the list must be swappable + struct swap_item { + void operator()( std::list& list, std::list::iterator itInsert, std::list::iterator itWhat ) + { + std::swap( *list.insert( itInsert, T()), *itWhat ); + } + } \endcode + + \code + struct move_item { + void operator()( std::list& list, std::list::iterator itInsert, std::list::iterator itWhat ) + { + list.insert( itInsert, std::move( *itWhat )); + } + } \endcode +
+ - \p std::set + - \p std::unordered_set + \code + struct copy_item { + void operator()( std::set& set, std::set::iterator itWhat ) + { + set.insert( *itWhat ); + } + } \endcode + \p swap_item is not applicable (same as \p copy_item) + + \code + struct move_item { + void operator()( std::set& set, std::set::iterator itWhat ) + { + set.insert( std::move( *itWhat )); + } + } \endcode +
+ - \p boost::container::slist + \code + struct copy_item { + void operator()( bc::slist& list, bc::slist::iterator itInsert, bc::slist::iterator itWhat ) + { + list.insert_after( itInsert, *itWhat ); + } + } \endcode + + \code + // The type T stored in the list must be swappable + struct swap_item { + void operator()( bc::slist& list, bc::slist::iterator itInsert, bc::slist::iterator itWhat ) + { + std::swap( *list.insert_after( itInsert, T()), *itWhat ); + } + } \endcode + + \code + struct move_item { + void operator()( bc::slist& list, bc::slist::iterator itInsert, bc::slist::iterator itWhat ) + { + list.insert_after( itInsert, std::move( *itWhat )); + } + } \endcode +
+ + Advanced functions + + libcds provides some advanced functions like \p erase_with(), \p find_with(), + that cannot be supported by all underlying containers. + The table below shows whether underlying container supports those functions + (the sign "+" means "container supports the function"): + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Container\p find_with\p erse_with
\p std::list++
\p std::vector++
\p std::set--
\p std::unordered_set--
\p boost::container::slist++
\p boost::container::list++
\p boost::container::vector++
\p boost::container::stable_vector++
\p boost::container::set--
\p boost::container::flat_set--
\p boost::unordered_set--
+ */ + template + class StripedSet: protected intrusive::StripedSet + { + //@cond + typedef intrusive::StripedSet base_class; + //@endcond + public: + //@cond + typedef typename base_class::default_options default_options; + typedef typename base_class::options options; + //@endcond + + typedef Container underlying_container_type ; ///< original intrusive container type for the bucket + typedef typename base_class::bucket_type bucket_type ; ///< container type adapted for hash set + typedef typename bucket_type::value_type value_type ; ///< value type stored in the set + + typedef typename base_class::hash hash ; ///< Hash functor + typedef typename base_class::item_counter item_counter ; ///< Item counter + typedef typename base_class::resizing_policy resizing_policy ; ///< Resizing policy + typedef typename base_class::allocator_type allocator_type ; ///< allocator type specified in options. + typedef typename base_class::mutex_policy mutex_policy ; ///< Mutex policy + + protected: + //@cond + typedef typename base_class::scoped_cell_lock scoped_cell_lock; + typedef typename base_class::scoped_full_lock scoped_full_lock; + typedef typename base_class::scoped_resize_lock scoped_resize_lock; + //@endcond + + public: + /// Default ctor. The initial capacity is 16. + StripedSet() + : base_class() + {} + + /// Ctor with initial capacity specified + StripedSet( + size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16. + ) + + : base_class( nCapacity ) + {} + + /// Ctor with resizing policy (copy semantics) + /** + This constructor initializes m_ResizingPolicy member with copy of \p resizingPolicy parameter + */ + StripedSet( + size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16. 
+ ,resizing_policy const& resizingPolicy ///< Resizing policy + ) + + : base_class( nCapacity, resizingPolicy ) + {} + + /// Ctor with resizing policy (move semantics) + /** + This constructor initializes m_ResizingPolicy member moving \p resizingPolicy parameter + Move semantics is used. Available only for the compilers that supports C++11 rvalue reference. + */ + StripedSet( + size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16. + ,resizing_policy&& resizingPolicy ///< Resizing policy + ) + + : base_class( nCapacity, std::forward(resizingPolicy)) + {} + + /// Destructor destroys internal data + ~StripedSet() + {} + + public: + /// Inserts new node + /** + The function creates a node with copy of \p val value + and then inserts the node created into the set. + + The type \p Q should contain as minimum the complete key for the node. + The object of \p value_type should be constructible from a value of type \p Q. + In trivial case, \p Q is equal to \p value_type. + + Returns \p true if \p val is inserted into the set, \p false otherwise. + */ + template + bool insert( Q const& val ) + { + return insert( val, []( value_type& ) {} ); + } + + /// Inserts new node + /** + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-field of new item . + + The functor signature is: + \code + void func( value_type& item ); + \endcode + where \p item is the item inserted. + + The type \p Q can differ from \p value_type of items storing in the set. + Therefore, the \p value_type should be constructible from type \p Q. + + The user-defined functor is called only if the inserting is success. 
+ */ + template + bool insert( Q const& val, Func f ) + { + bool bOk; + bool bResize; + size_t nHash = base_class::hashing( val ); + bucket_type * pBucket; + { + scoped_cell_lock sl( base_class::m_MutexPolicy, nHash ); + pBucket = base_class::bucket( nHash ); + bOk = pBucket->insert( val, f ); + bResize = bOk && base_class::m_ResizingPolicy( ++base_class::m_ItemCounter, *this, *pBucket ); + } + + if ( bResize ) + base_class::resize(); + return bOk; + } + + /// Inserts data of type \p %value_type constructed with std::forward(args)... + /** + Returns \p true if inserting successful, \p false otherwise. + */ + template + bool emplace( Args&&... args ) + { + bool bOk; + bool bResize; + value_type val( std::forward( args )... ); + size_t nHash = base_class::hashing( val ); + bucket_type * pBucket; + { + scoped_cell_lock sl( base_class::m_MutexPolicy, nHash ); + pBucket = base_class::bucket( nHash ); + + bOk = pBucket->emplace( std::move( val )); + bResize = bOk && base_class::m_ResizingPolicy( ++base_class::m_ItemCounter, *this, *pBucket ); + } + + if ( bResize ) + base_class::resize(); + return bOk; + } + + /// Updates the node + /** + The operation performs inserting or changing data with lock-free manner. + + If \p key is not found in the set, then \p key is inserted iff \p bAllowInsert is \p true. + Otherwise, the functor \p func is called with item found. + + The functor signature is: + \code + struct my_functor { + void operator()( bool bNew, value_type& item, const Q& val ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p %update() function + + The functor may change non-key fields of the \p item. + + Returns std::pair where \p first is true if operation is successful, + \p second is true if new item has been added or \p false if the item with \p key + already is in the map. 
+ */ + template + std::pair update( Q const& val, Func func, bool bAllowInsert = true ) + { + std::pair result; + bool bResize = false; + size_t nHash = base_class::hashing( val ); + bucket_type * pBucket; + { + scoped_cell_lock sl( base_class::m_MutexPolicy, nHash ); + pBucket = base_class::bucket( nHash ); + + result = pBucket->update( val, func, bAllowInsert ); + if ( result.first && result.second ) + bResize = base_class::m_ResizingPolicy( ++base_class::m_ItemCounter, *this, *pBucket ); + } + + if ( bResize ) + base_class::resize(); + return result; + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( Q const& val, Func func ) + { + return update( val, func, true ); + } + //@endcond + + /// Delete \p key from the set + /** \anchor cds_nonintrusive_StripedSet_erase + + The set item comparator should be able to compare the type \p value_type and the type \p Q. + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key ) + { + return erase( key, [](value_type const&) {} ); + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_StripedSet_erase "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + + @note This function is enabled if the compiler supports C++11 + default template arguments for function template and the underlying container + supports \p %erase_with feature. 
+ */ + template < typename Q, typename Less + ,typename Bucket = bucket_type, typename = typename std::enable_if< Bucket::has_erase_with >::type > + bool erase_with( Q const& key, Less pred ) + { + return erase_with( key, pred, [](value_type const&) {} ); + } + + /// Delete \p key from the set + /** \anchor cds_nonintrusive_StripedSet_erase_func + + The function searches an item with key \p key, calls \p f functor with item found + and deletes it. If \p key is not found, the functor is not called. + + The functor \p Func interface is: + \code + struct functor { + void operator()(value_type const& val); + }; + \endcode + + Return \p true if key is found and deleted, \p false otherwise + */ + template + bool erase( Q const& key, Func f ) + { + bool bOk; + size_t nHash = base_class::hashing( key ); + { + scoped_cell_lock sl( base_class::m_MutexPolicy, nHash ); + bucket_type * pBucket = base_class::bucket( nHash ); + + bOk = pBucket->erase( key, f ); + } + + if ( bOk ) + --base_class::m_ItemCounter; + return bOk; + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_nonintrusive_StripedSet_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + + @note This function is enabled if the compiler supports C++11 + default template arguments for function template and the underlying container + supports \p %erase_with feature. 
+ */ + template < typename Q, typename Less, typename Func + , typename Bucket = bucket_type, typename = typename std::enable_if< Bucket::has_erase_with >::type > + bool erase_with( Q const& key, Less pred, Func f ) + { + bool bOk; + size_t nHash = base_class::hashing( key ); + { + scoped_cell_lock sl( base_class::m_MutexPolicy, nHash ); + bucket_type * pBucket = base_class::bucket( nHash ); + + bOk = pBucket->erase( key, pred, f ); + } + + if ( bOk ) + --base_class::m_ItemCounter; + return bOk; + } + + /// Find the key \p val + /** \anchor cds_nonintrusive_StripedSet_find_func + + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + The functor can change non-key fields of \p item. + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + The type \p Q can differ from \p value_type of items storing in the container. + Therefore, the \p value_type should be comparable with type \p Q. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) + { + return base_class::find( val, f ); + } + + /// Find the key \p val using \p pred predicate + /** + The function is an analog of \ref cds_nonintrusive_StripedSet_find_func "find(Q&, Func)" + but \p pred is used for key comparing + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + + @note This function is enabled if the compiler supports C++11 + default template arguments for function template and the underlying container + supports \p %find_with feature. 
+ */ + template ::type > + bool find_with( Q& val, Less pred, Func f ) + { + return base_class::find_with( val, pred, f ); + } + + /// Find the key \p val + /** \anchor cds_nonintrusive_StripedSet_find_cfunc + + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + The functor can change non-key fields of \p item. + + The type \p Q can differ from \p value_type of items storing in the container. + Therefore, the \p value_type should be comparable with type \p Q. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q const& val, Func f ) + { + return base_class::find( val, f ); + } + + /// Find the key \p val using \p pred predicate + /** + The function is an analog of \ref cds_nonintrusive_StripedSet_find_cfunc "find(Q const&, Func)" + but \p pred is used for key comparing + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + + @note This function is enabled if the compiler supports C++11 + default template arguments for function template and the underlying container + supports \p %find_with feature. + */ + template ::type > + bool find_with( Q const& val, Less pred, Func f ) + { + return base_class::find_with( val, pred, f ); + } + + /// Checks whether the set contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + Otherwise, you may use \p contains( Q const&, Less pred ) functions with explicit predicate for key comparing. 
+ */ + template + bool contains( Q const& key ) + { + return base_class::contains( key ); + } + //@cond + template + CDS_DEPRECATED("use contains()") + bool find( Q const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the map contains \p key using \p pred predicate for searching + /** + The function is similar to contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the map. + */ + template ::type > + bool contains( Q const& key, Less pred ) + { + return base_class::contains( key, pred ); + } + //@cond + template ::type > + CDS_DEPRECATED("use contains()") + bool find_with( Q const& val, Less pred ) + { + return contains( val, pred ); + } + //@endcond + + /// Clears the set + /** + The function erases all items from the set. + */ + void clear() + { + return base_class::clear(); + } + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. + */ + bool empty() const + { + return base_class::empty(); + } + + /// Returns item count in the set + size_t size() const + { + return base_class::size(); + } + + /// Returns the size of hash table + /** + The hash table size is non-constant and can be increased via resizing. 
+ */ + size_t bucket_count() const + { + return base_class::bucket_count(); + } + + /// Returns lock array size + size_t lock_count() const + { + return base_class::lock_count(); + } + + /// Returns resizing policy object + resizing_policy& get_resizing_policy() + { + return base_class::get_resizing_policy(); + } + + /// Returns resizing policy (const version) + resizing_policy const& get_resizing_policy() const + { + return base_class::get_resizing_policy(); + } + }; + +}} // namespace cds::container + + +#endif // #ifndef CDSLIB_CONTAINER_STRIPED_SET_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/adapter.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/adapter.h new file mode 100644 index 0000000..9bda459 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/adapter.h @@ -0,0 +1,535 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_STRIPED_SET_ADAPTER_H +#define CDSLIB_CONTAINER_STRIPED_SET_ADAPTER_H + +#include +#include + +namespace cds { namespace container { + /// Striped hash set related definitions + namespace striped_set { + + //@cond + struct copy_item ; // copy_item_policy tag + template + struct copy_item_policy; + + struct swap_item ; // swap_item_policy tag + template + struct swap_item_policy; + + struct move_item ; // move_item_policy tag + template + struct move_item_policy; + //@endcond + +#ifdef CDS_DOXYGEN_INVOKED + /// Default adapter for hash set + /** + By default, the metafunction does not make any transformation for container type \p Container. + \p Container should provide interface suitable for the hash set. + + The \p Options template argument contains a list of options + that has been passed to cds::container::StripedSet. + + Bucket interface + + The result of metafunction is a container (a bucket) that should support the following interface: + + Public typedefs that the bucket should provide: + - \p value_type - the type of the item in the bucket + - \p iterator - bucket's item iterator + - \p const_iterator - bucket's item constant iterator + - \p default_resizing_policy - defalt resizing policy preferable for the container. 
+ By default, the library defines striped_set::load_factor_resizing<4> for sequential containers like + std::list, std::vector, and striped_set::no_resizing for ordered container like std::set, + std::unordered_set. + + Insert value \p val of type \p Q + \code template bool insert( const Q& val, Func f ) ; \endcode + The function allows to split creating of new item into two part: + - create item with key only from \p val + - try to insert new item into the container + - if inserting is success, calls \p f functor to initialize value-field of the new item. + + The functor signature is: + \code + void func( value_type& item ); + \endcode + where \p item is the item inserted. + + The type \p Q can differ from \ref value_type of items storing in the container. + Therefore, the \p value_type should be comparable with type \p Q and constructible from type \p Q, + + The user-defined functor is called only if the inserting is success. +
+ + Inserts data of type \ref value_type constructed with std::forward(args)... + \code template bool emplace( Args&&... args ) ; \endcode + Returns \p true if inserting successful, \p false otherwise. + + This function should be available only for compiler that supports + variadic template and move semantics +
+ + Updates \p item + \code template std::pair update( const Q& val, Func func, bool bAllowInsert ) \endcode + The operation performs inserting or changing data. + + If the \p val key not found in the container, then the new item created from \p val + is inserted iff \p bAllowInsert is \p true. Otherwise, the functor \p func is called with the item found. + The \p Func functor has interface: + \code + void func( bool bNew, value_type& item, const Q& val ); + \endcode + or like a functor: + \code + struct my_functor { + void operator()( bool bNew, value_type& item, const Q& val ); + }; + \endcode + + where arguments are: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - container's item + - \p val - argument \p val passed into the \p update() function + + The functor can change non-key fields of the \p item. + + The type \p Q can differ from \ref value_type of items storing in the container. + Therefore, the \p value_type should be comparable with type \p Q and constructible from type \p Q, + + Returns std::pair where \p first is true if operation is successful, + \p second is true if new item has been added or \p false if the item with \p val key + already exists. +
+ + + Delete \p key + \code template bool erase( const Q& key, Func f ) \endcode + The function searches an item with key \p key, calls \p f functor + and deletes the item. If \p key is not found, the functor is not called. + + The functor \p Func interface is: + \code + struct extractor { + void operator()(value_type const& val); + }; + \endcode + + The type \p Q can differ from \ref value_type of items storing in the container. + Therefore, the \p value_type should be comparable with type \p Q. + + Return \p true if key is found and deleted, \p false otherwise +
+ + + Find the key \p val + \code template bool find( Q& val, Func f ) \endcode + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + The functor can change non-key fields of \p item. + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + The type \p Q can differ from \ref value_type of items storing in the container. + Therefore, the \p value_type should be comparable with type \p Q. + + The function returns \p true if \p val is found, \p false otherwise. +
+ + Clears the container + \code void clear() \endcode +
+ + Get size of bucket + \code size_t size() const \endcode + This function can be required by some resizing policy +
+ + Move item when resizing + \code void move_item( adapted_container& from, iterator it ) \endcode + This helper function is invented for the set resizing when the item + pointed by \p it iterator is copied from an old bucket \p from to a new bucket + pointed by \p this. +
+ + */ + template < typename Container, typename... Options> + class adapt + { + public: + typedef Container type ; ///< adapted container type + typedef typename type::value_type value_type ; ///< value type stored in the container + }; +#else // CDS_DOXYGEN_INVOKED + using cds::intrusive::striped_set::adapt; +#endif + + //@cond + using cds::intrusive::striped_set::adapted_sequential_container; + using cds::intrusive::striped_set::adapted_container; + //@endcond + + ///@copydoc cds::intrusive::striped_set::load_factor_resizing + template + using load_factor_resizing = cds::intrusive::striped_set::load_factor_resizing; + + ///@copydoc cds::intrusive::striped_set::rational_load_factor_resizing + template + using rational_load_factor_resizing = cds::intrusive::striped_set::rational_load_factor_resizing; + + ///@copydoc cds::intrusive::striped_set::single_bucket_size_threshold + template + using single_bucket_size_threshold = cds::intrusive::striped_set::single_bucket_size_threshold; + + ///@copydoc cds::intrusive::striped_set::no_resizing + typedef cds::intrusive::striped_set::no_resizing no_resizing; + + ///@copydoc cds::intrusive::striped_set::striping + template + using striping = cds::intrusive::striped_set::striping; + + ///@copydoc cds::intrusive::striped_set::refinable + template < + class RecursiveLock = std::recursive_mutex, + typename BackOff = cds::backoff::yield, + class Alloc = CDS_DEFAULT_ALLOCATOR + > + using refinable = cds::intrusive::striped_set::refinable; + + //@cond + namespace details { + + template + struct boost_set_copy_policies + { + struct copy_item_policy + { + typedef Set set_type; + typedef typename set_type::iterator iterator; + + void operator()( set_type& set, iterator itWhat ) + { + set.insert( *itWhat ); + } + }; + + typedef copy_item_policy swap_item_policy; + + struct move_item_policy + { + typedef Set set_type; + typedef typename set_type::iterator iterator; + + void operator()( set_type& set, iterator itWhat ) + { + set.insert( 
std::move( *itWhat )); + } + }; + }; + + template + class boost_set_adapter: public striped_set::adapted_container + { + public: + typedef Set container_type; + + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = false; + static bool const has_erase_with = false; + + private: + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , Options... + >::type + >::copy_policy + , cds::container::striped_set::copy_item, copy_item_policy + , cds::container::striped_set::swap_item, swap_item_policy + , cds::container::striped_set::move_item, move_item_policy + >::type copy_item; + + private: + container_type m_Set; + + public: + boost_set_adapter() + {} + + container_type& base_container() + { + return m_Set; + } + + template + bool insert( const Q& val, Func f ) + { + std::pair res = m_Set.insert( value_type(val)); + if ( res.second ) + f( const_cast(*res.first)); + return res.second; + } + + template + bool emplace( Args&&... args ) + { + std::pair res = m_Set.emplace( std::forward(args)... 
); + return res.second; + } + + template + std::pair update( const Q& val, Func func, bool bAllowInsert ) + { + if ( bAllowInsert ) { + std::pair res = m_Set.insert( value_type(val)); + func( res.second, const_cast(*res.first), val ); + return std::make_pair( true, res.second ); + } + else { + auto it = m_Set.find( value_type( val )); + if ( it == m_Set.end()) + return std::make_pair( false, false ); + func( false, const_cast(*it), val ); + return std::make_pair( true, false ); + } + } + + template + bool erase( const Q& key, Func f ) + { + const_iterator it = m_Set.find( value_type(key)); + if ( it == m_Set.end()) + return false; + f( const_cast(*it)); + m_Set.erase( it ); + return true; + } + + template + bool find( Q& val, Func f ) + { + iterator it = m_Set.find( value_type(val)); + if ( it == m_Set.end()) + return false; + f( const_cast(*it), val ); + return true; + } + + void clear() + { + m_Set.clear(); + } + + iterator begin() { return m_Set.begin(); } + const_iterator begin() const { return m_Set.begin(); } + iterator end() { return m_Set.end(); } + const_iterator end() const { return m_Set.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + assert( m_Set.find( *itWhat ) == m_Set.end()); + copy_item()( m_Set, itWhat ); + } + + size_t size() const + { + return m_Set.size(); + } + }; + + template + struct boost_map_copy_policies { + struct copy_item_policy { + typedef Map map_type; + typedef typename map_type::value_type pair_type; + typedef typename map_type::iterator iterator; + + void operator()( map_type& map, iterator itWhat ) + { + map.insert( *itWhat ); + } + }; + + struct swap_item_policy { + typedef Map map_type; + typedef typename map_type::value_type pair_type; + typedef typename map_type::iterator iterator; + + void operator()( map_type& map, iterator itWhat ) + { + std::pair< iterator, bool > ret = map.insert( pair_type( itWhat->first, typename pair_type::second_type())); + assert( ret.second ) ; // successful 
insertion + std::swap( ret.first->second, itWhat->second ); + } + }; + + struct move_item_policy { + typedef Map map_type; + typedef typename map_type::value_type pair_type; + typedef typename map_type::iterator iterator; + + void operator()( map_type& map, iterator itWhat ) + { + map.insert( std::move( *itWhat )); + } + }; + }; + + template + class boost_map_adapter: public striped_set::adapted_container + { + public: + typedef Map container_type; + + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::key_type key_type; + typedef typename container_type::mapped_type mapped_type; + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = false; + static bool const has_erase_with = false; + + private: + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , Options... + >::type + >::copy_policy + , cds::container::striped_set::copy_item, copy_item_policy + , cds::container::striped_set::swap_item, swap_item_policy + , cds::container::striped_set::move_item, move_item_policy + >::type copy_item; + + private: + container_type m_Map; + + public: + template + bool insert( const Q& key, Func f ) + { + std::pair res = m_Map.insert( value_type( key_type( key ), mapped_type())); + if ( res.second ) + f( *res.first ); + return res.second; + } + + template + bool emplace( Q&& key, Args&&... 
args ) + { + std::pair res = m_Map.emplace( key_type( std::forward( key )), mapped_type( std::forward( args )...)); + return res.second; + } + + template + std::pair update( const Q& key, Func func, bool bAllowInsert ) + { + if ( bAllowInsert ) { + std::pair res = m_Map.insert( value_type( key_type( key ), mapped_type())); + func( res.second, *res.first ); + return std::make_pair( true, res.second ); + } + else { + auto it = m_Map.find( key_type( key )); + if ( it == end()) + return std::make_pair( false, false ); + func( false, *it ); + return std::make_pair( true, false ); + } + } + + template + bool erase( const Q& key, Func f ) + { + iterator it = m_Map.find( key_type( key )); + if ( it == m_Map.end()) + return false; + f( *it ); + m_Map.erase( it ); + return true; + } + + template + bool find( Q& val, Func f ) + { + iterator it = m_Map.find( key_type( val )); + if ( it == m_Map.end()) + return false; + f( *it, val ); + return true; + } + + void clear() + { + m_Map.clear(); + } + + iterator begin() { return m_Map.begin(); } + const_iterator begin() const { return m_Map.begin(); } + iterator end() { return m_Map.end(); } + const_iterator end() const { return m_Map.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + assert( m_Map.find( itWhat->first ) == m_Map.end()); + copy_item()( m_Map, itWhat ); + } + + size_t size() const + { + return m_Map.size(); + } + }; + + } // namespace details + //@endcond + + } // namespace striped_set +}} // namespace cds::container + + +#endif // #ifndef CDSLIB_CONTAINER_STRIPED_SET_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/boost_flat_set.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/boost_flat_set.h new file mode 100644 index 0000000..3e9efae --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/boost_flat_set.h @@ -0,0 +1,83 @@ +/* + This file is a part of libcds - 
Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_CONTAINER_STRIPED_SET_BOOST_FLAT_SET_ADAPTER_H +#define CDSLIB_CONTAINER_STRIPED_SET_BOOST_FLAT_SET_ADAPTER_H + +#include +#if BOOST_VERSION < 104800 +# error "For boost::container::flat_set you must use boost 1.48 or above" +#endif + +#include +#include + +//#if CDS_COMPILER == CDS_COMPILER_MSVC && CDS_COMPILER_VERSION >= 1700 +//# error "boost::container::flat_set is not compatible with MS VC++ 11" +//#endif + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for boost::container::flat_set + template + struct copy_item_policy< boost::container::flat_set< T, Traits, Alloc > > + : public details::boost_set_copy_policies< boost::container::flat_set< T, Traits, Alloc > >::copy_item_policy + {}; + + // Swap policy is not defined for boost::container::flat_set + template + struct swap_item_policy< boost::container::flat_set< T, Traits, Alloc > > + : public details::boost_set_copy_policies< boost::container::flat_set< T, Traits, Alloc > >::swap_item_policy + {}; + + // Move policy for boost::container::flat_set + template + struct move_item_policy< boost::container::flat_set< T, Traits, Alloc > > + : public details::boost_set_copy_policies< boost::container::flat_set< T, Traits, Alloc > >::move_item_policy + {}; + + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + template + class adapt< boost::container::flat_set, Options... > + { + public: + typedef boost::container::flat_set container_type ; ///< underlying container type + typedef cds::container::striped_set::details::boost_set_adapter< container_type, Options... 
> type; + }; +}}} + +//@endcond + +#endif // #ifndef CDSLIB_CONTAINER_STRIPED_SET_BOOST_FLAT_SET_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/boost_list.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/boost_list.h new file mode 100644 index 0000000..e3d39d5 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/boost_list.h @@ -0,0 +1,291 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_CONTAINER_STRIPED_SET_BOOST_LIST_ADAPTER_H +#define CDSLIB_CONTAINER_STRIPED_SET_BOOST_LIST_ADAPTER_H + +#include +#if BOOST_VERSION < 104800 +# error "For boost::container::list you must use boost 1.48 or above" +#endif + +#include // std::lower_bound +#include // ref +#include +#include + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for boost::container::list + template + struct copy_item_policy< boost::container::list< T, Alloc > > + { + typedef boost::container::list< T, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + itInsert = list.insert( itInsert, *itWhat ); + } + }; + + // Swap policy for boost::container::list + template + struct swap_item_policy< boost::container::list< T, Alloc > > + { + typedef boost::container::list< T, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + typename list_type::value_type newVal; + itInsert = list.insert( itInsert, newVal ); + std::swap( *itWhat, *itInsert ); + } + }; + + // Move policy for boost::container::list + template + struct move_item_policy< boost::container::list< T, Alloc > > + { + typedef boost::container::list< T, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + list.insert( itInsert, std::move( *itWhat )); + } + }; + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + + /// boost::container::list adapter for hash set bucket + template + class adapt< boost::container::list, Options... 
> + { + public: + typedef boost::container::list container_type ; ///< underlying container type + + private: + /// Adapted container type + class adapted_container: public cds::container::striped_set::adapted_sequential_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = true; + static bool const has_erase_with = true; + + private: + //@cond + typedef typename cds::opt::details::make_comparator_from_option_list< value_type, Options... >::type key_comparator; + + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , Options... + >::type + >::copy_policy + , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy + , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy + , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy + >::type copy_item; + + struct find_predicate + { + bool operator()( value_type const& i1, value_type const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + + template + bool operator()( Q const& i1, value_type const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + + template + bool operator()( value_type const& i1, Q const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + }; + //@endcond + + private: + //@cond + container_type m_List; + //@endcond + + public: + adapted_container() + {} + + template + bool insert( Q const& val, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate()); + if ( it == m_List.end() || key_comparator()( val, *it ) != 0 ) { + value_type newItem( val ); + 
it = m_List.insert( it, newItem ); + f( *it ); + + return true; + } + + // key already exists + return false; + } + + template + bool emplace( Args&&... args ) + { + value_type val( std::forward(args)... ); + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate()); + if ( it == m_List.end() || key_comparator()( val, *it ) != 0 ) { + m_List.emplace( it, std::move( val )); + return true; + } + return false; + } + + template + std::pair update( Q const& val, Func func, bool bAllowInsert ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate()); + if ( it == m_List.end() || key_comparator()( val, *it ) != 0 ) { + // insert new + if ( !bAllowInsert ) + return std::make_pair( false, false ); + + value_type newItem( val ); + it = m_List.insert( it, newItem ); + func( true, *it, val ); + return std::make_pair( true, true ); + } + else { + // already exists + func( false, *it, val ); + return std::make_pair( true, false ); + } + } + + template + bool erase( Q const& key, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, find_predicate()); + if ( it == m_List.end() || key_comparator()( key, *it ) != 0 ) + return false; + + // key exists + f( *it ); + m_List.erase( it ); + + return true; + } + + template + bool erase( Q const& key, Less pred, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, pred ); + if ( it == m_List.end() || pred( key, *it ) || pred( *it, key )) + return false; + + // key exists + f( *it ); + m_List.erase( it ); + + return true; + } + + template + bool find( Q& val, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate()); + if ( it == m_List.end() || key_comparator()( val, *it ) != 0 ) + return false; + + // key exists + f( *it, val ); + return true; + } + + template + bool find( Q& val, Less pred, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, pred ); 
+ if ( it == m_List.end() || pred( val, *it ) || pred( *it, val )) + return false; + + // key exists + f( *it, val ); + return true; + } + + /// Clears the container + void clear() + { + m_List.clear(); + } + + iterator begin() { return m_List.begin(); } + const_iterator begin() const { return m_List.begin(); } + iterator end() { return m_List.end(); } + const_iterator end() const { return m_List.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), *itWhat, find_predicate()); + assert( it == m_List.end() || key_comparator()( *itWhat, *it ) != 0 ); + + copy_item()( m_List, it, itWhat ); + } + + size_t size() const + { + return m_List.size(); + } + }; + + public: + typedef adapted_container type ; ///< Result of \p adapt metafunction + + }; +}}} // namespace cds::intrsive::striped_set +//@endcond + +#endif // #ifndef CDSLIB_CONTAINER_STRIPED_SET_BOOST_LIST_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/boost_set.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/boost_set.h new file mode 100644 index 0000000..f73943a --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/boost_set.h @@ -0,0 +1,82 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_STRIPED_SET_BOOST_SET_ADAPTER_H +#define CDSLIB_CONTAINER_STRIPED_SET_BOOST_SET_ADAPTER_H + +#include +#if BOOST_VERSION < 104800 +# error "For boost::container::set you must use boost 1.48 or above" +#endif + +#include +#include + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for boost::container::set + template + struct copy_item_policy< boost::container::set< T, Traits, Alloc > > + : public details::boost_set_copy_policies< boost::container::set< T, Traits, Alloc > >::copy_item_policy + {}; + // Copy policy for boost::container::set + template + struct swap_item_policy< boost::container::set< T, Traits, Alloc > > + : public details::boost_set_copy_policies< boost::container::set< T, Traits, Alloc > >::swap_item_policy + {}; + + // Swap policy is not defined for boost::container::set + + // Move policy for boost::container::set + template + struct move_item_policy< boost::container::set< T, Traits, Alloc > > + : 
public details::boost_set_copy_policies< boost::container::set< T, Traits, Alloc > >::move_item_policy + {}; + + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + /// boost::container::flat_set adapter for hash set bucket + template + class adapt< boost::container::set, Options... > + { + public: + typedef boost::container::set container_type ; ///< underlying container type + typedef cds::container::striped_set::details::boost_set_adapter< container_type, Options... > type; + }; +}}} // namespace cds::intrusive::striped_set + + +//@endcond + +#endif // #ifndef CDSLIB_CONTAINER_STRIPED_SET_BOOST_SET_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/boost_slist.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/boost_slist.h new file mode 100644 index 0000000..7ef8efd --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/boost_slist.h @@ -0,0 +1,302 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_STRIPED_SET_BOOST_SLIST_ADAPTER_H +#define CDSLIB_CONTAINER_STRIPED_SET_BOOST_SLIST_ADAPTER_H + +#include // ref +#include +#include + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for boost::container::slist + template + struct copy_item_policy< boost::container::slist< T, Alloc > > + { + typedef boost::container::slist< T, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + list.insert_after( itInsert, *itWhat ); + } + }; + + // Swap policy for boost::container::slist + template + struct swap_item_policy< boost::container::slist< T, Alloc > > + { + typedef boost::container::slist< T, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + T newVal; + itInsert = list.insert_after( itInsert, newVal ); + std::swap( *itInsert, *itWhat ); + } + }; + + // Move policy for boost::container::slist + template + struct move_item_policy< boost::container::slist< T, Alloc > > + { + typedef boost::container::slist< T, Alloc > list_type; + typedef 
typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + list.insert_after( itInsert, std::move( *itWhat )); + } + }; + + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + + /// boost::container::slist adapter for hash set bucket + template + class adapt< boost::container::slist, Options... > + { + public: + typedef boost::container::slist container_type ; ///< underlying container type + + private: + /// Adapted container type + class adapted_container: public cds::container::striped_set::adapted_sequential_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = true; + static bool const has_erase_with = true; + + private: + //@cond + typedef typename cds::opt::details::make_comparator_from_option_list< value_type, Options... >::type key_comparator; + + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , Options... 
+ >::type + >::copy_policy + , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy + , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy + , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy + >::type copy_item; + + template + std::pair< iterator, bool > find_prev_item( Q const& key ) + { + iterator itPrev = m_List.before_begin(); + iterator itEnd = m_List.end(); + for ( iterator it = m_List.begin(); it != itEnd; ++it ) { + int nCmp = key_comparator()( key, *it ); + if ( nCmp < 0 ) + itPrev = it; + else if ( nCmp > 0 ) + break; + else + return std::make_pair( itPrev, true ); + } + return std::make_pair( itPrev, false ); + } + + template + std::pair< iterator, bool > find_prev_item( Q const& key, Less pred ) + { + iterator itPrev = m_List.before_begin(); + iterator itEnd = m_List.end(); + for ( iterator it = m_List.begin(); it != itEnd; ++it ) { + if ( pred( key, *it )) + itPrev = it; + else if ( pred( *it, key )) + break; + else + return std::make_pair( itPrev, true ); + } + return std::make_pair( itPrev, false ); + } + + //@endcond + + private: + //@cond + container_type m_List; + //@endcond + + public: + adapted_container() + {} + + template + bool insert( const Q& val, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item( val ); + if ( !pos.second ) { + value_type newItem( val ); + pos.first = m_List.insert_after( pos.first, newItem ); + f( *pos.first ); + return true; + } + + // key already exists + return false; + } + + template + bool emplace( Args&&... args ) + { + value_type val( std::forward(args)... 
); + std::pair< iterator, bool > pos = find_prev_item( val ); + if ( !pos.second ) { + m_List.emplace_after( pos.first, std::move( val )); + return true; + } + return false; + } + + template + std::pair update( const Q& val, Func func, bool bAllowInsert ) + { + std::pair< iterator, bool > pos = find_prev_item( val ); + if ( !pos.second ) { + // insert new + if ( !bAllowInsert ) + return std::make_pair( false, false ); + + value_type newItem( val ); + pos.first = m_List.insert_after( pos.first, newItem ); + func( true, *pos.first, val ); + return std::make_pair( true, true ); + } + else { + // already exists + func( false, *(++pos.first), val ); + return std::make_pair( true, false ); + } + } + + template + bool erase( Q const& key, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item( key ); + if ( !pos.second ) + return false; + + // key exists + iterator it = pos.first; + f( *(++it)); + m_List.erase_after( pos.first ); + + return true; + } + + template + bool erase( Q const& key, Less pred, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item( key, pred ); + if ( !pos.second ) + return false; + + // key exists + iterator it = pos.first; + f( *(++it)); + m_List.erase_after( pos.first ); + + return true; + } + + template + bool find( Q& val, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item( val ); + if ( !pos.second ) + return false; + + // key exists + f( *(++pos.first), val ); + return true; + } + + template + bool find( Q& val, Less pred, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item( val, pred ); + if ( !pos.second ) + return false; + + // key exists + f( *(++pos.first), val ); + return true; + } + + void clear() + { + m_List.clear(); + } + + iterator begin() { return m_List.begin(); } + const_iterator begin() const { return m_List.begin(); } + iterator end() { return m_List.end(); } + const_iterator end() const { return m_List.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { 
+ std::pair< iterator, bool > pos = find_prev_item( *itWhat ); + assert( !pos.second ); + + copy_item()( m_List, pos.first, itWhat ); + } + + size_t size() const + { + return m_List.size(); + } + }; + + public: + typedef adapted_container type ; ///< Result of \p adapt metafunction + + }; +}}} // namespace cds::intrusive::striped_set + + +//@endcond + +#endif // #ifndef CDSLIB_CONTAINER_STRIPED_SET_BOOST_SLIST_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/boost_stable_vector.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/boost_stable_vector.h new file mode 100644 index 0000000..53a8db6 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/boost_stable_vector.h @@ -0,0 +1,284 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_STRIPED_SET_BOOST_STABLE_VECTOR_ADAPTER_H +#define CDSLIB_CONTAINER_STRIPED_SET_BOOST_STABLE_VECTOR_ADAPTER_H + +#include +#if BOOST_VERSION < 104800 +# error "For boost::container::stable_vector you must use boost 1.48 or above" +#endif + +#include // ref +#include // std::lower_bound +#include // std::pair +#include +#include + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for boost::container::stable_vector + template + struct copy_item_policy< boost::container::stable_vector< T, Alloc > > + { + typedef boost::container::stable_vector< T, Alloc > vector_type; + typedef typename vector_type::iterator iterator; + + void operator()( vector_type& vec, iterator itInsert, iterator itWhat ) + { + vec.insert( itInsert, *itWhat ); + } + }; + + // Swap policy for boost::container::stable_vector + template + struct swap_item_policy< boost::container::stable_vector< T, Alloc > > + { + typedef boost::container::stable_vector< T, Alloc > vector_type; + typedef typename vector_type::iterator iterator; + + void operator()( vector_type& vec, iterator itInsert, iterator itWhat ) + { + typename vector_type::value_type newVal; + itInsert = vec.insert( itInsert, newVal ); + std::swap( *itInsert, *itWhat ); + } + }; + + // Move policy for boost::container::stable_vector + template + struct move_item_policy< boost::container::stable_vector< T, Alloc > > + { + typedef 
boost::container::stable_vector< T, Alloc > vector_type; + typedef typename vector_type::iterator iterator; + + void operator()( vector_type& vec, iterator itInsert, iterator itWhat ) + { + vec.insert( itInsert, std::move( *itWhat )); + } + }; + + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + /// boost::container::stable_vector adapter for hash set bucket + template + class adapt< boost::container::stable_vector, Options... > + { + public: + typedef boost::container::stable_vector container_type ; ///< underlying container type + + private: + /// Adapted container type + class adapted_container: public cds::container::striped_set::adapted_sequential_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = true; + static bool const has_erase_with = true; + + private: + //@cond + typedef typename cds::opt::details::make_comparator_from_option_list< value_type, Options... >::type key_comparator; + + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , Options... 
+ >::type + >::copy_policy + , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy + , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy + , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy + >::type copy_item; + + struct find_predicate + { + bool operator()( value_type const& i1, value_type const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + + template + bool operator()( Q const& i1, value_type const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + + template + bool operator()( value_type const& i1, Q const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + }; + //@endcond + + private: + //@cond + container_type m_Vector; + //@endcond + + public: + template + bool insert( const Q& val, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate()); + if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) { + value_type newItem( val ); + it = m_Vector.insert( it, newItem ); + f( *it ); + return true; + } + return false; + } + + template + bool emplace( Args&&... args ) + { + value_type val( std::forward(args)... 
); + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate()); + if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) { + it = m_Vector.emplace( it, std::move( val )); + return true; + } + return false; + } + + template + std::pair update( const Q& val, Func func, bool bAllowInsert ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate()); + if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) { + // insert new + if ( !bAllowInsert ) + return std::make_pair( false, false ); + + value_type newItem( val ); + it = m_Vector.insert( it, newItem ); + func( true, *it, val ); + return std::make_pair( true, true ); + } + else { + // already exists + func( false, *it, val ); + return std::make_pair( true, false ); + } + } + + template + bool erase( const Q& key, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), key, find_predicate()); + if ( it == m_Vector.end() || key_comparator()( key, *it ) != 0 ) + return false; + + // key exists + f( *it ); + m_Vector.erase( it ); + return true; + } + + template + bool erase( const Q& key, Less pred, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), key, pred ); + if ( it == m_Vector.end() || pred( key, *it ) || pred( *it, key )) + return false; + + // key exists + f( *it ); + m_Vector.erase( it ); + return true; + } + + template + bool find( Q& val, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate()); + if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) + return false; + + // key exists + f( *it, val ); + return true; + } + + template + bool find( Q& val, Less pred, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, pred ); + if ( it == m_Vector.end() || pred( val, *it ) || pred( *it, val )) + return false; + + // key exists + f( *it, val ); + return true; + } + + /// Clears the 
container + void clear() + { + m_Vector.clear(); + } + + iterator begin() { return m_Vector.begin(); } + const_iterator begin() const { return m_Vector.begin(); } + iterator end() { return m_Vector.end(); } + const_iterator end() const { return m_Vector.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), *itWhat, find_predicate()); + assert( it == m_Vector.end() || key_comparator()( *itWhat, *it ) != 0 ); + + copy_item()( m_Vector, it, itWhat ); + } + + size_t size() const + { + return m_Vector.size(); + } + }; + + public: + typedef adapted_container type ; ///< Result of \p adapt metafunction + + }; +}}} // namespace cds::intrusive::striped_set +//@endcond + +#endif // #ifndef CDSLIB_CONTAINER_STRIPED_SET_BOOST_STABLE_VECTOR_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/boost_unordered_set.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/boost_unordered_set.h new file mode 100644 index 0000000..8d3e70f --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/boost_unordered_set.h @@ -0,0 +1,73 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_STRIPED_SET_BOOST_UNORDERED_SET_ADAPTER_H +#define CDSLIB_CONTAINER_STRIPED_SET_BOOST_UNORDERED_SET_ADAPTER_H + +#include +#include + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for boost::unordered_set + template + struct copy_item_policy< boost::unordered_set< T, Traits, Alloc > > + : public details::boost_set_copy_policies< boost::unordered_set< T, Traits, Alloc > >::copy_item_policy + {}; + + template + struct swap_item_policy< boost::unordered_set< T, Traits, Alloc > > + : public details::boost_set_copy_policies< boost::unordered_set< T, Traits, Alloc > >::swap_item_policy + {}; + + // Move policy for boost::unordered_set + template + struct move_item_policy< boost::unordered_set< T, Traits, Alloc > > + : public details::boost_set_copy_policies< boost::unordered_set< T, Traits, Alloc > >::move_item_policy + {}; + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + /// boost::unordered_set adapter for hash set bucket + template + class adapt< boost::unordered_set, Options... 
> + { + public: + typedef boost::unordered_set container_type ; ///< underlying container type + typedef cds::container::striped_set::details::boost_set_adapter< container_type, Options... > type; + }; +}}} // namespace cds::intrusive::striped_set + +//@endcond + +#endif // #ifndef CDSLIB_CONTAINER_STRIPED_SET_BOOST_UNORDERED_SET_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/boost_vector.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/boost_vector.h new file mode 100644 index 0000000..94d0eed --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/boost_vector.h @@ -0,0 +1,286 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_STRIPED_SET_BOOST_VECTOR_ADAPTER_H +#define CDSLIB_CONTAINER_STRIPED_SET_BOOST_VECTOR_ADAPTER_H + +#include +#if BOOST_VERSION < 104800 +# error "For boost::container::vector you must use boost 1.48 or above" +#endif + +#include // ref +#include // std::lower_bound +#include // std::pair +#include // lower_bound +#include + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for boost::container::vector + template + struct copy_item_policy< boost::container::vector< T, Alloc > > + { + typedef boost::container::vector< T, Alloc > vector_type; + typedef typename vector_type::iterator iterator; + + void operator()( vector_type& vec, iterator itInsert, iterator itWhat ) + { + vec.insert( itInsert, *itWhat ); + } + }; + + // Swap policy for boost::container::vector + template + struct swap_item_policy< boost::container::vector< T, Alloc > > + { + typedef boost::container::vector< T, Alloc > vector_type; + typedef typename vector_type::iterator iterator; + + void operator()( vector_type& vec, iterator itInsert, iterator itWhat ) + { + typename vector_type::value_type newVal; + itInsert = vec.insert( itInsert, newVal ); + std::swap( *itInsert, *itWhat ); + } + }; + + // Move policy for boost::container::vector + template + struct move_item_policy< boost::container::vector< T, Alloc > > + { + typedef boost::container::vector< T, Alloc > vector_type; + typedef typename 
vector_type::iterator iterator; + + void operator()( vector_type& vec, iterator itInsert, iterator itWhat ) + { + vec.insert( itInsert, std::move( *itWhat )); + } + }; + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + + /// boost::container::vector adapter for hash set bucket + template + class adapt< boost::container::vector, Options... > + { + public: + typedef boost::container::vector container_type ; ///< underlying container type + + private: + /// Adapted container type + class adapted_container: public cds::container::striped_set::adapted_sequential_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = true; + static bool const has_erase_with = true; + + private: + //@cond + typedef typename cds::opt::details::make_comparator_from_option_list< value_type, Options... >::type key_comparator; + + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , Options... 
+ >::type + >::copy_policy + , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy + , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy + , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy + >::type copy_item; + + struct find_predicate + { + bool operator()( value_type const& i1, value_type const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + + template + bool operator()( Q const& i1, value_type const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + + template + bool operator()( value_type const& i1, Q const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + }; + //@endcond + + private: + //@cond + container_type m_Vector; + //@endcond + + public: + template + bool insert( const Q& val, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate()); + if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) { + value_type newItem( val ); + it = m_Vector.insert( it, newItem ); + f( *it ); + return true; + } + return false; + } + + template + bool emplace( Args&&... args ) + { + value_type val( std::forward(args)... 
); + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate()); + if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) { + it = m_Vector.emplace( it, std::move( val )); + return true; + } + return false; + } + + template + std::pair update( const Q& val, Func func, bool bAllowInsert ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate()); + if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) { + // insert new + if ( !bAllowInsert ) + return std::make_pair( false, false ); + + value_type newItem( val ); + it = m_Vector.insert( it, newItem ); + func( true, *it, val ); + return std::make_pair( true, true ); + } + else { + // already exists + func( false, *it, val ); + return std::make_pair( true, false ); + } + } + + template + bool erase( const Q& key, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), key, find_predicate()); + if ( it == m_Vector.end() || key_comparator()( key, *it ) != 0 ) + return false; + + // key exists + f( *it ); + m_Vector.erase( it ); + return true; + } + + template + bool erase( Q const& key, Less pred, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), key, pred ); + if ( it == m_Vector.end() || pred( key, *it ) || pred( *it, key )) + return false; + + // key exists + f( *it ); + m_Vector.erase( it ); + return true; + } + + template + bool find( Q& val, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate()); + if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) + return false; + + // key exists + f( *it, val ); + return true; + } + + template + bool find( Q& val, Less pred, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, pred ); + if ( it == m_Vector.end() || pred( val, *it ) || pred( *it, val )) + return false; + + // key exists + f( *it, val ); + return true; + } + + /// Clears the 
container + void clear() + { + m_Vector.clear(); + } + + iterator begin() { return m_Vector.begin(); } + const_iterator begin() const { return m_Vector.begin(); } + iterator end() { return m_Vector.end(); } + const_iterator end() const { return m_Vector.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), *itWhat, find_predicate()); + assert( it == m_Vector.end() || key_comparator()( *itWhat, *it ) != 0 ); + + copy_item()( m_Vector, it, itWhat ); + } + + size_t size() const + { + return m_Vector.size(); + } + }; + + public: + typedef adapted_container type ; ///< Result of \p adapt metafunction + + }; +}}} // namespace cds::intrusive::striped_set + + +//@endcond + +#endif // #ifndef CDSLIB_CONTAINER_STRIPED_SET_BOOST_VECTOR_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/std_hash_set.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/std_hash_set.h new file mode 100644 index 0000000..ae7ff73 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/std_hash_set.h @@ -0,0 +1,201 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_STRIPED_SET_STD_HASH_SET_ADAPTER_H +#define CDSLIB_CONTAINER_STRIPED_SET_STD_HASH_SET_ADAPTER_H + +#include +#include + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for std::unordered_set + template + struct copy_item_policy< std::unordered_set< T, Hash, Pred, Alloc > > + { + typedef std::unordered_set< T, Hash, Pred, Alloc > set_type; + typedef typename set_type::iterator iterator; + + void operator()( set_type& set, iterator itWhat ) + { + set.insert( *itWhat ); + } + }; + + template + struct swap_item_policy< std::unordered_set< T, Hash, Pred, Alloc > >: public copy_item_policy< std::unordered_set< T, Hash, Pred, Alloc > > + {}; + + // Move policy for std::unordered_set + template + struct move_item_policy< std::unordered_set< T, Hash, Pred, Alloc > > + { + typedef std::unordered_set< T, Hash, Pred, Alloc > set_type; + typedef typename set_type::iterator iterator; + + void operator()( set_type& set, iterator itWhat ) + { + set.insert( std::move( *itWhat )); + } + }; + + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + /// std::unordered_set 
adapter for hash set bucket + template + class adapt< std::unordered_set, Options... > + { + public: + typedef std::unordered_set container_type ; ///< underlying container type + + private: + /// Adapted container type + class adapted_container: public cds::container::striped_set::adapted_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = false; + static bool const has_erase_with = false; + + private: + //@cond + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , Options... + >::type + >::copy_policy + , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy + , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy // not defined + , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy + >::type copy_item; + //@endcond + + private: + //@cond + container_type m_Set; + //@endcond + + public: + template + bool insert( const Q& val, Func f ) + { + std::pair res = m_Set.insert( value_type(val)); + if ( res.second ) + f( const_cast(*res.first)); + return res.second; + } + + template + bool emplace( Args&&... args ) + { + std::pair res = m_Set.emplace( std::forward(args)... 
); + return res.second; + } + + template + std::pair update( const Q& val, Func func, bool bAllowInsert ) + { + if ( bAllowInsert ) { + std::pair res = m_Set.insert( value_type(val)); + func( res.second, const_cast(*res.first), val ); + return std::make_pair( true, res.second ); + } + else { + auto it = m_Set.find( value_type(val)); + if ( it == m_Set.end()) + return std::make_pair( false, false ); + + func( false, const_cast(*it), val ); + return std::make_pair( true, false ); + } + } + + template + bool erase( const Q& key, Func f ) + { + const_iterator it = m_Set.find( value_type(key)); + if ( it == m_Set.end()) + return false; + f( const_cast(*it)); + m_Set.erase( it ); + return true; + } + + template + bool find( Q& val, Func f ) + { + iterator it = m_Set.find( value_type(val)); + if ( it == m_Set.end()) + return false; + f( const_cast(*it), val ); + return true; + } + + /// Clears the container + void clear() + { + m_Set.clear(); + } + + iterator begin() { return m_Set.begin(); } + const_iterator begin() const { return m_Set.begin(); } + iterator end() { return m_Set.end(); } + const_iterator end() const { return m_Set.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + assert( m_Set.find( *itWhat ) == m_Set.end()); + copy_item()( m_Set, itWhat ); + } + + size_t size() const + { + return m_Set.size(); + } + }; + + public: + typedef adapted_container type ; ///< Result of \p adapt metafunction + }; +}}} // namespace cds::intrusive::striped_set + + +//@endcond + +#endif // #ifndef CDSLIB_CONTAINER_STRIPED_SET_STD_HASH_SET_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/std_list.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/std_list.h new file mode 100644 index 0000000..2a00f22 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/std_list.h @@ -0,0 +1,327 @@ +/* + This file is a part of libcds - Concurrent 
Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_CONTAINER_STRIPED_SET_STD_LIST_ADAPTER_H +#define CDSLIB_CONTAINER_STRIPED_SET_STD_LIST_ADAPTER_H + +#include // ref +#include +#include // std::lower_bound +#include + +#undef CDS_STD_LIST_SIZE_CXX11_CONFORM +#if !( defined(__GLIBCXX__ ) && (!defined(_GLIBCXX_USE_CXX11_ABI) || _GLIBCXX_USE_CXX11_ABI == 0 )) +# define CDS_STD_LIST_SIZE_CXX11_CONFORM +#endif + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for std::list + template + struct copy_item_policy< std::list< T, Alloc > > + { + typedef std::list< T, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + itInsert = list.insert( itInsert, *itWhat ); + } + }; + + // Swap policy for std::list + template + struct swap_item_policy< std::list< T, Alloc > > + { + typedef std::list< T, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + typename list_type::value_type newVal; + itInsert = list.insert( itInsert, newVal ); + std::swap( *itWhat, *itInsert ); + } + }; + + // Move policy for std::list + template + struct move_item_policy< std::list< T, Alloc > > + { + typedef std::list< T, Alloc > list_type; + typedef typename list_type::iterator iterator; + + void operator()( list_type& list, iterator itInsert, iterator itWhat ) + { + list.insert( itInsert, std::move( *itWhat )); + } + }; + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + + /// std::list adapter for hash set bucket + template + class adapt< std::list, Options... 
> + { + public: + typedef std::list container_type ; ///< underlying container type + + private: + /// Adapted container type + class adapted_container: public cds::container::striped_set::adapted_sequential_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = true; + static bool const has_erase_with = true; + + private: + //@cond + typedef typename cds::opt::details::make_comparator_from_option_list< value_type, Options... >::type key_comparator; + + + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , Options... + >::type + >::copy_policy + , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy + , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy + , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy + >::type copy_item; + + struct find_predicate + { + bool operator()( value_type const& i1, value_type const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + + template + bool operator()( Q const& i1, value_type const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + + template + bool operator()( value_type const& i1, Q const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + }; + //@endcond + + private: + //@cond + container_type m_List; +# if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) + // GCC C++ lib bug: + // In GCC, the complexity of std::list::size() is O(N) + // (see http://gcc.gnu.org/bugzilla/show_bug.cgi?id=49561) + // Fixed in GCC 5 + size_t m_nSize ; // list size +# endif + //@endcond + + public: + adapted_container() +# if 
!defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) + : m_nSize(0) +# endif + {} + + template + bool insert( const Q& val, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate()); + if ( it == m_List.end() || key_comparator()( val, *it ) != 0 ) { + value_type newItem( val ); + it = m_List.insert( it, newItem ); + f( *it ); + +# if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) + ++m_nSize; +# endif + return true; + } + + // key already exists + return false; + } + + template + bool emplace( Args&&... args ) + { + value_type val(std::forward(args)...); + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate()); + if ( it == m_List.end() || key_comparator()( val, *it ) != 0 ) { + it = m_List.emplace( it, std::move( val )); +# if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) + ++m_nSize; +# endif + return true; + } + return false; + } + + template + std::pair update( const Q& val, Func func, bool bAllowInsert ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate()); + if ( it == m_List.end() || key_comparator()( val, *it ) != 0 ) { + // insert new + if ( !bAllowInsert ) + return std::make_pair( false, false ); + + value_type newItem( val ); + it = m_List.insert( it, newItem ); + func( true, *it, val ); +# if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) + ++m_nSize; +# endif + return std::make_pair( true, true ); + } + else { + // already exists + func( false, *it, val ); + return std::make_pair( true, false ); + } + } + + template + bool erase( const Q& key, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), key, find_predicate()); + if ( it == m_List.end() || key_comparator()( key, *it ) != 0 ) + return false; + + // key exists + f( *it ); + m_List.erase( it ); +# if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) + --m_nSize; +# endif + + return true; + } + + template + bool erase( Q const& key, Less pred, Func f ) + { + iterator it = std::lower_bound( 
m_List.begin(), m_List.end(), key, pred ); + if ( it == m_List.end() || pred( key, *it ) || pred( *it, key )) + return false; + + // key exists + f( *it ); + m_List.erase( it ); +# if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) + --m_nSize; +# endif + + return true; + } + + template + bool find( Q& val, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, find_predicate()); + if ( it == m_List.end() || key_comparator()( val, *it ) != 0 ) + return false; + + // key exists + f( *it, val ); + return true; + } + + template + bool find( Q& val, Less pred, Func f ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), val, pred ); + if ( it == m_List.end() || pred( val, *it ) || pred( *it, val )) + return false; + + // key exists + f( *it, val ); + return true; + } + + /// Clears the container + void clear() + { + m_List.clear(); + } + + iterator begin() { return m_List.begin(); } + const_iterator begin() const { return m_List.begin(); } + iterator end() { return m_List.end(); } + const_iterator end() const { return m_List.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + iterator it = std::lower_bound( m_List.begin(), m_List.end(), *itWhat, find_predicate()); + assert( it == m_List.end() || key_comparator()( *itWhat, *it ) != 0 ); + + copy_item()( m_List, it, itWhat ); +# if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) + ++m_nSize; +# endif + } + + size_t size() const + { +# if !defined(CDS_STD_LIST_SIZE_CXX11_CONFORM) + return m_nSize; +# else + return m_List.size(); +# endif + + } + }; + + public: + typedef adapted_container type ; ///< Result of \p adapt metafunction + + }; +}}} + + +//@endcond + +#endif // #ifndef CDSLIB_CONTAINER_STRIPED_SET_STD_LIST_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/std_set.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/std_set.h new file mode 100644 index 0000000..f01a276 --- 
/dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/std_set.h @@ -0,0 +1,201 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_CONTAINER_STRIPED_SET_STD_SET_ADAPTER_H +#define CDSLIB_CONTAINER_STRIPED_SET_STD_SET_ADAPTER_H + +#include +#include + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for std::set + template + struct copy_item_policy< std::set< T, Traits, Alloc > > + { + typedef std::set< T, Traits, Alloc > set_type; + typedef typename set_type::iterator iterator; + + void operator()( set_type& set, iterator itWhat ) + { + set.insert( *itWhat ); + } + }; + + template + struct swap_item_policy< std::set< T, Traits, Alloc > >: public copy_item_policy< std::set< T, Traits, Alloc > > + {}; + + // Move policy for std::set + template + struct move_item_policy< std::set< T, Traits, Alloc > > + { + typedef std::set< T, Traits, Alloc > set_type; + typedef typename set_type::iterator iterator; + + void operator()( set_type& set, iterator itWhat ) + { + set.insert( std::move( *itWhat )); + } + }; + } // namespace striped_set +}} // namespace cds::container + +namespace cds { namespace intrusive { namespace striped_set { + + /// std::set adapter for hash set bucket + template + class adapt< std::set, Options... > + { + public: + typedef std::set container_type ; ///< underlying container type + + private: + /// Adapted container type + class adapted_container: public cds::container::striped_set::adapted_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = false; + static bool const has_erase_with = false; + + private: + //@cond + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , Options... 
+ >::type + >::copy_policy + , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy + , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy + , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy + >::type copy_item; + //@endcond + + private: + //@cond + container_type m_Set; + //@endcond + + public: + + template + bool insert( const Q& val, Func f ) + { + std::pair res = m_Set.insert( value_type(val)); + if ( res.second ) + f( const_cast(*res.first)); + return res.second; + } + + template + bool emplace( Args&&... args ) + { + std::pair res = m_Set.emplace( std::forward(args)... ); + return res.second; + } + + template + std::pair update( const Q& val, Func func, bool bAllowInsert ) + { + if ( bAllowInsert ) { + std::pair res = m_Set.insert( value_type(val)); + func( res.second, const_cast(*res.first), val ); + return std::make_pair( true, res.second ); + } + else { + auto it = m_Set.find(value_type(val)); + if ( it == m_Set.end()) + return std::make_pair( false, false ); + func( false, const_cast(*it), val ); + return std::make_pair( true, false ); + } + } + + template + bool erase( const Q& key, Func f ) + { + iterator it = m_Set.find( value_type(key)); + if ( it == m_Set.end()) + return false; + f( const_cast(*it)); + m_Set.erase( it ); + return true; + } + + template + bool find( Q& val, Func f ) + { + iterator it = m_Set.find( value_type(val)); + if ( it == m_Set.end()) + return false; + f( const_cast(*it), val ); + return true; + } + + void clear() + { + m_Set.clear(); + } + + iterator begin() { return m_Set.begin(); } + const_iterator begin() const { return m_Set.begin(); } + iterator end() { return m_Set.end(); } + const_iterator end() const { return m_Set.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + assert( m_Set.find( *itWhat ) == m_Set.end()); + copy_item()( m_Set, itWhat ); + } + + size_t size() const + { + return 
m_Set.size(); + } + }; + + public: + typedef adapted_container type ; ///< Result of \p adapt metafunction + + }; +}}} // namespace cds::intrusive::striped_set + + +//@endcond + +#endif // #ifndef CDSLIB_CONTAINER_STRIPED_SET_STD_SET_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/std_vector.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/std_vector.h new file mode 100644 index 0000000..fcab055 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/striped_set/std_vector.h @@ -0,0 +1,282 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_STRIPED_SET_STD_VECTOR_ADAPTER_H +#define CDSLIB_CONTAINER_STRIPED_SET_STD_VECTOR_ADAPTER_H + +#include // ref +#include +#include // std::lower_bound +#include // std::pair +#include // lower_bound + +//@cond +namespace cds { namespace container { + namespace striped_set { + + // Copy policy for std::vector + template + struct copy_item_policy< std::vector< T, Alloc > > + { + typedef std::vector< T, Alloc > vector_type; + typedef typename vector_type::iterator iterator; + + void operator()( vector_type& vec, iterator itInsert, iterator itWhat ) + { + vec.insert( itInsert, *itWhat ); + } + }; + + // Swap policy for std::vector + template + struct swap_item_policy< std::vector< T, Alloc > > + { + typedef std::vector< T, Alloc > vector_type; + typedef typename vector_type::iterator iterator; + + void operator()( vector_type& vec, iterator itInsert, iterator itWhat ) + { + typename vector_type::value_type newVal; + itInsert = vec.insert( itInsert, newVal ); + std::swap( *itInsert, *itWhat ); + } + }; + + // Move policy for std::vector + template + struct move_item_policy< std::vector< T, Alloc > > + { + typedef std::vector< T, Alloc > vector_type; + typedef typename vector_type::iterator iterator; + + void operator()( vector_type& vec, iterator itInsert, iterator itWhat ) + { + vec.insert( itInsert, std::move( *itWhat )); + } + }; + + } // namespace striped_set +}} // namespace cds::container + +namespace cds { 
namespace intrusive { namespace striped_set { + + /// std::vector adapter for hash set bucket + template + class adapt< std::vector, Options... > + { + public: + typedef std::vector container_type ; ///< underlying container type + + private: + /// Adapted container type + class adapted_container: public cds::container::striped_set::adapted_sequential_container + { + public: + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + static bool const has_find_with = true; + static bool const has_erase_with = true; + + private: + //@cond + typedef typename cds::opt::details::make_comparator_from_option_list< value_type, Options... >::type key_comparator; + + typedef typename cds::opt::select< + typename cds::opt::value< + typename cds::opt::find_option< + cds::opt::copy_policy< cds::container::striped_set::move_item > + , Options... 
+ >::type + >::copy_policy + , cds::container::striped_set::copy_item, cds::container::striped_set::copy_item_policy + , cds::container::striped_set::swap_item, cds::container::striped_set::swap_item_policy + , cds::container::striped_set::move_item, cds::container::striped_set::move_item_policy + >::type copy_item; + + struct find_predicate + { + bool operator()( value_type const& i1, value_type const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + + template + bool operator()( Q const& i1, value_type const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + + template + bool operator()( value_type const& i1, Q const& i2) const + { + return key_comparator()( i1, i2 ) < 0; + } + }; + //@endcond + + private: + //@cond + container_type m_Vector; + //@endcond + + public: + + template + bool insert( const Q& val, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate()); + if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) { + value_type newItem( val ); + it = m_Vector.insert( it, newItem ); + f( *it ); + return true; + } + return false; + } + + template + bool emplace( Args&&... args ) + { + value_type val( std::forward(args)... 
); + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate()); + if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) { + it = m_Vector.emplace( it, std::move( val )); + return true; + } + return false; + } + + template + std::pair update( const Q& val, Func func, bool bAllowInsert ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate()); + if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) { + // insert new + if ( !bAllowInsert ) + return std::make_pair( false, false ); + + value_type newItem( val ); + it = m_Vector.insert( it, newItem ); + func( true, *it, val ); + return std::make_pair( true, true ); + } + else { + // already exists + func( false, *it, val ); + return std::make_pair( true, false ); + } + } + + template + bool erase( const Q& key, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), key, find_predicate()); + if ( it == m_Vector.end() || key_comparator()( key, *it ) != 0 ) + return false; + + // key exists + f( *it ); + m_Vector.erase( it ); + return true; + } + + template + bool erase( const Q& key, Less pred, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), key, pred ); + if ( it == m_Vector.end() || pred( key, *it ) || pred( *it, key )) + return false; + + // key exists + f( *it ); + m_Vector.erase( it ); + return true; + } + + template + bool find( Q& val, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, find_predicate()); + if ( it == m_Vector.end() || key_comparator()( val, *it ) != 0 ) + return false; + + // key exists + f( *it, val ); + return true; + } + + template + bool find( Q& val, Less pred, Func f ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), val, pred ); + if ( it == m_Vector.end() || pred( val, *it ) || pred( *it, val )) + return false; + + // key exists + f( *it, val ); + return true; + } + + + void clear() + { 
+ m_Vector.clear(); + } + + iterator begin() { return m_Vector.begin(); } + const_iterator begin() const { return m_Vector.begin(); } + iterator end() { return m_Vector.end(); } + const_iterator end() const { return m_Vector.end(); } + + void move_item( adapted_container& /*from*/, iterator itWhat ) + { + iterator it = std::lower_bound( m_Vector.begin(), m_Vector.end(), *itWhat, find_predicate()); + assert( it == m_Vector.end() || key_comparator()( *itWhat, *it ) != 0 ); + + copy_item()( m_Vector, it, itWhat ); + } + + size_t size() const + { + return m_Vector.size(); + } + }; + + public: + typedef adapted_container type ; ///< Result of \p adapt metafunction + + }; +}}} // namespace cds::intrusive::striped_set + +//@endcond + +#endif // #ifndef CDSLIB_CONTAINER_STRIPED_SET_STD_VECTOR_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/treiber_stack.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/treiber_stack.h new file mode 100644 index 0000000..ca90e07 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/treiber_stack.h @@ -0,0 +1,409 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_TREIBER_STACK_H +#define CDSLIB_CONTAINER_TREIBER_STACK_H + +#include // unique_ptr +#include +#include + +namespace cds { namespace container { + + /// TreiberStack related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace treiber_stack { + /// Internal statistics + template ::counter_type > + using stat = cds::intrusive::treiber_stack::stat< Counter >; + + /// Dummy internal statistics + typedef cds::intrusive::treiber_stack::empty_stat empty_stat; + + /// TreiberStack default type traits + struct traits + { + /// Back-off strategy + typedef cds::backoff::Default back_off; + + /// Node allocator + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// C++ memory ordering model + /** + Can be opt::v::relaxed_ordering (relaxed memory model, the default) + or opt::v::sequential_consistent (sequentially consisnent memory model). + */ + typedef opt::v::relaxed_ordering memory_model; + + /// Item counting feature; by default, disabled. 
Use \p cds::atomicity::item_counter to enable item counting + typedef cds::atomicity::empty_item_counter item_counter; + + /// Internal statistics (by default, no internal statistics) + /** + Possible types are: \ref treiber_stack::stat, \ref treiber_stack::empty_stat (the default), + user-provided class that supports treiber_stack::stat interface. + */ + typedef empty_stat stat; + + /** @name Elimination back-off traits + The following traits is used only if elimination enabled + */ + ///@{ + + /// Enable elimination back-off; by default, it is disabled + static constexpr const bool enable_elimination = false; + + /// Back-off strategy to wait for elimination, default is cds::backoff::delay<> + typedef cds::backoff::delay<> elimination_backoff; + + /// Buffer type for elimination array + /** + Possible types are \p opt::v::initialized_static_buffer, \p opt::v::initialized_dynamic_buffer. + The buffer can be any size: \p Exp2 template parameter of those classes can be \p false. + The size should be selected empirically for your application and hardware, there are no common rules for that. + Default is %opt::v::initialized_static_buffer< any_type, 4 > . + */ + typedef opt::v::initialized_static_buffer< int, 4 > buffer; + + /// Random engine to generate a random position in elimination array + typedef opt::v::c_rand random_engine; + + /// Lock type used in elimination, default is cds::sync::spin + typedef cds::sync::spin lock_type; + + ///@} + }; + + /// Metafunction converting option list to \p TreiberStack traits + /** + Supported \p Options are: + - \p opt::allocator - allocator (like \p std::allocator) used for allocating stack nodes. Default is \ref CDS_DEFAULT_ALLOCATOR + - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used. + - \p opt::memory_model - C++ memory ordering model. 
Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + - \p opt::item_counter - the type of item counting feature. Default is \p cds::atomicity::empty_item_counter, i.e. + no item counting. Use \p cds::atomicity::item_counter to enable item counting. + - \p opt::stat - the type to gather internal statistics. + Possible option value are: \p treiber_stack::stat, \p treiber_stack::empty_stat (the default), + user-provided class that supports \p %treiber_stack::stat interface. + - \p opt::enable_elimination - enable elimination back-off for the stack. Default value is \p false. + + If elimination back-off is enabled, additional options can be specified: + - \p opt::buffer - an initialized buffer type for elimination array, see \p opt::v::initialized_static_buffer, \p opt::v::initialized_dynamic_buffer. + The buffer can be any size: \p Exp2 template parameter of those classes can be \p false. + The size should be selected empirically for your application and hardware, there are no common rules for that. + Default is %opt::v::initialized_static_buffer< any_type, 4 > . + - \p opt::random_engine - a random engine to generate a random position in elimination array. + Default is \p opt::v::c_rand. + - \p opt::elimination_backoff - back-off strategy to wait for elimination, default is \p cds::backoff::delay<> + - \p opt::lock_type - a lock type used in elimination back-off, default is \p cds::sync::spin. 
+ + Example: declare %TreiberStack with item counting and internal statistics using \p %make_traits + \code + typedef cds::container::TreiberStack< cds::gc::HP, Foo, + typename cds::container::treiber_stack::make_traits< + cds::opt::item_counter< cds::atomicity::item_counter >, + cds::opt::stat< cds::intrusive::treiber_stack::stat<> > + >::type + > myStack; + \endcode + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + , Options... + >::type type; +# endif + }; + } // namespace treiber_stack + + //@cond + namespace details { + template + struct make_treiber_stack + { + typedef GC gc; + typedef T value_type; + typedef Traits traits; + + struct node_type: public cds::intrusive::treiber_stack::node< gc > + { + value_type m_value; + + node_type( const value_type& val ) + : m_value( val ) + {} + + template + node_type( Args&&... args ) + : m_value( std::forward( args )... ) + {} + }; + + typedef typename traits::allocator::template rebind::other allocator_type; + typedef cds::details::Allocator< node_type, allocator_type > cxx_allocator; + + struct node_deallocator + { + void operator ()( node_type * pNode ) + { + cxx_allocator().Delete( pNode ); + } + }; + + struct intrusive_traits: public traits + { + typedef cds::intrusive::treiber_stack::base_hook< cds::opt::gc > hook; + typedef node_deallocator disposer; + static constexpr const opt::link_check_type link_checker = cds::intrusive::treiber_stack::traits::link_checker; + }; + + // Result of metafunction + typedef intrusive::TreiberStack< gc, node_type, intrusive_traits > type; + }; + } // namespace details + //@endcond + + /// Treiber's stack algorithm + /** @ingroup cds_nonintrusive_stack + It is non-intrusive version of Treiber's stack algorithm based on intrusive implementation + intrusive::TreiberStack. 
+ + Template arguments: + - \p GC - garbage collector type: \p gc::HP, gc::DHP + - \p T - type stored in the stack. + - \p Traits - stack traits, default is \p treiber_stack::traits. You can use \p treiber_stack::make_traits + metafunction to make your traits or just derive your traits from \p %treiber_stack::traits: + \code + struct myTraits: public cds::container::treiber_stack::traits { + typedef cds::intrusive::treiber_stack::stat<> stat; + typedef cds::atomicity::item_counter item_counter; + }; + typedef cds::container::TreiberStack< cds::gc::HP, Foo, myTraits > myStack; + + // Equivalent make_traits example: + typedef cds::intrusive::TreiberStack< cds::gc::HP, Foo, + typename cds::intrusive::treiber_stack::make_traits< + cds::opt::item_counter< cds::atomicity::item_counter >, + cds::opt::stat< cds::intrusive::treiber_stack::stat<> > + >::type + > myStack; + \endcode + */ + template < + typename GC, + typename T, + typename Traits = treiber_stack::traits + > + class TreiberStack + : public +#ifdef CDS_DOXYGEN_INVOKED + intrusive::TreiberStack< GC, cds::intrusive::treiber_stack::node< T >, Traits > +#else + details::make_treiber_stack< GC, T, Traits >::type +#endif + { + //@cond + typedef details::make_treiber_stack< GC, T, Traits > maker; + typedef typename maker::type base_class; + //@endcond + + public: + /// Rebind template arguments + template + struct rebind { + typedef TreiberStack< GC2, T2, Traits2 > other; ///< Rebinding result + }; + + public: + typedef T value_type ; ///< Value type stored in the stack + typedef typename base_class::gc gc ; ///< Garbage collector used + typedef typename base_class::back_off back_off ; ///< Back-off strategy used + typedef typename maker::allocator_type allocator_type ; ///< Allocator type used for allocating/deallocating the nodes + typedef typename base_class::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_order option + typedef typename base_class::stat stat ; ///< Internal statistics policy used + + protected: + typedef typename maker::node_type node_type ; ///< stack node type (derived from \p intrusive::treiber_stack::node) + + //@cond + typedef typename maker::cxx_allocator cxx_allocator; + typedef typename maker::node_deallocator node_deallocator; + //@endcond + + protected: + ///@cond + static node_type * alloc_node( const value_type& val ) + { + return cxx_allocator().New( val ); + } + template + static node_type * alloc_node_move( Args&&... args ) + { + return cxx_allocator().MoveNew( std::forward( args )... ); + } + + static void free_node( node_type * p ) + { + node_deallocator()( p ); + } + static void retire_node( node_type * p ) + { + gc::template retire( p ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + free_node( pNode ); + } + }; + typedef std::unique_ptr< node_type, node_disposer > scoped_node_ptr; + //@endcond + + public: + /// Constructs empty stack + TreiberStack() + {} + + /// Constructs empty stack and initializes elimination back-off data + /** + This form should be used if you use elimination back-off with dynamically allocated collision array, i.e + \p Options... contains cds::opt::buffer< cds::opt::v::initialized_dynamic_buffer >. + \p nCollisionCapacity parameter specifies the capacity of collision array. + */ + TreiberStack( size_t nCollisionCapacity ) + : base_class( nCollisionCapacity ) + {} + + /// \p %TreiberStack is not copy-constructible + TreiberStack( TreiberStack const& ) = delete; + + /// Clears the stack on destruction + ~TreiberStack() + {} + + /// Pushes copy of \p val on the stack + bool push( value_type const& val ) + { + scoped_node_ptr p( alloc_node(val)); + if ( base_class::push( *p )) { + p.release(); + return true; + } + return false; + } + + /// Pushes data of type \ref value_type created from std::forward(args)... + template + bool emplace( Args&&... 
args ) + { + scoped_node_ptr p( alloc_node_move( std::forward(args)...)); + if ( base_class::push( *p )) { + p.release(); + return true; + } + return false; + } + + /// Pops an item from the stack + /** + The value of popped item is stored in \p val using assignment operator. + On success functions returns \p true, \p val contains value popped from the stack. + If stack is empty the function returns \p false, \p val is unchanged. + */ + bool pop( value_type& val ) + { + return pop_with( [&val]( value_type& src ) { val = std::move(src); } ); + } + + /// Pops an item from the stack with functor + /** + \p Func can be used to copy/move popped item from the stack. + \p Func interface is: + \code + void func( value_type& src ); + \endcode + where \p src - item popped. + */ + template + bool pop_with( Func f ) + { + node_type * p = base_class::pop(); + if ( !p ) + return false; + + f( p->m_value ); + retire_node( p ); + + return true; + } + + /// Check if stack is empty + bool empty() const + { + return base_class::empty(); + } + + /// Clear the stack + void clear() + { + base_class::clear(); + } + + /// Returns stack's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. + + Warning: even if you use real item counter and it returns 0, this fact is not mean that the stack + is empty. To check emptyness use \ref empty() method. 
+ */ + size_t size() const + { + return base_class::size(); + } + + /// Returns reference to internal statistics + stat const& statistics() const + { + return base_class::statistics(); + } + }; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_TREIBER_STACK_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/vyukov_mpmc_cycle_queue.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/vyukov_mpmc_cycle_queue.h new file mode 100644 index 0000000..b3da350 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/vyukov_mpmc_cycle_queue.h @@ -0,0 +1,523 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_VYUKOV_MPMC_CYCLE_QUEUE_H +#define CDSLIB_CONTAINER_VYUKOV_MPMC_CYCLE_QUEUE_H + +#include +#include +#include +#include +#include + +namespace cds { namespace container { + + /// VyukovMPMCCycleQueue related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace vyukov_queue { + + /// VyukovMPMCCycleQueue default traits + struct traits { + /// Buffer type for internal array + /* + The type of element for the buffer is not important: the queue rebinds + the buffer for required type via \p rebind metafunction. + + For \p VyukovMPMCCycleQueue queue the buffer size should have power-of-2 size. + + You should use only uninitialized buffer for the queue - + \p cds::opt::v::uninitialized_dynamic_buffer (the default), + \p cds::opt::v::uninitialized_static_buffer. + */ + typedef cds::opt::v::uninitialized_dynamic_buffer< void * > buffer; + + /// A functor to clean item dequeued. + /** + The functor calls the destructor for queue item. + After an item is dequeued, \p value_cleaner cleans the cell that the item has been occupied. + If \p T is a complex type, \p value_cleaner may be useful feature. + + Default value is \ref opt::v::auto_cleaner + */ + typedef cds::opt::v::auto_cleaner value_cleaner; + + /// Item counting feature; by default, disabled. 
Use \p cds::atomicity::item_counter to enable item counting + typedef cds::atomicity::empty_item_counter item_counter; + + /// C++ memory ordering model + /** + Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consistent memory model). + */ + typedef opt::v::relaxed_ordering memory_model; + + /// Padding for internal critical atomic data. Default is \p opt::cache_line_padding + enum { padding = opt::cache_line_padding }; + + /// Back-off strategy + typedef cds::backoff::Default back_off; + + /// Single-consumer version + /** + For single-consumer version of algorithm some additional functions + (\p front(), \p pop_front()) is available. + + Default is \p false + */ + static constexpr bool const single_consumer = false; + }; + + /// Metafunction converting option list to \p vyukov_queue::traits + /** + Supported \p Options are: + - \p opt::buffer - an uninitialized buffer type for internal cyclic array. Possible types are: + \p opt::v::uninitialized_dynamic_buffer (the default), \p opt::v::uninitialized_static_buffer. The type of + element in the buffer is not important: it will be changed via \p rebind metafunction. + - \p opt::value_cleaner - a functor to clean item dequeued. + The functor calls the destructor for queue item. + After an item is dequeued, \p value_cleaner cleans the cell that the item has been occupied. + If \p T is a complex type, \p value_cleaner can be an useful feature. + Default value is \ref opt::v::auto_cleaner + - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used. + - \p opt::item_counter - the type of item counting feature. Default is \p cds::atomicity::empty_item_counter (item counting disabled) + To enable item counting use \p cds::atomicity::item_counter + - \p opt::padding - padding for internal critical atomic data. 
Default is \p opt::cache_line_padding + - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + + Example: declare \p %VyukovMPMCCycleQueue with item counting and static iternal buffer of size 1024: + \code + typedef cds::container::VyukovMPMCCycleQueue< Foo, + typename cds::container::vyukov_queue::make_traits< + cds::opt::buffer< cds::opt::v::uninitialized_static_buffer< void *, 1024 >, + cds::opt::item_counte< cds::atomicity::item_counter > + >::type + > myQueue; + \endcode + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + , Options... + >::type type; +# endif + }; + + } //namespace vyukov_queue + + /// Vyukov's MPMC bounded queue + /** @ingroup cds_nonintrusive_queue + This algorithm is developed by Dmitry Vyukov (see http://www.1024cores.net) + It's multi-producer multi-consumer (MPMC), array-based, fails on overflow, does not require GC, w/o priorities, causal FIFO, + blocking producers and consumers queue. The algorithm is pretty simple and fast. It's not lock-free in the official meaning, + just implemented by means of atomic RMW operations w/o mutexes. + + The cost of enqueue/dequeue is 1 CAS per operation. + No dynamic memory allocation/management during operation. Producers and consumers are separated from each other (as in the two-lock queue), + i.e. do not touch the same data while queue is not empty. + + There is multiple producer/single consumer version \p cds::container::VyukovMPSCCycleQueue + that supports \p front() and \p pop_front() functions. + + Source: + - http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue + + Template parameters + - \p T - type stored in queue. 
+ - \p Traits - queue traits, default is \p vyukov_queue::traits. You can use \p vyukov_queue::make_traits + metafunction to make your traits or just derive your traits from \p %vyukov_queue::traits: + \code + struct myTraits: public cds::container::vyukov_queue::traits { + typedef cds::atomicity::item_counter item_counter; + }; + typedef cds::container::VyukovMPMCCycleQueue< Foo, myTraits > myQueue; + + // Equivalent make_traits example: + typedef cds::container::VyukovMPMCCycleQueue< cds::gc::HP, Foo, + typename cds::container::vyukov_queue::make_traits< + cds::opt::item_counter< cds::atomicity::item_counter > + >::type + > myQueue; + \endcode + + \par License + Simplified BSD license by Dmitry Vyukov (http://www.1024cores.net/site/1024cores/home/code-license) + */ + template + class VyukovMPMCCycleQueue : public cds::bounded_container + { + public: + typedef T value_type; ///< Value type to be stored in the queue + typedef Traits traits; ///< Queue traits + typedef typename traits::item_counter item_counter; ///< Item counter type + typedef typename traits::memory_model memory_model; ///< Memory ordering. 
See \p cds::opt::memory_model option + typedef typename traits::value_cleaner value_cleaner; ///< Value cleaner, see \p vyukov_queue::traits::value_cleaner + typedef typename traits::back_off back_off; ///< back-off strategy + + /// \p true for single-consumer version, \p false otherwise + static constexpr bool const c_single_consumer = traits::single_consumer; + + /// Rebind template arguments + template + struct rebind { + typedef VyukovMPMCCycleQueue< T2, Traits2 > other ; ///< Rebinding result + }; + + protected: + //@cond + typedef atomics::atomic sequence_type; + struct cell_type + { + sequence_type sequence; + value_type data; + + cell_type() + {} + }; + + typedef typename traits::buffer::template rebind::other buffer; + //@endcond + + protected: + //@cond + buffer m_buffer; + size_t const m_nBufferMask; + typename opt::details::apply_padding< size_t, traits::padding >::padding_type pad1_; + sequence_type m_posEnqueue; + typename opt::details::apply_padding< sequence_type, traits::padding >::padding_type pad2_; + sequence_type m_posDequeue; + typename opt::details::apply_padding< sequence_type, traits::padding >::padding_type pad3_; + item_counter m_ItemCounter; + //@endcond + + public: + /// Constructs the queue of capacity \p nCapacity + /** + For \p cds::opt::v::uninitialized_static_buffer the \p nCapacity parameter is ignored. + + The buffer capacity must be the power of two. 
+ */ + VyukovMPMCCycleQueue( + size_t nCapacity = 0 + ) + : m_buffer( nCapacity ) + , m_nBufferMask( m_buffer.capacity() - 1 ) + { + nCapacity = m_buffer.capacity(); + + // Buffer capacity must be power of 2 + assert( nCapacity >= 2 && (nCapacity & (nCapacity - 1)) == 0 ); + + for (size_t i = 0; i != nCapacity; ++i ) + m_buffer[i].sequence.store(i, memory_model::memory_order_relaxed); + + m_posEnqueue.store(0, memory_model::memory_order_relaxed); + m_posDequeue.store(0, memory_model::memory_order_relaxed); + } + + ~VyukovMPMCCycleQueue() + { + clear(); + } + + /// Enqueues data to the queue using a functor + /** + \p Func is a functor called to copy a value to the queue cell. + The functor \p f takes one argument - a reference to a empty cell of type \ref value_type : + \code + cds::container::VyukovMPMCCycleQueue< Foo > myQueue; + Bar bar; + myQueue.enqueue_with( [&bar]( Foo& dest ) { dest = std::move(bar); } ); + \endcode + */ + template + bool enqueue_with(Func f) + { + cell_type* cell; + back_off bkoff; + + size_t pos = m_posEnqueue.load(memory_model::memory_order_relaxed); + for (;;) + { + cell = &m_buffer[pos & m_nBufferMask]; + size_t seq = cell->sequence.load(memory_model::memory_order_acquire); + + intptr_t dif = static_cast(seq) - static_cast(pos); + + if (dif == 0) { + if ( m_posEnqueue.compare_exchange_weak(pos, pos + 1, memory_model::memory_order_relaxed, atomics::memory_order_relaxed )) + break; + } + else if (dif < 0) { + // Queue full? + if ( pos - m_posDequeue.load( memory_model::memory_order_relaxed ) == capacity()) + return false; // queue full + bkoff(); + pos = m_posEnqueue.load( memory_model::memory_order_relaxed ); + } + else + pos = m_posEnqueue.load(memory_model::memory_order_relaxed); + } + + f( cell->data ); + + cell->sequence.store(pos + 1, memory_model::memory_order_release); + ++m_ItemCounter; + + return true; + } + + /// Enqueues \p val value into the queue. + /** + The new queue item is created by calling placement new in free cell. 
+ Returns \p true if success, \p false if the queue is full. + */ + bool enqueue( value_type const& val ) + { + return enqueue_with( [&val]( value_type& dest ){ new ( &dest ) value_type( val ); }); + } + + /// Enqueues \p val value into the queue, move semantics + bool enqueue( value_type&& val ) + { + return enqueue_with( [&val]( value_type& dest ) { new (&dest) value_type( std::move( val ));}); + } + + /// Synonym for \p enqueue( value_type const& ) + bool push( value_type const& data ) + { + return enqueue( data ); + } + + /// Synonym for \p enqueue( value_type&& ) + bool push( value_type&& data ) + { + return enqueue( std::move( data )); + } + + /// Synonym for \p enqueue_with() + template + bool push_with( Func f ) + { + return enqueue_with( f ); + } + + /// Enqueues data of type \ref value_type constructed with std::forward(args)... + template + bool emplace( Args&&... args ) + { +#if (CDS_COMPILER == CDS_COMPILER_GCC) && (CDS_COMPILER_VERSION < 40900) + //work around unsupported feature in g++ 4.8 for forwarding parameter packs to lambda. + value_type val( std::forward(args)... ); + return enqueue_with( [&val]( value_type& dest ){ new ( &dest ) value_type( std::move( val )); }); +#else + return enqueue_with( [&args ...]( value_type& dest ){ new ( &dest ) value_type( std::forward( args )... ); }); +#endif + } + + /// Dequeues a value using a functor + /** + \p Func is a functor called to copy dequeued value. + The functor takes one argument - a reference to removed node: + \code + cds:container::VyukovMPMCCycleQueue< Foo > myQueue; + Bar bar; + myQueue.dequeue_with( [&bar]( Foo& src ) { bar = std::move( src );}); + \endcode + The functor is called only if the queue is not empty. 
+ */ + template + bool dequeue_with( Func f ) + { + cell_type * cell; + back_off bkoff; + + size_t pos = m_posDequeue.load( memory_model::memory_order_relaxed ); + for (;;) + { + cell = &m_buffer[pos & m_nBufferMask]; + size_t seq = cell->sequence.load(memory_model::memory_order_acquire); + intptr_t dif = static_cast(seq) - static_cast(pos + 1); + + if (dif == 0) { + if ( m_posDequeue.compare_exchange_weak(pos, pos + 1, memory_model::memory_order_relaxed, atomics::memory_order_relaxed)) + break; + } + else if (dif < 0) { + // Queue empty? + if ( pos - m_posEnqueue.load( memory_model::memory_order_relaxed ) == 0 ) + return false; // queue empty + bkoff(); + pos = m_posDequeue.load( memory_model::memory_order_relaxed ); + } + else + pos = m_posDequeue.load(memory_model::memory_order_relaxed); + } + + f( cell->data ); + value_cleaner()( cell->data ); + cell->sequence.store( pos + m_nBufferMask + 1, memory_model::memory_order_release ); + --m_ItemCounter; + + return true; + } + + /// Dequeues a value from the queue + /** + If queue is not empty, the function returns \p true, \p dest contains a copy of + dequeued value. The assignment operator for type \ref value_type is invoked. + If queue is empty, the function returns \p false, \p dest is unchanged. 
+ */ + bool dequeue(value_type& dest ) + { + return dequeue_with( [&dest]( value_type& src ){ dest = std::move( src );}); + } + + /// Synonym for \p dequeue() + bool pop(value_type& data) + { + return dequeue(data); + } + + /// Synonym for \p dequeue_with() + template + bool pop_with( Func f ) + { + return dequeue_with( f ); + } + + /// Returns a pointer to top element of the queue or \p nullptr if queue is empty (only for single-consumer version) + template + typename std::enable_if::type front() + { + static_assert( c_single_consumer, "front() is enabled only if traits::single_consumer is true"); + + cell_type * cell; + back_off bkoff; + + size_t pos = m_posDequeue.load( memory_model::memory_order_relaxed ); + for ( ;;) + { + cell = &m_buffer[pos & m_nBufferMask]; + size_t seq = cell->sequence.load( memory_model::memory_order_acquire ); + intptr_t dif = static_cast(seq) - static_cast(pos + 1); + + if ( dif == 0 ) + return &cell->data; + else if ( dif < 0 ) { + // Queue empty? + if ( pos - m_posEnqueue.load( memory_model::memory_order_relaxed ) == 0 ) + return nullptr; // queue empty + bkoff(); + pos = m_posDequeue.load( memory_model::memory_order_relaxed ); + } + else + pos = m_posDequeue.load( memory_model::memory_order_relaxed ); + } + } + + /// Pops top element; returns \p true if queue is not empty, \p false otherwise (only for single-consumer version) + template + typename std::enable_if::type pop_front() + { + return dequeue_with( []( value_type& ) {} ); + } + + /// Checks if the queue is empty + bool empty() const + { + const cell_type * cell; + back_off bkoff; + + size_t pos = m_posDequeue.load(memory_model::memory_order_relaxed); + for (;;) + { + cell = &m_buffer[pos & m_nBufferMask]; + size_t seq = cell->sequence.load(memory_model::memory_order_acquire); + intptr_t dif = static_cast(seq) - static_cast(pos + 1); + + if (dif == 0) + return false; + else if (dif < 0) { + if ( pos - m_posEnqueue.load( memory_model::memory_order_relaxed ) == 0 ) + return 
true; + } + bkoff(); + pos = m_posDequeue.load(memory_model::memory_order_relaxed); + } + } + + /// Clears the queue + void clear() + { + value_type v; + while ( pop(v)); + } + + /// Returns queue's item count + /** + The value returned depends on \p vyukov_queue::traits::item_counter option. + For \p atomicity::empty_item_counter, the function always returns 0. + */ + size_t size() const + { + return m_ItemCounter.value(); + } + + /// Returns capacity of the queue + size_t capacity() const + { + return m_buffer.capacity(); + } + }; + + //@cond + namespace vyukov_queue { + + template + struct single_consumer_traits : public Traits + { + static constexpr bool const single_consumer = true; + }; + } // namespace vyukov_queue + //@endcond + + /// Vyukov's queue multiple producer - single consumer version + template + using VyukovMPSCCycleQueue = VyukovMPMCCycleQueue< T, vyukov_queue::single_consumer_traits >; + +}} // namespace cds::container + +#endif // #ifndef CDSLIB_CONTAINER_VYUKOV_MPMC_CYCLE_QUEUE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/weak_ringbuffer.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/weak_ringbuffer.h new file mode 100644 index 0000000..0a5ea97 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/container/weak_ringbuffer.h @@ -0,0 +1,1010 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_CONTAINER_WEAK_RINGBUFFER_H +#define CDSLIB_CONTAINER_WEAK_RINGBUFFER_H + +#include +#include +#include +#include +#include + +namespace cds { namespace container { + + /// \p WeakRingBuffer related definitions + /** @ingroup cds_nonintrusive_helper + */ + namespace weak_ringbuffer { + + /// \p WeakRingBuffer default traits + struct traits { + /// Buffer type for internal array + /* + The type of element for the buffer is not important: \p WeakRingBuffer rebind + the buffer for required type via \p rebind metafunction. + + For \p WeakRingBuffer the buffer size should have power-of-2 size. + + You should use only uninitialized buffer for the ring buffer - + \p cds::opt::v::uninitialized_dynamic_buffer (the default), + \p cds::opt::v::uninitialized_static_buffer. + */ + typedef cds::opt::v::uninitialized_dynamic_buffer< void * > buffer; + + /// A functor to clean item dequeued. + /** + The functor calls the destructor for popped element. 
+ After a set of items is dequeued, \p value_cleaner cleans the cells that the items have been occupied. + If \p T is a complex type, \p value_cleaner may be useful feature. + For POD types \ref opt::v::empty_cleaner is suitable + + Default value is \ref opt::v::auto_cleaner that calls destructor only if it is not trivial. + */ + typedef cds::opt::v::auto_cleaner value_cleaner; + + /// C++ memory ordering model + /** + Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consistent memory model). + */ + typedef opt::v::relaxed_ordering memory_model; + + /// Padding for internal critical atomic data. Default is \p opt::cache_line_padding + enum { padding = opt::cache_line_padding }; + }; + + /// Metafunction converting option list to \p weak_ringbuffer::traits + /** + Supported \p Options are: + - \p opt::buffer - an uninitialized buffer type for internal cyclic array. Possible types are: + \p opt::v::uninitialized_dynamic_buffer (the default), \p opt::v::uninitialized_static_buffer. The type of + element in the buffer is not important: it will be changed via \p rebind metafunction. + - \p opt::value_cleaner - a functor to clean items dequeued. + The functor calls the destructor for ring-buffer item. + After a set of items is dequeued, \p value_cleaner cleans the cells that the items have been occupied. + If \p T is a complex type, \p value_cleaner can be an useful feature. + Default value is \ref opt::v::empty_cleaner that is suitable for POD types. + - \p opt::padding - padding for internal critical atomic data. Default is \p opt::cache_line_padding + - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). 
+ + Example: declare \p %WeakRingBuffer with static iternal buffer for 1024 objects: + \code + typedef cds::container::WeakRingBuffer< Foo, + typename cds::container::weak_ringbuffer::make_traits< + cds::opt::buffer< cds::opt::v::uninitialized_static_buffer< void *, 1024 > + >::type + > myRing; + \endcode + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + , Options... + >::type type; +# endif + }; + + } // namespace weak_ringbuffer + + /// Single-producer single-consumer ring buffer + /** @ingroup cds_nonintrusive_queue + Source: [2013] Nhat Minh Le, Adrien Guatto, Albert Cohen, Antoniu Pop. Correct and Effcient Bounded + FIFO Queues. [Research Report] RR-8365, INRIA. 2013. + + Ring buffer is a bounded queue. Additionally, \p %WeakRingBuffer supports batch operations - + you can push/pop an array of elements. + + There are a specialization \ref cds_nonintrusive_WeakRingBuffer_void "WeakRingBuffer" + that is not a queue but a "memory pool" between producer and consumer threads. + \p WeakRingBuffer supports variable-sized data. + + @warning: \p %WeakRingBuffer is developed for 64-bit architecture. + 32-bit platform must provide support for 64-bit atomics. + */ + template + class WeakRingBuffer: public cds::bounded_container + { + public: + typedef T value_type; ///< Value type to be stored in the ring buffer + typedef Traits traits; ///< Ring buffer traits + typedef typename traits::memory_model memory_model; ///< Memory ordering. 
See \p cds::opt::memory_model option + typedef typename traits::value_cleaner value_cleaner; ///< Value cleaner, see \p weak_ringbuffer::traits::value_cleaner + + /// Rebind template arguments + template + struct rebind { + typedef WeakRingBuffer< T2, Traits2 > other; ///< Rebinding result + }; + + //@cond + // Only for tests + typedef size_t item_counter; + //@endcond + + private: + //@cond + typedef typename traits::buffer::template rebind< value_type >::other buffer; + typedef uint64_t counter_type; + //@endcond + + public: + + /// Creates the ring buffer of \p capacity + /** + For \p cds::opt::v::uninitialized_static_buffer the \p nCapacity parameter is ignored. + + If the buffer capacity is a power of two, lightweight binary arithmetics is used + instead of modulo arithmetics. + */ + WeakRingBuffer( size_t capacity = 0 ) + : front_( 0 ) + , pfront_( 0 ) + , cback_( 0 ) + , buffer_( capacity ) + { + back_.store( 0, memory_model::memory_order_release ); + } + + /// Destroys the ring buffer + ~WeakRingBuffer() + { + value_cleaner cleaner; + counter_type back = back_.load( memory_model::memory_order_relaxed ); + for ( counter_type front = front_.load( memory_model::memory_order_relaxed ); front != back; ++front ) + cleaner( buffer_[ buffer_.mod( front ) ] ); + } + + /// Batch push - push array \p arr of size \p count + /** + \p CopyFunc is a per-element copy functor: for each element of \p arr + copy( dest, arr[i] ) is called. 
+ The \p CopyFunc signature: + \code + void copy_func( value_type& element, Q const& source ); + \endcode + Here \p element is uninitialized so you should construct it using placement new + if needed; for example, if the element type is \p str::string and \p Q is char const*, + \p copy functor can be: + \code + cds::container::WeakRingBuffer ringbuf; + char const* arr[10]; + ringbuf.push( arr, 10, + []( std::string& element, char const* src ) { + new( &element ) std::string( src ); + }); + \endcode + You may use move semantics if appropriate: + \code + cds::container::WeakRingBuffer ringbuf; + std::string arr[10]; + ringbuf.push( arr, 10, + []( std::string& element, std:string& src ) { + new( &element ) std::string( std::move( src )); + }); + \endcode + + Returns \p true if success or \p false if not enough space in the ring + */ + template + bool push( Q* arr, size_t count, CopyFunc copy ) + { + assert( count < capacity()); + counter_type back = back_.load( memory_model::memory_order_relaxed ); + + assert( static_cast( back - pfront_ ) <= capacity()); + + if ( static_cast( pfront_ + capacity() - back ) < count ) { + pfront_ = front_.load( memory_model::memory_order_acquire ); + + if ( static_cast( pfront_ + capacity() - back ) < count ) { + // not enough space + return false; + } + } + + // copy data + for ( size_t i = 0; i < count; ++i, ++back ) + copy( buffer_[buffer_.mod( back )], arr[i] ); + + back_.store( back, memory_model::memory_order_release ); + + return true; + } + + /// Batch push - push array \p arr of size \p count with assignment as copy functor + /** + This function is equivalent for: + \code + push( arr, count, []( value_type& dest, Q const& src ) { dest = src; } ); + \endcode + + The function is available only if std::is_constructible::value + is \p true. 
+ + Returns \p true if success or \p false if not enough space in the ring + */ + template + typename std::enable_if< std::is_constructible::value, bool>::type + push( Q* arr, size_t count ) + { + return push( arr, count, []( value_type& dest, Q const& src ) { new( &dest ) value_type( src ); } ); + } + + /// Push one element created from \p args + /** + The function is available only if std::is_constructible::value + is \p true. + + Returns \p false if the ring is full or \p true otherwise. + */ + template + typename std::enable_if< std::is_constructible::value, bool>::type + emplace( Args&&... args ) + { + counter_type back = back_.load( memory_model::memory_order_relaxed ); + + assert( static_cast( back - pfront_ ) <= capacity()); + + if ( pfront_ + capacity() - back < 1 ) { + pfront_ = front_.load( memory_model::memory_order_acquire ); + + if ( pfront_ + capacity() - back < 1 ) { + // not enough space + return false; + } + } + + new( &buffer_[buffer_.mod( back )] ) value_type( std::forward(args)... ); + + back_.store( back + 1, memory_model::memory_order_release ); + + return true; + } + + /// Enqueues data to the ring using a functor + /** + \p Func is a functor called to copy a value to the ring element. 
+ The functor \p f takes one argument - a reference to a empty cell of type \ref value_type : + \code + cds::container::WeakRingBuffer< Foo > myRing; + Bar bar; + myRing.enqueue_with( [&bar]( Foo& dest ) { dest = std::move(bar); } ); + \endcode + */ + template + bool enqueue_with( Func f ) + { + counter_type back = back_.load( memory_model::memory_order_relaxed ); + + assert( static_cast( back - pfront_ ) <= capacity()); + + if ( pfront_ + capacity() - back < 1 ) { + pfront_ = front_.load( memory_model::memory_order_acquire ); + + if ( pfront_ + capacity() - back < 1 ) { + // not enough space + return false; + } + } + + f( buffer_[buffer_.mod( back )] ); + + back_.store( back + 1, memory_model::memory_order_release ); + + return true; + + } + + /// Enqueues \p val value into the queue. + /** + The new queue item is created by calling placement new in free cell. + Returns \p true if success, \p false if the ring is full. + */ + bool enqueue( value_type const& val ) + { + return emplace( val ); + } + + /// Enqueues \p val value into the queue, move semantics + bool enqueue( value_type&& val ) + { + return emplace( std::move( val )); + } + + /// Synonym for \p enqueue( value_type const& ) + bool push( value_type const& val ) + { + return enqueue( val ); + } + + /// Synonym for \p enqueue( value_type&& ) + bool push( value_type&& val ) + { + return enqueue( std::move( val )); + } + + /// Synonym for \p enqueue_with() + template + bool push_with( Func f ) + { + return enqueue_with( f ); + } + + /// Batch pop \p count element from the ring buffer into \p arr + /** + \p CopyFunc is a per-element copy functor: for each element of \p arr + copy( arr[i], source ) is called. 
+ The \p CopyFunc signature: + \code + void copy_func( Q& dest, value_type& elemen ); + \endcode + + Returns \p true if success or \p false if not enough space in the ring + */ + template + bool pop( Q* arr, size_t count, CopyFunc copy ) + { + assert( count < capacity()); + + counter_type front = front_.load( memory_model::memory_order_relaxed ); + assert( static_cast( cback_ - front ) < capacity()); + + if ( static_cast( cback_ - front ) < count ) { + cback_ = back_.load( memory_model::memory_order_acquire ); + if ( static_cast( cback_ - front ) < count ) + return false; + } + + // copy data + value_cleaner cleaner; + for ( size_t i = 0; i < count; ++i, ++front ) { + value_type& val = buffer_[buffer_.mod( front )]; + copy( arr[i], val ); + cleaner( val ); + } + + front_.store( front, memory_model::memory_order_release ); + return true; + } + + /// Batch pop - push array \p arr of size \p count with assignment as copy functor + /** + This function is equivalent for: + \code + pop( arr, count, []( Q& dest, value_type& src ) { dest = src; } ); + \endcode + + The function is available only if std::is_assignable::value + is \p true. + + Returns \p true if success or \p false if not enough space in the ring + */ + template + typename std::enable_if< std::is_assignable::value, bool>::type + pop( Q* arr, size_t count ) + { + return pop( arr, count, []( Q& dest, value_type& src ) { dest = src; } ); + } + + /// Dequeues an element from the ring to \p val + /** + The function is available only if std::is_assignable::value + is \p true. + + Returns \p false if the ring is full or \p true otherwise. 
+ */ + template + typename std::enable_if< std::is_assignable::value, bool>::type + dequeue( Q& val ) + { + return pop( &val, 1 ); + } + + /// Synonym for \p dequeue( Q& ) + template + typename std::enable_if< std::is_assignable::value, bool>::type + pop( Q& val ) + { + return dequeue( val ); + } + + /// Dequeues a value using a functor + /** + \p Func is a functor called to copy dequeued value. + The functor takes one argument - a reference to removed node: + \code + cds:container::WeakRingBuffer< Foo > myRing; + Bar bar; + myRing.dequeue_with( [&bar]( Foo& src ) { bar = std::move( src );}); + \endcode + + Returns \p true if the ring is not empty, \p false otherwise. + The functor is called only if the ring is not empty. + */ + template + bool dequeue_with( Func f ) + { + counter_type front = front_.load( memory_model::memory_order_relaxed ); + assert( static_cast( cback_ - front ) < capacity()); + + if ( cback_ - front < 1 ) { + cback_ = back_.load( memory_model::memory_order_acquire ); + if ( cback_ - front < 1 ) + return false; + } + + value_type& val = buffer_[buffer_.mod( front )]; + f( val ); + value_cleaner()( val ); + + front_.store( front + 1, memory_model::memory_order_release ); + return true; + } + + /// Synonym for \p dequeue_with() + template + bool pop_with( Func f ) + { + return dequeue_with( f ); + } + + /// Gets pointer to first element of ring buffer + /** + If the ring buffer is empty, returns \p nullptr + + The function is thread-safe since there is only one consumer. + Recall, \p WeakRingBuffer is single-producer/single consumer container. 
+ */ + value_type* front() + { + counter_type front = front_.load( memory_model::memory_order_relaxed ); + assert( static_cast( cback_ - front ) < capacity()); + + if ( cback_ - front < 1 ) { + cback_ = back_.load( memory_model::memory_order_acquire ); + if ( cback_ - front < 1 ) + return nullptr; + } + + return &buffer_[buffer_.mod( front )]; + } + + /// Removes front element of ring-buffer + /** + If the ring-buffer is empty, returns \p false. + Otherwise, pops the first element from the ring. + */ + bool pop_front() + { + counter_type front = front_.load( memory_model::memory_order_relaxed ); + assert( static_cast( cback_ - front ) <= capacity()); + + if ( cback_ - front < 1 ) { + cback_ = back_.load( memory_model::memory_order_acquire ); + if ( cback_ - front < 1 ) + return false; + } + + // clean cell + value_cleaner()( buffer_[buffer_.mod( front )] ); + + front_.store( front + 1, memory_model::memory_order_release ); + return true; + } + + /// Clears the ring buffer (only consumer can call this function!) 
+ void clear() + { + value_type v; + while ( pop( v )); + } + + /// Checks if the ring-buffer is empty + bool empty() const + { + return front_.load( memory_model::memory_order_relaxed ) == back_.load( memory_model::memory_order_relaxed ); + } + + /// Checks if the ring-buffer is full + bool full() const + { + return back_.load( memory_model::memory_order_relaxed ) - front_.load( memory_model::memory_order_relaxed ) >= capacity(); + } + + /// Returns the current size of ring buffer + size_t size() const + { + return static_cast( back_.load( memory_model::memory_order_relaxed ) - front_.load( memory_model::memory_order_relaxed )); + } + + /// Returns capacity of the ring buffer + size_t capacity() const + { + return buffer_.capacity(); + } + + private: + //@cond + atomics::atomic front_; + typename opt::details::apply_padding< atomics::atomic, traits::padding >::padding_type pad1_; + atomics::atomic back_; + typename opt::details::apply_padding< atomics::atomic, traits::padding >::padding_type pad2_; + counter_type pfront_; + typename opt::details::apply_padding< counter_type, traits::padding >::padding_type pad3_; + counter_type cback_; + typename opt::details::apply_padding< counter_type, traits::padding >::padding_type pad4_; + + buffer buffer_; + //@endcond + }; + + + /// Single-producer single-consumer ring buffer for untyped variable-sized data + /** @ingroup cds_nonintrusive_queue + @anchor cds_nonintrusive_WeakRingBuffer_void + + This SPSC ring-buffer is intended for data of variable size. The producer + allocates a buffer from ring, you fill it with data and pushes them back to ring. 
+ The consumer thread reads data from front-end and then pops them: + \code + // allocates 1M ring buffer + WeakRingBuffer theRing( 1024 * 1024 ); + + void producer_thread() + { + // Get data of size N bytes + size_t size; + void* data; + + while ( true ) { + // Get external data + std::tie( data, size ) = get_data(); + + if ( data == nullptr ) + break; + + // Allocates a buffer from the ring + void* buf = theRing.back( size ); + if ( !buf ) { + std::cout << "The ring is full" << std::endl; + break; + } + + memcpy( buf, data, size ); + + // Push data into the ring + theRing.push_back(); + } + } + + void consumer_thread() + { + while ( true ) { + auto buf = theRing.front(); + + if ( buf.first == nullptr ) { + std::cout << "The ring is empty" << std::endl; + break; + } + + // Process data + process_data( buf.first, buf.second ); + + // Free buffer + theRing.pop_front(); + } + } + \endcode + + @warning: \p %WeakRingBuffer is developed for 64-bit architecture. + 32-bit platform must provide support for 64-bit atomics. + */ +#ifdef CDS_DOXYGEN_INVOKED + template +#else + template +#endif + class WeakRingBuffer: public cds::bounded_container + { + public: + typedef Traits traits; ///< Ring buffer traits + typedef typename traits::memory_model memory_model; ///< Memory ordering. See \p cds::opt::memory_model option + + private: + //@cond + typedef typename traits::buffer::template rebind< uint8_t >::other buffer; + typedef uint64_t counter_type; + //@endcond + + public: + /// Creates the ring buffer of \p capacity bytes + /** + For \p cds::opt::v::uninitialized_static_buffer the \p nCapacity parameter is ignored. + + If the buffer capacity is a power of two, lightweight binary arithmetics is used + instead of modulo arithmetics. 
+ */ + WeakRingBuffer( size_t capacity = 0 ) + : front_( 0 ) + , pfront_( 0 ) + , cback_( 0 ) + , buffer_( capacity ) + { + back_.store( 0, memory_model::memory_order_release ); + } + + /// [producer] Reserve \p size bytes + /** + The function returns a pointer to reserved buffer of \p size bytes. + If no enough space in the ring buffer the function returns \p nullptr. + + After successful \p %back() you should fill the buffer provided and call \p push_back(): + \code + // allocates 1M ring buffer + WeakRingBuffer theRing( 1024 * 1024 ); + + void producer_thread() + { + // Get data of size N bytes + size_t size;1 + void* data; + + while ( true ) { + // Get external data + std::tie( data, size ) = get_data(); + + if ( data == nullptr ) + break; + + // Allocates a buffer from the ring + void* buf = theRing.back( size ); + if ( !buf ) { + std::cout << "The ring is full" << std::endl; + break; + } + + memcpy( buf, data, size ); + + // Push data into the ring + theRing.push_back(); + } + } + \endcode + */ + void* back( size_t size ) + { + assert( size > 0 ); + + // Any data is rounded to 8-byte boundary + size_t real_size = calc_real_size( size ); + + // check if we can reserve read_size bytes + assert( real_size < capacity()); + counter_type back = back_.load( memory_model::memory_order_relaxed ); + + assert( static_cast( back - pfront_ ) <= capacity()); + + if ( static_cast( pfront_ + capacity() - back ) < real_size ) { + pfront_ = front_.load( memory_model::memory_order_acquire ); + + if ( static_cast( pfront_ + capacity() - back ) < real_size ) { + // not enough space + return nullptr; + } + } + + uint8_t* reserved = buffer_.buffer() + buffer_.mod( back ); + + // Check if the buffer free space is enough for storing real_size bytes + size_t tail_size = capacity() - static_cast( buffer_.mod( back )); + if ( tail_size < real_size ) { + // make unused tail + assert( tail_size >= sizeof( size_t )); + assert( !is_tail( tail_size )); + + *reinterpret_cast( reserved ) = 
make_tail( tail_size - sizeof(size_t)); + back += tail_size; + + // We must be in beginning of buffer + assert( buffer_.mod( back ) == 0 ); + + if ( static_cast( pfront_ + capacity() - back ) < real_size ) { + pfront_ = front_.load( memory_model::memory_order_acquire ); + + if ( static_cast( pfront_ + capacity() - back ) < real_size ) { + // not enough space + return nullptr; + } + } + + back_.store( back, memory_model::memory_order_release ); + reserved = buffer_.buffer(); + } + + // reserve and store size + *reinterpret_cast( reserved ) = size; + + return reinterpret_cast( reserved + sizeof( size_t )); + } + + /// [producer] Push reserved bytes into ring + /** + The function pushes reserved buffer into the ring. Afte this call, + the buffer becomes visible by a consumer: + \code + // allocates 1M ring buffer + WeakRingBuffer theRing( 1024 * 1024 ); + + void producer_thread() + { + // Get data of size N bytes + size_t size;1 + void* data; + + while ( true ) { + // Get external data + std::tie( data, size ) = get_data(); + + if ( data == nullptr ) + break; + + // Allocates a buffer from the ring + void* buf = theRing.back( size ); + if ( !buf ) { + std::cout << "The ring is full" << std::endl; + break; + } + + memcpy( buf, data, size ); + + // Push data into the ring + theRing.push_back(); + } + } + \endcode + */ + void push_back() + { + counter_type back = back_.load( memory_model::memory_order_relaxed ); + uint8_t* reserved = buffer_.buffer() + buffer_.mod( back ); + + size_t real_size = calc_real_size( *reinterpret_cast( reserved )); + assert( real_size < capacity()); + + back_.store( back + real_size, memory_model::memory_order_release ); + } + + /// [producer] Push \p data of \p size bytes into ring + /** + This function invokes \p back( size ), \p memcpy( buf, data, size ) + and \p push_back() in one call. 
+ */ + bool push_back( void const* data, size_t size ) + { + void* buf = back( size ); + if ( buf ) { + memcpy( buf, data, size ); + push_back(); + return true; + } + return false; + } + + /// [consumer] Get top data from the ring + /** + If the ring is empty, the function returns \p nullptr in \p std:pair::first. + */ + std::pair front() + { + counter_type front = front_.load( memory_model::memory_order_relaxed ); + assert( static_cast( cback_ - front ) < capacity()); + + if ( cback_ - front < sizeof( size_t )) { + cback_ = back_.load( memory_model::memory_order_acquire ); + if ( cback_ - front < sizeof( size_t )) + return std::make_pair( nullptr, 0u ); + } + + uint8_t * buf = buffer_.buffer() + buffer_.mod( front ); + + // check alignment + assert( ( reinterpret_cast( buf ) & ( sizeof( uintptr_t ) - 1 )) == 0 ); + + size_t size = *reinterpret_cast( buf ); + if ( is_tail( size )) { + // unused tail, skip + CDS_VERIFY( pop_front()); + + front = front_.load( memory_model::memory_order_relaxed ); + + if ( cback_ - front < sizeof( size_t )) { + cback_ = back_.load( memory_model::memory_order_acquire ); + if ( cback_ - front < sizeof( size_t ) ) + return std::make_pair( nullptr, 0u ); + } + + buf = buffer_.buffer() + buffer_.mod( front ); + size = *reinterpret_cast( buf ); + + assert( !is_tail( size )); + assert( buf == buffer_.buffer()); + } + +#ifdef _DEBUG + size_t real_size = calc_real_size( size ); + if ( static_cast( cback_ - front ) < real_size ) { + cback_ = back_.load( memory_model::memory_order_acquire ); + assert( static_cast( cback_ - front ) >= real_size ); + } +#endif + + return std::make_pair( reinterpret_cast( buf + sizeof( size_t )), size ); + } + + /// [consumer] Pops top data + /** + Typical consumer workloop: + \code + // allocates 1M ring buffer + WeakRingBuffer theRing( 1024 * 1024 ); + + void consumer_thread() + { + while ( true ) { + auto buf = theRing.front(); + + if ( buf.first == nullptr ) { + std::cout << "The ring is empty" << std::endl; + 
break; + } + + // Process data + process_data( buf.first, buf.second ); + + // Free buffer + theRing.pop_front(); + } + } + \endcode + */ + bool pop_front() + { + counter_type front = front_.load( memory_model::memory_order_relaxed ); + assert( static_cast( cback_ - front ) <= capacity()); + + if ( cback_ - front < sizeof(size_t)) { + cback_ = back_.load( memory_model::memory_order_acquire ); + if ( cback_ - front < sizeof( size_t )) + return false; + } + + uint8_t * buf = buffer_.buffer() + buffer_.mod( front ); + + // check alignment + assert( ( reinterpret_cast( buf ) & ( sizeof( uintptr_t ) - 1 )) == 0 ); + + size_t size = *reinterpret_cast( buf ); + size_t real_size = calc_real_size( untail( size )); + +#ifdef _DEBUG + if ( static_cast( cback_ - front ) < real_size ) { + cback_ = back_.load( memory_model::memory_order_acquire ); + assert( static_cast( cback_ - front ) >= real_size ); + } +#endif + + front_.store( front + real_size, memory_model::memory_order_release ); + return true; + } + + /// [consumer] Clears the ring buffer + void clear() + { + for ( auto el = front(); el.first; el = front()) + pop_front(); + } + + /// Checks if the ring-buffer is empty + bool empty() const + { + return front_.load( memory_model::memory_order_relaxed ) == back_.load( memory_model::memory_order_relaxed ); + } + + /// Checks if the ring-buffer is full + bool full() const + { + return back_.load( memory_model::memory_order_relaxed ) - front_.load( memory_model::memory_order_relaxed ) >= capacity(); + } + + /// Returns the current size of ring buffer + size_t size() const + { + return static_cast( back_.load( memory_model::memory_order_relaxed ) - front_.load( memory_model::memory_order_relaxed )); + } + + /// Returns capacity of the ring buffer + size_t capacity() const + { + return buffer_.capacity(); + } + + private: + //@cond + static size_t calc_real_size( size_t size ) + { + size_t real_size = (( size + sizeof( uintptr_t ) - 1 ) & ~( sizeof( uintptr_t ) - 1 )) + sizeof( 
size_t ); + + assert( real_size > size ); + assert( real_size - size >= sizeof( size_t )); + + return real_size; + } + + static bool is_tail( size_t size ) + { + return ( size & ( size_t( 1 ) << ( sizeof( size_t ) * 8 - 1 ))) != 0; + } + + static size_t make_tail( size_t size ) + { + return size | ( size_t( 1 ) << ( sizeof( size_t ) * 8 - 1 )); + } + + static size_t untail( size_t size ) + { + return size & (( size_t( 1 ) << ( sizeof( size_t ) * 8 - 1 )) - 1); + } + //@endcond + + private: + //@cond + atomics::atomic front_; + typename opt::details::apply_padding< atomics::atomic, traits::padding >::padding_type pad1_; + atomics::atomic back_; + typename opt::details::apply_padding< atomics::atomic, traits::padding >::padding_type pad2_; + counter_type pfront_; + typename opt::details::apply_padding< counter_type, traits::padding >::padding_type pad3_; + counter_type cback_; + typename opt::details::apply_padding< counter_type, traits::padding >::padding_type pad4_; + + buffer buffer_; + //@endcond + }; + +}} // namespace cds::container + + +#endif // #ifndef CDSLIB_CONTAINER_WEAK_RINGBUFFER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/aligned_allocator.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/aligned_allocator.h new file mode 100644 index 0000000..1fd3c49 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/aligned_allocator.h @@ -0,0 +1,126 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_DETAILS_ALIGNED_ALLOCATOR_H +#define CDSLIB_DETAILS_ALIGNED_ALLOCATOR_H + +#include +#include + +namespace cds { namespace details { + + /// Allocator for aligned data + /** + The class is the wrapper around user-defined aligned allocator. + Template parameters: + \li \p T is a type to allocate + \li \p ALIGNED_ALLOCATOR is an aligned allocator implementation. Default implementation is defined by macro + CDS_DEFAULT_ALIGNED_ALLOCATOR from cds/user_setup/allocator.h header file. + + The \p nAlign parameter of member function specifyes desired aligment of data allocated. + + \par Note + When an array allocation is performed the allocator guarantees the alignment for first element of array only. 
+ To guarantee the alignment for each element of the array the size of type \p T must be multiple of \p nAlign: + \code + sizeof(T) % nAlign == 0 + \endcode + */ + template < + typename T + , typename ALIGNED_ALLOCATOR = CDS_DEFAULT_ALIGNED_ALLOCATOR + > + class AlignedAllocator: public ALIGNED_ALLOCATOR::template rebind::other + { + public: + /// Underlying aligned allocator type + typedef typename ALIGNED_ALLOCATOR::template rebind::other allocator_type; + + /// Analogue of operator new T(\p src... ) + template + T * New( size_t nAlign, const S&... src ) + { + return Construct( allocator_type::allocate( nAlign, 1), src... ); + } + + /// Analogue of operator new T[\p nCount ] + T * NewArray( size_t nAlign, size_t nCount ) + { + T * p = allocator_type::allocate( nAlign, nCount ); + for ( size_t i = 0; i < nCount; ++i ) + Construct( p + i ); + return p; + } + + /// Analogue of operator new T[\p nCount ]. + /** + Each item of array of type T is initialized by parameter \p src. + */ + template + T * NewArray( size_t nAlign, size_t nCount, const S& src ) + { + T * p = allocator_type::allocate( nAlign, nCount ); + for ( size_t i = 0; i < nCount; ++i ) + Construct( p + i, src ); + return p; + } + + /// Analogue of operator delete + void Delete( T * p ) + { + allocator_type::destroy( p ); + allocator_type::deallocate( p, 1 ); + } + + /// Analogue of operator delete [] + void Delete( T * p, size_t nCount ) + { + for ( size_t i = 0; i < nCount; ++i ) + allocator_type::destroy( p + i ); + allocator_type::deallocate( p, nCount ); + } + + /// Analogue of placement operator new( \p p ) T( \p src... ) + template + T * Construct( void * p, const S&... src ) + { + return new( p ) T( src... 
); + } + + /// Rebinds allocator to other type \p Q instead of \p T + template + struct rebind { + typedef AlignedAllocator< Q, typename ALIGNED_ALLOCATOR::template rebind::other > other ; ///< Rebinding result + }; + }; + +}} // namespace cds::details + +#endif // #ifndef CDSLIB_DETAILS_ALIGNED_ALLOCATOR_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/aligned_type.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/aligned_type.h new file mode 100644 index 0000000..1185618 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/aligned_type.h @@ -0,0 +1,108 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_DETAILS_ALIGNED_TYPE_H +#define CDSLIB_DETAILS_ALIGNED_TYPE_H + +#include + +namespace cds { namespace details { + + /// Aligned type + /** + This meta-algorithm solves compiler problem when you need to declare a type \p T with alignment + equal to another type alignment. For example, the following declaration produces an error in Microsoft Visual Studio 2008 compiler: + \code + typedef double my_double; + typedef __declspec(align( __alignof(my_double))) int aligned_int; + \endcode + In MS VS, the __declspec(align(N)) construction requires that N must be a integer constant (1, 2, 4 and so on) + but not an integer constant expression. + + The result of this meta-algo is a type \p aligned_type::type that is \p T aligned by \p Alignment. + For example, with \p aligned_type the prevoius example will not generate an error: + \code + typedef double my_double; + typedef typename cds::details::aligned_type::type aligned_int; + \endcode + and result of this declaration is equivalent to + \code + typedef __declspec(align(8)) int aligned_int; + \endcode + + The \p Alignment template parameter must be a constant expression and its result must be power of two. + The maximum of its value is 1024. 
+ + See also \ref align_as + */ + template + struct aligned_type +#ifdef CDS_DOXYGEN_INVOKED + {} +#endif +; + + //@cond none +# define CDS_ALIGNED_TYPE_impl(nAlign) template struct aligned_type { typedef CDS_TYPE_ALIGNMENT(nAlign) T type; } + CDS_ALIGNED_TYPE_impl(1); + CDS_ALIGNED_TYPE_impl(2); + CDS_ALIGNED_TYPE_impl(4); + CDS_ALIGNED_TYPE_impl(8); + CDS_ALIGNED_TYPE_impl(16); + CDS_ALIGNED_TYPE_impl(32); + CDS_ALIGNED_TYPE_impl(64); + CDS_ALIGNED_TYPE_impl(128); + CDS_ALIGNED_TYPE_impl(256); + CDS_ALIGNED_TYPE_impl(512); + CDS_ALIGNED_TYPE_impl(1024); +# undef CDS_ALIGNED_TYPE_impl + //@endcond + + /** Alignment by example + + This meta-algo is similar to \ref aligned_type . + + For example, the following code + \code + typedef typename cds::details::align_as::type aligned_int; + \endcode + declares type \p aligned_int which is \p int aligned like \p double. + + See also: \ref aligned_type + */ + template + struct align_as { + /// Result of meta-algo: type \p T aligned like type \p AlignAs + typedef typename aligned_type::type type; + }; + +}} // namespace cds::details + +#endif // #ifndef CDSLIB_DETAILS_ALIGNED_TYPE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/allocator.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/allocator.h new file mode 100644 index 0000000..c02d0e6 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/allocator.h @@ -0,0 +1,207 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_DETAILS_ALLOCATOR_H +#define CDSLIB_DETAILS_ALLOCATOR_H + +#include +#include +#include +#include + +namespace cds { + namespace details { + + /// Extends \p std::allocator interface to provide semantics like operator \p new and \p delete + /** + The class is the wrapper around underlying \p Alloc class. + \p Alloc provides the \p std::allocator interface. 
+ */ + template + class Allocator + : public std::conditional< + std::is_same< T, typename Alloc::value_type>::value + , Alloc + , typename Alloc::template rebind::other + >::type + { + public: + /// Underlying allocator type + typedef typename std::conditional< + std::is_same< T, typename Alloc::value_type>::value + , Alloc + , typename Alloc::template rebind::other + >::type allocator_type; + + /// \p true if underlined allocator is \p std::allocator, \p false otherwise + static constexpr bool const c_bStdAllocator = std::is_same< allocator_type, std::allocator>::value; + + /// Element type + typedef T value_type; + + /// Analogue of operator new T(\p src... ) + template + value_type * New( S const&... src ) + { + return Construct( allocator_type::allocate( 1, nullptr ), src... ); + } + + /// Analogue of operator new T( std::forward(args)... ) (move semantics) + template + value_type * MoveNew( Args&&... args ) + { + return MoveConstruct( allocator_type::allocate( 1, nullptr ), std::forward(args)... ); + } + + /// Analogue of operator new T[\p nCount ] + value_type * NewArray( size_t nCount ) + { + value_type * p = allocator_type::allocate( nCount, nullptr ); + for ( size_t i = 0; i < nCount; ++i ) + Construct( p + i ); + return p; + } + + /// Analogue of operator new T[\p nCount ]. + /** + Each item of array of type T is initialized by parameter \p src: T( src ) + */ + template + value_type * NewArray( size_t nCount, S const& src ) + { + value_type * p = allocator_type::allocate( nCount, nullptr ); + for ( size_t i = 0; i < nCount; ++i ) + Construct( p + i, src ); + return p; + } + +# if CDS_COMPILER == CDS_COMPILER_INTEL + //@cond + value_type * NewBlock( size_t nSize ) + { + return Construct( heap_alloc( nSize )); + } + //@endcond +# endif + /// Allocates block of memory of size at least \p nSize bytes. + /** + Internally, the block is allocated as an array of \p void* pointers, + then \p Construct() method is called to initialize \p T. 
+ + Precondition: nSize >= sizeof(T) + */ + template + value_type * NewBlock( size_t nSize, S const&... src ) + { + return Construct( heap_alloc( nSize ), src... ); + } + + /// Analogue of operator delete + void Delete( value_type * p ) + { + allocator_type::destroy( p ); + allocator_type::deallocate( p, 1 ); + } + + /// Analogue of operator delete [] + void Delete( value_type * p, size_t nCount ) + { + for ( size_t i = 0; i < nCount; ++i ) + allocator_type::destroy( p + i ); + allocator_type::deallocate( p, nCount ); + } + +# if CDS_COMPILER == CDS_COMPILER_INTEL + //@cond + value_type * Construct( void * p ) + { + return new( p ) value_type; + } + //@endcond +# endif + /// Analogue of placement operator new( \p p ) T( src... ) + template + value_type * Construct( void * p, S const&... src ) + { + value_type * pv = new( p ) value_type( src... ); + return pv; + } + + /// Analogue of placement operator new( p ) T( std::forward(args)... ) + template + value_type * MoveConstruct( void * p, Args&&... args ) + { + value_type * pv = new( p ) value_type( std::forward(args)... ); + return pv; + } + + /// Rebinds allocator to other type \p Q instead of \p T + template + struct rebind { + typedef Allocator< Q, typename Alloc::template rebind::other > other ; ///< Rebinding result + }; + + private: + //@cond + void * heap_alloc( size_t nByteSize ) + { + assert( nByteSize >= sizeof(value_type)); + + size_t const nPtrSize = ( nByteSize + sizeof(void *) - 1 ) / sizeof(void *); + typedef typename allocator_type::template rebind< void * >::other void_allocator; + return void_allocator().allocate( nPtrSize ); + } + //@endcond + }; + + /// Deferral removing of the object of type \p T. Helper class + template + struct deferral_deleter { + typedef T type ; ///< Type + typedef Alloc allocator_type ; ///< Allocator for removing + + /// Frees the object \p p + /** + Caveats: this function uses temporary object of type \ref cds::details::Allocator to free the node \p p. 
+ So, the node allocator should be stateless. It is standard requirement for \p std::allocator class objects. + + Do not use this function directly. + */ + static void free( T * p ) + { + Allocator a; + a.Delete( p ); + } + }; + + } // namespace details +} // namespace cds + +#endif // #ifndef CDSLIB_DETAILS_ALLOCATOR_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/binary_functor_wrapper.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/binary_functor_wrapper.h new file mode 100644 index 0000000..568f0d9 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/binary_functor_wrapper.h @@ -0,0 +1,95 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_DETAILS_BINARY_FUNCTOR_WRAPPER_H +#define CDSLIB_DETAILS_BINARY_FUNCTOR_WRAPPER_H + +#include + +//@cond +namespace cds { namespace details { + + template + struct binary_functor_wrapper { + typedef ReturnType return_type; + typedef Functor functor_type; + typedef ArgType argument_type; + typedef Accessor accessor; + + return_type operator()( argument_type const& a1, argument_type const& a2 ) const + { + return functor_type()( accessor()( a1 ), accessor()( a2 )); + } + + template + return_type operator()( argument_type const& a, Q const& q ) const + { + return functor_type()( accessor()(a), q ); + } + + template + return_type operator()( Q const& q, argument_type const& a ) const + { + return functor_type()( q, accessor()(a)); + } + + template + return_type operator()( Q1 const& q1, Q2 const& q2 ) const + { + return functor_type()( q1, q2 ); + } + }; + + struct trivial_accessor + { + template + T const& operator()( T const& p ) const + { + return p; + } + + template + T& operator()( T& p ) const + { + return p; + } + }; + + template + using predicate_wrapper = binary_functor_wrapper< bool, Predicate, ArgType, Accessor>; + + template + using compare_wrapper = binary_functor_wrapper< int, Compare, ArgType, Accessor>; + +}} // namespace cds::details + +//@endcond + +#endif // #ifndef CDSLIB_DETAILS_BINARY_FUNCTOR_WRAPPER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/bit_reverse_counter.h 
b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/bit_reverse_counter.h new file mode 100644 index 0000000..86c9406 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/bit_reverse_counter.h @@ -0,0 +1,107 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_DETAILS_BIT_REVERSE_COUNTER_H +#define CDSLIB_DETAILS_BIT_REVERSE_COUNTER_H + +#include + +//@cond +namespace cds { namespace bitop { + + template + class bit_reverse_counter + { + public: + typedef Counter counter_type; + + private: + counter_type m_nCounter; + counter_type m_nReversed; + int m_nHighBit; + + public: + bit_reverse_counter() + : m_nCounter(0) + , m_nReversed(0) + , m_nHighBit(-1) + {} + + counter_type inc() + { + ++m_nCounter; + int nBit; + for ( nBit = m_nHighBit - 1; nBit >= 0; --nBit ) { + if ( !cds::bitop::complement( m_nReversed, nBit )) + break; + } + if ( nBit < 0 ) { + m_nReversed = m_nCounter; + ++m_nHighBit; + } + return m_nReversed; + } + + counter_type dec() + { + counter_type ret = m_nReversed; + --m_nCounter; + int nBit; + for ( nBit = m_nHighBit - 1; nBit >= 0; --nBit ) { + if ( cds::bitop::complement( m_nReversed, nBit )) + break; + } + if ( nBit < 0 ) { + m_nReversed = m_nCounter; + --m_nHighBit; + } + return ret; + } + + counter_type value() const + { + return m_nCounter; + } + + counter_type reversed_value() const + { + return m_nReversed; + } + + int high_bit() const + { + return m_nHighBit; + } + }; + +}} // namespace cds::bitop +//@endcond + +#endif // #ifndef CDSLIB_DETAILS_BIT_REVERSE_COUNTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/bitop_generic.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/bitop_generic.h new file mode 100644 index 0000000..a3cac9b --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/bitop_generic.h @@ -0,0 +1,300 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following 
conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_DETAILS_BITOP_GENERIC_H +#define CDSLIB_DETAILS_BITOP_GENERIC_H + +#include // rand() + +namespace cds { + namespace bitop { namespace platform { + // Return true if x = 2 ** k, k >= 0 +#ifndef cds_bitop_isPow2_32_DEFINED + static inline bool isPow2_32( uint32_t x ) + { + return (x & ( x - 1 )) == 0 && x; + } +#endif + +#ifndef cds_bitop_isPow2_64_DEFINED + static inline bool isPow2_64( uint64_t x ) + { + return (x & ( x - 1 )) == 0 && x; + } +#endif + + //*************************************************** + // Most significant bit number (1..N) + // Return 0 if x == 0 + // +#ifndef cds_bitop_msb32_DEFINED + // Return number (1..32) of most significant bit + // Return 0 if x == 0 + // Source: Linux kernel + static inline int msb32( uint32_t x ) + { + int r = 32; + + if (!x) + return 0; + if (!(x & 0xffff0000u)) { + x <<= 16; + r -= 16; + } + if (!(x & 0xff000000u)) { + x <<= 8; + r -= 8; + } + if (!(x & 0xf0000000u)) { + x <<= 4; + r -= 4; + } + if (!(x & 0xc0000000u)) { + x <<= 2; + r -= 2; + } + if (!(x & 0x80000000u)) { + //x <<= 1; + r -= 1; + } + return r; + } +#endif + +#ifndef cds_bitop_msb32nz_DEFINED + static inline int msb32nz( uint32_t x ) + { + return msb32( x ) - 1; + } +#endif + +#ifndef cds_bitop_msb64_DEFINED + static inline int msb64( uint64_t x ) + { + uint32_t h = (uint32_t) (x >> 32); + if ( h ) + return msb32( h ) + 32; + return msb32( (uint32_t) x ); + } +#endif + +#ifndef cds_bitop_msb64nz_DEFINED + static inline int msb64nz( uint64_t x ) + { + return msb64( x ) - 1; + } +#endif + + //*************************************************** + // Least significant bit number (1..N) + // Return 0 if x == 0 + // +#ifndef cds_bitop_lsb32_DEFINED + // Return number (1..32) of least significant bit + // Return 0 if x == 0 + // Source: Linux kernel + static inline int lsb32( uint32_t x ) + { + int r = 1; + + if (!x) + return 0; + if (!(x & 0xffff)) { + x >>= 16; + r += 16; + } + if (!(x & 0xff)) { + x >>= 8; + r += 8; + } + if (!(x 
& 0xf)) { + x >>= 4; + r += 4; + } + if (!(x & 3)) { + x >>= 2; + r += 2; + } + if (!(x & 1)) { + //x >>= 1; + r += 1; + } + return r; + } +#endif + +#ifndef cds_bitop_lsb32nz_DEFINED + static inline int lsb32nz( uint32_t x ) + { + return lsb32( x ) - 1; + } +#endif + +#ifndef cds_bitop_lsb64_DEFINED + static inline int lsb64( uint64_t x ) + { + if ( !x ) + return 0; + if ( x & 0xffffffffu ) + return lsb32( (uint32_t) x ); + return lsb32( (uint32_t) (x >> 32)) + 32; + } +#endif + +#ifndef cds_bitop_lsb64nz_DEFINED + static inline int lsb64nz( uint64_t x ) + { + return lsb64( x ) - 1; + } +#endif + + //****************************************************** + // Reverse bit order + //****************************************************** +#ifndef cds_bitop_rbo32_DEFINED + static inline uint32_t rbo32( uint32_t x ) + { + // swap odd and even bits + x = ((x >> 1) & 0x55555555) | ((x & 0x55555555) << 1); + // swap consecutive pairs + x = ((x >> 2) & 0x33333333) | ((x & 0x33333333) << 2); + // swap nibbles ... + x = ((x >> 4) & 0x0F0F0F0F) | ((x & 0x0F0F0F0F) << 4); + // swap bytes + x = ((x >> 8) & 0x00FF00FF) | ((x & 0x00FF00FF) << 8); + // swap 2-byte long pairs + return ( x >> 16 ) | ( x << 16 ); + } +#endif + +#ifndef cds_bitop_rbo64_DEFINED + static inline uint64_t rbo64( uint64_t x ) + { + // Low 32bit Hight 32bit + return ( static_cast(rbo32( (uint32_t) x )) << 32 ) | ( static_cast( rbo32( (uint32_t) (x >> 32)))); + } +#endif + + //****************************************************** + // Set bit count. 
Return count of non-zero bits in word + //****************************************************** +#ifndef cds_bitop_sbc32_DEFINED + static inline int sbc32( uint32_t x ) + { +# ifdef cds_beans_zbc32_DEFINED + return 32 - zbc32( x ); +# else + // Algorithm from Sean Eron Anderson's great collection + x = x - ((x >> 1) & 0x55555555); + x = (x & 0x33333333) + ((x >> 2) & 0x33333333); + return (((x + (x >> 4)) & 0xF0F0F0F) * 0x1010101) >> 24; +# endif + } +#endif + +#ifndef cds_bitop_sbc64_DEFINED + static inline int sbc64( uint64_t x ) + { +# ifdef cds_beans_zbc64_DEFINED + return 64 - zbc64( x ); +# else + return sbc32( (uint32_t) (x >> 32)) + sbc32( (uint32_t) x ); +# endif + } +#endif + + //****************************************************** + // Zero bit count. Return count of zero bits in word + //****************************************************** +#ifndef cds_bitop_zbc32_DEFINED + static inline int zbc32( uint32_t x ) + { + return 32 - sbc32( x ); + } +#endif + +#ifndef cds_bitop_zbc64_DEFINED + static inline int zbc64( uint64_t x ) + { + return 64 - sbc64( x ); + } +#endif + + // Bit complement +#ifndef cds_bitop_complement32_DEFINED + static inline bool complement32( uint32_t * pArg, unsigned int nBit ) + { + assert( pArg ); + uint32_t nVal = *pArg & (1 << nBit); + *pArg ^= 1 << nBit; + return nVal != 0; + } +#endif + +#ifndef cds_bitop_complement64_DEFINED + static inline bool complement64( uint64_t * pArg, unsigned int nBit ) + { + assert( pArg ); + uint64_t nVal = *pArg & (uint64_t(1) << nBit); + *pArg ^= uint64_t(1) << nBit; + return nVal != 0; + } +#endif + + /* + Simple random number generator + Source: + [2003] George Marsaglia "Xorshift RNGs" + */ + static inline uint32_t RandXorShift32(uint32_t x) + { + //static uint32_t xRandom = 2463534242UL ; //rand() | 0x0100 ; // must be nonzero + //uint32_t x = xRandom; + if ( !x ) + x = (( std::rand() + 1) << 16 ) + std::rand() + 1; + x ^= x << 13; + x ^= x >> 15; + return x ^= x << 5; + } + + static 
inline uint64_t RandXorShift64(uint64_t x) + { + //static uint64_t xRandom = 88172645463325252LL; + //uint64_t x = xRandom; + if ( !x ) + x = 88172645463325252LL; + x ^= x << 13; + x ^= x >> 7; + return x ^= x << 17; + } + }} // namespace bitop::platform +} // namespace cds + +#endif // CDSLIB_DETAILS_BITOP_GENERIC_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/bounded_array.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/bounded_array.h new file mode 100644 index 0000000..40fd811 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/bounded_array.h @@ -0,0 +1,142 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_IMPL_BOUNDED_ARRAY_H +#define CDSLIB_IMPL_BOUNDED_ARRAY_H + +/* + Dynamic non-growing array + + Editions: + 2008.03.08 Maxim.Khiszinsky Created +*/ + +#include +#include +#include + +//@cond +namespace cds { + namespace details { + /// Bounded dynamic array + /** + The class template is intended for storing fixed-size sequences of objects. + Array capacity is constant and cannot be changed after creation of object of the class. + It is suitable for managing objects of non-copyable type \p T. 
+ + \par Template parameters + - \p T type of elements + - \p Allocator dynamic memory allocator class (std::allocator semantics) + + */ + template + class bounded_array + { + public: + typedef T value_type ; ///< value type stored in the array + typedef Allocator allocator_type ; ///< allocator type + + typedef value_type * iterator ; ///< item iterator + typedef value_type const * const_iterator ; ///< item const iterator + + private: + typedef cds::details::Allocator< T, allocator_type> allocator_impl; + + value_type * m_arr; + const size_t m_nCapacity; + + public: + /// Default ctor + explicit bounded_array( + size_t nCapacity ///< capacity + ) + : m_arr( allocator_impl().NewArray( nCapacity )) + , m_nCapacity( nCapacity ) + {} + + ~bounded_array() + { + allocator_impl().Delete( m_arr, capacity()); + } + + const value_type& operator []( size_t nItem ) const + { + assert( nItem < capacity()); + return m_arr[nItem]; + } + + value_type& operator []( size_t nItem ) + { + assert( nItem < capacity()); + return m_arr[nItem]; + } + + size_t size() const noexcept + { + return capacity(); + } + + size_t capacity() const noexcept + { + return m_nCapacity; + } + + /// Returns pointer to the first item in the array + value_type * top() noexcept + { + return m_arr; + } + + /// Get begin iterator + const_iterator begin() const noexcept + { + return m_arr; + } + iterator begin() noexcept + { + return m_arr; + } + + /// Get end iterator + const_iterator end() const noexcept + { + return begin() + capacity(); + } + iterator end() noexcept + { + return begin() + capacity(); + } + }; + + } // namespace details +} // namespace cds +//@endcond + +#endif // #ifndef CDSLIB_IMPL_BOUNDED_ARRAY_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/bounded_container.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/bounded_container.h new file mode 100644 index 0000000..1c39713 --- /dev/null +++ 
b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/bounded_container.h @@ -0,0 +1,43 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_BOUNDED_CONTAINER_H +#define CDSLIB_BOUNDED_CONTAINER_H + +namespace cds { + /// Bounded container + /** + If a container has upper limit of item then it should be based on bounded_container class. 
+ Example of those containers: cyclic queue (\p cds::container::VyukovMPMCCycleQueue) + */ + struct bounded_container {}; +} // namespace cds + +#endif // CDSLIB_BOUNDED_CONTAINER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/defs.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/defs.h new file mode 100644 index 0000000..2de6714 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/defs.h @@ -0,0 +1,388 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_DEFS_H +#define CDSLIB_DEFS_H + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +/** \mainpage CDS: Concurrent Data Structures library + + This library is a collection of lock-free and lock-based fine-grained algorithms of data structures + like maps, queues, list etc. The library contains implementation of well-known data structures + and memory reclamation schemas for modern processor architectures. The library is written on C++11. + + The main namespace for the library is \ref cds. + To see the full list of container's class go to modules tab. + + Supported processor architectures and operating systems (OS) are: + - x86 [32bit] Linux, Windows, FreeBSD, MinGW + - amd64 (x86-64) [64bit] Linux, Windows, FreeBSD, MinGW + - ia64 (itanium) [64bit] Linux, HP-UX 11.23, HP-UX 11.31 + - sparc [64bit] Sun Solaris + - Mac OS X amd64 + - ppc64 Linux + + Supported compilers: + - GCC 4.8+ + - Clang 3.6+ + - MS Visual C++ 2015 and above + - Intel C++ Compiler 15 + + For each lock-free data structure the \p CDS library presents several implementation based on published papers. For + example, there are several implementations of queue, each of them is divided by memory reclamation + schema used. However, any implementation supports common interface for the type of data structure. 
+ + To use any lock-free data structure, the following are needed: + - atomic operation library conforming with C++11 memory model. + The libcds can be built with \p std::atomic, \p boost::atomic or its own + @ref cds_cxx11_atomic "atomic implementation" + - safe memory reclamation (SMR) or garbage collecting (GC) algorithm. + + SMR is the main part of lock-free data structs. The SMR solves the problem of safe + memory reclamation that is one of the main problem for lock-free programming. + The library contains the implementations of several light-weight \ref cds_garbage_collector "memory reclamation schemes": + - M.Michael's Hazard Pointer - see \p cds::gc::HP, \p cds::gc::DHP for more explanation + - User-space Read-Copy Update (RCU) - see \p cds::urcu namespace + - there is an empty \p cds::gc::nogc "GC" for append-only containers that do not support item reclamation. + + Many GC requires a support from the thread. The library does not define the threading model you must use, + it is developed to support various ones; about incorporating cds library to your threading model see \p cds::threading. + + \anchor cds_how_to_use + \par How to use + + The main part of lock-free programming is SMR, so-called garbage collector, for safe memory reclamation. + The library provides several types of SMR schemes. One of widely used and well-tested is Hazard Pointer + memory reclamation schema discovered by M. Micheal and implemented in the library as \p cds::gc::HP class. + Usually, the application is based on only one type of GC. + + In the next example we mean that you use Hazard Pointer \p cds::gc::HP - based containers. 
+ + First, in your code you should initialize \p cds library and Hazard Pointer in \p main() function: + \code + #include // for cds::Initialize and cds::Terminate + #include // for cds::HP (Hazard Pointer) SMR + + int main(int argc, char** argv) + { + // Initialize libcds + cds::Initialize(); + + { + // Initialize Hazard Pointer singleton + cds::gc::HP hpGC; + + // If main thread uses lock-free containers + // the main thread should be attached to libcds infrastructure + cds::threading::Manager::attachThread(); + + // Now you can use HP-based containers in the main thread + //... + } + + // Terminate libcds + cds::Terminate(); + } + \endcode + + Second, any of your thread should be attached to \p cds infrastructure. + \code + #include + + int myThreadEntryPoint(void *) + { + // Attach the thread to libcds infrastructure + cds::threading::Manager::attachThread(); + + // Now you can use HP-based containers in the thread + //... + + // Detach thread when terminating + cds::threading::Manager::detachThread(); + } + \endcode + + After that, you can use \p cds lock-free containers safely without any external synchronization. + + In some cases, you should work in an external thread. For example, your application + is a plug-in for a server that calls your code in a thread that has been created by the server. + In this case, you should use persistent mode of garbage collecting. In this mode, the thread attaches + to the GC singleton only if it is not attached yet and never call detaching: + \code + #include + + int plugin_entry_point() + { + // Attach the thread if it is not attached yet + if ( !cds::threading::Manager::isThreadAttached()) + cds::threading::Manager::attachThread(); + + // Do some work with HP-related containers + ... + } + \endcode + + + \par How to build + + The cds is mostly header-only library. Only small part of library related to GC core functionality + should be compiled. cds depends on C++ standard library only. 
+ + Test suite depends on: + - \p boost.thread (thread-loal storage support), boost.system + - \p google-test + + Some parts of libcds may depend on DCAS (double-width compare-and-swap) atomic primitive if + the target architecture supports it. For x86, cmake build script enables -mcx16 compiler flag that + switches DCAS support on. You may manually disable DCAS support with the following command line flags + in GCC/clang (for MS VC++ compiler DCAS is not supported): + - \p -DCDS_DISABLE_128BIT_ATOMIC - for 64bit build + - \p -DCDS_DISABLE_64BIT_ATOMIC - for 32bit build + + @warning All your projects AND libcds MUST be compiled with the same flags - either with DCAS support or without it. + + \par Windows build + + Prerequisites: for building cds library and test suite you need: + - perl installed; \p PATH environment variable + should contain full path to Perl binary. Perl is used to generate large dictionary for testing purpose; + - boost library 1.51 and above. You should create environment variable + \p BOOST_PATH containing full path to \p boost root directory (for example, C:\\libs\\boost_1_57_0). + + Open solution file cds\projects\vc141\cds.sln with Microsoft VisualStudio 2017. + The solution contains \p cds project and a lot of test projects. Just build the library using solution. + + Warning: the solution depends on \p BOOST_PATH environment variable that specifies full path + to \p boost library root directory. The test projects search \p boost libraries in: + - for 32bit: \$(BOOST_PATH)/stage/lib, \$(BOOST_PATH)/stage32/lib, and \$(BOOST_PATH)/bin. + - for 64bit: \$(BOOST_PATH)/stage64/lib and \$(BOOST_PATH)/bin. + + All tests are based on googletest framework. The following environment variables specify + where to find gtest include and library directories: + - \p GTEST_ROOT - gtest root directory. 
\$(GTEST_ROOT)/include specifies full path to
+      gtest include files;
+    - \p GTEST_LIB64 - the path to 64bit gtest library dir;
+    - \p GTEST_LIB32 - the path to 32bit gtest library dir.
+
+    \par *NIX build
+
+    For Unix-like systems GCC and Clang compilers are supported.
+    Use GCC 4.8+ compiler or Clang 3.6+ to build cds library with CMake.
+    See accompanying file /build/cmake/readme.md for more info.
+*/
+
+
+/// The main library namespace
+namespace cds {}
+
+/*
+    \brief Basic typedefs and defines
+
+    You do not need to include this header directly. All library header files depend on defs.h and include it.
+
+    Defines macros:
+
+    CDS_COMPILER        Compiler:
+                    - CDS_COMPILER_MSVC     Microsoft Visual C++
+                    - CDS_COMPILER_GCC      GNU C++
+                    - CDS_COMPILER_CLANG    clang
+                    - CDS_COMPILER_UNKNOWN  unknown compiler
+
+    CDS_COMPILER__NAME  Character compiler name
+
+    CDS_COMPILER_VERSION    Compiler version (number)
+
+    CDS_BUILD_BITS      Resulting binary code:
+                    - 32        32bit
+                    - 64        64bit
+                    - -1        undefined
+
+    CDS_POW2_BITS       CDS_BUILD_BITS == 2**CDS_POW2_BITS
+
+    CDS_PROCESSOR_ARCH    The processor architecture:
+                    - CDS_PROCESSOR_X86     Intel x86 (32bit)
+                    - CDS_PROCESSOR_AMD64   Amd64, Intel x86-64 (64bit)
+                    - CDS_PROCESSOR_IA64    Intel IA64 (Itanium)
+                    - CDS_PROCESSOR_SPARC   Sparc
+                    - CDS_PROCESSOR_PPC64   PowerPC64
+                    - CDS_PROCESSOR_ARM7    ARM v7
+                    - CDS_PROCESSOR_ARM8    ARM v8
+                    - CDS_PROCESSOR_UNKNOWN undefined processor architecture
+
+    CDS_PROCESSOR__NAME    The name (string) of processor architecture
+
+    CDS_OS_TYPE        Operating system type:
+                    - CDS_OS_UNKNOWN        unknown OS
+                    - CDS_OS_PTHREAD        unknown OS with pthread
+                    - CDS_OS_WIN32          Windows 32bit
+                    - CDS_OS_WIN64          Windows 64bit
+                    - CDS_OS_LINUX          Linux
+                    - CDS_OS_SUN_SOLARIS    Sun Solaris
+                    - CDS_OS_HPUX           HP-UX
+                    - CDS_OS_AIX            IBM AIX
+                    - CDS_OS_BSD            FreeBSD, OpenBSD, NetBSD - common flag
+                    - CDS_OS_FREE_BSD       FreeBSD
+                    - CDS_OS_OPEN_BSD       OpenBSD
+                    - CDS_OS_NET_BSD        NetBSD
+                    - CDS_OS_MINGW          MinGW
+                    - CDS_OS_OSX            Apple OS X
+
+    CDS_OS__NAME        The name (string) of operating system type
+ + CDS_OS_INTERFACE OS interface: + - CDS_OSI_UNIX Unix (POSIX) + - CDS_OSI_WINDOWS Windows + + + CDS_BUILD_TYPE Build type: 'RELEASE' or 'DEBUG' string + +*/ + +#if defined(_DEBUG) || !defined(NDEBUG) +# define CDS_DEBUG +# define CDS_BUILD_TYPE "DEBUG" +#else +# define CDS_BUILD_TYPE "RELEASE" +#endif + +/// Unused function argument +#define CDS_UNUSED(x) (void)(x) + +// Supported compilers: +#define CDS_COMPILER_MSVC 1 +#define CDS_COMPILER_GCC 2 +#define CDS_COMPILER_INTEL 3 +#define CDS_COMPILER_CLANG 4 +#define CDS_COMPILER_UNKNOWN -1 + +// Supported processor architectures: +#define CDS_PROCESSOR_X86 1 +#define CDS_PROCESSOR_IA64 2 +#define CDS_PROCESSOR_SPARC 3 +#define CDS_PROCESSOR_AMD64 4 +#define CDS_PROCESSOR_PPC64 5 // PowerPC 64bit +#define CDS_PROCESSOR_ARM7 7 +#define CDS_PROCESSOR_ARM8 8 +#define CDS_PROCESSOR_UNKNOWN -1 + +// Supported OS interfaces +#define CDS_OSI_UNKNOWN 0 +#define CDS_OSI_UNIX 1 +#define CDS_OSI_WINDOWS 2 + +// Supported operating systems (value of CDS_OS_TYPE): +#define CDS_OS_UNKNOWN -1 +#define CDS_OS_WIN32 1 +#define CDS_OS_WIN64 5 +#define CDS_OS_LINUX 10 +#define CDS_OS_SUN_SOLARIS 20 +#define CDS_OS_HPUX 30 +#define CDS_OS_AIX 50 // IBM AIX +#define CDS_OS_FREE_BSD 61 +#define CDS_OS_OPEN_BSD 62 +#define CDS_OS_NET_BSD 63 +#define CDS_OS_MINGW 70 +#define CDS_OS_OSX 80 +#define CDS_OS_PTHREAD 100 + +#if defined(_MSC_VER) +# if defined(__ICL) || defined(__INTEL_COMPILER) +# define CDS_COMPILER CDS_COMPILER_INTEL +# elif defined(__clang__) +# define CDS_COMPILER CDS_COMPILER_CLANG +# else +# define CDS_COMPILER CDS_COMPILER_MSVC +# endif +#elif defined(__clang__) // Clang checking must be before GCC since Clang defines __GCC__ too +# define CDS_COMPILER CDS_COMPILER_CLANG +#elif defined( __GCC__ ) || defined(__GNUC__) +# if defined(__ICL) || defined(__INTEL_COMPILER) +# define CDS_COMPILER CDS_COMPILER_INTEL +# else +# define CDS_COMPILER CDS_COMPILER_GCC +# endif +#else +# define CDS_COMPILER CDS_COMPILER_UNKNOWN 
+#endif // Compiler choice + + +// CDS_VERIFY: Debug - assert(_expr); Release - _expr +#ifdef CDS_DEBUG +# define CDS_VERIFY( _expr ) assert( _expr ) +# define CDS_VERIFY_FALSE( _expr ) assert( !( _expr )) +# define CDS_DEBUG_ONLY( _expr ) _expr +# define CDS_VERIFY_EQ( expr, val ) assert( expr == val ) +#else +# define CDS_VERIFY( _expr ) _expr +# define CDS_VERIFY_FALSE( _expr ) _expr +# define CDS_DEBUG_ONLY( _expr ) +# define CDS_VERIFY_EQ( expr, val ) expr +#endif + +#ifdef CDS_STRICT +# define CDS_STRICT_DO(_expr) _expr +#else +# define CDS_STRICT_DO( _expr ) +#endif + +#ifdef CDS_DEBUG +# define cds_assert( expr ) assert( expr ) +#else + static inline void cds_assert( bool expr ) { + if ( !expr ) + abort(); + } +#endif + +// Compiler-specific defines +#include + +/************************************************************************* + Common things +**************************************************************************/ + +namespace cds { + + /// any_type is used as a placeholder for auto-calculated type (usually in \p rebind templates) + struct any_type {}; + +} // namespace cds + +#endif // #ifndef CDSLIB_DEFS_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/is_aligned.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/is_aligned.h new file mode 100644 index 0000000..479d5b3 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/is_aligned.h @@ -0,0 +1,64 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following 
disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_DETAILS_IS_ALIGNED_H +#define CDSLIB_DETAILS_IS_ALIGNED_H + +#include + +namespace cds { namespace details { + + /// Checks if the pointer \p p has \p ALIGN byte alignment + /** + \p ALIGN must be power of 2. + + The function is mostly intended for run-time assertion + */ + template + static inline bool is_aligned(T const * p) + { + return (((uintptr_t)p) & uintptr_t(ALIGN - 1)) == 0; + } + + /// Checks if the pointer \p p has \p nAlign byte alignment + /** + \p nAlign must be power of 2. 
+ + The function is mostly intended for run-time assertion + */ + template + static inline bool is_aligned(T const * p, size_t nAlign) + { + return (((uintptr_t)p) & uintptr_t(nAlign - 1)) == 0; + } + +}} // namespace cds::details + +#endif // #ifndef CDSLIB_DETAILS_IS_ALIGNED_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/lib.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/lib.h new file mode 100644 index 0000000..404084c --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/lib.h @@ -0,0 +1,54 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_LIB_H +#define CDSLIB_LIB_H +//@cond + +#include + +#ifndef CDS_BUILD_LIB + +#ifdef _DEBUG +# define CDS_LIB_DEBUG_SUFFIX "-dbg" +#else +# define CDS_LIB_DEBUG_SUFFIX "" +#endif + +#if CDS_COMPILER == CDS_COMPILER_MSVC || CDS_COMPILER == CDS_COMPILER_INTEL +# pragma comment( lib, "libcds-" CDS_PROCESSOR__NICK CDS_LIB_DEBUG_SUFFIX ) +#endif + +#undef CDS_LIB_DEBUG_SUFFIX + +#endif // #ifndef CDS_BUILD_LIB + +//@endcond +#endif // #ifndef CDSLIB_LIB_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/make_const_type.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/make_const_type.h new file mode 100644 index 0000000..6335859 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/make_const_type.h @@ -0,0 +1,58 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_DETAILS_MAKE_CONST_TYPE_H +#define CDSLIB_DETAILS_MAKE_CONST_TYPE_H + +#include + +namespace cds { namespace details { + + //@cond + template + struct make_const_type + { + typedef T type; + typedef T * pointer; + typedef T & reference; + }; + template + struct make_const_type + { + typedef T const type; + typedef T const * pointer; + typedef T const & reference; + }; + + //@endcond + +}} // namespace cds::details + +#endif // #ifndef CDSLIB_DETAILS_MAKE_CONST_TYPE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/marked_ptr.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/marked_ptr.h new file mode 100644 index 0000000..9678b23 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/marked_ptr.h @@ -0,0 +1,396 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: 
http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_DETAILS_MARKED_PTR_H +#define CDSLIB_DETAILS_MARKED_PTR_H + +#include + +namespace cds { + namespace details { + + /// Marked pointer + /** + On the modern architectures, the default data alignment is 4 (for 32bit) or 8 byte for 64bit. + Therefore, the least 2 or 3 bits of the pointer is always zero and can + be used as a bitfield to store logical flags. This trick is widely used in + lock-free programming to operate with the pointer and its flags atomically. 
+ + Template parameters: + - T - type of pointer + - Bitmask - bitmask of least unused bits + */ + template + class marked_ptr + { + T * m_ptr ; ///< pointer and its mark bits + + public: + typedef T value_type ; ///< type of value the class points to + typedef T * pointer_type ; ///< type of pointer + static constexpr const uintptr_t bitmask = Bitmask; ///< bitfield bitmask + static constexpr const uintptr_t pointer_bitmask = ~bitmask; ///< pointer bitmask + + public: + /// Constructs null marked pointer. The flag is cleared. + constexpr marked_ptr() noexcept + : m_ptr( nullptr ) + {} + + /// Constructs marked pointer with \p ptr value. The least bit(s) of \p ptr is the flag. + constexpr explicit marked_ptr( value_type * ptr ) noexcept + : m_ptr( ptr ) + {} + + /// Constructs marked pointer with \p ptr value and \p nMask flag. + /** + The \p nMask argument defines the OR-bits + */ + marked_ptr( value_type * ptr, int nMask ) noexcept + : m_ptr( ptr ) + { + assert( bits() == 0 ); + *this |= nMask; + } + + /// Copy constructor + marked_ptr( marked_ptr const& src ) noexcept = default; + /// Copy-assignment operator + marked_ptr& operator =( marked_ptr const& p ) noexcept = default; +# if !defined(CDS_DISABLE_DEFAULT_MOVE_CTOR) + //@cond + marked_ptr( marked_ptr&& src ) noexcept = default; + marked_ptr& operator =( marked_ptr&& p ) noexcept = default; + //@endcond +# endif + + //TODO: make move ctor + + private: + //@cond + union pointer_cast { + T * ptr; + uintptr_t n; + + pointer_cast(T * p) : ptr(p) {} + pointer_cast(uintptr_t i) : n(i) {} + }; + static uintptr_t to_int( value_type * p ) noexcept + { + return pointer_cast(p).n; + } + + static value_type * to_ptr( uintptr_t n ) noexcept + { + return pointer_cast(n).ptr; + } + + uintptr_t to_int() const noexcept + { + return to_int( m_ptr ); + } + //@endcond + + public: + /// Returns the pointer without mark bits (real pointer) const version + value_type * ptr() const noexcept + { + return to_ptr( to_int() & ~bitmask 
); + } + + /// Returns the pointer and bits together + value_type * all() const noexcept + { + return m_ptr; + } + + /// Returns the least bits of pointer according to \p Bitmask template argument of the class + uintptr_t bits() const noexcept + { + return to_int() & bitmask; + } + + /// Analogue for \ref ptr + value_type * operator ->() const noexcept + { + return ptr(); + } + + /// Assignment operator sets markup bits to zero + marked_ptr operator =( T * p ) noexcept + { + m_ptr = p; + return *this; + } + + /// Set LSB bits as bits() | nBits + marked_ptr& operator |=( int nBits ) noexcept + { + assert( (nBits & pointer_bitmask) == 0 ); + m_ptr = to_ptr( to_int() | nBits ); + return *this; + } + + /// Set LSB bits as bits() & nBits + marked_ptr& operator &=( int nBits ) noexcept + { + assert( (nBits & pointer_bitmask) == 0 ); + m_ptr = to_ptr( to_int() & (pointer_bitmask | nBits)); + return *this; + } + + /// Set LSB bits as bits() ^ nBits + marked_ptr& operator ^=( int nBits ) noexcept + { + assert( (nBits & pointer_bitmask) == 0 ); + m_ptr = to_ptr( to_int() ^ nBits ); + return *this; + } + + /// Returns p |= nBits + friend marked_ptr operator |( marked_ptr p, int nBits) noexcept + { + p |= nBits; + return p; + } + + /// Returns p |= nBits + friend marked_ptr operator |( int nBits, marked_ptr p ) noexcept + { + p |= nBits; + return p; + } + + /// Returns p &= nBits + friend marked_ptr operator &( marked_ptr p, int nBits) noexcept + { + p &= nBits; + return p; + } + + /// Returns p &= nBits + friend marked_ptr operator &( int nBits, marked_ptr p ) noexcept + { + p &= nBits; + return p; + } + + /// Returns p ^= nBits + friend marked_ptr operator ^( marked_ptr p, int nBits) noexcept + { + p ^= nBits; + return p; + } + /// Returns p ^= nBits + friend marked_ptr operator ^( int nBits, marked_ptr p ) noexcept + { + p ^= nBits; + return p; + } + + /// Inverts LSBs of pointer \p p + friend marked_ptr operator ~( marked_ptr p ) noexcept + { + return p ^ 
marked_ptr::bitmask; + } + + + /// Comparing two marked pointer including its mark bits + friend bool operator ==( marked_ptr p1, marked_ptr p2 ) noexcept + { + return p1.all() == p2.all(); + } + + /// Comparing marked pointer and raw pointer, mark bits of \p p1 is ignored + friend bool operator ==( marked_ptr p1, value_type const * p2 ) noexcept + { + return p1.ptr() == p2; + } + + /// Comparing marked pointer and raw pointer, mark bits of \p p2 is ignored + friend bool operator ==( value_type const * p1, marked_ptr p2 ) noexcept + { + return p1 == p2.ptr(); + } + + /// Comparing two marked pointer including its mark bits + friend bool operator !=( marked_ptr p1, marked_ptr p2 ) noexcept + { + return p1.all() != p2.all(); + } + + /// Comparing marked pointer and raw pointer, mark bits of \p p1 is ignored + friend bool operator !=( marked_ptr p1, value_type const * p2 ) noexcept + { + return p1.ptr() != p2; + } + + /// Comparing marked pointer and raw pointer, mark bits of \p p2 is ignored + friend bool operator !=( value_type const * p1, marked_ptr p2 ) noexcept + { + return p1 != p2.ptr(); + } + + //@cond + /// atomic< marked_ptr< T, Bitmask > > support + T *& impl_ref() noexcept + { + return m_ptr; + } + //@endcond + }; + } // namespace details + +} // namespace cds + +//@cond +CDS_CXX11_ATOMIC_BEGIN_NAMESPACE + + template + class atomic< cds::details::marked_ptr > + { + private: + typedef cds::details::marked_ptr marked_ptr; + typedef atomics::atomic atomic_impl; + + atomic_impl m_atomic; + public: + bool is_lock_free() const volatile noexcept + { + return m_atomic.is_lock_free(); + } + bool is_lock_free() const noexcept + { + return m_atomic.is_lock_free(); + } + + void store(marked_ptr val, memory_order order = memory_order_seq_cst) volatile noexcept + { + m_atomic.store( val.all(), order ); + } + void store(marked_ptr val, memory_order order = memory_order_seq_cst) noexcept + { + m_atomic.store( val.all(), order ); + } + + marked_ptr load(memory_order order 
= memory_order_seq_cst) const volatile noexcept + { + return marked_ptr( m_atomic.load( order )); + } + marked_ptr load(memory_order order = memory_order_seq_cst) const noexcept + { + return marked_ptr( m_atomic.load( order )); + } + + operator marked_ptr() const volatile noexcept + { + return load(); + } + operator marked_ptr() const noexcept + { + return load(); + } + + marked_ptr exchange(marked_ptr val, memory_order order = memory_order_seq_cst) volatile noexcept + { + return marked_ptr( m_atomic.exchange( val.all(), order )); + } + marked_ptr exchange(marked_ptr val, memory_order order = memory_order_seq_cst) noexcept + { + return marked_ptr( m_atomic.exchange( val.all(), order )); + } + + bool compare_exchange_weak(marked_ptr& expected, marked_ptr desired, memory_order success_order, memory_order failure_order) volatile noexcept + { + return m_atomic.compare_exchange_weak( expected.impl_ref(), desired.all(), success_order, failure_order ); + } + bool compare_exchange_weak(marked_ptr& expected, marked_ptr desired, memory_order success_order, memory_order failure_order) noexcept + { + return m_atomic.compare_exchange_weak( expected.impl_ref(), desired.all(), success_order, failure_order ); + } + bool compare_exchange_strong(marked_ptr& expected, marked_ptr desired, memory_order success_order, memory_order failure_order) volatile noexcept + { + return m_atomic.compare_exchange_strong( expected.impl_ref(), desired.all(), success_order, failure_order ); + } + bool compare_exchange_strong(marked_ptr& expected, marked_ptr desired, memory_order success_order, memory_order failure_order) noexcept + { + return m_atomic.compare_exchange_strong( expected.impl_ref(), desired.all(), success_order, failure_order ); + } + bool compare_exchange_weak(marked_ptr& expected, marked_ptr desired, memory_order success_order = memory_order_seq_cst) volatile noexcept + { + return m_atomic.compare_exchange_weak( expected.impl_ref(), desired.all(), success_order ); + } + bool 
compare_exchange_weak(marked_ptr& expected, marked_ptr desired, memory_order success_order = memory_order_seq_cst) noexcept + { + return m_atomic.compare_exchange_weak( expected.impl_ref(), desired.all(), success_order ); + } + bool compare_exchange_strong(marked_ptr& expected, marked_ptr desired, memory_order success_order = memory_order_seq_cst) volatile noexcept + { + return m_atomic.compare_exchange_strong( expected.impl_ref(), desired.all(), success_order ); + } + bool compare_exchange_strong(marked_ptr& expected, marked_ptr desired, memory_order success_order = memory_order_seq_cst) noexcept + { + return m_atomic.compare_exchange_strong( expected.impl_ref(), desired.all(), success_order ); + } + + constexpr atomic() noexcept + : m_atomic( nullptr ) + {} + + constexpr explicit atomic(marked_ptr val) noexcept + : m_atomic( val.all()) + {} + constexpr explicit atomic(T * p) noexcept + : m_atomic( p ) + {} + + atomic(const atomic&) = delete; + atomic& operator=(const atomic&) = delete; + +#if !(CDS_COMPILER == CDS_COMPILER_MSVC && CDS_COMPILER_VERSION < CDS_COMPILER_MSVC15) + // MSVC12, MSVC14, MSVC14.1: warning C4522: multiple assignment operators specified + atomic& operator=(const atomic&) volatile = delete; + marked_ptr operator=(marked_ptr val) volatile noexcept + { + store( val ); + return val; + } +#endif + marked_ptr operator=(marked_ptr val) noexcept + { + store( val ); + return val; + } + }; + +CDS_CXX11_ATOMIC_END_NAMESPACE +//@endcond + +#endif // #ifndef CDSLIB_DETAILS_MARKED_PTR_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/size_t_cast.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/size_t_cast.h new file mode 100644 index 0000000..f79dbd9 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/size_t_cast.h @@ -0,0 +1,63 @@ +/* +This file is a part of libcds - Concurrent Data Structures library + +(C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + +Source code 
repo: http://github.com/khizmax/libcds/ +Download: http://sourceforge.net/projects/libcds/files/ + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, +this list of conditions and the following disclaimer in the documentation +and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_DETAILS_SIZE_T_CAST_H +#define CDSLIB_DETAILS_SIZE_T_CAST_H + +#include + +//@cond +namespace cds { namespace details { + + template + struct size_t_unsigned; + + template <> + struct size_t_unsigned<4> + { + typedef uint32_t type; + }; + + template <> + struct size_t_unsigned<8> + { + typedef uint64_t type; + }; + + static inline size_t_unsigned::type size_t_cast( size_t n ) + { + return static_cast< size_t_unsigned::type>( n ); + } + +}} // namespace cds::details +//@endcond + +#endif // #ifndef CDSLIB_DETAILS_SIZE_T_CAST_H + diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/static_functor.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/static_functor.h new file mode 100644 index 0000000..52986bc --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/static_functor.h @@ -0,0 +1,49 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_DETAILS_STATIC_FUNCTOR_H +#define CDSLIB_DETAILS_STATIC_FUNCTOR_H + +//@cond +namespace cds { namespace details { + + template + struct static_functor + { + static void call( void* p ) + { + Functor()( reinterpret_cast( p )); + } + }; + +}} // namespace cds::details +//@endcond + +#endif // #ifndef CDSLIB_DETAILS_STATIC_FUNCTOR_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/throw_exception.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/throw_exception.h new file mode 100644 index 0000000..047e00f --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/throw_exception.h @@ -0,0 +1,88 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_DETAILS_THROW_EXCEPTION_H +#define CDSLIB_DETAILS_THROW_EXCEPTION_H + +#include +#if !defined( CDS_EXCEPTION_ENABLED ) && !defined( CDS_USER_DEFINED_THROW_HANDLER ) +# include +#endif + +namespace cds { + +#if !defined( CDS_USER_DEFINED_THROW_EXCEPTION ) +#if defined( CDS_EXCEPTION_ENABLED ) + /// Function to throw an exception + /** + If you compile your code with exception enabled, \p %throw_exception() function + throws the \p exception. + + If exception is disabled, \p %throw_exception() prints an exception message to + standard output and call \p abort(). + + You can supply your own \p %cds::throw_exception() function; + for that you should specify \p -DCDS_USER_DEFINED_THROW_EXCEPTION + in compiler command line. + + @note \p %throw_exception() never returns. If the user-defined \p %throw_exception() returns, + the behavior is undefined. 
+ */ + template + CDS_NORETURN static inline void throw_exception( + E&& exception, ///< Exception to throw + char const* file, ///< Source filename + int line ///< File line + ) + { + CDS_UNUSED( file ); + CDS_UNUSED( line ); + + throw exception; + } +#else + template + CDS_NORETURN static inline void throw_exception( E&& exception, char const* file, int line ) + { + printf( "file %s, line %d: %s\n", file, line, exception.what()); + abort(); + } +#endif +//#else + // User-provided cds::throw_exception() +#endif + +#define CDS_THROW_EXCEPTION( exception ) ::cds::throw_exception( exception, __FILE__, __LINE__ ) + +} // namespace cds + + +#endif // #ifndef CDSLIB_DETAILS_THROW_EXCEPTION_H + diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/trivial_assign.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/trivial_assign.h new file mode 100644 index 0000000..4b5b237 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/trivial_assign.h @@ -0,0 +1,50 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_DETAILS_TRIVIAL_ASSIGN_H +#define CDSLIB_DETAILS_TRIVIAL_ASSIGN_H + +#include + +//@cond +namespace cds { namespace details { + + template + struct trivial_assign + { + Dest& operator()( Dest& dest, const Source& src ) + { + return dest = src; + } + }; +}} // namespace cds::details +//@endcond + +#endif // #ifndef CDSLIB_DETAILS_TRIVIAL_ASSIGN_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/type_padding.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/type_padding.h new file mode 100644 index 0000000..c2d4a87 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/details/type_padding.h @@ -0,0 +1,87 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following 
disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_DETAILS_TYPE_PADDING_H +#define CDSLIB_DETAILS_TYPE_PADDING_H + +namespace cds { namespace details { + + //@cond none + template + struct type_padding_helper: public T + { + enum { + value = Modulo + }; + char _[Align - Modulo] ; // padding + + using T::T; + }; + template + struct type_padding_helper: public T + { + enum { + value = 0 + }; + + using T::T; + }; + //@endcond + + /// Automatic alignment type \p T to \p AlignFactor + /** + The class adds appropriate bytes to type T that the following condition is true: + \code + sizeof( type_padding::type ) % AlignFactor == 0 + \endcode + It is guaranteed that count of padding bytes no more than AlignFactor - 1. + + \b Applicability: type \p T must not have constructors another that default ctor. + For example, \p T may be any POD type. + */ + template + class type_padding { + public: + /// Align factor + enum { + align_factor = AlignFactor <= 0 ? 
1 : AlignFactor + }; + + /// Result type + typedef type_padding_helper type; + + /// Padding constant + enum { + value = type::value + }; + }; + +}} // namespace cds::details +#endif // #ifndef CDSLIB_DETAILS_TYPE_PADDING_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/gc/default_gc.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/gc/default_gc.h new file mode 100644 index 0000000..ea1ff90 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/gc/default_gc.h @@ -0,0 +1,44 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_GC_DEFAULT_GC_H +#define CDSLIB_GC_DEFAULT_GC_H + +#include + +namespace cds { namespace gc { + + /// Default garbage collector + typedef HP default_gc; + +}} // namespace cds::gc + + +#endif // #ifndef CDSLIB_GC_DEFAULT_GC_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/gc/details/hp_common.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/gc/details/hp_common.h new file mode 100644 index 0000000..3beea93 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/gc/details/hp_common.h @@ -0,0 +1,184 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_GC_DETAILS_HP_COMMON_H +#define CDSLIB_GC_DETAILS_HP_COMMON_H + +#include +#include + +#ifdef CDS_ENABLE_HPSTAT +# define CDS_HPSTAT( expr ) expr +#else +# define CDS_HPSTAT( expr ) +#endif + +//@cond +namespace cds { namespace gc { namespace hp { namespace common { + + /// Hazard pointer type + typedef void* hazard_ptr; + + /// Retired pointer + using cds::gc::details::retired_ptr; + using cds::gc::make_retired_ptr; + + /// Hazard pointer guard + class guard + { + public: + guard() noexcept + : hp_( nullptr ) + , next_( nullptr ) + {} + + template + T* operator=( T* ptr ) noexcept + { + set( ptr ); + return ptr; + } + + std::nullptr_t operator=( std::nullptr_t ) noexcept + { + clear(); + return nullptr; + } + + hazard_ptr get() const noexcept + { + return hp_.load( atomics::memory_order_acquire ); + } + + hazard_ptr get( atomics::memory_order order ) const noexcept + { + return hp_.load( order ); + } + + template + T* get_as() const noexcept + { + return reinterpret_cast( get()); + } + + template + void set( T* ptr ) noexcept + { + hp_.store( reinterpret_cast( ptr ), atomics::memory_order_release ); + } + + void clear( atomics::memory_order order ) noexcept + { + hp_.store( 
nullptr, order ); + } + + void clear() noexcept + { + clear( atomics::memory_order_release ); + } + + private: + atomics::atomic hp_; + + public: + guard* next_; // free guard list + }; + + /// Array of guards + template + class guard_array + { + public: + static size_t const c_nCapacity = Capacity; + + public: + guard_array() + : arr_{ nullptr } + {} + + static constexpr size_t capacity() + { + return c_nCapacity; + } + + guard* operator[]( size_t idx ) const noexcept + { + assert( idx < capacity()); + return arr_[idx]; + } + + template + void set( size_t idx, T* ptr ) noexcept + { + assert( idx < capacity()); + assert( arr_[idx] != nullptr ); + + arr_[idx]->set( ptr ); + } + + void clear( size_t idx ) noexcept + { + assert( idx < capacity()); + assert( arr_[idx] != nullptr ); + + arr_[idx]->clear(); + } + + guard* release( size_t idx ) noexcept + { + assert( idx < capacity()); + + guard* g = arr_[idx]; + arr_[idx] = nullptr; + return g; + } + + void reset( size_t idx, guard* g ) noexcept + { + assert( idx < capacity()); + assert( arr_[idx] == nullptr ); + + arr_[idx] = g; + } + + private: + guard* arr_[c_nCapacity]; + }; + + + /// Retired pointer disposer + typedef void ( *disposer_func )( void* ); + +}}}} // namespace cds::gc::hp::common +//@endcond + +#endif // #ifndef CDSLIB_GC_DETAILS_HP_COMMON_H + + diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/gc/details/retired_ptr.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/gc/details/retired_ptr.h new file mode 100644 index 0000000..a581afd --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/gc/details/retired_ptr.h @@ -0,0 +1,148 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + 
modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_GC_DETAILS_RETIRED_PTR_H +#define CDSLIB_GC_DETAILS_RETIRED_PTR_H + +#include +#include + +//@cond +namespace cds { namespace gc { + /// Common implementation details for any GC + namespace details { + + /// Pointer to function to free (destruct and deallocate) retired pointer of specific type + typedef void (* free_retired_ptr_func )( void * ); + + /// Retired pointer + /** + Pointer to an object that is ready to delete. 
+ */ + struct retired_ptr { + /// Pointer type + typedef void * pointer; + + union { + pointer m_p; ///< retired pointer + uintptr_t m_n; + }; + free_retired_ptr_func m_funcFree; ///< pointer to the destructor function + + /// Comparison of two retired pointers + static bool less( const retired_ptr& p1, const retired_ptr& p2 ) noexcept + { + return p1.m_p < p2.m_p; + } + + /// Default ctor initializes pointer to \p nullptr + retired_ptr() noexcept + : m_p( nullptr ) + , m_funcFree( nullptr ) + {} + + /// Ctor + retired_ptr( pointer p, free_retired_ptr_func func ) noexcept + : m_p( p ) + , m_funcFree( func ) + {} + + /// Typecasting ctor + template + retired_ptr( T* p, free_retired_ptr_func func) noexcept + : m_p( reinterpret_cast(p)) + , m_funcFree( func ) + {} +/* + template + retired_ptr( T * p, void (* pFreeFunc)(T *)) noexcept + : m_p( reinterpret_cast(p)) + , m_funcFree( reinterpret_cast< free_retired_ptr_func >( pFreeFunc )) + {} +*/ + + /// Assignment operator + retired_ptr& operator =( retired_ptr const& s) noexcept + { + m_p = s.m_p; + m_funcFree = s.m_funcFree; + return *this; + } + + /// Invokes destructor function for the pointer + void free() + { + assert( m_funcFree ); + assert( m_p ); + m_funcFree( m_p ); + + CDS_STRICT_DO( clear()); + } + + /// Checks if the retired pointer is not empty + explicit operator bool() const noexcept + { + return m_p != nullptr; + } + + /// Clears retired pointer without \p free() call + void clear() + { + m_p = nullptr; + m_funcFree = nullptr; + } + }; + + static inline bool operator <( const retired_ptr& p1, const retired_ptr& p2 ) noexcept + { + return retired_ptr::less( p1, p2 ); + } + + static inline bool operator ==( const retired_ptr& p1, const retired_ptr& p2 ) noexcept + { + return p1.m_p == p2.m_p; + } + + static inline bool operator !=( const retired_ptr& p1, const retired_ptr& p2 ) noexcept + { + return !(p1 == p2); + } + } // namespace details + + template + static inline cds::gc::details::retired_ptr 
make_retired_ptr( T * p ) + { + return cds::gc::details::retired_ptr( p, cds::details::static_functor::call ); + } + +}} // namespace cds::gc +//@endcond + +#endif // #ifndef CDSLIB_GC_DETAILS_RETIRED_PTR_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/gc/dhp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/gc/dhp.h new file mode 100644 index 0000000..9ec9fca --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/gc/dhp.h @@ -0,0 +1,1539 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_GC_DHP_SMR_H +#define CDSLIB_GC_DHP_SMR_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace cds { namespace gc { + + /// Dynamic (adaptive) Hazard Pointer implementation details + namespace dhp { + using namespace cds::gc::hp::common; + + /// Exception "Dynamic Hazard Pointer SMR is not initialized" + class not_initialized: public std::runtime_error + { + public: + //@cond + not_initialized() + : std::runtime_error( "Global DHP SMR object is not initialized" ) + {} + //@endcond + }; + + //@cond + struct guard_block: public cds::intrusive::FreeListImpl::node + { + atomics::atomic next_block_; // next block in the thread list + + guard_block() + : next_block_( nullptr ) + {} + + guard* first() + { + return reinterpret_cast( this + 1 ); + } + }; + //@endcond + + //@cond + /// \p guard_block allocator (global object) + class hp_allocator + { + friend class smr; + public: + static hp_allocator& instance(); + + CDS_EXPORT_API guard_block* alloc(); + void free( guard_block* block ) + { + free_list_.put( block ); + } + + private: + hp_allocator() +#ifdef CDS_ENABLE_HPSTAT + : block_allocated_(0) +#endif + {} + CDS_EXPORT_API ~hp_allocator(); + + private: + cds::intrusive::FreeListImpl free_list_; ///< list of free \p guard_block +#ifdef CDS_ENABLE_HPSTAT + public: + atomics::atomic block_allocated_; ///< count of allocated blocks +#endif + }; + //@endcond + + //@cond + /// 
Per-thread hazard pointer storage + class thread_hp_storage + { + friend class smr; + public: + thread_hp_storage( guard* arr, size_t nSize ) noexcept + : free_head_( arr ) + , array_( arr ) + , initial_capacity_( nSize ) +# ifdef CDS_ENABLE_HPSTAT + , alloc_guard_count_( 0 ) + , free_guard_count_( 0 ) + , extend_call_count_( 0 ) +# endif + { + // Initialize guards + new( arr ) guard[nSize]; + extended_list_.store( nullptr, atomics::memory_order_release ); + } + + thread_hp_storage() = delete; + thread_hp_storage( thread_hp_storage const& ) = delete; + thread_hp_storage( thread_hp_storage&& ) = delete; + + ~thread_hp_storage() + { + clear(); + } + + guard* alloc() + { + if ( cds_unlikely( free_head_ == nullptr )) { + extend(); + assert( free_head_ != nullptr ); + } + + guard* g = free_head_; + free_head_ = g->next_; + CDS_HPSTAT( ++alloc_guard_count_ ); + return g; + } + + void free( guard* g ) noexcept + { + if ( g ) { + g->clear(); + g->next_ = free_head_; + free_head_ = g; + CDS_HPSTAT( ++free_guard_count_ ); + } + } + + template< size_t Capacity> + size_t alloc( guard_array& arr ) + { + for ( size_t i = 0; i < Capacity; ++i ) { + if ( cds_unlikely( free_head_ == nullptr )) + extend(); + arr.reset( i, free_head_ ); + free_head_ = free_head_->next_; + } + CDS_HPSTAT( alloc_guard_count_ += Capacity ); + return Capacity; + } + + template + void free( guard_array& arr ) noexcept + { + guard* gList = free_head_; + for ( size_t i = 0; i < Capacity; ++i ) { + guard* g = arr[i]; + if ( g ) { + g->clear(); + g->next_ = gList; + gList = g; + CDS_HPSTAT( ++free_guard_count_ ); + } + } + free_head_ = gList; + } + + void clear() + { + // clear array_ + for ( guard* cur = array_, *last = array_ + initial_capacity_; cur < last; ++cur ) + cur->clear(); + + // free all extended blocks + hp_allocator& a = hp_allocator::instance(); + for ( guard_block* p = extended_list_.load( atomics::memory_order_relaxed ); p; ) { + guard_block* next = p->next_block_.load( 
atomics::memory_order_relaxed ); + a.free( p ); + p = next; + } + + extended_list_.store( nullptr, atomics::memory_order_release ); + } + + void init() + { + assert( extended_list_.load(atomics::memory_order_relaxed) == nullptr ); + + guard* p = array_; + for ( guard* pEnd = p + initial_capacity_ - 1; p != pEnd; ++p ) + p->next_ = p + 1; + p->next_ = nullptr; + free_head_ = array_; + } + + private: + void extend() + { + assert( free_head_ == nullptr ); + + guard_block* block = hp_allocator::instance().alloc(); + block->next_block_.store( extended_list_.load( atomics::memory_order_relaxed ), atomics::memory_order_release ); + extended_list_.store( block, atomics::memory_order_release ); + free_head_ = block->first(); + CDS_HPSTAT( ++extend_call_count_ ); + } + + private: + guard* free_head_; ///< Head of free guard list + atomics::atomic extended_list_; ///< Head of extended guard blocks allocated for the thread + guard* const array_; ///< initial HP array + size_t const initial_capacity_; ///< Capacity of \p array_ +# ifdef CDS_ENABLE_HPSTAT + public: + size_t alloc_guard_count_; + size_t free_guard_count_; + size_t extend_call_count_; +# endif + }; + //@endcond + + //@cond + struct retired_block: public cds::intrusive::FreeListImpl::node + { + retired_block* next_; ///< Next block in thread-private retired array + + static size_t const c_capacity = 256; + + retired_block() + : next_( nullptr ) + {} + + retired_ptr* first() const + { + return reinterpret_cast( const_cast( this ) + 1 ); + } + + retired_ptr* last() const + { + return first() + c_capacity; + } + }; + //@endcond + + //@cond + class retired_allocator + { + friend class smr; + public: + static retired_allocator& instance(); + + CDS_EXPORT_API retired_block* alloc(); + void free( retired_block* block ) + { + block->next_ = nullptr; + free_list_.put( block ); + } + + private: + retired_allocator() +#ifdef CDS_ENABLE_HPSTAT + : block_allocated_(0) +#endif + {} + CDS_EXPORT_API ~retired_allocator(); + + 
private: + cds::intrusive::FreeListImpl free_list_; ///< list of free \p guard_block +#ifdef CDS_ENABLE_HPSTAT + public: + atomics::atomic block_allocated_; ///< Count of allocated blocks +#endif + }; + //@endcond + + //@cond + /// Per-thread retired array + class retired_array + { + friend class smr; + public: + retired_array() noexcept + : current_block_( nullptr ) + , current_cell_( nullptr ) + , list_head_( nullptr ) + , list_tail_( nullptr ) + , block_count_(0) +# ifdef CDS_ENABLE_HPSTAT + , retire_call_count_( 0 ) + , extend_call_count_( 0 ) +# endif + {} + + retired_array( retired_array const& ) = delete; + retired_array( retired_array&& ) = delete; + + ~retired_array() + { + assert( empty()); + fini(); + } + + bool push( retired_ptr const& p ) noexcept + { + assert( current_block_ != nullptr ); + assert( current_block_->first() <= current_cell_ ); + assert( current_cell_ < current_block_->last()); + //assert( &p != current_cell_ ); + + *current_cell_ = p; + CDS_HPSTAT( ++retire_call_count_ ); + + if ( ++current_cell_ == current_block_->last()) { + // goto next block if exists + if ( current_block_->next_ ) { + current_block_ = current_block_->next_; + current_cell_ = current_block_->first(); + return true; + } + + // no free block + // smr::scan() extend retired_array if needed + return false; + } + + return true; + } + + bool repush( retired_ptr* p ) noexcept + { + bool ret = push( *p ); + CDS_HPSTAT( --retire_call_count_ ); + assert( ret ); + return ret; + } + + private: // called by smr + void init() + { + if ( list_head_ == nullptr ) { + retired_block* block = retired_allocator::instance().alloc(); + assert( block->next_ == nullptr ); + + current_block_ = + list_head_ = + list_tail_ = block; + current_cell_ = block->first(); + + block_count_ = 1; + } + } + + void fini() + { + retired_allocator& alloc = retired_allocator::instance(); + for ( retired_block* p = list_head_; p; ) { + retired_block* next = p->next_; + alloc.free( p ); + p = next; + } + + 
current_block_ = + list_head_ = + list_tail_ = nullptr; + current_cell_ = nullptr; + + block_count_ = 0; + } + + void extend() + { + assert( list_head_ != nullptr ); + assert( current_block_ == list_tail_ ); + assert( current_cell_ == current_block_->last()); + + retired_block* block = retired_allocator::instance().alloc(); + assert( block->next_ == nullptr ); + + current_block_ = list_tail_ = list_tail_->next_ = block; + current_cell_ = block->first(); + ++block_count_; + CDS_HPSTAT( ++extend_call_count_ ); + } + + bool empty() const + { + return current_block_ == nullptr + || ( current_block_ == list_head_ && current_cell_ == current_block_->first()); + } + + private: + retired_block* current_block_; + retired_ptr* current_cell_; // in current_block_ + + retired_block* list_head_; + retired_block* list_tail_; + size_t block_count_; +# ifdef CDS_ENABLE_HPSTAT + public: + size_t retire_call_count_; + size_t extend_call_count_; +# endif + }; + //@endcond + + /// Internal statistics + struct stat { + size_t guard_allocated; ///< Count of allocated HP guards + size_t guard_freed; ///< Count of freed HP guards + size_t retired_count; ///< Count of retired pointers + size_t free_count; ///< Count of free pointers + size_t scan_count; ///< Count of \p scan() call + size_t help_scan_count; ///< Count of \p help_scan() call + + size_t thread_rec_count; ///< Count of thread records + + size_t hp_block_count; ///< Count of extended HP blocks allocated + size_t retired_block_count; ///< Count of retired blocks allocated + size_t hp_extend_count; ///< Count of hp array \p extend() call + size_t retired_extend_count; ///< Count of retired array \p extend() call + + /// Default ctor + stat() + { + clear(); + } + + /// Clears all counters + void clear() + { + guard_allocated = + guard_freed = + retired_count = + free_count = + scan_count = + help_scan_count = + thread_rec_count = + hp_block_count = + retired_block_count = + hp_extend_count = + retired_extend_count = 0; + } + }; + 
+ //@cond + /// Per-thread data + struct thread_data { + thread_hp_storage hazards_; ///< Hazard pointers private to the thread + retired_array retired_; ///< Retired data private to the thread + + char pad1_[cds::c_nCacheLineSize]; + atomics::atomic sync_; ///< dummy var to introduce synchronizes-with relationship between threads + char pad2_[cds::c_nCacheLineSize]; + +# ifdef CDS_ENABLE_HPSTAT + size_t free_call_count_; + size_t scan_call_count_; + size_t help_scan_call_count_; +# endif + + // CppCheck warn: pad1_ and pad2_ is uninitialized in ctor + // cppcheck-suppress uninitMemberVar + thread_data( guard* guards, size_t guard_count ) + : hazards_( guards, guard_count ) + , sync_( 0 ) +# ifdef CDS_ENABLE_HPSTAT + , free_call_count_(0) + , scan_call_count_(0) + , help_scan_call_count_(0) +# endif + {} + + thread_data() = delete; + thread_data( thread_data const& ) = delete; + thread_data( thread_data&& ) = delete; + + void sync() + { + sync_.fetch_add( 1, atomics::memory_order_acq_rel ); + } + }; + //@endcond + + //@cond + // Dynamic (adaptive) Hazard Pointer SMR (Safe Memory Reclamation) + class smr + { + struct thread_record; + + public: + /// Returns the instance of Hazard Pointer \ref smr + static smr& instance() + { +# ifdef CDS_DISABLE_SMR_EXCEPTION + assert( instance_ != nullptr ); +# else + if ( !instance_ ) + CDS_THROW_EXCEPTION( not_initialized()); +# endif + return *instance_; + } + + /// Creates Dynamic Hazard Pointer SMR singleton + /** + Dynamic Hazard Pointer SMR is a singleton. If DHP instance is not initialized then the function creates the instance. + Otherwise it does nothing. + + The Michael's HP reclamation schema depends of three parameters: + - \p nHazardPtrCount - HP pointer count per thread. Usually it is small number (2-4) depending from + the data structure algorithms. 
By default, if \p nHazardPtrCount = 0, + the function uses maximum of HP count for CDS library + - \p nMaxThreadCount - max count of thread with using HP GC in your application. Default is 100. + - \p nMaxRetiredPtrCount - capacity of array of retired pointers for each thread. Must be greater than + nHazardPtrCount * nMaxThreadCount + Default is 2 * nHazardPtrCount * nMaxThreadCount + */ + static CDS_EXPORT_API void construct( + size_t nInitialHazardPtrCount = 16 ///< Initial number of hazard pointer per thread + ); + + // for back-copatibility + static void Construct( + size_t nInitialHazardPtrCount = 16 ///< Initial number of hazard pointer per thread + ) + { + construct( nInitialHazardPtrCount ); + } + + /// Destroys global instance of \ref smr + /** + The parameter \p bDetachAll should be used carefully: if its value is \p true, + then the object destroyed automatically detaches all attached threads. This feature + can be useful when you have no control over the thread termination, for example, + when \p libcds is injected into existing external thread. + */ + static CDS_EXPORT_API void destruct( + bool bDetachAll = false ///< Detach all threads + ); + + // for back-compatibility + static void Destruct( + bool bDetachAll = false ///< Detach all threads + ) + { + destruct( bDetachAll ); + } + + /// Checks if global SMR object is constructed and may be used + static bool isUsed() noexcept + { + return instance_ != nullptr; + } + + /// Set memory management functions + /** + @note This function may be called BEFORE creating an instance + of Dynamic Hazard Pointer SMR + + SMR object allocates some memory for thread-specific data and for + creating SMR object. + By default, a standard \p new and \p delete operators are used for this. 
+ */ + static CDS_EXPORT_API void set_memory_allocator( + void* ( *alloc_func )( size_t size ), + void( *free_func )( void * p ) + ); + + /// Returns thread-local data for the current thread + static CDS_EXPORT_API thread_data* tls(); + + static CDS_EXPORT_API void attach_thread(); + static CDS_EXPORT_API void detach_thread(); + + /// Get internal statistics + CDS_EXPORT_API void statistics( stat& st ); + + public: // for internal use only + /// The main garbage collecting function + CDS_EXPORT_API void scan( thread_data* pRec ); + + /// Helper scan routine + /** + The function guarantees that every node that is eligible for reuse is eventually freed, barring + thread failures. To do so, after executing \p scan(), a thread executes a \p %help_scan(), + where it checks every HP record. If an HP record is inactive, the thread moves all "lost" reclaimed pointers + to thread's list of reclaimed pointers. + + The function is called internally by \p scan(). + */ + CDS_EXPORT_API void help_scan( thread_data* pThis ); + + hp_allocator& get_hp_allocator() + { + return hp_allocator_; + } + + retired_allocator& get_retired_allocator() + { + return retired_allocator_; + } + + private: + CDS_EXPORT_API explicit smr( + size_t nInitialHazardPtrCount + ); + + CDS_EXPORT_API ~smr(); + + CDS_EXPORT_API void detach_all_thread(); + + private: + CDS_EXPORT_API thread_record* create_thread_data(); + static CDS_EXPORT_API void destroy_thread_data( thread_record* pRec ); + + /// Allocates Hazard Pointer SMR thread private data + CDS_EXPORT_API thread_record* alloc_thread_data(); + + /// Free HP SMR thread-private data + CDS_EXPORT_API void free_thread_data( thread_record* pRec ); + + private: + static CDS_EXPORT_API smr* instance_; + + atomics::atomic< thread_record*> thread_list_; ///< Head of thread list + size_t const initial_hazard_count_; ///< initial number of hazard pointers per thread + hp_allocator hp_allocator_; + retired_allocator retired_allocator_; + + // temporaries + 
std::atomic last_plist_size_; ///< HP array size in last scan() call + }; + //@endcond + + //@cond + // for backward compatibility + typedef smr GarbageCollector; + + + // inlines + inline hp_allocator& hp_allocator::instance() + { + return smr::instance().get_hp_allocator(); + } + + inline retired_allocator& retired_allocator::instance() + { + return smr::instance().get_retired_allocator(); + } + //@endcond + + } // namespace dhp + + + /// Dynamic (adaptie) Hazard Pointer SMR + /** @ingroup cds_garbage_collector + + Implementation of Dynamic (adaptive) Hazard Pointer SMR + + Sources: + - [2002] Maged M.Michael "Safe memory reclamation for dynamic lock-freeobjects using atomic reads and writes" + - [2003] Maged M.Michael "Hazard Pointers: Safe memory reclamation for lock-free objects" + - [2004] Andrei Alexandrescy, Maged Michael "Lock-free Data Structures with Hazard Pointers" + + %DHP is an adaptive variant of classic \p cds::gc::HP, see @ref cds_garbage_collectors_comparison "Compare HP implementation" + + @note Internally, %DHP depends on free-list implementation. There are + DCAS-based free-list \p cds::intrusive::TaggedFreeList and more complicated CAS-based free-list + \p cds::intrusive::FreeList. For x86 architecture and GCC/clang, libcds selects appropriate free-list + based on \p -mcx16 compiler flag. You may manually disable DCAS support specifying + \p -DCDS_DISABLE_128BIT_ATOMIC for 64bit build or \p -DCDS_DISABLE_64BIT_ATOMIC for 32bit build + in compiler command line. All your projects and libcds MUST be compiled with the same flags - + either with DCAS support or without it. + For MS VC++ compiler DCAS is not supported. + + See \ref cds_how_to_use "How to use" section for details how to apply SMR. 
+ */ + class DHP + { + public: + /// Native guarded pointer type + typedef void* guarded_pointer; + + /// Atomic reference + template using atomic_ref = atomics::atomic; + + /// Atomic type + /** + @headerfile cds/gc/dhp.h + */ + template using atomic_type = atomics::atomic; + + /// Atomic marked pointer + template using atomic_marked_ptr = atomics::atomic; + + /// Internal statistics + typedef dhp::stat stat; + + /// Dynamic Hazard Pointer guard + /** + A guard is a hazard pointer. + Additionally, the \p %Guard class manages allocation and deallocation of the hazard pointer + + \p %Guard object is movable but not copyable. + + The guard object can be in two states: + - unlinked - the guard is not linked with any internal hazard pointer. + In this state no operation except \p link() and move assignment is supported. + - linked (default) - the guard allocates an internal hazard pointer and fully operable. + + Due to performance reason the implementation does not check state of the guard in runtime. + + @warning Move assignment can transfer the guard in unlinked state, use with care. + */ + class Guard + { + public: + /// Default ctor allocates a guard (hazard pointer) from thread-private storage + Guard() noexcept + : guard_( dhp::smr::tls()->hazards_.alloc()) + {} + + /// Initilalizes an unlinked guard i.e. the guard contains no hazard pointer. Used for move semantics support + explicit Guard( std::nullptr_t ) noexcept + : guard_( nullptr ) + {} + + /// Move ctor - \p src guard becomes unlinked (transfer internal guard ownership) + Guard( Guard&& src ) noexcept + : guard_( src.guard_ ) + { + src.guard_ = nullptr; + } + + /// Move assignment: the internal guards are swapped between \p src and \p this + /** + @warning \p src will become in unlinked state if \p this was unlinked on entry. 
+ */ + Guard& operator=( Guard&& src ) noexcept + { + std::swap( guard_, src.guard_ ); + return *this; + } + + /// Copy ctor is prohibited - the guard is not copyable + Guard( Guard const& ) = delete; + + /// Copy assignment is prohibited + Guard& operator=( Guard const& ) = delete; + + /// Frees the internal hazard pointer if the guard is in linked state + ~Guard() + { + unlink(); + } + + /// Checks if the guard object linked with any internal hazard pointer + bool is_linked() const + { + return guard_ != nullptr; + } + + /// Links the guard with internal hazard pointer if the guard is in unlinked state + void link() + { + if ( !guard_ ) + guard_ = dhp::smr::tls()->hazards_.alloc(); + } + + /// Unlinks the guard from internal hazard pointer; the guard becomes in unlinked state + void unlink() + { + if ( guard_ ) { + dhp::smr::tls()->hazards_.free( guard_ ); + guard_ = nullptr; + } + } + + /// Protects a pointer of type atomic + /** + Return the value of \p toGuard + + The function tries to load \p toGuard and to store it + to the HP slot repeatedly until the guard's value equals \p toGuard + */ + template + T protect( atomics::atomic const& toGuard ) + { + assert( guard_ != nullptr ); + + T pCur = toGuard.load(atomics::memory_order_acquire); + T pRet; + do { + pRet = assign( pCur ); + pCur = toGuard.load(atomics::memory_order_acquire); + } while ( pRet != pCur ); + return pCur; + } + + /// Protects a converted pointer of type atomic + /** + Return the value of \p toGuard + + The function tries to load \p toGuard and to store result of \p f functor + to the HP slot repeatedly until the guard's value equals \p toGuard. + + The function is useful for intrusive containers when \p toGuard is a node pointer + that should be converted to a pointer to the value type before guarding. 
+ The parameter \p f of type Func is a functor that makes this conversion: + \code + struct functor { + value_type * operator()( T * p ); + }; + \endcode + Really, the result of f( toGuard.load()) is assigned to the hazard pointer. + */ + template + T protect( atomics::atomic const& toGuard, Func f ) + { + assert( guard_ != nullptr ); + + T pCur = toGuard.load(atomics::memory_order_acquire); + T pRet; + do { + pRet = pCur; + assign( f( pCur )); + pCur = toGuard.load(atomics::memory_order_acquire); + } while ( pRet != pCur ); + return pCur; + } + + /// Store \p p to the guard + /** + The function is just an assignment, no loop is performed. + Can be used for a pointer that cannot be changed concurrently + or for already guarded pointer. + */ + template + T* assign( T* p ) + { + assert( guard_ != nullptr ); + + guard_->set( p ); + dhp::smr::tls()->sync(); + return p; + } + + //@cond + std::nullptr_t assign( std::nullptr_t ) + { + assert( guard_ != nullptr ); + + clear(); + return nullptr; + } + //@endcond + + /// Store marked pointer \p p to the guard + /** + The function is just an assignment of p.ptr(), no loop is performed. + Can be used for a marked pointer that cannot be changed concurrently + or for already guarded pointer. 
+ */ + template + T* assign( cds::details::marked_ptr p ) + { + return assign( p.ptr()); + } + + /// Copy from \p src guard to \p this guard + void copy( Guard const& src ) + { + assign( src.get_native()); + } + + /// Clears value of the guard + void clear() + { + assert( guard_ != nullptr ); + + guard_->clear(); + } + + /// Gets the value currently protected (relaxed read) + template + T * get() const + { + assert( guard_ != nullptr ); + return guard_->get_as(); + } + + /// Gets native guarded pointer stored + void* get_native() const + { + assert( guard_ != nullptr ); + return guard_->get(); + } + + //@cond + dhp::guard* release() + { + dhp::guard* g = guard_; + guard_ = nullptr; + return g; + } + + dhp::guard*& guard_ref() + { + return guard_; + } + //@endcond + + private: + //@cond + dhp::guard* guard_; + //@endcond + }; + + /// Array of Dynamic Hazard Pointer guards + /** + The class is intended for allocating an array of hazard pointer guards. + Template parameter \p Count defines the size of the array. + + A \p %GuardArray object is not copy- and move-constructible + and not copy- and move-assignable. 
+ */ + template + class GuardArray + { + public: + /// Rebind array for other size \p OtherCount + template + struct rebind { + typedef GuardArray other ; ///< rebinding result + }; + + /// Array capacity + static constexpr const size_t c_nCapacity = Count; + + public: + /// Default ctor allocates \p Count hazard pointers + GuardArray() + { + dhp::smr::tls()->hazards_.alloc( guards_ ); + } + + /// Move ctor is prohibited + GuardArray( GuardArray&& ) = delete; + + /// Move assignment is prohibited + GuardArray& operator=( GuardArray&& ) = delete; + + /// Copy ctor is prohibited + GuardArray( GuardArray const& ) = delete; + + /// Copy assignment is prohibited + GuardArray& operator=( GuardArray const& ) = delete; + + /// Frees allocated hazard pointers + ~GuardArray() + { + dhp::smr::tls()->hazards_.free( guards_ ); + } + + /// Protects a pointer of type \p atomic + /** + Return the value of \p toGuard + + The function tries to load \p toGuard and to store it + to the slot \p nIndex repeatedly until the guard's value equals \p toGuard + */ + template + T protect( size_t nIndex, atomics::atomic const& toGuard ) + { + assert( nIndex < capacity()); + + T pRet; + do { + pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire)); + } while ( pRet != toGuard.load(atomics::memory_order_relaxed)); + + return pRet; + } + + /// Protects a pointer of type \p atomic + /** + Return the value of \p toGuard + + The function tries to load \p toGuard and to store it + to the slot \p nIndex repeatedly until the guard's value equals \p toGuard + + The function is useful for intrusive containers when \p toGuard is a node pointer + that should be converted to a pointer to the value type before guarding. + The parameter \p f of type Func is a functor to make that conversion: + \code + struct functor { + value_type * operator()( T * p ); + }; + \endcode + Actually, the result of f( toGuard.load()) is assigned to the hazard pointer. 
+ */ + template + T protect( size_t nIndex, atomics::atomic const& toGuard, Func f ) + { + assert( nIndex < capacity()); + + T pRet; + do { + assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire))); + } while ( pRet != toGuard.load(atomics::memory_order_relaxed)); + + return pRet; + } + + /// Store \p p to the slot \p nIndex + /** + The function is just an assignment, no loop is performed. + */ + template + T * assign( size_t nIndex, T * p ) + { + assert( nIndex < capacity()); + + guards_.set( nIndex, p ); + dhp::smr::tls()->sync(); + return p; + } + + /// Store marked pointer \p p to the guard + /** + The function is just an assignment of p.ptr(), no loop is performed. + Can be used for a marked pointer that cannot be changed concurrently + or for already guarded pointer. + */ + template + T * assign( size_t nIndex, cds::details::marked_ptr p ) + { + return assign( nIndex, p.ptr()); + } + + /// Copy guarded value from \p src guard to slot at index \p nIndex + void copy( size_t nIndex, Guard const& src ) + { + assign( nIndex, src.get_native()); + } + + /// Copy guarded value from slot \p nSrcIndex to slot at index \p nDestIndex + void copy( size_t nDestIndex, size_t nSrcIndex ) + { + assign( nDestIndex, get_native( nSrcIndex )); + } + + /// Clear value of the slot \p nIndex + void clear( size_t nIndex ) + { + guards_.clear( nIndex ); + } + + /// Get current value of slot \p nIndex + template + T * get( size_t nIndex ) const + { + assert( nIndex < capacity()); + return guards_[nIndex]->template get_as(); + } + + /// Get native guarded pointer stored + guarded_pointer get_native( size_t nIndex ) const + { + assert( nIndex < capacity()); + return guards_[nIndex]->get(); + } + + //@cond + dhp::guard* release( size_t nIndex ) noexcept + { + return guards_.release( nIndex ); + } + //@endcond + + /// Capacity of the guard array + static constexpr size_t capacity() + { + return Count; + } + + private: + //@cond + dhp::guard_array guards_; + //@endcond + }; 
+ + /// Guarded pointer + /** + A guarded pointer is a pair of a pointer and GC's guard. + Usually, it is used for returning a pointer to the item from an lock-free container. + The guard prevents the pointer to be early disposed (freed) by GC. + After destructing \p %guarded_ptr object the pointer can be disposed (freed) automatically at any time. + + Template arguments: + - \p GuardedType - a type which the guard stores + - \p ValueType - a value type + - \p Cast - a functor for converting GuardedType* to ValueType*. Default is \p void (no casting). + + For intrusive containers, \p GuardedType is the same as \p ValueType and no casting is needed. + In such case the \p %guarded_ptr is: + @code + typedef cds::gc::DHP::guarded_ptr< foo > intrusive_guarded_ptr; + @endcode + + For standard (non-intrusive) containers \p GuardedType is not the same as \p ValueType and casting is needed. + For example: + @code + struct foo { + int const key; + std::string value; + }; + + struct value_accessor { + std::string* operator()( foo* pFoo ) const + { + return &(pFoo->value); + } + }; + + // Guarded ptr + typedef cds::gc::DHP::guarded_ptr< Foo, std::string, value_accessor > nonintrusive_guarded_ptr; + @endcode + + You don't need use this class directly. + All set/map container classes from \p libcds declare the typedef for \p %guarded_ptr with appropriate casting functor. 
+ */ + template + class guarded_ptr + { + //@cond + struct trivial_cast { + ValueType * operator()( GuardedType * p ) const + { + return p; + } + }; + + template friend class guarded_ptr; + //@endcond + + public: + typedef GuardedType guarded_type; ///< Guarded type + typedef ValueType value_type; ///< Value type + + /// Functor for casting \p guarded_type to \p value_type + typedef typename std::conditional< std::is_same::value, trivial_cast, Cast >::type value_cast; + + public: + /// Creates empty guarded pointer + guarded_ptr() noexcept + : guard_( nullptr ) + {} + + //@cond + explicit guarded_ptr( dhp::guard* g ) noexcept + : guard_( g ) + {} + + /// Initializes guarded pointer with \p p + explicit guarded_ptr( guarded_type * p ) noexcept + : guard_( nullptr ) + { + reset( p ); + } + explicit guarded_ptr( std::nullptr_t ) noexcept + : guard_( nullptr ) + {} + //@endcond + + /// Move ctor + guarded_ptr( guarded_ptr&& gp ) noexcept + : guard_( gp.guard_ ) + { + gp.guard_ = nullptr; + } + + /// Move ctor + template + guarded_ptr( guarded_ptr&& gp ) noexcept + : guard_( gp.guard_ ) + { + gp.guard_ = nullptr; + } + + /// Ctor from \p Guard + explicit guarded_ptr( Guard&& g ) noexcept + : guard_( g.release()) + {} + + /// The guarded pointer is not copy-constructible + guarded_ptr( guarded_ptr const& gp ) = delete; + + /// Clears the guarded pointer + /** + \ref release is called if guarded pointer is not \ref empty + */ + ~guarded_ptr() noexcept + { + release(); + } + + /// Move-assignment operator + guarded_ptr& operator=( guarded_ptr&& gp ) noexcept + { + std::swap( guard_, gp.guard_ ); + return *this; + } + + /// Move-assignment from \p Guard + guarded_ptr& operator=( Guard&& g ) noexcept + { + std::swap( guard_, g.guard_ref()); + return *this; + } + + /// The guarded pointer is not copy-assignable + guarded_ptr& operator=(guarded_ptr const& gp) = delete; + + /// Returns a pointer to guarded value + value_type * operator ->() const noexcept + { + assert( 
!empty()); + return value_cast()( guard_->get_as()); + } + + /// Returns a reference to guarded value + value_type& operator *() noexcept + { + assert( !empty()); + return *value_cast()( guard_->get_as()); + } + + /// Returns const reference to guarded value + value_type const& operator *() const noexcept + { + assert( !empty()); + return *value_cast()(reinterpret_cast(guard_->get())); + } + + /// Checks if the guarded pointer is \p nullptr + bool empty() const noexcept + { + return guard_ == nullptr || guard_->get( atomics::memory_order_relaxed ) == nullptr; + } + + /// \p bool operator returns !empty() + explicit operator bool() const noexcept + { + return !empty(); + } + + /// Clears guarded pointer + /** + If the guarded pointer has been released, the pointer can be disposed (freed) at any time. + Dereferncing the guarded pointer after \p release() is dangerous. + */ + void release() noexcept + { + free_guard(); + } + + //@cond + // For internal use only!!! + void reset(guarded_type * p) noexcept + { + alloc_guard(); + assert( guard_ ); + guard_->set( p ); + } + + //@endcond + + private: + //@cond + void alloc_guard() + { + if ( !guard_ ) + guard_ = dhp::smr::tls()->hazards_.alloc(); + } + + void free_guard() + { + if ( guard_ ) { + dhp::smr::tls()->hazards_.free( guard_ ); + guard_ = nullptr; + } + } + //@endcond + + private: + //@cond + dhp::guard* guard_; + //@endcond + }; + + public: + /// Initializes %DHP memory manager singleton + /** + Constructor creates and initializes %DHP global object. + %DHP object should be created before using CDS data structure based on \p %cds::gc::DHP. Usually, + it is created in the beginning of \p main() function. + After creating of global object you may use CDS data structures based on \p %cds::gc::DHP. + + \p nInitialThreadGuardCount - initial count of guard allocated for each thread. + When a thread is initialized the GC allocates local guard pool for the thread from a common guard pool. 
+ By perforce the local thread's guard pool is grown automatically from common pool. + When the thread terminated its guard pool is backed to common GC's pool. + */ + explicit DHP( + size_t nInitialHazardPtrCount = 16 ///< Initial number of hazard pointer per thread + ) + { + dhp::smr::construct( nInitialHazardPtrCount ); + } + + /// Destroys %DHP memory manager + /** + The destructor destroys %DHP global object. After calling of this function you may \b NOT + use CDS data structures based on \p %cds::gc::DHP. + Usually, %DHP object is destroyed at the end of your \p main(). + */ + ~DHP() + { + dhp::GarbageCollector::destruct( true ); + } + + /// Checks if count of hazard pointer is no less than \p nCountNeeded + /** + The function always returns \p true since the guard count is unlimited for + \p %gc::DHP garbage collector. + */ + static constexpr bool check_available_guards( +#ifdef CDS_DOXYGEN_INVOKED + size_t nCountNeeded, +#else + size_t +#endif + ) + { + return true; + } + + /// Set memory management functions + /** + @note This function may be called BEFORE creating an instance + of Dynamic Hazard Pointer SMR + + SMR object allocates some memory for thread-specific data and for creating SMR object. + By default, a standard \p new and \p delete operators are used for this. + */ + static void set_memory_allocator( + void* ( *alloc_func )( size_t size ), ///< \p malloc() function + void( *free_func )( void * p ) ///< \p free() function + ) + { + dhp::smr::set_memory_allocator( alloc_func, free_func ); + } + + /// Retire pointer \p p with function \p pFunc + /** + The function places pointer \p p to array of pointers ready for removing. + (so called retired pointer array). The pointer can be safely removed when no hazard pointer points to it. + \p func is a disposer: when \p p can be safely removed, \p func is called. 
+ */ + template + static void retire( T * p, void (* func)(void *)) + { + dhp::thread_data* rec = dhp::smr::tls(); + if ( !rec->retired_.push( dhp::retired_ptr( p, func ))) + dhp::smr::instance().scan( rec ); + } + + /// Retire pointer \p p with functor of type \p Disposer + /** + The function places pointer \p p to array of pointers ready for removing. + (so called retired pointer array). The pointer can be safely removed when no hazard pointer points to it. + + Deleting the pointer is an invocation of some object of type \p Disposer; the interface of \p Disposer is: + \code + template + struct disposer { + void operator()( T * p ) ; // disposing operator + }; + \endcode + Since the functor call can happen at any time after \p retire() call, additional restrictions are imposed to \p Disposer type: + - it should be stateless functor + - it should be default-constructible + - the result of functor call with argument \p p should not depend on where the functor will be called. + + \par Examples: + Operator \p delete functor: + \code + template + struct disposer { + void operator ()( T * p ) { + delete p; + } + }; + + // How to call HP::retire method + int * p = new int; + + // ... use p in lock-free manner + + cds::gc::DHP::retire( p ) ; // place p to retired pointer array of DHP SMR + \endcode + + Functor based on \p std::allocator : + \code + template > + struct disposer { + template + void operator()( T * p ) { + typedef typename Alloc::templare rebind::other alloc_t; + alloc_t a; + a.destroy( p ); + a.deallocate( p, 1 ); + } + }; + \endcode + */ + template + static void retire( T* p ) + { + if ( !dhp::smr::tls()->retired_.push( dhp::retired_ptr( p, cds::details::static_functor::call ))) + scan(); + } + + /// Checks if Dynamic Hazard Pointer GC is constructed and may be used + static bool isUsed() + { + return dhp::smr::isUsed(); + } + + /// Forced GC cycle call for current thread + /** + Usually, this function should not be called directly. 
+ */ + static void scan() + { + dhp::smr::instance().scan( dhp::smr::tls()); + } + + /// Synonym for \p scan() + static void force_dispose() + { + scan(); + } + + /// Returns internal statistics + /** + The function clears \p st before gathering statistics. + + @note Internal statistics is available only if you compile + \p libcds and your program with \p -DCDS_ENABLE_HPSTAT. + */ + static void statistics( stat& st ) + { + dhp::smr::instance().statistics( st ); + } + + /// Returns post-mortem statistics + /** + Post-mortem statistics is gathered in the \p %DHP object destructor + and can be accessible after destructing the global \p %DHP object. + + @note Internal statistics is available only if you compile + \p libcds and your program with \p -DCDS_ENABLE_HPSTAT. + + Usage: + \code + int main() + { + cds::Initialize(); + { + // Initialize DHP SMR + cds::gc::DHP dhp; + + // deal with DHP-based data structured + // ... + } + + // DHP object destroyed + // Get total post-mortem statistics + cds::gc::DHP::stat const& st = cds::gc::DHP::postmortem_statistics(); + + printf( "DHP statistics:\n" + " thread count = %llu\n" + " guard allocated = %llu\n" + " guard freed = %llu\n" + " retired data count = %llu\n" + " free data count = %llu\n" + " scan() call count = %llu\n" + " help_scan() call count = %llu\n", + st.thread_rec_count, + st.guard_allocated, st.guard_freed, + st.retired_count, st.free_count, + st.scan_count, st.help_scan_count + ); + + cds::Terminate(); + } + \endcode + */ + CDS_EXPORT_API static stat const& postmortem_statistics(); + }; + +}} // namespace cds::gc + +#endif // #ifndef CDSLIB_GC_DHP_SMR_H + + diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/gc/hp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/gc/hp.h new file mode 100644 index 0000000..8deffb4 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/gc/hp.h @@ -0,0 +1,1535 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + 
(C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_GC_HP_SMR_H +#define CDSLIB_GC_HP_SMR_H + +#include +#include +#include +#include +#include +#include +#include +#include + +/** + @page cds_garbage_collectors_comparison Hazard Pointer SMR implementations + @ingroup cds_garbage_collector + + + + + + + + + + + + + + + + + + + + + + +
Feature%cds::gc::HP%cds::gc::DHP
Max number of guarded (hazard) pointers per threadlimited (specified at construction time)unlimited (dynamically allocated when needed)
Max number of retired pointers1bounded, specified at construction timebounded, adaptive, depends on current thread count and number of hazard pointer for each thread
Thread countbounded, upper bound is specified at construction timeunbounded
+ + 1Unbounded count of retired pointers means a possibility of memory exhaustion. +*/ + +namespace cds { + /// @defgroup cds_garbage_collector Garbage collectors + + + /// Different safe memory reclamation schemas (garbage collectors) + /** @ingroup cds_garbage_collector + + This namespace specifies different safe memory reclamation (SMR) algorithms. + See \ref cds_garbage_collector "Garbage collectors" + */ + namespace gc { + } // namespace gc + +} // namespace cds + + +namespace cds { namespace gc { + /// Hazard pointer implementation details + namespace hp { + using namespace cds::gc::hp::common; + + /// Exception "Not enough Hazard Pointer" + class not_enought_hazard_ptr: public std::length_error + { + //@cond + public: + not_enought_hazard_ptr() + : std::length_error( "Not enough Hazard Pointer" ) + {} + //@endcond + }; + + /// Exception "Hazard Pointer SMR is not initialized" + class not_initialized: public std::runtime_error + { + //@cond + public: + not_initialized() + : std::runtime_error( "Global Hazard Pointer SMR object is not initialized" ) + {} + //@endcond + }; + + //@cond + /// Per-thread hazard pointer storage + class thread_hp_storage { + public: + thread_hp_storage( guard* arr, size_t nSize ) noexcept + : free_head_( arr ) + , array_( arr ) + , capacity_( nSize ) +# ifdef CDS_ENABLE_HPSTAT + , alloc_guard_count_(0) + , free_guard_count_(0) +# endif + { + // Initialize guards + new( arr ) guard[nSize]; + + for ( guard* pEnd = arr + nSize - 1; arr < pEnd; ++arr ) + arr->next_ = arr + 1; + arr->next_ = nullptr; + } + + thread_hp_storage() = delete; + thread_hp_storage( thread_hp_storage const& ) = delete; + thread_hp_storage( thread_hp_storage&& ) = delete; + + size_t capacity() const noexcept + { + return capacity_; + } + + bool full() const noexcept + { + return free_head_ == nullptr; + } + + guard* alloc() + { +# ifdef CDS_DISABLE_SMR_EXCEPTION + assert( !full()); +# else + if ( full()) + CDS_THROW_EXCEPTION( not_enought_hazard_ptr()); +# endif 
+ guard* g = free_head_; + free_head_ = g->next_; + CDS_HPSTAT( ++alloc_guard_count_ ); + return g; + } + + void free( guard* g ) noexcept + { + assert( g >= array_ && g < array_ + capacity()); + + if ( g ) { + g->clear(); + g->next_ = free_head_; + free_head_ = g; + CDS_HPSTAT( ++free_guard_count_ ); + } + } + + template< size_t Capacity> + size_t alloc( guard_array& arr ) + { + size_t i; + guard* g = free_head_; + for ( i = 0; i < Capacity && g; ++i ) { + arr.reset( i, g ); + g = g->next_; + } + +# ifdef CDS_DISABLE_SMR_EXCEPTION + assert( i == Capacity ); +# else + if ( i != Capacity ) + CDS_THROW_EXCEPTION( not_enought_hazard_ptr()); +# endif + free_head_ = g; + CDS_HPSTAT( alloc_guard_count_ += Capacity ); + return i; + } + + template + void free( guard_array& arr ) noexcept + { + guard* gList = free_head_; + for ( size_t i = 0; i < Capacity; ++i ) { + guard* g = arr[i]; + if ( g ) { + g->clear(); + g->next_ = gList; + gList = g; + CDS_HPSTAT( ++free_guard_count_ ); + } + } + free_head_ = gList; + } + + // cppcheck-suppress functionConst + void clear() + { + for ( guard* cur = array_, *last = array_ + capacity(); cur < last; ++cur ) + cur->clear(); + } + + guard& operator[]( size_t idx ) + { + assert( idx < capacity()); + + return array_[idx]; + } + + static size_t calc_array_size( size_t capacity ) + { + return sizeof( guard ) * capacity; + } + + private: + guard* free_head_; ///< Head of free guard list + guard* const array_; ///< HP array + size_t const capacity_; ///< HP array capacity +# ifdef CDS_ENABLE_HPSTAT + public: + size_t alloc_guard_count_; + size_t free_guard_count_; +# endif + }; + //@endcond + + //@cond + /// Per-thread retired array + class retired_array + { + public: + retired_array( retired_ptr* arr, size_t capacity ) noexcept + : current_( arr ) + , last_( arr + capacity ) + , retired_( arr ) +# ifdef CDS_ENABLE_HPSTAT + , retire_call_count_(0) +# endif + {} + + retired_array() = delete; + retired_array( retired_array const& ) = delete; + 
retired_array( retired_array&& ) = delete; + + size_t capacity() const noexcept + { + return last_ - retired_; + } + + size_t size() const noexcept + { + return current_.load(atomics::memory_order_relaxed) - retired_; + } + + bool push( retired_ptr&& p ) noexcept + { + retired_ptr* cur = current_.load( atomics::memory_order_relaxed ); + *cur = p; + CDS_HPSTAT( ++retire_call_count_ ); + current_.store( cur + 1, atomics::memory_order_relaxed ); + return cur + 1 < last_; + } + + retired_ptr* first() const noexcept + { + return retired_; + } + + retired_ptr* last() const noexcept + { + return current_.load( atomics::memory_order_relaxed ); + } + + void reset( size_t nSize ) noexcept + { + current_.store( first() + nSize, atomics::memory_order_relaxed ); + } + + void interthread_clear() + { + current_.exchange( first(), atomics::memory_order_acq_rel ); + } + + bool full() const noexcept + { + return current_.load( atomics::memory_order_relaxed ) == last_; + } + + static size_t calc_array_size( size_t capacity ) + { + return sizeof( retired_ptr ) * capacity; + } + + private: + atomics::atomic current_; + retired_ptr* const last_; + retired_ptr* const retired_; +# ifdef CDS_ENABLE_HPSTAT + public: + size_t retire_call_count_; +# endif + }; + //@endcond + + /// Internal statistics + struct stat { + size_t guard_allocated; ///< Count of allocated HP guards + size_t guard_freed; ///< Count of freed HP guards + size_t retired_count; ///< Count of retired pointers + size_t free_count; ///< Count of free pointers + size_t scan_count; ///< Count of \p scan() call + size_t help_scan_count; ///< Count of \p help_scan() call + + size_t thread_rec_count; ///< Count of thread records + + /// Default ctor + stat() + { + clear(); + } + + /// Clears all counters + void clear() + { + guard_allocated = + guard_freed = + retired_count = + free_count = + scan_count = + help_scan_count = + thread_rec_count = 0; + } + }; + + //@cond + /// Per-thread data + struct thread_data { + 
thread_hp_storage hazards_; ///< Hazard pointers private to the thread + retired_array retired_; ///< Retired data private to the thread + + char pad1_[cds::c_nCacheLineSize]; + atomics::atomic sync_; ///< dummy var to introduce synchronizes-with relationship between threads + char pad2_[cds::c_nCacheLineSize]; + +# ifdef CDS_ENABLE_HPSTAT + // Internal statistics: + size_t free_count_; + size_t scan_count_; + size_t help_scan_count_; +# endif + + // CppCheck warn: pad1_ and pad2_ is uninitialized in ctor + // cppcheck-suppress uninitMemberVar + thread_data( guard* guards, size_t guard_count, retired_ptr* retired_arr, size_t retired_capacity ) + : hazards_( guards, guard_count ) + , retired_( retired_arr, retired_capacity ) + , sync_(0) +# ifdef CDS_ENABLE_HPSTAT + , free_count_(0) + , scan_count_(0) + , help_scan_count_(0) +# endif + {} + + thread_data() = delete; + thread_data( thread_data const& ) = delete; + thread_data( thread_data&& ) = delete; + + void sync() + { + sync_.fetch_add( 1, atomics::memory_order_acq_rel ); + } + }; + //@endcond + + /// \p smr::scan() strategy + enum scan_type { + classic, ///< classic scan as described in Michael's works (see smr::classic_scan()) + inplace ///< inplace scan without allocation (see smr::inplace_scan()) + }; + + //@cond + /// Hazard Pointer SMR (Safe Memory Reclamation) + class smr + { + struct thread_record; + + public: + /// Returns the instance of Hazard Pointer \ref smr + static smr& instance() + { +# ifdef CDS_DISABLE_SMR_EXCEPTION + assert( instance_ != nullptr ); +# else + if ( !instance_ ) + CDS_THROW_EXCEPTION( not_initialized()); +# endif + return *instance_; + } + + /// Creates Hazard Pointer SMR singleton + /** + Hazard Pointer SMR is a singleton. If HP instance is not initialized then the function creates the instance. + Otherwise it does nothing. + + The Michael's HP reclamation schema depends of three parameters: + - \p nHazardPtrCount - HP pointer count per thread. 
Usually it is small number (2-4) depending from + the data structure algorithms. By default, if \p nHazardPtrCount = 0, + the function uses maximum of HP count for CDS library + - \p nMaxThreadCount - max count of thread with using HP GC in your application. Default is 100. + - \p nMaxRetiredPtrCount - capacity of array of retired pointers for each thread. Must be greater than + nHazardPtrCount * nMaxThreadCount + Default is 2 * nHazardPtrCount * nMaxThreadCount + */ + static CDS_EXPORT_API void construct( + size_t nHazardPtrCount = 0, ///< Hazard pointer count per thread + size_t nMaxThreadCount = 0, ///< Max count of simultaneous working thread in your application + size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects for the thread + scan_type nScanType = inplace ///< Scan type (see \ref scan_type enum) + ); + + // for back-copatibility + static void Construct( + size_t nHazardPtrCount = 0, ///< Hazard pointer count per thread + size_t nMaxThreadCount = 0, ///< Max count of simultaneous working thread in your application + size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects for the thread + scan_type nScanType = inplace ///< Scan type (see \ref scan_type enum) + ) + { + construct( nHazardPtrCount, nMaxThreadCount, nMaxRetiredPtrCount, nScanType ); + } + + /// Destroys global instance of \ref smr + /** + The parameter \p bDetachAll should be used carefully: if its value is \p true, + then the object destroyed automatically detaches all attached threads. This feature + can be useful when you have no control over the thread termination, for example, + when \p libcds is injected into existing external thread. 
+ */ + static CDS_EXPORT_API void destruct( + bool bDetachAll = false ///< Detach all threads + ); + + // for back-compatibility + static void Destruct( + bool bDetachAll = false ///< Detach all threads + ) + { + destruct( bDetachAll ); + } + + /// Checks if global SMR object is constructed and may be used + static bool isUsed() noexcept + { + return instance_ != nullptr; + } + + /// Set memory management functions + /** + @note This function may be called BEFORE creating an instance + of Hazard Pointer SMR + + SMR object allocates some memory for thread-specific data and for + creating SMR object. + By default, a standard \p new and \p delete operators are used for this. + */ + static CDS_EXPORT_API void set_memory_allocator( + void* ( *alloc_func )( size_t size ), + void (*free_func )( void * p ) + ); + + /// Returns max Hazard Pointer count per thread + size_t get_hazard_ptr_count() const noexcept + { + return hazard_ptr_count_; + } + + /// Returns max thread count + size_t get_max_thread_count() const noexcept + { + return max_thread_count_; + } + + /// Returns max size of retired objects array + size_t get_max_retired_ptr_count() const noexcept + { + return max_retired_ptr_count_; + } + + /// Get current scan strategy + scan_type get_scan_type() const + { + return scan_type_; + } + + /// Checks that required hazard pointer count \p nRequiredCount is less or equal then max hazard pointer count + /** + If nRequiredCount > get_hazard_ptr_count() then the exception \p not_enought_hazard_ptr is thrown + */ + static void check_hazard_ptr_count( size_t nRequiredCount ) + { + if ( instance().get_hazard_ptr_count() < nRequiredCount ) { +# ifdef CDS_DISABLE_SMR_EXCEPTION + assert( false ); // not enough hazard ptr +# else + CDS_THROW_EXCEPTION( not_enought_hazard_ptr()); +# endif + } + } + + /// Returns thread-local data for the current thread + static CDS_EXPORT_API thread_data* tls(); + + static CDS_EXPORT_API void attach_thread(); + static CDS_EXPORT_API void 
detach_thread(); + + /// Get internal statistics + CDS_EXPORT_API void statistics( stat& st ); + + public: // for internal use only + /// The main garbage collecting function + /** + This function is called internally when upper bound of thread's list of reclaimed pointers + is reached. + + There are the following scan algorithm: + - \ref hzp_gc_classic_scan "classic_scan" allocates memory for internal use + - \ref hzp_gc_inplace_scan "inplace_scan" does not allocate any memory + + Use \p set_scan_type() member function to setup appropriate scan algorithm. + */ + void scan( thread_data* pRec ) + { + ( this->*scan_func_ )( pRec ); + } + + /// Helper scan routine + /** + The function guarantees that every node that is eligible for reuse is eventually freed, barring + thread failures. To do so, after executing \p scan(), a thread executes a \p %help_scan(), + where it checks every HP record. If an HP record is inactive, the thread moves all "lost" reclaimed pointers + to thread's list of reclaimed pointers. + + The function is called internally by \p scan(). + */ + CDS_EXPORT_API void help_scan( thread_data* pThis ); + + private: + CDS_EXPORT_API smr( + size_t nHazardPtrCount, ///< Hazard pointer count per thread + size_t nMaxThreadCount, ///< Max count of simultaneous working thread in your application + size_t nMaxRetiredPtrCount, ///< Capacity of the array of retired objects for the thread + scan_type nScanType ///< Scan type (see \ref scan_type enum) + ); + + CDS_EXPORT_API ~smr(); + + CDS_EXPORT_API void detach_all_thread(); + + /// Classic scan algorithm + /** @anchor hzp_gc_classic_scan + Classical scan algorithm as described in Michael's paper. + + A scan includes four stages. The first stage involves scanning the array HP for non-null values. + Whenever a non-null value is encountered, it is inserted in a local list of currently protected pointer. + Only stage 1 accesses shared variables. The following stages operate only on private variables. 
+ + The second stage of a scan involves sorting local list of protected pointers to allow + binary search in the third stage. + + The third stage of a scan involves checking each reclaimed node + against the pointers in local list of protected pointers. If the binary search yields + no match, the node is freed. Otherwise, it cannot be deleted now and must kept in thread's list + of reclaimed pointers. + + The forth stage prepares new thread's private list of reclaimed pointers + that could not be freed during the current scan, where they remain until the next scan. + + This algorithm allocates memory for internal HP array. + + This function is called internally by ThreadGC object when upper bound of thread's list of reclaimed pointers + is reached. + */ + CDS_EXPORT_API void classic_scan( thread_data* pRec ); + + /// In-place scan algorithm + /** @anchor hzp_gc_inplace_scan + Unlike the \p classic_scan() algorithm, \p %inplace_scan() does not allocate any memory. + All operations are performed in-place. 
+ */ + CDS_EXPORT_API void inplace_scan( thread_data* pRec ); + + private: + CDS_EXPORT_API thread_record* create_thread_data(); + static CDS_EXPORT_API void destroy_thread_data( thread_record* pRec ); + + /// Allocates Hazard Pointer SMR thread private data + CDS_EXPORT_API thread_record* alloc_thread_data(); + + /// Free HP SMR thread-private data + CDS_EXPORT_API void free_thread_data( thread_record* pRec ); + + private: + static CDS_EXPORT_API smr* instance_; + + atomics::atomic< thread_record*> thread_list_; ///< Head of thread list + + size_t const hazard_ptr_count_; ///< max count of thread's hazard pointer + size_t const max_thread_count_; ///< max count of thread + size_t const max_retired_ptr_count_; ///< max count of retired ptr per thread + scan_type const scan_type_; ///< scan type (see \ref scan_type enum) + void ( smr::*scan_func_ )( thread_data* pRec ); + }; + //@endcond + + //@cond + // for backward compatibility + typedef smr GarbageCollector; + //@endcond + + } // namespace hp + + /// Hazard Pointer SMR (Safe Memory Reclamation) + /** @ingroup cds_garbage_collector + + Implementation of classic Hazard Pointer SMR + + Sources: + - [2002] Maged M.Michael "Safe memory reclamation for dynamic lock-freeobjects using atomic reads and writes" + - [2003] Maged M.Michael "Hazard Pointers: Safe memory reclamation for lock-free objects" + - [2004] Andrei Alexandrescy, Maged Michael "Lock-free Data Structures with Hazard Pointers" + + Hazard Pointer SMR is a singleton. The main user-level part of Hazard Pointer schema is + \p %cds::gc::HP class and its nested classes. Before use any HP-related class you must initialize \p %HP + by contructing \p %cds::gc::HP object in beginning of your \p main(). + See \ref cds_how_to_use "How to use" section for details how to apply SMR schema. 
+ */ + class HP + { + public: + /// Native guarded pointer type + typedef hp::hazard_ptr guarded_pointer; + + /// Atomic reference + template using atomic_ref = atomics::atomic; + + /// Atomic marked pointer + template using atomic_marked_ptr = atomics::atomic; + + /// Atomic type + template using atomic_type = atomics::atomic; + + /// Exception "Not enough Hazard Pointer" + typedef hp::not_enought_hazard_ptr not_enought_hazard_ptr_exception; + + /// Internal statistics + typedef hp::stat stat; + + /// Hazard Pointer guard + /** + A guard is a hazard pointer. + Additionally, the \p %Guard class manages allocation and deallocation of the hazard pointer. + + \p %Guard object is movable but not copyable. + + The guard object can be in two states: + - unlinked - the guard is not linked with any internal hazard pointer. + In this state no operation except \p link() and move assignment is supported. + - linked (default) - the guard allocates an internal hazard pointer and completely operable. + + Due to performance reason the implementation does not check state of the guard in runtime. + + @warning Move assignment transfers the guard in unlinked state, use with care. + */ + class Guard + { + public: + /// Default ctor allocates a guard (hazard pointer) from thread-private storage + /** + @warning Can throw \p too_many_hazard_ptr_exception if internal hazard pointer objects are exhausted. + */ + Guard() + : guard_( hp::smr::tls()->hazards_.alloc()) + {} + + /// Initilalizes an unlinked guard i.e. the guard contains no hazard pointer. 
Used for move semantics support + explicit Guard( std::nullptr_t ) noexcept + : guard_( nullptr ) + {} + + /// Move ctor - \p src guard becomes unlinked (transfer internal guard ownership) + Guard( Guard&& src ) noexcept + : guard_( src.guard_ ) + { + src.guard_ = nullptr; + } + + /// Move assignment: the internal guards are swapped between \p src and \p this + /** + @warning \p src will become in unlinked state if \p this was unlinked on entry. + */ + Guard& operator=( Guard&& src ) noexcept + { + std::swap( guard_, src.guard_ ); + return *this; + } + + /// Copy ctor is prohibited - the guard is not copyable + Guard( Guard const& ) = delete; + + /// Copy assignment is prohibited + Guard& operator=( Guard const& ) = delete; + + /// Frees the internal hazard pointer if the guard is in linked state + ~Guard() + { + unlink(); + } + + /// Checks if the guard object linked with any internal hazard pointer + bool is_linked() const + { + return guard_ != nullptr; + } + + /// Links the guard with internal hazard pointer if the guard is in unlinked state + /** + @warning Can throw \p not_enought_hazard_ptr_exception if internal hazard pointer array is exhausted. 
+ */ + void link() + { + if ( !guard_ ) + guard_ = hp::smr::tls()->hazards_.alloc(); + } + + /// Unlinks the guard from internal hazard pointer; the guard becomes in unlinked state + void unlink() + { + if ( guard_ ) { + hp::smr::tls()->hazards_.free( guard_ ); + guard_ = nullptr; + } + } + + /// Protects a pointer of type \p atomic + /** + Return the value of \p toGuard + + The function tries to load \p toGuard and to store it + to the HP slot repeatedly until the guard's value equals \p toGuard + + @warning The guad object should be in linked state, otherwise the result is undefined + */ + template + T protect( atomics::atomic const& toGuard ) + { + assert( guard_ != nullptr ); + + T pCur = toGuard.load(atomics::memory_order_acquire); + T pRet; + do { + pRet = assign( pCur ); + pCur = toGuard.load(atomics::memory_order_acquire); + } while ( pRet != pCur ); + return pCur; + } + + /// Protects a converted pointer of type \p atomic + /** + Return the value of \p toGuard + + The function tries to load \p toGuard and to store result of \p f functor + to the HP slot repeatedly until the guard's value equals \p toGuard. + + The function is useful for intrusive containers when \p toGuard is a node pointer + that should be converted to a pointer to the value before protecting. + The parameter \p f of type Func is a functor that makes this conversion: + \code + struct functor { + value_type * operator()( T * p ); + }; + \endcode + Actually, the result of f( toGuard.load()) is assigned to the hazard pointer. 
+ + @warning The guad object should be in linked state, otherwise the result is undefined + */ + template + T protect( atomics::atomic const& toGuard, Func f ) + { + assert( guard_ != nullptr ); + + T pCur = toGuard.load(atomics::memory_order_acquire); + T pRet; + do { + pRet = pCur; + assign( f( pCur )); + pCur = toGuard.load(atomics::memory_order_acquire); + } while ( pRet != pCur ); + return pCur; + } + + /// Store \p p to the guard + /** + The function equals to a simple assignment the value \p p to guard, no loop is performed. + Can be used for a pointer that cannot be changed concurrently or if the pointer is already + guarded by another guard. + + @warning The guad object should be in linked state, otherwise the result is undefined + */ + template + T * assign( T* p ) + { + assert( guard_ != nullptr ); + + guard_->set( p ); + hp::smr::tls()->sync(); + return p; + } + + //@cond + std::nullptr_t assign( std::nullptr_t ) + { + assert( guard_ != nullptr ); + + guard_->clear(); + return nullptr; + } + //@endcond + + /// Copy a value guarded from \p src guard to \p this guard (valid only in linked state) + void copy( Guard const& src ) + { + assign( src.get_native()); + } + + /// Store marked pointer \p p to the guard + /** + The function equals to a simple assignment of p.ptr(), no loop is performed. + Can be used for a marked pointer that cannot be changed concurrently or if the marked pointer + is already guarded by another guard. 
+ + @warning The guard object should be in linked state, otherwise the result is undefined + */ + template + T * assign( cds::details::marked_ptr p ) + { + return assign( p.ptr()); + } + + /// Clear value of the guard (valid only in linked state) + void clear() + { + assign( nullptr ); + } + + /// Get the value currently protected (valid only in linked state) + template + T * get() const + { + assert( guard_ != nullptr ); + return guard_->get_as(); + } + + /// Get native hazard pointer stored (valid only in linked state) + guarded_pointer get_native() const + { + assert( guard_ != nullptr ); + return guard_->get(); + } + + //@cond + hp::guard* release() + { + hp::guard* g = guard_; + guard_ = nullptr; + return g; + } + + hp::guard*& guard_ref() + { + return guard_; + } + //@endcond + + private: + //@cond + hp::guard* guard_; + //@endcond + }; + + /// Array of Hazard Pointer guards + /** + The class is intended for allocating an array of hazard pointer guards. + Template parameter \p Count defines the size of the array. 
+ */ + template + class GuardArray + { + public: + /// Rebind array for other size \p Count2 + template + struct rebind { + typedef GuardArray other; ///< rebinding result + }; + + /// Array capacity + static constexpr const size_t c_nCapacity = Count; + + public: + /// Default ctor allocates \p Count hazard pointers + GuardArray() + { + hp::smr::tls()->hazards_.alloc( guards_ ); + } + + /// Move ctor is prohibited + GuardArray( GuardArray&& ) = delete; + + /// Move assignment is prohibited + GuardArray& operator=( GuardArray&& ) = delete; + + /// Copy ctor is prohibited + GuardArray( GuardArray const& ) = delete; + + /// Copy assignment is prohibited + GuardArray& operator=( GuardArray const& ) = delete; + + /// Frees allocated hazard pointers + ~GuardArray() + { + hp::smr::tls()->hazards_.free( guards_ ); + } + + /// Protects a pointer of type \p atomic + /** + Return the value of \p toGuard + + The function tries to load \p toGuard and to store it + to the slot \p nIndex repeatedly until the guard's value equals \p toGuard + */ + template + T protect( size_t nIndex, atomics::atomic const& toGuard ) + { + assert( nIndex < capacity()); + + T pRet; + do { + pRet = assign( nIndex, toGuard.load(atomics::memory_order_acquire)); + } while ( pRet != toGuard.load(atomics::memory_order_acquire)); + + return pRet; + } + + /// Protects a pointer of type \p atomic + /** + Return the value of \p toGuard + + The function tries to load \p toGuard and to store it + to the slot \p nIndex repeatedly until the guard's value equals \p toGuard + + The function is useful for intrusive containers when \p toGuard is a node pointer + that should be converted to a pointer to the value type before guarding. + The parameter \p f of type Func is a functor that makes this conversion: + \code + struct functor { + value_type * operator()( T * p ); + }; + \endcode + Really, the result of f( toGuard.load()) is assigned to the hazard pointer. 
+ */ + template + T protect( size_t nIndex, atomics::atomic const& toGuard, Func f ) + { + assert( nIndex < capacity()); + + T pRet; + do { + assign( nIndex, f( pRet = toGuard.load(atomics::memory_order_acquire))); + } while ( pRet != toGuard.load(atomics::memory_order_acquire)); + + return pRet; + } + + /// Store \p to the slot \p nIndex + /** + The function equals to a simple assignment, no loop is performed. + */ + template + T * assign( size_t nIndex, T * p ) + { + assert( nIndex < capacity()); + + guards_.set( nIndex, p ); + hp::smr::tls()->sync(); + return p; + } + + /// Store marked pointer \p p to the guard + /** + The function equals to a simple assignment of p.ptr(), no loop is performed. + Can be used for a marked pointer that cannot be changed concurrently. + */ + template + T * assign( size_t nIndex, cds::details::marked_ptr p ) + { + return assign( nIndex, p.ptr()); + } + + /// Copy guarded value from \p src guard to slot at index \p nIndex + void copy( size_t nIndex, Guard const& src ) + { + assign( nIndex, src.get_native()); + } + + /// Copy guarded value from slot \p nSrcIndex to the slot \p nDestIndex + void copy( size_t nDestIndex, size_t nSrcIndex ) + { + assign( nDestIndex, get_native( nSrcIndex )); + } + + /// Clear value of the slot \p nIndex + void clear( size_t nIndex ) + { + guards_.clear( nIndex ); + } + + /// Get current value of slot \p nIndex + template + T * get( size_t nIndex ) const + { + assert( nIndex < capacity()); + return guards_[nIndex]->template get_as(); + } + + /// Get native hazard pointer stored + guarded_pointer get_native( size_t nIndex ) const + { + assert( nIndex < capacity()); + return guards_[nIndex]->get(); + } + + //@cond + hp::guard* release( size_t nIndex ) noexcept + { + return guards_.release( nIndex ); + } + //@endcond + + /// Capacity of the guard array + static constexpr size_t capacity() + { + return c_nCapacity; + } + + private: + //@cond + hp::guard_array guards_; + //@endcond + }; + + /// Guarded 
pointer + /** + A guarded pointer is a pair of a pointer and GC's guard. + Usually, it is used for returning a pointer to an element of a lock-free container. + The guard prevents the pointer to be early disposed (freed) by SMR. + After destructing \p %guarded_ptr object the pointer can be disposed (freed) automatically at any time. + + Template arguments: + - \p GuardedType - a type which the guard stores + - \p ValueType - a value type + - \p Cast - a functor for converting GuardedType* to ValueType*. Default is \p void (no casting). + + For intrusive containers, \p GuardedType is the same as \p ValueType and no casting is needed. + In such case the \p %guarded_ptr is: + @code + typedef cds::gc::HP::guarded_ptr< foo > intrusive_guarded_ptr; + @endcode + + For standard (non-intrusive) containers \p GuardedType is not the same as \p ValueType and casting is needed. + For example: + @code + struct foo { + int const key; + std::string value; + }; + + struct value_accessor { + std::string* operator()( foo* pFoo ) const + { + return &(pFoo->value); + } + }; + + // Guarded ptr + typedef cds::gc::HP::guarded_ptr< Foo, std::string, value_accessor > nonintrusive_guarded_ptr; + @endcode + + You don't need use this class directly. + All set/map container classes from \p libcds declare the typedef for \p %guarded_ptr with appropriate casting functor. 
+ */ + template + class guarded_ptr + { + //@cond + struct trivial_cast { + ValueType * operator()( GuardedType * p ) const + { + return p; + } + }; + + template friend class guarded_ptr; + //@endcond + + public: + typedef GuardedType guarded_type; ///< Guarded type + typedef ValueType value_type; ///< Value type + + /// Functor for casting \p guarded_type to \p value_type + typedef typename std::conditional< std::is_same::value, trivial_cast, Cast >::type value_cast; + + public: + /// Creates empty guarded pointer + guarded_ptr() noexcept + : guard_(nullptr) + {} + + //@cond + explicit guarded_ptr( hp::guard* g ) noexcept + : guard_( g ) + {} + + /// Initializes guarded pointer with \p p + explicit guarded_ptr( guarded_type* p ) noexcept + : guard_( nullptr ) + { + reset(p); + } + explicit guarded_ptr( std::nullptr_t ) noexcept + : guard_( nullptr ) + {} + //@endcond + + /// Move ctor + guarded_ptr( guarded_ptr&& gp ) noexcept + : guard_( gp.guard_ ) + { + gp.guard_ = nullptr; + } + + /// Move ctor + template + guarded_ptr( guarded_ptr&& gp ) noexcept + : guard_( gp.guard_ ) + { + gp.guard_ = nullptr; + } + + /// Ctor from \p Guard + explicit guarded_ptr( Guard&& g ) noexcept + : guard_( g.release()) + {} + + /// The guarded pointer is not copy-constructible + guarded_ptr( guarded_ptr const& gp ) = delete; + + /// Clears the guarded pointer + /** + \ref release() is called if guarded pointer is not \ref empty() + */ + ~guarded_ptr() noexcept + { + release(); + } + + /// Move-assignment operator + guarded_ptr& operator=( guarded_ptr&& gp ) noexcept + { + std::swap( guard_, gp.guard_ ); + return *this; + } + + /// Move-assignment from \p Guard + guarded_ptr& operator=( Guard&& g ) noexcept + { + std::swap( guard_, g.guard_ref()); + return *this; + } + + /// The guarded pointer is not copy-assignable + guarded_ptr& operator=(guarded_ptr const& gp) = delete; + + /// Returns a pointer to guarded value + value_type * operator ->() const noexcept + { + assert( !empty()); 
+ return value_cast()( guard_->get_as()); + } + + /// Returns a reference to guarded value + value_type& operator *() noexcept + { + assert( !empty()); + return *value_cast()( guard_->get_as()); + } + + /// Returns const reference to guarded value + value_type const& operator *() const noexcept + { + assert( !empty()); + return *value_cast()( guard_->get_as()); + } + + /// Checks if the guarded pointer is \p nullptr + bool empty() const noexcept + { + return !guard_ || guard_->get( atomics::memory_order_relaxed ) == nullptr; + } + + /// \p bool operator returns !empty() + explicit operator bool() const noexcept + { + return !empty(); + } + + /// Clears guarded pointer + /** + If the guarded pointer has been released, the pointer can be disposed (freed) at any time. + Dereferncing the guarded pointer after \p release() is dangerous. + */ + void release() noexcept + { + free_guard(); + } + + //@cond + // For internal use only!!! + void reset(guarded_type * p) noexcept + { + alloc_guard(); + assert( guard_ ); + guard_->set(p); + } + //@endcond + + private: + //@cond + void alloc_guard() + { + if ( !guard_ ) + guard_ = hp::smr::tls()->hazards_.alloc(); + } + + void free_guard() + { + if ( guard_ ) { + hp::smr::tls()->hazards_.free( guard_ ); + guard_ = nullptr; + } + } + //@endcond + + private: + //@cond + hp::guard* guard_; + //@endcond + }; + + public: + /// \p scan() type + enum class scan_type { + classic = hp::classic, ///< classic scan as described in Michael's papers + inplace = hp::inplace ///< inplace scan without allocation + }; + + /// Initializes %HP singleton + /** + The constructor initializes Hazard Pointer SMR singleton with passed parameters. + If the instance does not yet exist then the function creates the instance. + Otherwise it does nothing. + + The Michael's %HP reclamation schema depends of three parameters: + - \p nHazardPtrCount - hazard pointer count per thread. 
Usually it is small number (up to 10) depending from + the data structure algorithms. If \p nHazardPtrCount = 0, the defaul value 8 is used + - \p nMaxThreadCount - max count of thread with using Hazard Pointer GC in your application. Default is 100. + - \p nMaxRetiredPtrCount - capacity of array of retired pointers for each thread. Must be greater than + nHazardPtrCount * nMaxThreadCount . Default is 2 * nHazardPtrCount * nMaxThreadCount . + */ + HP( + size_t nHazardPtrCount = 0, ///< Hazard pointer count per thread + size_t nMaxThreadCount = 0, ///< Max count of simultaneous working thread in your application + size_t nMaxRetiredPtrCount = 0, ///< Capacity of the array of retired objects for the thread + scan_type nScanType = scan_type::inplace ///< Scan type (see \p scan_type enum) + ) + { + hp::smr::construct( + nHazardPtrCount, + nMaxThreadCount, + nMaxRetiredPtrCount, + static_cast(nScanType) + ); + } + + /// Terminates GC singleton + /** + The destructor destroys %HP global object. After calling of this function you may \b NOT + use CDS data structures based on \p %cds::gc::HP. + Usually, %HP object is destroyed at the end of your \p main(). + */ + ~HP() + { + hp::smr::destruct( true ); + } + + /// Checks that required hazard pointer count \p nCountNeeded is less or equal then max hazard pointer count + /** + If nRequiredCount > get_hazard_ptr_count() then the exception \p not_enought_hazard_ptr is thrown + */ + static void check_available_guards( size_t nCountNeeded ) + { + hp::smr::check_hazard_ptr_count( nCountNeeded ); + } + + /// Set memory management functions + /** + @note This function may be called BEFORE creating an instance + of Hazard Pointer SMR + + SMR object allocates some memory for thread-specific data and for + creating SMR object. + By default, a standard \p new and \p delete operators are used for this. 
+ */ + static void set_memory_allocator( + void* ( *alloc_func )( size_t size ), ///< \p malloc() function + void( *free_func )( void * p ) ///< \p free() function + ) + { + hp::smr::set_memory_allocator( alloc_func, free_func ); + } + + /// Returns max Hazard Pointer count + static size_t max_hazard_count() + { + return hp::smr::instance().get_hazard_ptr_count(); + } + + /// Returns max count of thread + static size_t max_thread_count() + { + return hp::smr::instance().get_max_thread_count(); + } + + /// Returns capacity of retired pointer array + static size_t retired_array_capacity() + { + return hp::smr::instance().get_max_retired_ptr_count(); + } + + /// Retire pointer \p p with function \p func + /** + The function places pointer \p p to array of pointers ready for removing. + (so called retired pointer array). The pointer can be safely removed when no hazard pointer points to it. + \p func is a disposer: when \p p can be safely removed, \p func is called. + */ + template + static void retire( T * p, void( *func )( void * )) + { + hp::thread_data* rec = hp::smr::tls(); + if ( !rec->retired_.push( hp::retired_ptr( p, func ))) + hp::smr::instance().scan( rec ); + } + + /// Retire pointer \p p with functor of type \p Disposer + /** + The function places pointer \p p to array of pointers ready for removing. + (so called retired pointer array). The pointer can be safely removed when no hazard pointer points to it. + + Deleting the pointer is an invocation of some object of type \p Disposer; the interface of \p Disposer is: + \code + template + struct disposer { + void operator()( T * p ) ; // disposing operator + }; + \endcode + Since the functor call can happen at any time after \p retire() call, additional restrictions are imposed to \p Disposer type: + - it should be stateless functor + - it should be default-constructible + - the result of functor call with argument \p p should not depend on where the functor will be called. 
+ + \par Examples: + Operator \p delete functor: + \code + template + struct disposer { + void operator ()( T * p ) { + delete p; + } + }; + + // How to call HP::retire method + int * p = new int; + + // ... use p in lock-free manner + + cds::gc::HP::retire( p ) ; // place p to retired pointer array of HP GC + \endcode + + Functor based on \p std::allocator : + \code + template > + struct disposer { + template + void operator()( T * p ) { + typedef typename Alloc::templare rebind::other alloc_t; + alloc_t a; + a.destroy( p ); + a.deallocate( p, 1 ); + } + }; + \endcode + */ + template + static void retire( T * p ) + { + if ( !hp::smr::tls()->retired_.push( hp::retired_ptr( p, cds::details::static_functor::call ))) + scan(); + } + + /// Get current scan strategy + static scan_type getScanType() + { + return static_cast( hp::smr::instance().get_scan_type()); + } + + /// Checks if Hazard Pointer GC is constructed and may be used + static bool isUsed() + { + return hp::smr::isUsed(); + } + + /// Forces SMR call for current thread + /** + Usually, this function should not be called directly. + */ + static void scan() + { + hp::smr::instance().scan( hp::smr::tls()); + } + + /// Synonym for \p scan() + static void force_dispose() + { + scan(); + } + + /// Returns internal statistics + /** + The function clears \p st before gathering statistics. + + @note Internal statistics is available only if you compile + \p libcds and your program with \p -DCDS_ENABLE_HPSTAT. + */ + static void statistics( stat& st ) + { + hp::smr::instance().statistics( st ); + } + + /// Returns post-mortem statistics + /** + Post-mortem statistics is gathered in the \p %HP object destructor + and can be accessible after destructing the global \p %HP object. + + @note Internal statistics is available only if you compile + \p libcds and your program with \p -DCDS_ENABLE_HPSTAT. 
+ + Usage: + \code + int main() + { + cds::Initialize(); + { + // Initialize HP SMR + cds::gc::HP hp; + + // deal with HP-based data structured + // ... + } + + // HP object destroyed + // Get total post-mortem statistics + cds::gc::HP::stat const& st = cds::gc::HP::postmortem_statistics(); + + printf( "HP statistics:\n" + " thread count = %llu\n" + " guard allocated = %llu\n" + " guard freed = %llu\n" + " retired data count = %llu\n" + " free data count = %llu\n" + " scan() call count = %llu\n" + " help_scan() call count = %llu\n", + st.thread_rec_count, + st.guard_allocated, st.guard_freed, + st.retired_count, st.free_count, + st.scan_count, st.help_scan_count + ); + + cds::Terminate(); + } + \endcode + */ + CDS_EXPORT_API static stat const& postmortem_statistics(); + }; + +}} // namespace cds::gc + +#endif // #ifndef CDSLIB_GC_HP_SMR_H + diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/gc/nogc.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/gc/nogc.h new file mode 100644 index 0000000..e5773a0 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/gc/nogc.h @@ -0,0 +1,57 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_GC_NOGC_H +#define CDSLIB_GC_NOGC_H + +namespace cds { namespace gc { + + /// No garbage collecting + /** @ingroup cds_garbage_collector + This empty class is used in \p libcds to mark that a template specialization implements + the container without any garbage collector schema. + + Usually, the container with this "GC" does not support the item removal. 
+ */ + class nogc + { + public: + //@cond + /// Faked scan + static void scan() + {} + static void force_dispose() + {} + //@endcond + }; + +}} // namespace cds::gc + +#endif // #define CDSLIB_GC_NOGC_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/init.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/init.h new file mode 100644 index 0000000..e13ed2d --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/init.h @@ -0,0 +1,97 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INIT_H +#define CDSLIB_INIT_H + +#include +#include +#include +#include + +namespace cds { + + //@cond + namespace details { + bool CDS_EXPORT_API init_first_call(); + bool CDS_EXPORT_API fini_last_call(); + } // namespace details + //@endcond + + /// Initialize CDS library + /** + The function initializes \p CDS library framework. + Before usage of \p CDS library features your application must initialize it + by calling \p %Initialize() function, see \ref cds_how_to_use "how to use the library". + + You can call \p Initialize several times, only first call is significant others will be ignored. + To terminate the \p CDS library correctly, each call to \p %Initialize() must be balanced + by a corresponding \p Terminate() call. + + Note, that this function does not initialize garbage collectors. To use GC you need you should call + GC-specific constructor function to initialize internal structures of GC. + See \p cds::gc for details. + */ + static inline void Initialize( + unsigned int nFeatureFlags = 0 ///< for future use, must be zero. 
+ ) + { + CDS_UNUSED( nFeatureFlags ); + + if ( cds::details::init_first_call()) + { + cds::OS::topology::init(); + cds::threading::ThreadData::s_nProcCount = cds::OS::topology::processor_count(); + if ( cds::threading::ThreadData::s_nProcCount == 0 ) + cds::threading::ThreadData::s_nProcCount = 1; + + cds::threading::Manager::init(); + } + } + + /// Terminate CDS library + /** + This function terminates \p CDS library. + After \p %Terminate() calling many features of the library are unavailable. + This call should be the last call of \p CDS library in your application, + see \ref cds_how_to_use "how to use the library". + */ + static inline void Terminate() + { + if ( cds::details::fini_last_call()) { + cds::threading::Manager::fini(); + + cds::OS::topology::fini(); + } + } + +} // namespace cds + +#endif // CDSLIB_INIT_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/basket_queue.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/basket_queue.h new file mode 100644 index 0000000..1b854fe --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/basket_queue.h @@ -0,0 +1,812 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_BASKET_QUEUE_H +#define CDSLIB_INTRUSIVE_BASKET_QUEUE_H + +#include +#include +#include + +namespace cds { namespace intrusive { + + /// BasketQueue -related definitions + /** @ingroup cds_intrusive_helper + */ + namespace basket_queue { + /// BasketQueue node + /** + Template parameters: + Template parameters: + - GC - garbage collector used + - Tag - a \ref cds_intrusive_hook_tag "tag" + */ + template + struct node + { + typedef GC gc ; ///< Garbage collector + typedef Tag tag ; ///< tag + + typedef cds::details::marked_ptr marked_ptr; ///< marked pointer + typedef typename gc::template atomic_marked_ptr< marked_ptr> atomic_marked_ptr; ///< atomic marked pointer specific for GC + + /// Rebind node for other template parameters + template + struct rebind { + typedef node other ; ///< Rebinding result + }; + + atomic_marked_ptr m_pNext ; ///< pointer to the next node in the container + + node() + { + m_pNext.store( marked_ptr(), atomics::memory_order_release ); + } + }; + + using cds::intrusive::single_link::default_hook; + + //@cond + template < typename HookType, typename... 
Options> + struct hook + { + typedef typename opt::make_options< default_hook, Options...>::type options; + typedef typename options::gc gc; + typedef typename options::tag tag; + typedef node node_type; + typedef HookType hook_type; + }; + //@endcond + + + /// Base hook + /** + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - a \ref cds_intrusive_hook_tag "tag" + */ + template < typename... Options > + struct base_hook: public hook< opt::base_hook_tag, Options... > + {}; + + /// Member hook + /** + \p MemberOffset defines offset in bytes of \ref node member into your structure. + Use \p offsetof macro to define \p MemberOffset + + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - a \ref cds_intrusive_hook_tag "tag" + */ + template < size_t MemberOffset, typename... Options > + struct member_hook: public hook< opt::member_hook_tag, Options... > + { + //@cond + static const size_t c_nMemberOffset = MemberOffset; + //@endcond + }; + + /// Traits hook + /** + \p NodeTraits defines type traits for node. + See \ref node_traits for \p NodeTraits interface description + + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - a \ref cds_intrusive_hook_tag "tag" + */ + template + struct traits_hook: public hook< opt::traits_hook_tag, Options... > + { + //@cond + typedef NodeTraits node_traits; + //@endcond + }; + + /// BasketQueue internal statistics. May be used for debugging or profiling + /** + Template argument \p Counter defines type of counter. + Default is \p cds::atomicity::event_counter, that is weak, i.e. it is not guaranteed + strict event counting. + You may use stronger type of counter like as \p cds::atomicity::item_counter, + or even integral type, for example, \p int. 
+ */ + template + struct stat + { + typedef Counter counter_type; ///< Counter type + + counter_type m_EnqueueCount; ///< Enqueue call count + counter_type m_DequeueCount; ///< Dequeue call count + counter_type m_EnqueueRace; ///< Count of enqueue race conditions encountered + counter_type m_DequeueRace; ///< Count of dequeue race conditions encountered + counter_type m_AdvanceTailError;///< Count of "advance tail failed" events + counter_type m_BadTail; ///< Count of events "Tail is not pointed to the last item in the queue" + counter_type m_TryAddBasket; ///< Count of attemps adding new item to a basket (only or BasketQueue, for other queue this metric is not used) + counter_type m_AddBasketCount; ///< Count of events "Enqueue a new item into basket" (only or BasketQueue, for other queue this metric is not used) + counter_type m_EmptyDequeue; ///< Count of dequeue from empty queue + + /// Register enqueue call + void onEnqueue() { ++m_EnqueueCount; } + /// Register dequeue call + void onDequeue() { ++m_DequeueCount; } + /// Register enqueue race event + void onEnqueueRace() { ++m_EnqueueRace; } + /// Register dequeue race event + void onDequeueRace() { ++m_DequeueRace; } + /// Register "advance tail failed" event + void onAdvanceTailFailed() { ++m_AdvanceTailError; } + /// Register event "Tail is not pointed to last item in the queue" + void onBadTail() { ++m_BadTail; } + /// Register an attempt t add new item to basket + void onTryAddBasket() { ++m_TryAddBasket; } + /// Register event "Enqueue a new item into basket" (only or BasketQueue, for other queue this metric is not used) + void onAddBasket() { ++m_AddBasketCount; } + /// Register dequeuing from empty queue + void onEmptyDequeue() { ++m_EmptyDequeue; } + + + //@cond + void reset() + { + m_EnqueueCount.reset(); + m_DequeueCount.reset(); + m_EnqueueRace.reset(); + m_DequeueRace.reset(); + m_AdvanceTailError.reset(); + m_BadTail.reset(); + m_TryAddBasket.reset(); + m_AddBasketCount.reset(); + 
m_EmptyDequeue.reset(); + } + + stat& operator +=( stat const& s ) + { + m_EnqueueCount += s.m_EnqueueCount.get(); + m_DequeueCount += s.m_DequeueCount.get(); + m_EnqueueRace += s.m_EnqueueRace.get(); + m_DequeueRace += s.m_DequeueRace.get(); + m_AdvanceTailError += s.m_AdvanceTailError.get(); + m_BadTail += s.m_BadTail.get(); + m_TryAddBasket += s.m_TryAddBasket.get(); + m_AddBasketCount += s.m_AddBasketCount.get(); + m_EmptyDequeue += s.m_EmptyDequeue.get(); + return *this; + } + //@endcond + }; + + /// Dummy BasketQueue statistics - no counting is performed, no overhead. Support interface like \p basket_queue::stat + struct empty_stat + { + //@cond + void onEnqueue() const {} + void onDequeue() const {} + void onEnqueueRace() const {} + void onDequeueRace() const {} + void onAdvanceTailFailed() const {} + void onBadTail() const {} + void onTryAddBasket() const {} + void onAddBasket() const {} + void onEmptyDequeue() const {} + + void reset() {} + empty_stat& operator +=( empty_stat const& ) + { + return *this; + } + //@endcond + }; + + /// BasketQueue default type traits + struct traits + { + /// Back-off strategy + typedef cds::backoff::empty back_off; + + /// Hook, possible types are \p basket_queue::base_hook, \p basket_queue::member_hook, \p basket_queue::traits_hook + typedef basket_queue::base_hook<> hook; + + /// The functor used for dispose removed items. Default is \p opt::v::empty_disposer. This option is used for dequeuing + typedef opt::v::empty_disposer disposer; + + /// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter to enable item counting + typedef atomicity::empty_item_counter item_counter; + + /// Internal statistics (by default, disabled) + /** + Possible option value are: \p basket_queue::stat, \p basket_queue::empty_stat (the default), + user-provided class that supports \p %basket_queue::stat interface. 
+ */ + typedef basket_queue::empty_stat stat; + + /// C++ memory ordering model + /** + Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + */ + typedef opt::v::relaxed_ordering memory_model; + + /// Link checking, see \p cds::opt::link_checker + static constexpr const opt::link_check_type link_checker = opt::debug_check_link; + + /// Padding for internal critical atomic data. Default is \p opt::cache_line_padding + enum { padding = opt::cache_line_padding }; + }; + + + /// Metafunction converting option list to \p basket_queue::traits + /** + Supported \p Options are: + - \p opt::hook - hook used. Possible hooks are: \p basket_queue::base_hook, \p basket_queue::member_hook, \p basket_queue::traits_hook. + If the option is not specified, \p %basket_queue::base_hook<> is used. + - \p opt::back_off - back-off strategy used, default is \p cds::backoff::empty. + - \p opt::disposer - the functor used for dispose removed items. Default is \p opt::v::empty_disposer. This option is used + when dequeuing. + - \p opt::link_checker - the type of node's link fields checking. Default is \p opt::debug_check_link + - \p opt::item_counter - the type of item counting feature. Default is \p cds::atomicity::empty_item_counter (item counting disabled) + To enable item counting use \p cds::atomicity::item_counter + - \p opt::stat - the type to gather internal statistics. + Possible statistics types are: \p basket_queue::stat, \p basket_queue::empty_stat, user-provided class that supports \p %basket_queue::stat interface. + Default is \p %basket_queue::empty_stat (internal statistics disabled). + - \p opt::padding - padding for internal critical atomic data. Default is \p opt::cache_line_padding + - \p opt::memory_model - C++ memory ordering model. 
Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + + Example: declare \p %BasketQueue with item counting and internal statistics + \code + typedef cds::intrusive::BasketQueue< cds::gc::HP, Foo, + typename cds::intrusive::basket_queue::make_traits< + cds::intrusive::opt:hook< cds::intrusive::basket_queue::base_hook< cds::opt::gc >>, + cds::opt::item_counte< cds::atomicity::item_counter >, + cds::opt::stat< cds::intrusive::basket_queue::stat<> > + >::type + > myQueue; + \endcode + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + , Options... + >::type type; +# endif + }; + } // namespace basket_queue + + /// Basket lock-free queue (intrusive variant) + /** @ingroup cds_intrusive_queue + Implementation of basket queue algorithm. + + \par Source: + [2007] Moshe Hoffman, Ori Shalev, Nir Shavit "The Baskets Queue" + + Key idea + + In the 'basket' approach, instead of + the traditional ordered list of nodes, the queue consists of an ordered list of groups + of nodes (logical baskets). The order of nodes in each basket need not be specified, and in + fact, it is easiest to maintain them in FIFO order. The baskets fulfill the following basic + rules: + - Each basket has a time interval in which all its nodes' enqueue operations overlap. + - The baskets are ordered by the order of their respective time intervals. + - For each basket, its nodes' dequeue operations occur after its time interval. + - The dequeue operations are performed according to the order of baskets. + + Two properties define the FIFO order of nodes: + - The order of nodes in a basket is not specified. + - The order of nodes in different baskets is the FIFO-order of their respective baskets. 
+ + In algorithms such as the MS-queue or optimistic + queue, threads enqueue items by applying a Compare-and-swap (CAS) operation to the + queue's tail pointer, and all the threads that fail on a particular CAS operation (and also + the winner of that CAS) overlap in time. In particular, they share the time interval of + the CAS operation itself. Hence, all the threads that fail to CAS on the tail-node of + the queue may be inserted into the same basket. By integrating the basket-mechanism + as the back-off mechanism, the time usually spent on backing-off before trying to link + onto the new tail, can now be utilized to insert the failed operations into the basket, + allowing enqueues to complete sooner. In the meantime, the next successful CAS operations + by enqueues allow new baskets to be formed down the list, and these can be + filled concurrently. Moreover, the failed operations don't retry their link attempt on the + new tail, lowering the overall contention on it. This leads to a queue + algorithm that unlike all former concurrent queue algorithms requires virtually no tuning + of the backoff mechanisms to reduce contention, making the algorithm an attractive + out-of-the-box queue. + + In order to enqueue, just as in \p MSQueue, a thread first tries to link the new node to + the last node. If it failed to do so, then another thread has already succeeded. Thus it + tries to insert the new node into the new basket that was created by the winner thread. + To dequeue a node, a thread first reads the head of the queue to obtain the + oldest basket. It may then dequeue any node in the oldest basket. + + Template arguments: + - \p GC - garbage collector type: \p gc::HP, \p gc::DHP + - \p T - type of value to be stored in the queue + - \p Traits - queue traits, default is \p basket_queue::traits. 
You can use \p basket_queue::make_traits + metafunction to make your traits or just derive your traits from \p %basket_queue::traits: + \code + struct myTraits: public cds::intrusive::basket_queue::traits { + typedef cds::intrusive::basket_queue::stat<> stat; + typedef cds::atomicity::item_counter item_counter; + }; + typedef cds::intrusive::BasketQueue< cds::gc::HP, Foo, myTraits > myQueue; + + // Equivalent make_traits example: + typedef cds::intrusive::BasketQueue< cds::gc::HP, Foo, + typename cds::intrusive::basket_queue::make_traits< + cds::opt::stat< cds::intrusive::basket_queue::stat<> >, + cds::opt::item_counter< cds::atomicity::item_counter > + >::type + > myQueue; + \endcode + + Garbage collecting schema \p GC must be consistent with the \p basket_queue::node GC. + + \par About item disposing + Like \p MSQueue, the Baskets queue algo has a key feature: even if the queue is empty it contains one item that is "dummy" one from + the standpoint of the algo. See \p dequeue() function doc for explanation. + + \par Examples + \code + #include + #include + + namespace ci = cds::inrtusive; + typedef cds::gc::HP hp_gc; + + // Basket queue with Hazard Pointer garbage collector, base hook + item disposer: + struct Foo: public ci::basket_queue::node< hp_gc > + { + // Your data + ... + }; + + // Disposer for Foo struct just deletes the object passed in + struct fooDisposer { + void operator()( Foo * p ) + { + delete p; + } + }; + + struct fooTraits: public ci::basket_queue::traits { + typedef ci::basket_queue::base_hook< ci::opt::gc > hook; + typedef fooDisposer disposer; + }; + typedef ci::BasketQueue< hp_gc, Foo, fooTraits > fooQueue; + + // BasketQueue with Hazard Pointer garbage collector, + // member hook + item disposer + item counter, + // without padding of internal queue data: + struct Bar + { + // Your data + ... 
+ ci::basket_queue::node< hp_gc > hMember; + }; + + struct barTraits: public + ci::basket_queue::make_traits< + ci::opt::hook< + ci::basket_queue::member_hook< + offsetof(Bar, hMember) + ,ci::opt::gc + > + > + ,ci::opt::disposer< fooDisposer > + ,cds::opt::item_counter< cds::atomicity::item_counter > + ,cds::opt::padding< cds::opt::no_special_padding > + >::type + {}; + typedef ci::BasketQueue< hp_gc, Bar, barTraits > barQueue; + \endcode + */ + template + class BasketQueue + { + public: + typedef GC gc; ///< Garbage collector + typedef T value_type; ///< type of value stored in the queue + typedef Traits traits; ///< Queue traits + typedef typename traits::hook hook; ///< hook type + typedef typename hook::node_type node_type; ///< node type + typedef typename traits::disposer disposer; ///< disposer used + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits + typedef typename single_link::get_link_checker< node_type, traits::link_checker >::type link_checker; ///< link checker + + typedef typename traits::back_off back_off; ///< back-off strategy + typedef typename traits::item_counter item_counter; ///< Item counting policy used + typedef typename traits::stat stat; ///< Internal statistics policy used + typedef typename traits::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option + + /// Rebind template arguments + template + struct rebind { + typedef BasketQueue< GC2, T2, Traits2> other ; ///< Rebinding result + }; + + static constexpr const size_t c_nHazardPtrCount = 6 ; ///< Count of hazard pointer required for the algorithm + + protected: + //@cond + typedef typename node_type::marked_ptr marked_ptr; + typedef typename node_type::atomic_marked_ptr atomic_marked_ptr; + + // GC and node_type::gc must be the same + static_assert( std::is_same::value, "GC and node_type::gc must be the same"); + //@endcond + + atomic_marked_ptr m_pHead ; ///< Queue's head pointer (aligned) + //@cond + typename opt::details::apply_padding< atomic_marked_ptr, traits::padding >::padding_type pad1_; + //@endcond + atomic_marked_ptr m_pTail ; ///< Queue's tail pointer (aligned) + //@cond + typename opt::details::apply_padding< atomic_marked_ptr, traits::padding >::padding_type pad2_; + //@endcond + node_type m_Dummy ; ///< dummy node + //@cond + typename opt::details::apply_padding< node_type, traits::padding >::padding_type pad3_; + //@endcond + item_counter m_ItemCounter ; ///< Item counter + stat m_Stat ; ///< Internal statistics + //@cond + size_t const m_nMaxHops; + //@endcond + + //@cond + + struct dequeue_result { + typename gc::template GuardArray<3> guards; + node_type * pNext; + }; + + bool do_dequeue( dequeue_result& res, bool bDeque ) + { + // Note: + // If bDeque == false then the function is called from empty method and no real dequeuing operation is performed + + back_off bkoff; + + marked_ptr h; + marked_ptr t; + marked_ptr pNext; + + while ( true ) { + h = res.guards.protect( 0, m_pHead, []( marked_ptr p ) -> value_type * { return node_traits::to_value_ptr( p.ptr());}); + t = res.guards.protect( 1, m_pTail, []( marked_ptr p ) -> value_type * { return node_traits::to_value_ptr( p.ptr());}); + pNext = res.guards.protect( 2, h->m_pNext, []( marked_ptr p ) -> value_type * { return node_traits::to_value_ptr( 
p.ptr());}); + + if ( h == m_pHead.load( memory_model::memory_order_acquire )) { + if ( h.ptr() == t.ptr()) { + if ( !pNext.ptr()) { + m_Stat.onEmptyDequeue(); + return false; + } + + { + typename gc::Guard g; + while ( pNext->m_pNext.load(memory_model::memory_order_relaxed).ptr() && m_pTail.load(memory_model::memory_order_relaxed) == t ) { + pNext = g.protect( pNext->m_pNext, []( marked_ptr p ) -> value_type * { return node_traits::to_value_ptr( p.ptr());}); + res.guards.copy( 2, g ); + } + } + + m_pTail.compare_exchange_weak( t, marked_ptr(pNext.ptr()), memory_model::memory_order_acquire, atomics::memory_order_relaxed ); + } + else { + marked_ptr iter( h ); + size_t hops = 0; + + typename gc::Guard g; + + while ( pNext.ptr() && pNext.bits() && iter.ptr() != t.ptr() && m_pHead.load(memory_model::memory_order_relaxed) == h ) { + iter = pNext; + g.assign( res.guards.template get(2)); + pNext = res.guards.protect( 2, pNext->m_pNext, []( marked_ptr p ) -> value_type * { return node_traits::to_value_ptr( p.ptr());}); + ++hops; + } + + if ( m_pHead.load(memory_model::memory_order_relaxed) != h ) + continue; + + if ( iter.ptr() == t.ptr()) + free_chain( h, iter ); + else if ( bDeque ) { + res.pNext = pNext.ptr(); + + if ( iter->m_pNext.compare_exchange_weak( pNext, marked_ptr( pNext.ptr(), 1 ), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { + if ( hops >= m_nMaxHops ) + free_chain( h, pNext ); + break; + } + } + else + return true; + } + } + + if ( bDeque ) + m_Stat.onDequeueRace(); + bkoff(); + } + + if ( bDeque ) { + --m_ItemCounter; + m_Stat.onDequeue(); + } + + return true; + } + + void free_chain( marked_ptr head, marked_ptr newHead ) + { + // "head" and "newHead" are guarded + + if ( m_pHead.compare_exchange_strong( head, marked_ptr(newHead.ptr()), memory_model::memory_order_release, atomics::memory_order_relaxed )) + { + typename gc::template GuardArray<2> guards; + guards.assign( 0, node_traits::to_value_ptr(head.ptr())); + while ( 
head.ptr() != newHead.ptr()) { + marked_ptr pNext = guards.protect( 1, head->m_pNext, []( marked_ptr p ) -> value_type * { return node_traits::to_value_ptr( p.ptr());}); + assert( pNext.bits() != 0 ); + dispose_node( head.ptr()); + guards.copy( 0, 1 ); + head = pNext; + } + } + } + + static void clear_links( node_type * pNode ) + { + pNode->m_pNext.store( marked_ptr( nullptr ), memory_model::memory_order_release ); + } + + void dispose_node( node_type * p ) + { + if ( p != &m_Dummy ) { + struct internal_disposer + { + void operator()( value_type * p ) + { + assert( p != nullptr ); + BasketQueue::clear_links( node_traits::to_node_ptr( p )); + disposer()(p); + } + }; + gc::template retire( node_traits::to_value_ptr(p)); + } + } + //@endcond + + public: + /// Initializes empty queue + BasketQueue() + : m_pHead( &m_Dummy ) + , m_pTail( &m_Dummy ) + , m_nMaxHops( 3 ) + {} + + /// Destructor clears the queue + /** + Since the baskets queue contains at least one item even + if the queue is empty, the destructor may call item disposer. + */ + ~BasketQueue() + { + clear(); + + node_type * pHead = m_pHead.load(memory_model::memory_order_relaxed).ptr(); + assert( pHead != nullptr ); + + { + node_type * pNext = pHead->m_pNext.load( memory_model::memory_order_relaxed ).ptr(); + while ( pNext ) { + node_type * p = pNext; + pNext = pNext->m_pNext.load( memory_model::memory_order_relaxed ).ptr(); + p->m_pNext.store( marked_ptr(), memory_model::memory_order_relaxed ); + dispose_node( p ); + } + pHead->m_pNext.store( marked_ptr(), memory_model::memory_order_relaxed ); + //m_pTail.store( marked_ptr( pHead ), memory_model::memory_order_relaxed ); + } + + m_pHead.store( marked_ptr( nullptr ), memory_model::memory_order_relaxed ); + m_pTail.store( marked_ptr( nullptr ), memory_model::memory_order_relaxed ); + + dispose_node( pHead ); + } + + /// Enqueues \p val value into the queue. + /** @anchor cds_intrusive_BasketQueue_enqueue + The function always returns \p true. 
+ */ + bool enqueue( value_type& val ) + { + node_type * pNew = node_traits::to_node_ptr( val ); + link_checker::is_empty( pNew ); + + typename gc::Guard guard; + typename gc::Guard gNext; + back_off bkoff; + + marked_ptr t; + while ( true ) { + t = guard.protect( m_pTail, []( marked_ptr p ) -> value_type * { return node_traits::to_value_ptr( p.ptr());}); + + marked_ptr pNext = t->m_pNext.load(memory_model::memory_order_relaxed ); + + if ( pNext.ptr() == nullptr ) { + pNew->m_pNext.store( marked_ptr(), memory_model::memory_order_relaxed ); + if ( t->m_pNext.compare_exchange_weak( pNext, marked_ptr(pNew), memory_model::memory_order_release, atomics::memory_order_relaxed )) { + if ( !m_pTail.compare_exchange_strong( t, marked_ptr(pNew), memory_model::memory_order_release, atomics::memory_order_relaxed )) + m_Stat.onAdvanceTailFailed(); + break; + } + + // Try adding to basket + m_Stat.onTryAddBasket(); + + // Reread tail next + try_again: + pNext = gNext.protect( t->m_pNext, []( marked_ptr p ) -> value_type * { return node_traits::to_value_ptr( p.ptr());}); + + // add to the basket + if ( m_pTail.load( memory_model::memory_order_relaxed ) == t + && t->m_pNext.load( memory_model::memory_order_relaxed) == pNext + && !pNext.bits()) + { + bkoff(); + pNew->m_pNext.store( pNext, memory_model::memory_order_relaxed ); + if ( t->m_pNext.compare_exchange_weak( pNext, marked_ptr( pNew ), memory_model::memory_order_release, atomics::memory_order_relaxed )) { + m_Stat.onAddBasket(); + break; + } + goto try_again; + } + } + else { + // Tail is misplaced, advance it + + typename gc::template GuardArray<2> g; + g.assign( 0, node_traits::to_value_ptr( pNext.ptr())); + if ( m_pTail.load( memory_model::memory_order_acquire ) != t + || t->m_pNext.load( memory_model::memory_order_relaxed ) != pNext ) + { + m_Stat.onEnqueueRace(); + bkoff(); + continue; + } + + marked_ptr p; + bool bTailOk = true; + while ( (p = pNext->m_pNext.load( memory_model::memory_order_acquire )).ptr() != nullptr ) 
+ { + bTailOk = m_pTail.load( memory_model::memory_order_relaxed ) == t; + if ( !bTailOk ) + break; + + g.assign( 1, node_traits::to_value_ptr( p.ptr())); + if ( pNext->m_pNext.load( memory_model::memory_order_relaxed ) != p ) + continue; + pNext = p; + g.assign( 0, g.template get( 1 )); + } + if ( !bTailOk || !m_pTail.compare_exchange_weak( t, marked_ptr( pNext.ptr()), memory_model::memory_order_release, atomics::memory_order_relaxed )) + m_Stat.onAdvanceTailFailed(); + + m_Stat.onBadTail(); + } + + m_Stat.onEnqueueRace(); + } + + ++m_ItemCounter; + m_Stat.onEnqueue(); + + return true; + } + + /// Synonym for \p enqueue() function + bool push( value_type& val ) + { + return enqueue( val ); + } + + /// Dequeues a value from the queue + /** @anchor cds_intrusive_BasketQueue_dequeue + If the queue is empty the function returns \p nullptr. + + @note See \p MSQueue::dequeue() note about item disposing + */ + value_type * dequeue() + { + dequeue_result res; + + if ( do_dequeue( res, true )) + return node_traits::to_value_ptr( *res.pNext ); + return nullptr; + } + + /// Synonym for \p dequeue() function + value_type * pop() + { + return dequeue(); + } + + /// Checks if the queue is empty + /** + Note that this function is not \p const. + The function is based on \p dequeue() algorithm + but really it does not dequeue any item. + */ + bool empty() + { + dequeue_result res; + return !do_dequeue( res, false ); + } + + /// Clear the queue + /** + The function repeatedly calls \p dequeue() until it returns \p nullptr. + The disposer defined in template \p Traits is called for each item + that can be safely disposed. + */ + void clear() + { + while ( dequeue()); + } + + /// Returns queue's item count + /** + The value returned depends on \p Traits (see basket_queue::traits::item_counter). For \p atomicity::empty_item_counter, + this function always returns 0. + + @note Even if you use real item counter and it returns 0, this fact is not mean that the queue + is empty. 
To check queue emptyness use \p empty() method. + */ + size_t size() const + { + return m_ItemCounter.value(); + } + + /// Returns reference to internal statistics + const stat& statistics() const + { + return m_Stat; + } + }; + +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_BASKET_QUEUE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/cuckoo_set.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/cuckoo_set.h new file mode 100644 index 0000000..6ad0ad7 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/cuckoo_set.h @@ -0,0 +1,2829 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_CUCKOO_SET_H +#define CDSLIB_INTRUSIVE_CUCKOO_SET_H + +#include +#include +#include +#include // ref +#include +#include +#include +#include +#include +#include + + +namespace cds { namespace intrusive { + + /// CuckooSet-related definitions + namespace cuckoo { + /// Option to define probeset type + /** + The option specifies probeset type for the CuckooSet. + Available values: + - \p cds::intrusive::cuckoo::list - the probeset is a single-linked list. + The node contains pointer to next node in probeset. + - \p cds::intrusive::cuckoo::vector - the probeset is a vector + with constant-size \p Capacity where \p Capacity is an unsigned int constant. + The node does not contain any auxiliary data. + */ + template + struct probeset_type + { + //@cond + template + struct pack: public Base { + typedef Type probeset_type; + }; + //@endcond + }; + + /// Option specifying whether to store hash values in the node + /** + This option reserves additional space in the hook to store the hash value of the object once it's introduced in the container. + When this option is used, the unordered container will store the calculated hash value in the hook and rehashing operations won't need + to recalculate the hash of the value. This option will improve the performance of unordered containers + when rehashing is frequent or hashing the value is a slow operation + + The \p Count template parameter defines the size of hash array. 
Remember that cuckoo hashing implies at least two + hash values per item. + + Possible values of \p Count: + - 0 - no hash storing in the node + - greater that 1 - store hash values. + + Value 1 is deprecated. + */ + template + struct store_hash + { + //@cond + template + struct pack: public Base { + static unsigned int const store_hash = Count; + }; + //@endcond + }; + + + //@cond + // Probeset type placeholders + struct list_probeset_class; + struct vector_probeset_class; + //@endcond + + //@cond + /// List probeset type + struct list; + //@endcond + + /// Vector probeset type + template + struct vector + { + /// Vector capacity + static unsigned int const c_nCapacity = Capacity; + }; + + /// CuckooSet node + /** + Template arguments: + - \p ProbesetType - type of probeset. Can be \p cds::intrusive::cuckoo::list + or \p cds::intrusive::cuckoo::vector. + - \p StoreHashCount - constant that defines whether to store node hash values. + See cuckoo::store_hash option for explanation + - \p Tag - a \ref cds_intrusive_hook_tag "tag" + */ + template + struct node +#ifdef CDS_DOXYGEN_INVOKED + { + typedef ProbesetType probeset_type ; ///< Probeset type + typedef Tag tag ; ///< Tag + static unsigned int const hash_array_size = StoreHashCount ; ///< The size of hash array + } +#endif +; + + //@cond + template + struct node< cuckoo::list, 0, Tag> + { + typedef list_probeset_class probeset_class; + typedef cuckoo::list probeset_type; + typedef Tag tag; + static unsigned int const hash_array_size = 0; + static unsigned int const probeset_size = 0; + + node * m_pNext; + + constexpr node() noexcept + : m_pNext( nullptr ) + {} + + void store_hash( size_t const* ) + {} + + size_t * get_hash() const + { + // This node type does not store hash values!!! 
+ assert(false); + return nullptr; + } + + void clear() + { + m_pNext = nullptr; + } + }; + + template + struct node< cuckoo::list, StoreHashCount, Tag> + { + typedef list_probeset_class probeset_class; + typedef cuckoo::list probeset_type; + typedef Tag tag; + static unsigned int const hash_array_size = StoreHashCount; + static unsigned int const probeset_size = 0; + + node * m_pNext; + size_t m_arrHash[ hash_array_size ]; + + node() noexcept + : m_pNext( nullptr ) + { + memset( m_arrHash, 0, sizeof(m_arrHash)); + } + + void store_hash( size_t const* pHashes ) + { + memcpy( m_arrHash, pHashes, sizeof( m_arrHash )); + } + + size_t * get_hash() const + { + return const_cast( m_arrHash ); + } + + void clear() + { + m_pNext = nullptr; + } + }; + + template + struct node< cuckoo::vector, 0, Tag> + { + typedef vector_probeset_class probeset_class; + typedef cuckoo::vector probeset_type; + typedef Tag tag; + static unsigned int const hash_array_size = 0; + static unsigned int const probeset_size = probeset_type::c_nCapacity; + + node() noexcept + {} + + void store_hash( size_t const* ) + {} + + size_t * get_hash() const + { + // This node type does not store hash values!!! 
+ assert(false); + return nullptr; + } + + void clear() + {} + }; + + template + struct node< cuckoo::vector, StoreHashCount, Tag> + { + typedef vector_probeset_class probeset_class; + typedef cuckoo::vector probeset_type; + typedef Tag tag; + static unsigned int const hash_array_size = StoreHashCount; + static unsigned int const probeset_size = probeset_type::c_nCapacity; + + size_t m_arrHash[ hash_array_size ]; + + node() noexcept + { + memset( m_arrHash, 0, sizeof(m_arrHash)); + } + + void store_hash( size_t const* pHashes ) + { + memcpy( m_arrHash, pHashes, sizeof( m_arrHash )); + } + + size_t * get_hash() const + { + return const_cast( m_arrHash ); + } + + void clear() + {} + }; + //@endcond + + + //@cond + struct default_hook { + typedef cuckoo::list probeset_type; + static unsigned int const store_hash = 0; + typedef opt::none tag; + }; + + template < typename HookType, typename... Options> + struct hook + { + typedef typename opt::make_options< default_hook, Options...>::type traits; + + typedef typename traits::probeset_type probeset_type; + typedef typename traits::tag tag; + static unsigned int const store_hash = traits::store_hash; + + typedef node node_type; + + typedef HookType hook_type; + }; + //@endcond + + /// Base hook + /** + \p Options are: + - \p cuckoo::probeset_type - probeset type. Defaul is \p cuckoo::list + - \p cuckoo::store_hash - store hash values in the node or not. Default is 0 (no storing) + - \p opt::tag - a \ref cds_intrusive_hook_tag "tag" + */ + template < typename... Options > + struct base_hook: public hook< opt::base_hook_tag, Options... > + {}; + + /// Member hook + /** + \p MemberOffset defines offset in bytes of \ref node member into your structure. + Use \p offsetof macro to define \p MemberOffset + + \p Options are: + - \p cuckoo::probeset_type - probeset type. Defaul is \p cuckoo::list + - \p cuckoo::store_hash - store hash values in the node or not. 
Default is 0 (no storing) + - \p opt::tag - a \ref cds_intrusive_hook_tag "tag" + */ + template < size_t MemberOffset, typename... Options > + struct member_hook: public hook< opt::member_hook_tag, Options... > + { + //@cond + static const size_t c_nMemberOffset = MemberOffset; + //@endcond + }; + + /// Traits hook + /** + \p NodeTraits defines type traits for node. + See \ref node_traits for \p NodeTraits interface description + + \p Options are: + - \p cuckoo::probeset_type - probeset type. Defaul is \p cuckoo::list + - \p cuckoo::store_hash - store hash values in the node or not. Default is 0 (no storing) + - \p opt::tag - a \ref cds_intrusive_hook_tag "tag" + */ + template + struct traits_hook: public hook< opt::traits_hook_tag, Options... > + { + //@cond + typedef NodeTraits node_traits; + //@endcond + }; + + /// Internal statistics for \ref striping mutex policy + struct striping_stat { + typedef cds::atomicity::event_counter counter_type; ///< Counter type + + counter_type m_nCellLockCount ; ///< Count of obtaining cell lock + counter_type m_nCellTryLockCount ; ///< Count of cell \p try_lock attempts + counter_type m_nFullLockCount ; ///< Count of obtaining full lock + counter_type m_nResizeLockCount ; ///< Count of obtaining resize lock + counter_type m_nResizeCount ; ///< Count of resize event + + //@cond + void onCellLock() { ++m_nCellLockCount; } + void onCellTryLock() { ++m_nCellTryLockCount; } + void onFullLock() { ++m_nFullLockCount; } + void onResizeLock() { ++m_nResizeLockCount; } + void onResize() { ++m_nResizeCount; } + //@endcond + }; + + /// Dummy internal statistics for \ref striping mutex policy + struct empty_striping_stat { + //@cond + void onCellLock() const {} + void onCellTryLock() const {} + void onFullLock() const {} + void onResizeLock() const {} + void onResize() const {} + //@endcond + }; + + /// Lock striping concurrent access policy + /** + This is one of available opt::mutex_policy option type for CuckooSet + + Lock striping is 
very simple technique. + The cuckoo set consists of the bucket tables and the array of locks. + There is single lock array for each bucket table, at least, the count of bucket table is 2. + Initially, the capacity of lock array and each bucket table is the same. + When set is resized, bucket table capacity will be doubled but lock array will not. + The lock \p i protects each bucket \p j, where j = i mod L , + where \p L - the size of lock array. + + The policy contains an internal array of \p RecursiveLock locks. + + Template arguments: + - \p RecursiveLock - the type of recursive mutex. The default is \p std::recursive_mutex. The mutex type should be default-constructible. + Note that a recursive spin-lock is not suitable for lock striping for performance reason. + - \p Arity - unsigned int constant that specifies an arity. The arity is the count of hash functors, i.e., the + count of lock arrays. Default value is 2. + - \p Alloc - allocator type used for lock array memory allocation. Default is \p CDS_DEFAULT_ALLOCATOR. + - \p Stat - internal statistics type. Note that this template argument is automatically selected by \ref CuckooSet + class according to its \p opt::stat option. 
+ */ + template < + class RecursiveLock = std::recursive_mutex, + unsigned int Arity = 2, + class Alloc = CDS_DEFAULT_ALLOCATOR, + class Stat = empty_striping_stat + > + class striping + { + public: + typedef RecursiveLock lock_type ; ///< lock type + typedef Alloc allocator_type ; ///< allocator type + static unsigned int const c_nArity = Arity ; ///< the arity + typedef Stat statistics_type ; ///< Internal statistics type (\ref striping_stat or \ref empty_striping_stat) + + //@cond + typedef striping_stat real_stat; + typedef empty_striping_stat empty_stat; + + template + struct rebind_statistics { + typedef striping other; + }; + //@endcond + + typedef cds::sync::lock_array< lock_type, cds::sync::pow2_select_policy, allocator_type > lock_array_type ; ///< lock array type + + protected: + //@cond + class lock_array: public lock_array_type + { + public: + // placeholder ctor + lock_array(): lock_array_type( typename lock_array_type::select_cell_policy(2)) {} + + // real ctor + lock_array( size_t nCapacity ): lock_array_type( nCapacity, typename lock_array_type::select_cell_policy(nCapacity)) {} + }; + + class scoped_lock: public std::unique_lock< lock_array_type > + { + typedef std::unique_lock< lock_array_type > base_class; + public: + scoped_lock( lock_array& arrLock, size_t nHash ): base_class( arrLock, nHash ) {} + }; + //@endcond + + protected: + //@cond + lock_array m_Locks[c_nArity] ; ///< array of \p lock_array_type + statistics_type m_Stat ; ///< internal statistics + //@endcond + + public: + //@cond + class scoped_cell_lock { + lock_type * m_guard[c_nArity]; + + public: + scoped_cell_lock( striping& policy, size_t const* arrHash ) + { + for ( unsigned int i = 0; i < c_nArity; ++i ) { + m_guard[i] = &( policy.m_Locks[i].at( policy.m_Locks[i].lock( arrHash[i] ))); + } + policy.m_Stat.onCellLock(); + } + + ~scoped_cell_lock() + { + for ( unsigned int i = 0; i < c_nArity; ++i ) + m_guard[i]->unlock(); + } + }; + + class scoped_cell_trylock + { + typedef 
typename lock_array_type::lock_type lock_type; + + lock_type * m_guard[c_nArity]; + bool m_bLocked; + + public: + scoped_cell_trylock( striping& policy, size_t const* arrHash ) + { + size_t nCell = policy.m_Locks[0].try_lock( arrHash[0] ); + m_bLocked = nCell != lock_array_type::c_nUnspecifiedCell; + if ( m_bLocked ) { + m_guard[0] = &(policy.m_Locks[0].at(nCell)); + for ( unsigned int i = 1; i < c_nArity; ++i ) { + m_guard[i] = &( policy.m_Locks[i].at( policy.m_Locks[i].lock( arrHash[i] ))); + } + } + else { + std::fill( m_guard, m_guard + c_nArity, nullptr ); + } + policy.m_Stat.onCellTryLock(); + } + ~scoped_cell_trylock() + { + if ( m_bLocked ) { + for ( unsigned int i = 0; i < c_nArity; ++i ) + m_guard[i]->unlock(); + } + } + + bool locked() const + { + return m_bLocked; + } + }; + + class scoped_full_lock { + std::unique_lock< lock_array_type > m_guard; + public: + scoped_full_lock( striping& policy ) + : m_guard( policy.m_Locks[0] ) + { + policy.m_Stat.onFullLock(); + } + + /// Ctor for scoped_resize_lock - no statistics is incremented + scoped_full_lock( striping& policy, bool ) + : m_guard( policy.m_Locks[0] ) + {} + }; + + class scoped_resize_lock: public scoped_full_lock { + public: + scoped_resize_lock( striping& policy ) + : scoped_full_lock( policy, false ) + { + policy.m_Stat.onResizeLock(); + } + }; + //@endcond + + public: + /// Constructor + striping( + size_t nLockCount ///< The size of lock array. Must be power of two. 
+ ) + { + // Trick: initialize the array of locks + for ( unsigned int i = 0; i < c_nArity; ++i ) { + lock_array * pArr = m_Locks + i; + pArr->lock_array::~lock_array(); + new ( pArr ) lock_array( nLockCount ); + } + } + + /// Returns lock array size + /** + Lock array size is unchanged during \p striping object lifetime + */ + size_t lock_count() const + { + return m_Locks[0].size(); + } + + //@cond + void resize( size_t ) + { + m_Stat.onResize(); + } + //@endcond + + /// Returns the arity of striping mutex policy + constexpr unsigned int arity() const noexcept + { + return c_nArity; + } + + /// Returns internal statistics + statistics_type const& statistics() const + { + return m_Stat; + } + }; + + /// Internal statistics for \ref refinable mutex policy + struct refinable_stat { + typedef cds::atomicity::event_counter counter_type ; ///< Counter type + + counter_type m_nCellLockCount ; ///< Count of obtaining cell lock + counter_type m_nCellLockWaitResizing ; ///< Count of loop iteration to wait for resizing + counter_type m_nCellLockArrayChanged ; ///< Count of event "Lock array has been changed when obtaining cell lock" + counter_type m_nCellLockFailed ; ///< Count of event "Cell lock failed because of the array is owned by other thread" + + counter_type m_nSecondCellLockCount ; ///< Count of obtaining cell lock when another cell is already locked + counter_type m_nSecondCellLockFailed ; ///< Count of unsuccess obtaining cell lock when another cell is already locked + + counter_type m_nFullLockCount ; ///< Count of obtaining full lock + counter_type m_nFullLockIter ; ///< Count of unsuccessfull iteration to obtain full lock + + counter_type m_nResizeLockCount ; ///< Count of obtaining resize lock + counter_type m_nResizeLockIter ; ///< Count of unsuccessfull iteration to obtain resize lock + counter_type m_nResizeLockArrayChanged; ///< Count of event "Lock array has been changed when obtaining resize lock" + counter_type m_nResizeCount ; ///< Count of resize 
event + + //@cond + void onCellLock() { ++m_nCellLockCount; } + void onCellWaitResizing() { ++m_nCellLockWaitResizing; } + void onCellArrayChanged() { ++m_nCellLockArrayChanged; } + void onCellLockFailed() { ++m_nCellLockFailed; } + void onSecondCellLock() { ++m_nSecondCellLockCount; } + void onSecondCellLockFailed() { ++m_nSecondCellLockFailed; } + void onFullLock() { ++m_nFullLockCount; } + void onFullLockIter() { ++m_nFullLockIter; } + void onResizeLock() { ++m_nResizeLockCount; } + void onResizeLockIter() { ++m_nResizeLockIter; } + void onResizeLockArrayChanged() { ++m_nResizeLockArrayChanged; } + void onResize() { ++m_nResizeCount; } + //@endcond + }; + + /// Dummy internal statistics for \ref refinable mutex policy + struct empty_refinable_stat { + //@cond + void onCellLock() const {} + void onCellWaitResizing() const {} + void onCellArrayChanged() const {} + void onCellLockFailed() const {} + void onSecondCellLock() const {} + void onSecondCellLockFailed() const {} + void onFullLock() const {} + void onFullLockIter() const {} + void onResizeLock() const {} + void onResizeLockIter() const {} + void onResizeLockArrayChanged() const {} + void onResize() const {} + //@endcond + }; + + /// Refinable concurrent access policy + /** + This is one of available \p opt::mutex_policy option type for \p CuckooSet + + Refining is like a striping technique (see \p cuckoo::striping) + but it allows growing the size of lock array when resizing the hash table. + So, the sizes of hash table and lock array are equal. + + Template arguments: + - \p RecursiveLock - the type of mutex. Reentrant (recursive) mutex is required. + The default is \p std::recursive_mutex. The mutex type should be default-constructible. + - \p Arity - unsigned int constant that specifies an arity. The arity is the count of hash functors, i.e., the + count of lock arrays. Default value is 2. + - \p BackOff - back-off strategy. 
Default is \p cds::backoff::Default + - \p Alloc - allocator type used for lock array memory allocation. Default is \p CDS_DEFAULT_ALLOCATOR. + - \p Stat - internal statistics type. Note that this template argument is automatically selected by \ref CuckooSet + class according to its \p opt::stat option. + */ + template < + class RecursiveLock = std::recursive_mutex, + unsigned int Arity = 2, + typename BackOff = cds::backoff::Default, + class Alloc = CDS_DEFAULT_ALLOCATOR, + class Stat = empty_refinable_stat + > + class refinable + { + public: + typedef RecursiveLock lock_type ; ///< lock type + typedef Alloc allocator_type ; ///< allocator type + typedef BackOff back_off ; ///< back-off strategy + typedef Stat statistics_type ; ///< internal statistics type + static unsigned int const c_nArity = Arity; ///< the arity + + //@cond + typedef refinable_stat real_stat; + typedef empty_refinable_stat empty_stat; + + template + struct rebind_statistics { + typedef refinable< lock_type, c_nArity, back_off, allocator_type, Stat2> other; + }; + //@endcond + + protected: + //@cond + typedef cds::sync::trivial_select_policy lock_selection_policy; + + class lock_array_type + : public cds::sync::lock_array< lock_type, lock_selection_policy, allocator_type > + , public std::enable_shared_from_this< lock_array_type > + { + typedef cds::sync::lock_array< lock_type, lock_selection_policy, allocator_type > lock_array_base; + public: + lock_array_type( size_t nCapacity ) + : lock_array_base( nCapacity ) + {} + }; + typedef std::shared_ptr< lock_array_type > lock_array_ptr; + typedef cds::details::Allocator< lock_array_type, allocator_type > lock_array_allocator; + + typedef unsigned long long owner_t; + typedef cds::OS::ThreadId threadId_t; + + typedef cds::sync::spin spinlock_type; + typedef std::unique_lock< spinlock_type > scoped_spinlock; + //@endcond + + protected: + //@cond + static owner_t const c_nOwnerMask = (((owner_t) 1) << (sizeof(owner_t) * 8 - 1)) - 1; + + 
atomics::atomic< owner_t > m_Owner ; ///< owner mark (thread id + boolean flag) + atomics::atomic m_nCapacity ; ///< lock array capacity + lock_array_ptr m_arrLocks[ c_nArity ] ; ///< Lock array. The capacity of array is specified in constructor. + spinlock_type m_access ; ///< access to m_arrLocks + statistics_type m_Stat ; ///< internal statistics + //@endcond + + protected: + //@cond + struct lock_array_disposer { + void operator()( lock_array_type * pArr ) + { + // Seems, there is a false positive in std::shared_ptr deallocation in uninstrumented libc++ + // see, for example, https://groups.google.com/forum/#!topic/thread-sanitizer/eHu4dE_z7Cc + // https://reviews.llvm.org/D21609 + CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN; + lock_array_allocator().Delete( pArr ); + CDS_TSAN_ANNOTATE_IGNORE_WRITES_END; + } + }; + + lock_array_ptr create_lock_array( size_t nCapacity ) + { + return lock_array_ptr( lock_array_allocator().New( nCapacity ), lock_array_disposer()); + } + + void acquire( size_t const * arrHash, lock_array_ptr * pLockArr, lock_type ** parrLock ) + { + owner_t me = (owner_t) cds::OS::get_current_thread_id(); + owner_t who; + size_t cur_capacity; + + back_off bkoff; + while ( true ) { + + { + scoped_spinlock sl(m_access); + for ( unsigned int i = 0; i < c_nArity; ++i ) + pLockArr[i] = m_arrLocks[i]; + cur_capacity = m_nCapacity.load( atomics::memory_order_acquire ); + } + + // wait while resizing + while ( true ) { + who = m_Owner.load( atomics::memory_order_acquire ); + if ( !( who & 1 ) || (who >> 1) == (me & c_nOwnerMask)) + break; + bkoff(); + m_Stat.onCellWaitResizing(); + } + + if ( cur_capacity == m_nCapacity.load( atomics::memory_order_acquire )) { + + size_t const nMask = pLockArr[0]->size() - 1; + assert( cds::beans::is_power2( nMask + 1 )); + + for ( unsigned int i = 0; i < c_nArity; ++i ) { + parrLock[i] = &( pLockArr[i]->at( arrHash[i] & nMask )); + parrLock[i]->lock(); + } + + who = m_Owner.load( atomics::memory_order_acquire ); + if ( ( !(who 
& 1) || (who >> 1) == (me & c_nOwnerMask)) && cur_capacity == m_nCapacity.load( atomics::memory_order_acquire )) { + m_Stat.onCellLock(); + return; + } + + for ( unsigned int i = 0; i < c_nArity; ++i ) + parrLock[i]->unlock(); + + m_Stat.onCellLockFailed(); + } + else + m_Stat.onCellArrayChanged(); + + // clears pLockArr can lead to calling dtor for each item of pLockArr[i] that may be a heavy-weighted operation + // (each pLockArr[i] is a shared pointer to array of a ton of mutexes) + // It is better to do this before the next loop iteration where we will use spin-locked assignment to pLockArr + // However, destructing a lot of mutexes under spin-lock is a bad solution + for ( unsigned int i = 0; i < c_nArity; ++i ) + pLockArr[i].reset(); + } + } + + bool try_second_acquire( size_t const * arrHash, lock_type ** parrLock ) + { + // It is assumed that the current thread already has a lock + // and requires a second lock for other hash + + size_t const nMask = m_nCapacity.load(atomics::memory_order_acquire) - 1; + size_t nCell = m_arrLocks[0]->try_lock( arrHash[0] & nMask); + if ( nCell == lock_array_type::c_nUnspecifiedCell ) { + m_Stat.onSecondCellLockFailed(); + return false; + } + parrLock[0] = &(m_arrLocks[0]->at(nCell)); + + for ( unsigned int i = 1; i < c_nArity; ++i ) { + parrLock[i] = &( m_arrLocks[i]->at( m_arrLocks[i]->lock( arrHash[i] & nMask))); + } + + m_Stat.onSecondCellLock(); + return true; + } + + void acquire_all() + { + owner_t me = (owner_t) cds::OS::get_current_thread_id(); + + back_off bkoff; + while ( true ) { + owner_t ownNull = 0; + if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, atomics::memory_order_acq_rel, atomics::memory_order_relaxed )) { + m_arrLocks[0]->lock_all(); + + m_Stat.onFullLock(); + return; + } + bkoff(); + m_Stat.onFullLockIter(); + } + } + + void release_all() + { + m_arrLocks[0]->unlock_all(); + m_Owner.store( 0, atomics::memory_order_release ); + } + + void acquire_resize( lock_array_ptr * pOldLocks ) + { + 
owner_t me = (owner_t) cds::OS::get_current_thread_id(); + size_t cur_capacity; + + while ( true ) { + { + scoped_spinlock sl(m_access); + for ( unsigned int i = 0; i < c_nArity; ++i ) + pOldLocks[i] = m_arrLocks[i]; + cur_capacity = m_nCapacity.load( atomics::memory_order_acquire ); + } + + // global lock + owner_t ownNull = 0; + if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, atomics::memory_order_acq_rel, atomics::memory_order_relaxed )) { + if ( cur_capacity == m_nCapacity.load( atomics::memory_order_acquire )) { + pOldLocks[0]->lock_all(); + m_Stat.onResizeLock(); + return; + } + + m_Owner.store( 0, atomics::memory_order_release ); + m_Stat.onResizeLockArrayChanged(); + } + else + m_Stat.onResizeLockIter(); + + // clears pOldLocks can lead to calling dtor for each item of pOldLocks[i] that may be a heavy-weighted operation + // (each pOldLocks[i] is a shared pointer to array of a ton of mutexes) + // It is better to do this before the next loop iteration where we will use spin-locked assignment to pOldLocks + // However, destructing a lot of mutexes under spin-lock is a bad solution + for ( unsigned int i = 0; i < c_nArity; ++i ) + pOldLocks[i].reset(); + } + } + + void release_resize( lock_array_ptr * pOldLocks ) + { + m_Owner.store( 0, atomics::memory_order_release ); + pOldLocks[0]->unlock_all(); + } + //@endcond + + public: + //@cond + class scoped_cell_lock { + lock_type * m_arrLock[ c_nArity ]; + lock_array_ptr m_arrLockArr[ c_nArity ]; + + public: + scoped_cell_lock( refinable& policy, size_t const* arrHash ) + { + policy.acquire( arrHash, m_arrLockArr, m_arrLock ); + } + + ~scoped_cell_lock() + { + for ( unsigned int i = 0; i < c_nArity; ++i ) + m_arrLock[i]->unlock(); + } + }; + + class scoped_cell_trylock { + lock_type * m_arrLock[ c_nArity ]; + bool m_bLocked; + + public: + scoped_cell_trylock( refinable& policy, size_t const* arrHash ) + { + m_bLocked = policy.try_second_acquire( arrHash, m_arrLock ); + } + + ~scoped_cell_trylock() + 
{ + if ( m_bLocked ) { + for ( unsigned int i = 0; i < c_nArity; ++i ) + m_arrLock[i]->unlock(); + } + } + + bool locked() const + { + return m_bLocked; + } + }; + + class scoped_full_lock { + refinable& m_policy; + public: + scoped_full_lock( refinable& policy ) + : m_policy( policy ) + { + policy.acquire_all(); + } + ~scoped_full_lock() + { + m_policy.release_all(); + } + }; + + class scoped_resize_lock + { + refinable& m_policy; + lock_array_ptr m_arrLocks[ c_nArity ]; + public: + scoped_resize_lock( refinable& policy ) + : m_policy(policy) + { + policy.acquire_resize( m_arrLocks ); + } + ~scoped_resize_lock() + { + m_policy.release_resize( m_arrLocks ); + } + }; + //@endcond + + public: + /// Constructor + refinable( + size_t nLockCount ///< The size of lock array. Must be power of two. + ) : m_Owner(0) + , m_nCapacity( nLockCount ) + { + assert( cds::beans::is_power2( nLockCount )); + for ( unsigned int i = 0; i < c_nArity; ++i ) + m_arrLocks[i] = create_lock_array( nLockCount ); + } + + //@cond + void resize( size_t nCapacity ) + { + lock_array_ptr pNew[ c_nArity ]; + for ( unsigned int i = 0; i < c_nArity; ++i ) + pNew[i] = create_lock_array( nCapacity ); + + { + scoped_spinlock sl(m_access); + m_nCapacity.store( nCapacity, atomics::memory_order_release ); + for ( unsigned int i = 0; i < c_nArity; ++i ) + m_arrLocks[i] = pNew[i]; + } + + m_Stat.onResize(); + } + //@endcond + + /// Returns lock array size + /** + Lock array size is not a constant for \p refinable policy and can be changed when the set is resized. 
+ */ + size_t lock_count() const + { + return m_nCapacity.load(atomics::memory_order_relaxed); + } + + /// Returns the arity of \p refinable mutex policy + constexpr unsigned int arity() const noexcept + { + return c_nArity; + } + + /// Returns internal statistics + statistics_type const& statistics() const + { + return m_Stat; + } + }; + + /// \p CuckooSet internal statistics + struct stat { + typedef cds::atomicity::event_counter counter_type ; ///< Counter type + + counter_type m_nRelocateCallCount ; ///< Count of \p relocate() function call + counter_type m_nRelocateRoundCount ; ///< Count of attempts to relocate items + counter_type m_nFalseRelocateCount ; ///< Count of unneeded attempts of \p relocate call + counter_type m_nSuccessRelocateCount ; ///< Count of successful item relocating + counter_type m_nRelocateAboveThresholdCount; ///< Count of item relocating above probeset threshold + counter_type m_nFailedRelocateCount ; ///< Count of failed relocation attemp (when all probeset is full) + + counter_type m_nResizeCallCount ; ///< Count of \p resize() function call + counter_type m_nFalseResizeCount ; ///< Count of false \p resize() function call (when other thread has been resized the set) + counter_type m_nResizeSuccessNodeMove; ///< Count of successful node moving when resizing + counter_type m_nResizeRelocateCall ; ///< Count of \p relocate() function call from \p resize function + + counter_type m_nInsertSuccess ; ///< Count of successful \p insert() function call + counter_type m_nInsertFailed ; ///< Count of failed \p insert() function call + counter_type m_nInsertResizeCount ; ///< Count of \p resize() function call from \p insert() + counter_type m_nInsertRelocateCount ; ///< Count of \p relocate() function call from \p insert() + counter_type m_nInsertRelocateFault ; ///< Count of failed \p relocate() function call from \p insert() + + counter_type m_nUpdateExistCount ; ///< Count of call \p update() function for existing node + counter_type 
m_nUpdateSuccessCount ; ///< Count of successful \p insert() function call for new node + counter_type m_nUpdateResizeCount ; ///< Count of \p resize() function call from \p update() + counter_type m_nUpdateRelocateCount ; ///< Count of \p relocate() function call from \p update() + counter_type m_nUpdateRelocateFault ; ///< Count of failed \p relocate() function call from \p update() + + counter_type m_nUnlinkSuccess ; ///< Count of success \p unlink() function call + counter_type m_nUnlinkFailed ; ///< Count of failed \p unlink() function call + + counter_type m_nEraseSuccess ; ///< Count of success \p erase() function call + counter_type m_nEraseFailed ; ///< Count of failed \p erase() function call + + counter_type m_nFindSuccess ; ///< Count of success \p find() function call + counter_type m_nFindFailed ; ///< Count of failed \p find() function call + + counter_type m_nFindEqualSuccess ; ///< Count of success \p find_equal() function call + counter_type m_nFindEqualFailed ; ///< Count of failed \p find_equal() function call + + counter_type m_nFindWithSuccess ; ///< Count of success \p find_with() function call + counter_type m_nFindWithFailed ; ///< Count of failed \p find_with() function call + + //@cond + void onRelocateCall() { ++m_nRelocateCallCount; } + void onRelocateRound() { ++m_nRelocateRoundCount; } + void onFalseRelocateRound() { ++m_nFalseRelocateCount; } + void onSuccessRelocateRound(){ ++m_nSuccessRelocateCount; } + void onRelocateAboveThresholdRound() { ++m_nRelocateAboveThresholdCount; } + void onFailedRelocate() { ++m_nFailedRelocateCount; } + + void onResizeCall() { ++m_nResizeCallCount; } + void onFalseResizeCall() { ++m_nFalseResizeCount; } + void onResizeSuccessMove() { ++m_nResizeSuccessNodeMove; } + void onResizeRelocateCall() { ++m_nResizeRelocateCall; } + + void onInsertSuccess() { ++m_nInsertSuccess; } + void onInsertFailed() { ++m_nInsertFailed; } + void onInsertResize() { ++m_nInsertResizeCount; } + void onInsertRelocate() { 
++m_nInsertRelocateCount; } + void onInsertRelocateFault() { ++m_nInsertRelocateFault; } + + void onUpdateExist() { ++m_nUpdateExistCount; } + void onUpdateSuccess() { ++m_nUpdateSuccessCount; } + void onUpdateResize() { ++m_nUpdateResizeCount; } + void onUpdateRelocate() { ++m_nUpdateRelocateCount; } + void onUpdateRelocateFault() { ++m_nUpdateRelocateFault; } + + void onUnlinkSuccess() { ++m_nUnlinkSuccess; } + void onUnlinkFailed() { ++m_nUnlinkFailed; } + + void onEraseSuccess() { ++m_nEraseSuccess; } + void onEraseFailed() { ++m_nEraseFailed; } + + void onFindSuccess() { ++m_nFindSuccess; } + void onFindFailed() { ++m_nFindFailed; } + + void onFindWithSuccess() { ++m_nFindWithSuccess; } + void onFindWithFailed() { ++m_nFindWithFailed; } + //@endcond + }; + + /// CuckooSet empty internal statistics + struct empty_stat { + //@cond + void onRelocateCall() const {} + void onRelocateRound() const {} + void onFalseRelocateRound() const {} + void onSuccessRelocateRound()const {} + void onRelocateAboveThresholdRound() const {} + void onFailedRelocate() const {} + + void onResizeCall() const {} + void onFalseResizeCall() const {} + void onResizeSuccessMove() const {} + void onResizeRelocateCall() const {} + + void onInsertSuccess() const {} + void onInsertFailed() const {} + void onInsertResize() const {} + void onInsertRelocate() const {} + void onInsertRelocateFault() const {} + + void onUpdateExist() const {} + void onUpdateSuccess() const {} + void onUpdateResize() const {} + void onUpdateRelocate() const {} + void onUpdateRelocateFault() const {} + + void onUnlinkSuccess() const {} + void onUnlinkFailed() const {} + + void onEraseSuccess() const {} + void onEraseFailed() const {} + + void onFindSuccess() const {} + void onFindFailed() const {} + + void onFindWithSuccess() const {} + void onFindWithFailed() const {} + //@endcond + }; + + /// Type traits for CuckooSet class + struct traits + { + /// Hook used + /** + Possible values are: cuckoo::base_hook, 
cuckoo::member_hook, cuckoo::traits_hook. + */ + typedef base_hook<> hook; + + /// Hash functors tuple + /** + This is mandatory type and has no predefined one. + + At least, two hash functors should be provided. All hash functor + should be orthogonal (different): for each i,j: i != j => h[i](x) != h[j](x) . + The hash functors are defined as std::tuple< H1, H2, ... Hn > : + \@code cds::opt::hash< std::tuple< h1, h2 > > \@endcode + The number of hash functors specifies the number \p k - the count of hash tables in cuckoo hashing. + + To specify hash tuple in traits you should use \p cds::opt::hash_tuple: + \code + struct my_traits: public cds::intrusive::cuckoo::traits { + typedef cds::opt::hash_tuple< hash1, hash2 > hash; + }; + \endcode + */ + typedef cds::opt::none hash; + + /// Concurrent access policy + /** + Available opt::mutex_policy types: + - \p cuckoo::striping - simple, but the lock array is not resizable + - \p cuckoo::refinable - resizable lock array, but more complex access to set data. + + Default is \p cuckoo::striping. + */ + typedef cuckoo::striping<> mutex_policy; + + /// Key equality functor + /** + Default is std::equal_to + */ + typedef opt::none equal_to; + + /// Key comparing functor + /** + No default functor is provided. If the option is not specified, the \p less is used. + */ + typedef opt::none compare; + + /// specifies binary predicate used for key comparison. + /** + Default is \p std::less. + */ + typedef opt::none less; + + /// Item counter + /** + The type for item counting feature. + Default is \p cds::atomicity::item_counter + + Only atomic item counter type is allowed. + */ + typedef atomicity::item_counter item_counter; + + /// Allocator type + /** + The allocator type for allocating bucket tables. + */ + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// Disposer + /** + The disposer functor is used in \p CuckooSet::clear() member function + to free set's node. 
+ */ + typedef intrusive::opt::v::empty_disposer disposer; + + /// Internal statistics. Available statistics: \p cuckoo::stat, \p cuckoo::empty_stat + typedef empty_stat stat; + }; + + /// Metafunction converting option list to \p CuckooSet traits + /** + Template argument list \p Options... are: + - \p intrusive::opt::hook - hook used. Possible values are: \p cuckoo::base_hook, \p cuckoo::member_hook, + \p cuckoo::traits_hook. + If the option is not specified, %cuckoo::base_hook<> is used. + - \p opt::hash - hash functor tuple, mandatory option. At least, two hash functors should be provided. All hash functor + should be orthogonal (different): for each i,j: i != j => h[i](x) != h[j](x) . + The hash functors are passed as std::tuple< H1, H2, ... Hn > . The number of hash functors specifies + the number \p k - the count of hash tables in cuckoo hashing. + - \p opt::mutex_policy - concurrent access policy. + Available policies: \p cuckoo::striping, \p cuckoo::refinable. + Default is \p %cuckoo::striping. + - \p opt::equal_to - key equality functor like \p std::equal_to. + If this functor is defined then the probe-set will be unordered. + If \p %opt::compare or \p %opt::less option is specified too, then the probe-set will be ordered + and \p %opt::equal_to will be ignored. + - \p opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the \p %opt::less is used. + If \p %opt::compare or \p %opt::less option is specified, then the probe-set will be ordered. + - \p opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + If \p %opt::compare or \p %opt::less option is specified, then the probe-set will be ordered. + - \p opt::item_counter - the type of item counting feature. Default is \p atomicity::item_counter + The item counter should be atomic. + - \p opt::allocator - the allocator type using for allocating bucket tables. 
+ Default is \ref CDS_DEFAULT_ALLOCATOR + - \p intrusive::opt::disposer - the disposer type used in \p clear() member function for + freeing nodes. Default is \p intrusive::opt::v::empty_disposer + - \p opt::stat - internal statistics. Possibly types: \p cuckoo::stat, \p cuckoo::empty_stat. + Default is \p %cuckoo::empty_stat + + The probe set traits \p cuckoo::probeset_type and \p cuckoo::store_hash are taken from \p node type + specified by \p opt::hook option. + */ + template + struct make_traits { + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< cuckoo::traits, Options... >::type + ,Options... + >::type type ; ///< Result of metafunction + }; + + //@cond + namespace details { + template + class bucket_entry; + + template + class bucket_entry + { + public: + typedef Node node_type; + typedef cuckoo::list_probeset_class probeset_class; + typedef cuckoo::list probeset_type; + + protected: + node_type * pHead; + unsigned int nSize; + + public: + class iterator + { + node_type * pNode; + friend class bucket_entry; + + public: + iterator() + : pNode( nullptr ) + {} + iterator( node_type * p ) + : pNode( p ) + {} + iterator( iterator const& it) + : pNode( it.pNode ) + {} + + iterator& operator=( iterator const& it ) + { + pNode = it.pNode; + return *this; + } + + iterator& operator=( node_type * p ) + { + pNode = p; + return *this; + } + + node_type * operator->() + { + return pNode; + } + node_type& operator*() + { + assert( pNode != nullptr ); + return *pNode; + } + + // preinc + iterator& operator ++() + { + if ( pNode ) + pNode = pNode->m_pNext; + return *this; + } + + bool operator==(iterator const& it ) const + { + return pNode == it.pNode; + } + bool operator!=(iterator const& it ) const + { + return !( *this == it ); + } + }; + + public: + bucket_entry() + : pHead( nullptr ) + , nSize(0) + { + static_assert(( std::is_same::value ), "Incompatible node type" ); + } + + iterator begin() + { + return iterator(pHead); + } + iterator 
end() + { + return iterator(); + } + + void insert_after( iterator it, node_type * p ) + { + node_type * pPrev = it.pNode; + if ( pPrev ) { + p->m_pNext = pPrev->m_pNext; + pPrev->m_pNext = p; + } + else { + // insert as head + p->m_pNext = pHead; + pHead = p; + } + ++nSize; + } + + void remove( iterator itPrev, iterator itWhat ) + { + node_type * pPrev = itPrev.pNode; + node_type * pWhat = itWhat.pNode; + assert( (!pPrev && pWhat == pHead) || (pPrev && pPrev->m_pNext == pWhat)); + + if ( pPrev ) + pPrev->m_pNext = pWhat->m_pNext; + else { + assert( pWhat == pHead ); + pHead = pHead->m_pNext; + } + pWhat->clear(); + --nSize; + } + + void clear() + { + node_type * pNext; + for ( node_type * pNode = pHead; pNode; pNode = pNext ) { + pNext = pNode->m_pNext; + pNode->clear(); + } + + nSize = 0; + pHead = nullptr; + } + + template + void clear( Disposer disp ) + { + node_type * pNext; + for ( node_type * pNode = pHead; pNode; pNode = pNext ) { + pNext = pNode->m_pNext; + pNode->clear(); + disp( pNode ); + } + + nSize = 0; + pHead = nullptr; + } + + unsigned int size() const + { + return nSize; + } + }; + + template + class bucket_entry> + { + public: + typedef Node node_type; + typedef cuckoo::vector_probeset_class probeset_class; + typedef cuckoo::vector probeset_type; + + static unsigned int const c_nCapacity = probeset_type::c_nCapacity; + + protected: + node_type * m_arrNode[c_nCapacity]; + unsigned int m_nSize; + + void shift_up( unsigned int nFrom ) + { + assert( m_nSize < c_nCapacity ); + + if ( nFrom < m_nSize ) + std::copy_backward( m_arrNode + nFrom, m_arrNode + m_nSize, m_arrNode + m_nSize + 1 ); + } + + void shift_down( node_type ** pFrom ) + { + assert( m_arrNode <= pFrom && pFrom < m_arrNode + m_nSize); + std::copy( pFrom + 1, m_arrNode + m_nSize, pFrom ); + } + public: + class iterator + { + node_type ** pArr; + friend class bucket_entry; + + public: + iterator() + : pArr( nullptr ) + {} + iterator( node_type ** p ) + : pArr(p) + {} + iterator( iterator 
const& it) + : pArr( it.pArr ) + {} + + iterator& operator=( iterator const& it ) + { + pArr = it.pArr; + return *this; + } + + node_type * operator->() + { + assert( pArr != nullptr ); + return *pArr; + } + node_type& operator*() + { + assert( pArr != nullptr ); + assert( *pArr != nullptr ); + return *(*pArr); + } + + // preinc + iterator& operator ++() + { + ++pArr; + return *this; + } + + bool operator==(iterator const& it ) const + { + return pArr == it.pArr; + } + bool operator!=(iterator const& it ) const + { + return !( *this == it ); + } + }; + + public: + bucket_entry() + : m_nSize(0) + { + memset( m_arrNode, 0, sizeof(m_arrNode)); + static_assert(( std::is_same::value ), "Incompatible node type" ); + } + + iterator begin() + { + return iterator(m_arrNode); + } + iterator end() + { + return iterator(m_arrNode + size()); + } + + void insert_after( iterator it, node_type * p ) + { + assert( m_nSize < c_nCapacity ); + assert( !it.pArr || (m_arrNode <= it.pArr && it.pArr <= m_arrNode + m_nSize)); + + if ( it.pArr ) { + shift_up( static_cast(it.pArr - m_arrNode) + 1 ); + it.pArr[1] = p; + } + else { + shift_up(0); + m_arrNode[0] = p; + } + ++m_nSize; + } + + void remove( iterator /*itPrev*/, iterator itWhat ) + { + itWhat->clear(); + shift_down( itWhat.pArr ); + --m_nSize; + } + + void clear() + { + m_nSize = 0; + } + + template + void clear( Disposer disp ) + { + for ( unsigned int i = 0; i < m_nSize; ++i ) { + disp( m_arrNode[i] ); + } + m_nSize = 0; + } + + unsigned int size() const + { + return m_nSize; + } + }; + + template + struct hash_ops { + static void store( Node * pNode, size_t const* pHashes ) + { + memcpy( pNode->m_arrHash, pHashes, sizeof(pHashes[0]) * ArraySize ); + } + static bool equal_to( Node& node, unsigned int nTable, size_t nHash ) + { + return node.m_arrHash[nTable] == nHash; + } + }; + template + struct hash_ops + { + static void store( Node * /*pNode*/, size_t * /*pHashes*/ ) + {} + static bool equal_to( Node& /*node*/, unsigned int 
/*nTable*/, size_t /*nHash*/ ) + { + return true; + } + }; + + template + struct contains; + + template + struct contains + { + template + static bool find( BucketEntry& probeset, Position& pos, unsigned int /*nTable*/, size_t /*nHash*/, Q const& val, Compare cmp ) + { + // Ordered version + typedef typename BucketEntry::iterator bucket_iterator; + + bucket_iterator itPrev; + + for ( bucket_iterator it = probeset.begin(), itEnd = probeset.end(); it != itEnd; ++it ) { + int cmpRes = cmp( *NodeTraits::to_value_ptr(*it), val ); + if ( cmpRes >= 0 ) { + pos.itFound = it; + pos.itPrev = itPrev; + return cmpRes == 0; + } + + itPrev = it; + } + + pos.itPrev = itPrev; + pos.itFound = probeset.end(); + return false; + } + }; + + template + struct contains + { + template + static bool find( BucketEntry& probeset, Position& pos, unsigned int nTable, size_t nHash, Q const& val, EqualTo eq ) + { + // Unordered version + typedef typename BucketEntry::iterator bucket_iterator; + typedef typename BucketEntry::node_type node_type; + + bucket_iterator itPrev; + + for ( bucket_iterator it = probeset.begin(), itEnd = probeset.end(); it != itEnd; ++it ) { + if ( hash_ops::equal_to( *it, nTable, nHash ) && eq( *NodeTraits::to_value_ptr(*it), val )) { + pos.itFound = it; + pos.itPrev = itPrev; + return true; + } + itPrev = it; + } + + pos.itPrev = itPrev; + pos.itFound = probeset.end(); + return false; + } + }; + + } // namespace details + //@endcond + + } // namespace cuckoo + + /// Cuckoo hash set + /** @ingroup cds_intrusive_map + + Source + - [2007] M.Herlihy, N.Shavit, M.Tzafrir "Concurrent Cuckoo Hashing. Technical report" + - [2008] Maurice Herlihy, Nir Shavit "The Art of Multiprocessor Programming" + + About Cuckoo hashing + + [From "The Art of Multiprocessor Programming"] + Cuckoo hashing is a hashing algorithm in which a newly added item displaces any earlier item + occupying the same slot. For brevity, a table is a k-entry array of items. 
For a hash set of size + N = 2k we use a two-entry array of tables, and two independent hash functions, + h0, h1: KeyRange -> 0,...,k-1 + mapping the set of possible keys to entries in he array. To test whether a value \p x is in the set, + find(x) tests whether either table[0][h0(x)] or table[1][h1(x)] is + equal to \p x. Similarly, erase(x)checks whether \p x is in either table[0][h0(x)] + or table[1][h1(x)], ad removes it if found. + + The insert(x) successively "kicks out" conflicting items until every key has a slot. + To add \p x, the method swaps \p x with \p y, the current occupant of table[0][h0(x)]. + If the prior value was \p nullptr, it is done. Otherwise, it swaps the newly nest-less value \p y + for the current occupant of table[1][h1(y)] in the same way. As before, if the prior value + was \p nullptr, it is done. Otherwise, the method continues swapping entries (alternating tables) + until it finds an empty slot. We might not find an empty slot, either because the table is full, + or because the sequence of displacement forms a cycle. We therefore need an upper limit on the + number of successive displacements we are willing to undertake. When this limit is exceeded, + we resize the hash table, choose new hash functions and start over. + + For concurrent cuckoo hashing, rather than organizing the set as a two-dimensional table of + items, we use two-dimensional table of probe sets, where a probe set is a constant-sized set + of items with the same hash code. Each probe set holds at most \p PROBE_SIZE items, but the algorithm + tries to ensure that when the set is quiescent (i.e no method call in progress) each probe set + holds no more than THRESHOLD < PROBE_SET items. While method calls are in-flight, a probe + set may temporarily hold more than \p THRESHOLD but never more than \p PROBE_SET items. + + In current implementation, a probe set can be defined either as a (single-linked) list + or as a fixed-sized vector, optionally ordered. 
+ + In description above two-table cuckoo hashing (k = 2) has been considered. + We can generalize this approach for k >= 2 when we have \p k hash functions + h[0], ... h[k-1] and \p k tables table[0], ... table[k-1]. + + The search in probe set is linear, the complexity is O(PROBE_SET) . + The probe set may be ordered or not. Ordered probe set can be more efficient since + the average search complexity is O(PROBE_SET/2). + However, the overhead of sorting can eliminate a gain of ordered search. + + The probe set is ordered if \p compare or \p less is specified in \p Traits template + parameter. Otherwise, the probe set is unordered and \p Traits should provide + \p equal_to predicate. + + The \p cds::intrusive::cuckoo namespace contains \p %CuckooSet-related declarations. + + Template arguments: + - \p T - the type stored in the set. The type must be based on \p cuckoo::node (for \p cuckoo::base_hook) + or it must have a member of type %cuckoo::node (for \p cuckoo::member_hook), + or it must be convertible to \p %cuckoo::node (for \p cuckoo::traits_hook) + - \p Traits - type traits, default is \p cuckoo::traits. It is possible to declare option-based + set with \p cuckoo::make_traits metafunction result as \p Traits template argument. + + How to use + + You should incorporate \p cuckoo::node into your struct \p T and provide + appropriate \p cuckoo::traits::hook in your \p Traits template parameters. + Usually, for \p Traits you define a struct based on \p cuckoo::traits. + + Example for base hook and list-based probe-set: + \code + #include + + // Data stored in cuckoo set + // We use list as probe-set container and store hash values in the node + // (since we use two hash functions we should store 2 hash values per node) + struct my_data: public cds::intrusive::cuckoo::node< cds::intrusive::cuckoo::list, 2 > + { + // key field + std::string strKey; + + // other data + // ... 
+ }; + + // Provide equal_to functor for my_data since we will use unordered probe-set + struct my_data_equal_to { + bool operator()( const my_data& d1, const my_data& d2 ) const + { + return d1.strKey.compare( d2.strKey ) == 0; + } + + bool operator()( const my_data& d, const std::string& s ) const + { + return d.strKey.compare(s) == 0; + } + + bool operator()( const std::string& s, const my_data& d ) const + { + return s.compare( d.strKey ) == 0; + } + }; + + // Provide two hash functor for my_data + struct hash1 { + size_t operator()(std::string const& s) const + { + return cds::opt::v::hash( s ); + } + size_t operator()( my_data const& d ) const + { + return (*this)( d.strKey ); + } + }; + + struct hash2: private hash1 { + size_t operator()(std::string const& s) const + { + size_t h = ~( hash1::operator()(s)); + return ~h + 0x9e3779b9 + (h << 6) + (h >> 2); + } + size_t operator()( my_data const& d ) const + { + return (*this)( d.strKey ); + } + }; + + // Declare type traits + struct my_traits: public cds::intrusive::cuckoo::traits + { + typedef cds::intrusive::cuckoo::base_hook< + cds::intrusive::cuckoo::probeset_type< my_data::probeset_type > + ,cds::intrusive::cuckoo::store_hash< my_data::hash_array_size > + > hook; + typedef my_data_equa_to equal_to; + typedef cds::opt::hash_tuple< hash1, hash2 > hash; + }; + + // Declare CuckooSet type + typedef cds::intrusive::CuckooSet< my_data, my_traits > my_cuckoo_set; + + // Equal option-based declaration + typedef cds::intrusive::CuckooSet< my_data, + cds::intrusive::cuckoo::make_traits< + cds::intrusive::opt::hook< cds::intrusive::cuckoo::base_hook< + cds::intrusive::cuckoo::probeset_type< my_data::probeset_type > + ,cds::intrusive::cuckoo::store_hash< my_data::hash_array_size > + > > + ,cds::opt::hash< std::tuple< hash1, hash2 > > + ,cds::opt::equal_to< my_data_equal_to > + >::type + > opt_cuckoo_set; + \endcode + + If we provide \p compare function instead of \p equal_to for \p my_data + we get as a result a 
cuckoo set with ordered probe set that may improve + performance. + Example for base hook and ordered vector-based probe-set: + + \code + #include + + // Data stored in cuckoo set + // We use a vector of capacity 4 as probe-set container and store hash values in the node + // (since we use two hash functions we should store 2 hash values per node) + struct my_data: public cds::intrusive::cuckoo::node< cds::intrusive::cuckoo::vector<4>, 2 > + { + // key field + std::string strKey; + + // other data + // ... + }; + + // Provide compare functor for my_data since we want to use ordered probe-set + struct my_data_compare { + int operator()( const my_data& d1, const my_data& d2 ) const + { + return d1.strKey.compare( d2.strKey ); + } + + int operator()( const my_data& d, const std::string& s ) const + { + return d.strKey.compare(s); + } + + int operator()( const std::string& s, const my_data& d ) const + { + return s.compare( d.strKey ); + } + }; + + // Provide two hash functor for my_data + struct hash1 { + size_t operator()(std::string const& s) const + { + return cds::opt::v::hash( s ); + } + size_t operator()( my_data const& d ) const + { + return (*this)( d.strKey ); + } + }; + + struct hash2: private hash1 { + size_t operator()(std::string const& s) const + { + size_t h = ~( hash1::operator()(s)); + return ~h + 0x9e3779b9 + (h << 6) + (h >> 2); + } + size_t operator()( my_data const& d ) const + { + return (*this)( d.strKey ); + } + }; + + // Declare type traits + struct my_traits: public cds::intrusive::cuckoo::traits + { + typedef cds::intrusive::cuckoo::base_hook< + cds::intrusive::cuckoo::probeset_type< my_data::probeset_type > + ,cds::intrusive::cuckoo::store_hash< my_data::hash_array_size > + > hook; + typedef my_data_compare compare; + typedef cds::opt::hash_tuple< hash1, hash2 > hash; + }; + + // Declare CuckooSet type + typedef cds::intrusive::CuckooSet< my_data, my_traits > my_cuckoo_set; + + // Equal option-based declaration + typedef 
cds::intrusive::CuckooSet< my_data, + cds::intrusive::cuckoo::make_traits< + cds::intrusive::opt::hook< cds::intrusive::cuckoo::base_hook< + cds::intrusive::cuckoo::probeset_type< my_data::probeset_type > + ,cds::intrusive::cuckoo::store_hash< my_data::hash_array_size > + > > + ,cds::opt::hash< std::tuple< hash1, hash2 > > + ,cds::opt::compare< my_data_compare > + >::type + > opt_cuckoo_set; + \endcode + + */ + template + class CuckooSet + { + public: + typedef T value_type; ///< The value type stored in the set + typedef Traits traits; ///< Set traits + + typedef typename traits::hook hook; ///< hook type + typedef typename hook::node_type node_type; ///< node type + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits + + typedef typename traits::hash hash; ///< hash functor tuple wrapped for internal use + typedef typename hash::hash_tuple_type hash_tuple_type; ///< Type of hash tuple + + typedef typename traits::stat stat; ///< internal statistics type + + typedef typename traits::mutex_policy original_mutex_policy; ///< Concurrent access policy, see \p cuckoo::traits::mutex_policy + + //@cond + typedef typename original_mutex_policy::template rebind_statistics< + typename std::conditional< + std::is_same< stat, cuckoo::empty_stat >::value + ,typename original_mutex_policy::empty_stat + ,typename original_mutex_policy::real_stat + >::type + >::other mutex_policy; + //@endcond + + /// Probe set should be ordered or not + /** + If \p Traits specifies \p cmpare or \p less functor then the set is ordered. + Otherwise, it is unordered and \p Traits should provide \p equal_to functor. + */ + static bool const c_isSorted = !( std::is_same< typename traits::compare, opt::none >::value + && std::is_same< typename traits::less, opt::none >::value ); + static size_t const c_nArity = hash::size ; ///< the arity of cuckoo hashing: the number of hash functors provided; minimum 2. 
+ + /// Key equality functor; used only for unordered probe-set + typedef typename opt::details::make_equal_to< value_type, traits, !c_isSorted>::type key_equal_to; + + /// key comparing functor based on \p opt::compare and \p opt::less option setter. Used only for ordered probe set + typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; + + /// allocator type + typedef typename traits::allocator allocator; + + /// item counter type + typedef typename traits::item_counter item_counter; + + /// node disposer + typedef typename traits::disposer disposer; + + protected: + //@cond + typedef typename node_type::probeset_class probeset_class; + typedef typename node_type::probeset_type probeset_type; + static unsigned int const c_nNodeHashArraySize = node_type::hash_array_size; + + typedef typename mutex_policy::scoped_cell_lock scoped_cell_lock; + typedef typename mutex_policy::scoped_cell_trylock scoped_cell_trylock; + typedef typename mutex_policy::scoped_full_lock scoped_full_lock; + typedef typename mutex_policy::scoped_resize_lock scoped_resize_lock; + + typedef cuckoo::details::bucket_entry< node_type, probeset_type > bucket_entry; + typedef typename bucket_entry::iterator bucket_iterator; + typedef cds::details::Allocator< bucket_entry, allocator > bucket_table_allocator; + + typedef size_t hash_array[c_nArity] ; ///< hash array + + struct position { + bucket_iterator itPrev; + bucket_iterator itFound; + }; + + typedef cuckoo::details::contains< node_traits, c_isSorted > contains_action; + + template + struct predicate_wrapper { + typedef typename std::conditional< c_isSorted, cds::opt::details::make_comparator_from_less, Predicate>::type type; + }; + + typedef typename std::conditional< c_isSorted, key_comparator, key_equal_to >::type key_predicate; + //@endcond + + public: + static unsigned int const c_nDefaultProbesetSize = 4; ///< default probeset size + static size_t const c_nDefaultInitialSize = 16; ///< default initial 
size + static unsigned int const c_nRelocateLimit = c_nArity * 2 - 1; ///< Count of attempts to relocate before giving up + + protected: + bucket_entry * m_BucketTable[ c_nArity ] ; ///< Bucket tables + + atomics::atomic m_nBucketMask ; ///< Hash bitmask; bucket table size minus 1. + unsigned int const m_nProbesetSize ; ///< Probe set size + unsigned int const m_nProbesetThreshold ; ///< Probe set threshold + + hash m_Hash ; ///< Hash functor tuple + mutex_policy m_MutexPolicy ; ///< concurrent access policy + item_counter m_ItemCounter ; ///< item counter + mutable stat m_Stat ; ///< internal statistics + + protected: + //@cond + static void check_common_constraints() + { + static_assert( (c_nArity == mutex_policy::c_nArity), "The count of hash functors must be equal to mutex_policy arity" ); + } + + void check_probeset_properties() const + { + assert( m_nProbesetThreshold < m_nProbesetSize ); + + // if probe set type is cuckoo::vector then m_nProbesetSize == N + assert( node_type::probeset_size == 0 || node_type::probeset_size == m_nProbesetSize ); + } + + template + void hashing( size_t * pHashes, Q const& v ) const + { + m_Hash( pHashes, v ); + } + + void copy_hash( size_t * pHashes, value_type const& v ) const + { + constexpr_if ( c_nNodeHashArraySize != 0 ) + memcpy( pHashes, node_traits::to_node_ptr( v )->get_hash(), sizeof( pHashes[0] ) * c_nNodeHashArraySize ); + else + hashing( pHashes, v ); + } + + bucket_entry& bucket( unsigned int nTable, size_t nHash ) + { + assert( nTable < c_nArity ); + return m_BucketTable[nTable][nHash & m_nBucketMask.load( atomics::memory_order_relaxed ) ]; + } + + static void store_hash( node_type * pNode, size_t * pHashes ) + { + cuckoo::details::hash_ops< node_type, c_nNodeHashArraySize >::store( pNode, pHashes ); + } + + static bool equal_hash( node_type& node, unsigned int nTable, size_t nHash ) + { + return cuckoo::details::hash_ops< node_type, c_nNodeHashArraySize >::equal_to( node, nTable, nHash ); + } + + void 
allocate_bucket_tables( size_t nSize ) + { + assert( cds::beans::is_power2( nSize )); + + m_nBucketMask.store( nSize - 1, atomics::memory_order_release ); + bucket_table_allocator alloc; + for ( unsigned int i = 0; i < c_nArity; ++i ) + m_BucketTable[i] = alloc.NewArray( nSize ); + } + + static void free_bucket_tables( bucket_entry ** pTable, size_t nCapacity ) + { + bucket_table_allocator alloc; + for ( unsigned int i = 0; i < c_nArity; ++i ) { + alloc.Delete( pTable[i], nCapacity ); + pTable[i] = nullptr; + } + } + void free_bucket_tables() + { + free_bucket_tables( m_BucketTable, m_nBucketMask.load( atomics::memory_order_relaxed ) + 1 ); + } + + static constexpr unsigned int const c_nUndefTable = (unsigned int) -1; + template + unsigned int contains( position * arrPos, size_t * arrHash, Q const& val, Predicate pred ) + { + // Buckets must be locked + + for ( unsigned int i = 0; i < c_nArity; ++i ) { + bucket_entry& probeset = bucket( i, arrHash[i] ); + if ( contains_action::find( probeset, arrPos[i], i, arrHash[i], val, pred )) + return i; + } + return c_nUndefTable; + } + + template + value_type * erase_( Q const& val, Predicate pred, Func f ) + { + hash_array arrHash; + hashing( arrHash, val ); + position arrPos[ c_nArity ]; + + { + scoped_cell_lock guard( m_MutexPolicy, arrHash ); + + unsigned int nTable = contains( arrPos, arrHash, val, pred ); + if ( nTable != c_nUndefTable ) { + node_type& node = *arrPos[nTable].itFound; + f( *node_traits::to_value_ptr(node)); + bucket( nTable, arrHash[nTable]).remove( arrPos[nTable].itPrev, arrPos[nTable].itFound ); + --m_ItemCounter; + m_Stat.onEraseSuccess(); + return node_traits::to_value_ptr( node ); + } + } + + m_Stat.onEraseFailed(); + return nullptr; + } + + template + bool find_( Q& val, Predicate pred, Func f ) + { + hash_array arrHash; + position arrPos[ c_nArity ]; + hashing( arrHash, val ); + scoped_cell_lock sl( m_MutexPolicy, arrHash ); + + unsigned int nTable = contains( arrPos, arrHash, val, pred ); + if ( 
nTable != c_nUndefTable ) { + f( *node_traits::to_value_ptr( *arrPos[nTable].itFound ), val ); + m_Stat.onFindSuccess(); + return true; + } + + m_Stat.onFindFailed(); + return false; + } + + bool relocate( unsigned int nTable, size_t * arrGoalHash ) + { + // arrGoalHash contains hash values for relocating element + // Relocating element is first one from bucket( nTable, arrGoalHash[nTable] ) probeset + + m_Stat.onRelocateCall(); + + hash_array arrHash; + value_type * pVal; + for ( unsigned int nRound = 0; nRound < c_nRelocateLimit; ++nRound ) { + m_Stat.onRelocateRound(); + + while ( true ) { + scoped_cell_lock guard( m_MutexPolicy, arrGoalHash ); + + bucket_entry& refBucket = bucket( nTable, arrGoalHash[nTable] ); + if ( refBucket.size() < m_nProbesetThreshold ) { + // probeset is not above the threshold + m_Stat.onFalseRelocateRound(); + return true; + } + + pVal = node_traits::to_value_ptr( *refBucket.begin()); + copy_hash( arrHash, *pVal ); + + scoped_cell_trylock guard2( m_MutexPolicy, arrHash ); + if ( !guard2.locked()) + continue ; // try one more time + + refBucket.remove( typename bucket_entry::iterator(), refBucket.begin()); + + unsigned int i = (nTable + 1) % c_nArity; + + // try insert into free probeset + while ( i != nTable ) { + bucket_entry& bkt = bucket( i, arrHash[i] ); + if ( bkt.size() < m_nProbesetThreshold ) { + position pos; + contains_action::find( bkt, pos, i, arrHash[i], *pVal, key_predicate()) ; // must return false! + bkt.insert_after( pos.itPrev, node_traits::to_node_ptr( pVal )); + m_Stat.onSuccessRelocateRound(); + return true; + } + i = ( i + 1 ) % c_nArity; + } + + // try insert into partial probeset + i = (nTable + 1) % c_nArity; + while ( i != nTable ) { + bucket_entry& bkt = bucket( i, arrHash[i] ); + if ( bkt.size() < m_nProbesetSize ) { + position pos; + contains_action::find( bkt, pos, i, arrHash[i], *pVal, key_predicate()) ; // must return false! 
+ bkt.insert_after( pos.itPrev, node_traits::to_node_ptr( pVal )); + nTable = i; + memcpy( arrGoalHash, arrHash, sizeof(arrHash)); + m_Stat.onRelocateAboveThresholdRound(); + goto next_iteration; + } + i = (i + 1) % c_nArity; + } + + // all probeset is full, relocating fault + refBucket.insert_after( typename bucket_entry::iterator(), node_traits::to_node_ptr( pVal )); + m_Stat.onFailedRelocate(); + return false; + } + + next_iteration:; + } + return false; + } + + void resize() + { + m_Stat.onResizeCall(); + + size_t nOldCapacity = bucket_count( atomics::memory_order_acquire ); + bucket_entry* pOldTable[ c_nArity ]; + { + scoped_resize_lock guard( m_MutexPolicy ); + + if ( nOldCapacity != bucket_count()) { + m_Stat.onFalseResizeCall(); + return; + } + + size_t nCapacity = nOldCapacity * 2; + + m_MutexPolicy.resize( nCapacity ); + memcpy( pOldTable, m_BucketTable, sizeof(pOldTable)); + allocate_bucket_tables( nCapacity ); + + hash_array arrHash; + position arrPos[ c_nArity ]; + + for ( unsigned int nTable = 0; nTable < c_nArity; ++nTable ) { + bucket_entry * pTable = pOldTable[nTable]; + for ( size_t k = 0; k < nOldCapacity; ++k ) { + bucket_iterator itNext; + for ( bucket_iterator it = pTable[k].begin(), itEnd = pTable[k].end(); it != itEnd; it = itNext ) { + itNext = it; + ++itNext; + + value_type& val = *node_traits::to_value_ptr( *it ); + copy_hash( arrHash, val ); + CDS_VERIFY_EQ( contains( arrPos, arrHash, val, key_predicate()), c_nUndefTable ); + + for ( unsigned int i = 0; i < c_nArity; ++i ) { + bucket_entry& refBucket = bucket( i, arrHash[i] ); + if ( refBucket.size() < m_nProbesetThreshold ) { + refBucket.insert_after( arrPos[i].itPrev, &*it ); + m_Stat.onResizeSuccessMove(); + goto do_next; + } + } + + for ( unsigned int i = 0; i < c_nArity; ++i ) { + bucket_entry& refBucket = bucket( i, arrHash[i] ); + if ( refBucket.size() < m_nProbesetSize ) { + refBucket.insert_after( arrPos[i].itPrev, &*it ); + assert( refBucket.size() > 1 ); + copy_hash( arrHash, 
*node_traits::to_value_ptr( *refBucket.begin())); + m_Stat.onResizeRelocateCall(); + relocate( i, arrHash ); + break; + } + } + do_next:; + } + } + } + } + free_bucket_tables( pOldTable, nOldCapacity ); + } + + constexpr static unsigned int calc_probeset_size( unsigned int nProbesetSize ) noexcept + { + return std::is_same< probeset_class, cuckoo::vector_probeset_class >::value + ? node_type::probeset_size + : (nProbesetSize + ? nProbesetSize + : ( node_type::probeset_size ? node_type::probeset_size : c_nDefaultProbesetSize )); + } + //@endcond + + public: + /// Default constructor + /** + Initial size = \ref c_nDefaultInitialSize + + Probe set size: + - \p c_nDefaultProbesetSize if \p probeset_type is \p cuckoo::list + - \p Capacity if \p probeset_type is cuckoo::vector + + Probe set threshold = probe set size - 1 + */ + CuckooSet() + : m_nProbesetSize( calc_probeset_size(0)) + , m_nProbesetThreshold( m_nProbesetSize - 1 ) + , m_MutexPolicy( c_nDefaultInitialSize ) + { + check_common_constraints(); + check_probeset_properties(); + + allocate_bucket_tables( c_nDefaultInitialSize ); + } + + /// Constructs the set object with given probe set size and threshold + /** + If probe set type is cuckoo::vector vector + then \p nProbesetSize is ignored since it should be equal to vector's \p Capacity. + */ + CuckooSet( + size_t nInitialSize ///< Initial set size; if 0 - use default initial size \p c_nDefaultInitialSize + , unsigned int nProbesetSize ///< probe set size + , unsigned int nProbesetThreshold = 0 ///< probe set threshold, nProbesetThreshold < nProbesetSize. If 0, nProbesetThreshold = nProbesetSize - 1 + ) + : m_nProbesetSize( calc_probeset_size(nProbesetSize)) + , m_nProbesetThreshold( nProbesetThreshold ? nProbesetThreshold : m_nProbesetSize - 1 ) + , m_MutexPolicy( cds::beans::ceil2(nInitialSize ? nInitialSize : c_nDefaultInitialSize )) + { + check_common_constraints(); + check_probeset_properties(); + + allocate_bucket_tables( nInitialSize ? 
cds::beans::ceil2( nInitialSize ) : c_nDefaultInitialSize ); + } + + /// Constructs the set object with given hash functor tuple + /** + The probe set size and threshold are set as default, see \p CuckooSet() + */ + CuckooSet( + hash_tuple_type const& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity + ) + : m_nProbesetSize( calc_probeset_size(0)) + , m_nProbesetThreshold( m_nProbesetSize -1 ) + , m_Hash( h ) + , m_MutexPolicy( c_nDefaultInitialSize ) + { + check_common_constraints(); + check_probeset_properties(); + + allocate_bucket_tables( c_nDefaultInitialSize ); + } + + /// Constructs the set object with given probe set properties and hash functor tuple + /** + If probe set type is cuckoo::vector vector + then \p nProbesetSize should be equal to vector's \p Capacity. + */ + CuckooSet( + size_t nInitialSize ///< Initial set size; if 0 - use default initial size \p c_nDefaultInitialSize + , unsigned int nProbesetSize ///< probe set size, positive integer + , unsigned int nProbesetThreshold ///< probe set threshold, nProbesetThreshold < nProbesetSize. If 0, nProbesetThreshold = nProbesetSize - 1 + , hash_tuple_type const& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity + ) + : m_nProbesetSize( calc_probeset_size(nProbesetSize)) + , m_nProbesetThreshold( nProbesetThreshold ? nProbesetThreshold : m_nProbesetSize - 1) + , m_Hash( h ) + , m_MutexPolicy( cds::beans::ceil2(nInitialSize ? nInitialSize : c_nDefaultInitialSize )) + { + check_common_constraints(); + check_probeset_properties(); + + allocate_bucket_tables( nInitialSize ? 
cds::beans::ceil2( nInitialSize ) : c_nDefaultInitialSize ); + } + + /// Constructs the set object with given hash functor tuple (move semantics) + /** + The probe set size and threshold are set as default, see \p CuckooSet() + */ + CuckooSet( + hash_tuple_type&& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity + ) + : m_nProbesetSize( calc_probeset_size(0)) + , m_nProbesetThreshold( m_nProbesetSize / 2 ) + , m_Hash( std::forward(h)) + , m_MutexPolicy( c_nDefaultInitialSize ) + { + check_common_constraints(); + check_probeset_properties(); + + allocate_bucket_tables( c_nDefaultInitialSize ); + } + + /// Constructs the set object with given probe set properties and hash functor tuple (move semantics) + /** + If probe set type is cuckoo::vector vector + then \p nProbesetSize should be equal to vector's \p Capacity. + */ + CuckooSet( + size_t nInitialSize ///< Initial set size; if 0 - use default initial size \p c_nDefaultInitialSize + , unsigned int nProbesetSize ///< probe set size, positive integer + , unsigned int nProbesetThreshold ///< probe set threshold, nProbesetThreshold < nProbesetSize. If 0, nProbesetThreshold = nProbesetSize - 1 + , hash_tuple_type&& h ///< hash functor tuple of type std::tuple where n == \ref c_nArity + ) + : m_nProbesetSize( calc_probeset_size(nProbesetSize)) + , m_nProbesetThreshold( nProbesetThreshold ? nProbesetThreshold : m_nProbesetSize - 1) + , m_Hash( std::forward(h)) + , m_MutexPolicy( cds::beans::ceil2(nInitialSize ? nInitialSize : c_nDefaultInitialSize )) + { + check_common_constraints(); + check_probeset_properties(); + + allocate_bucket_tables( nInitialSize ? cds::beans::ceil2( nInitialSize ) : c_nDefaultInitialSize ); + } + + /// Destructor + ~CuckooSet() + { + free_bucket_tables(); + } + + public: + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain an item with key equal to \p val. + + Returns \p true if \p val is inserted into the set, \p false otherwise. 
+ */ + bool insert( value_type& val ) + { + return insert( val, []( value_type& ) {} ); + } + + /// Inserts new node + /** + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. + + The user-defined functor is called only if the inserting is success. + */ + template + bool insert( value_type& val, Func f ) + { + hash_array arrHash; + position arrPos[ c_nArity ]; + unsigned int nGoalTable; + + hashing( arrHash, val ); + node_type * pNode = node_traits::to_node_ptr( val ); + store_hash( pNode, arrHash ); + + while (true) { + { + scoped_cell_lock guard( m_MutexPolicy, arrHash ); + + if ( contains( arrPos, arrHash, val, key_predicate()) != c_nUndefTable ) { + m_Stat.onInsertFailed(); + return false; + } + + for ( unsigned int i = 0; i < c_nArity; ++i ) { + bucket_entry& refBucket = bucket( i, arrHash[i] ); + if ( refBucket.size() < m_nProbesetThreshold ) { + refBucket.insert_after( arrPos[i].itPrev, pNode ); + f( val ); + ++m_ItemCounter; + m_Stat.onInsertSuccess(); + return true; + } + } + + for ( unsigned int i = 0; i < c_nArity; ++i ) { + bucket_entry& refBucket = bucket( i, arrHash[i] ); + if ( refBucket.size() < m_nProbesetSize ) { + refBucket.insert_after( arrPos[i].itPrev, pNode ); + f( val ); + ++m_ItemCounter; + nGoalTable = i; + assert( refBucket.size() > 1 ); + copy_hash( arrHash, *node_traits::to_value_ptr( *refBucket.begin())); + goto do_relocate; + } + } + } + + m_Stat.onInsertResize(); + resize(); + } + + do_relocate: + m_Stat.onInsertRelocate(); + if ( !relocate( nGoalTable, arrHash )) { + m_Stat.onInsertRelocateFault(); + m_Stat.onInsertResize(); + resize(); + } + + m_Stat.onInsertSuccess(); + return true; + } + + /// Updates the node + /** + The operation performs inserting or 
changing data with lock-free manner. + + If the item \p val is not found in the set, then \p val is inserted into the set + iff \p bAllowInsert is \p true. + Otherwise, the functor \p func is called with item found. + The functor \p func signature is: + \code + void func( bool bNew, value_type& item, value_type& val ); + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p %update() function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refer to the same thing. + + The functor may change non-key fields of the \p item. + + Returns std::pair where \p first is \p true if operation is successful, + i.e. the node has been inserted or updated, + \p second is \p true if new item has been added or \p false if the item with \p key + already exists. + */ + template + std::pair update( value_type& val, Func func, bool bAllowInsert = true ) + { + hash_array arrHash; + position arrPos[ c_nArity ]; + unsigned int nGoalTable; + + hashing( arrHash, val ); + node_type * pNode = node_traits::to_node_ptr( val ); + store_hash( pNode, arrHash ); + + while (true) { + { + scoped_cell_lock guard( m_MutexPolicy, arrHash ); + + unsigned int nTable = contains( arrPos, arrHash, val, key_predicate()); + if ( nTable != c_nUndefTable ) { + func( false, *node_traits::to_value_ptr( *arrPos[nTable].itFound ), val ); + m_Stat.onUpdateExist(); + return std::make_pair( true, false ); + } + + if ( !bAllowInsert ) + return std::make_pair( false, false ); + + //node_type * pNode = node_traits::to_node_ptr( val ); + //store_hash( pNode, arrHash ); + + for ( unsigned int i = 0; i < c_nArity; ++i ) { + bucket_entry& refBucket = bucket( i, arrHash[i] ); + if ( refBucket.size() < m_nProbesetThreshold ) { + refBucket.insert_after( arrPos[i].itPrev, pNode ); + func( true, val, val ); + ++m_ItemCounter; + m_Stat.onUpdateSuccess(); + return 
std::make_pair( true, true ); + } + } + + for ( unsigned int i = 0; i < c_nArity; ++i ) { + bucket_entry& refBucket = bucket( i, arrHash[i] ); + if ( refBucket.size() < m_nProbesetSize ) { + refBucket.insert_after( arrPos[i].itPrev, pNode ); + func( true, val, val ); + ++m_ItemCounter; + nGoalTable = i; + assert( refBucket.size() > 1 ); + copy_hash( arrHash, *node_traits::to_value_ptr( *refBucket.begin())); + goto do_relocate; + } + } + } + + m_Stat.onUpdateResize(); + resize(); + } + + do_relocate: + m_Stat.onUpdateRelocate(); + if ( !relocate( nGoalTable, arrHash )) { + m_Stat.onUpdateRelocateFault(); + m_Stat.onUpdateResize(); + resize(); + } + + m_Stat.onUpdateSuccess(); + return std::make_pair( true, true ); + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( value_type& val, Func func ) + { + return update( val, func, true ); + } + //@endcond + + /// Unlink the item \p val from the set + /** + The function searches the item \p val in the set and unlink it + if it is found and is equal to \p val (here, the equality means that + \p val belongs to the set: if \p item is an item found then + unlink is successful iif &val == &item) + + The function returns \p true if success and \p false otherwise. 
+ */ + bool unlink( value_type& val ) + { + hash_array arrHash; + hashing( arrHash, val ); + position arrPos[ c_nArity ]; + + { + scoped_cell_lock guard( m_MutexPolicy, arrHash ); + + unsigned int nTable = contains( arrPos, arrHash, val, key_predicate()); + if ( nTable != c_nUndefTable && node_traits::to_value_ptr(*arrPos[nTable].itFound) == &val ) { + bucket( nTable, arrHash[nTable]).remove( arrPos[nTable].itPrev, arrPos[nTable].itFound ); + --m_ItemCounter; + m_Stat.onUnlinkSuccess(); + return true; + } + } + + m_Stat.onUnlinkFailed(); + return false; + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_CuckooSet_erase + The function searches an item with key equal to \p val in the set, + unlinks it from the set, and returns a pointer to unlinked item. + + If the item with key equal to \p val is not found the function return \p nullptr. + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + value_type * erase( Q const& val ) + { + return erase( val, [](value_type const&) {} ); + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_CuckooSet_erase "erase(Q const&)" + but \p pred is used for key comparing. + If cuckoo set is ordered, then \p Predicate should have the interface and semantics like \p std::less. + If cuckoo set is unordered, then \p Predicate should have the interface and semantics like \p std::equal_to. + \p Predicate must imply the same element order as the comparator used for building the set. 
+ */ + template + value_type * erase_with( Q const& val, Predicate pred ) + { + CDS_UNUSED( pred ); + return erase_( val, typename predicate_wrapper::type(), [](value_type const&) {} ); + } + + /// Delete the item from the set + /** \anchor cds_intrusive_CuckooSet_erase_func + The function searches an item with key equal to \p val in the set, + call \p f functor with item found, unlinks it from the set, and returns a pointer to unlinked item. + + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + + If the item with key equal to \p val is not found the function return \p nullptr. + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + value_type * erase( Q const& val, Func f ) + { + return erase_( val, key_predicate(), f ); + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_CuckooSet_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + If you use ordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::less. + If you use unordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::equal_to. + \p Predicate must imply the same element order as the comparator used for building the set. + */ + template + value_type * erase_with( Q const& val, Predicate pred, Func f ) + { + CDS_UNUSED( pred ); + return erase_( val, typename predicate_wrapper::type(), f ); + } + + /// Find the key \p val + /** \anchor cds_intrusive_CuckooSet_find_func + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. 
+ + The functor may change non-key fields of \p item. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + may modify both arguments. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) + { + return find_( val, key_predicate(), f ); + } + //@cond + template + bool find( Q const& val, Func f ) + { + return find_( val, key_predicate(), f ); + } + //@endcond + + /// Find the key \p val using \p pred predicate for comparing + /** + The function is an analog of \ref cds_intrusive_CuckooSet_find_func "find(Q&, Func)" + but \p pred is used for key comparison. + If you use ordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::less. + If you use unordered cuckoo set, then \p Predicate should have the interface and semantics like \p std::equal_to. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q& val, Predicate pred, Func f ) + { + CDS_UNUSED( pred ); + return find_( val, typename predicate_wrapper::type(), f ); + } + //@cond + template + bool find_with( Q const& val, Predicate pred, Func f ) + { + CDS_UNUSED( pred ); + return find_( val, typename predicate_wrapper::type(), f ); + } + //@endcond + + /// Checks whether the set contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. 
+ */ + template + bool contains( Q const& key ) + { + return find( key, [](value_type&, Q const& ) {} ); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( Q const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the set contains \p key using \p pred predicate for searching + /** + The function is similar to contains( key ) but \p pred is used for key comparing. + If the set is unordered, \p Predicate has semantics like \p std::equal_to. + For ordered set \p Predicate has \p std::less semantics. In that case \p pred + must imply the same element order as the comparator used for building the set. + */ + template + bool contains( Q const& key, Predicate pred ) + { + CDS_UNUSED( pred ); + return find_with( key, typename predicate_wrapper::type(), [](value_type& , Q const& ) {} ); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( Q const& key, Predicate pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Clears the set + /** + The function unlinks all items from the set. + For any item \p Traits::disposer is called + */ + void clear() + { + clear_and_dispose( disposer()); + } + + /// Clears the set and calls \p disposer for each item + /** + The function unlinks all items from the set calling \p oDisposer for each item. + \p Disposer functor interface is: + \code + struct Disposer{ + void operator()( value_type * p ); + }; + \endcode + + The \p Traits::disposer is not called. 
+ */ + template + void clear_and_dispose( Disposer oDisposer ) + { + // locks entire array + scoped_full_lock sl( m_MutexPolicy ); + + for ( unsigned int i = 0; i < c_nArity; ++i ) { + bucket_entry * pEntry = m_BucketTable[i]; + bucket_entry * pEnd = pEntry + m_nBucketMask.load( atomics::memory_order_relaxed ) + 1; + for ( ; pEntry != pEnd ; ++pEntry ) { + pEntry->clear( [&oDisposer]( node_type * pNode ){ oDisposer( node_traits::to_value_ptr( pNode )) ; } ); + } + } + m_ItemCounter.reset(); + } + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. + */ + bool empty() const + { + return size() == 0; + } + + /// Returns item count in the set + size_t size() const + { + return m_ItemCounter; + } + + /// Returns the size of hash table + /** + The hash table size is non-constant and can be increased via resizing. + */ + size_t bucket_count() const + { + return m_nBucketMask.load( atomics::memory_order_relaxed ) + 1; + } + //@cond + size_t bucket_count( atomics::memory_order load_mo ) const + { + return m_nBucketMask.load( load_mo ) + 1; + } + //@endcond + + /// Returns lock array size + size_t lock_count() const + { + return m_MutexPolicy.lock_count(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + /// Returns const reference to mutex policy internal statistics + typename mutex_policy::statistics_type const& mutex_policy_statistics() const + { + return m_MutexPolicy.statistics(); + } + }; +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_CUCKOO_SET_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/base.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/base.h new file mode 100644 index 0000000..5118967 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/base.h @@ -0,0 +1,338 @@ +/* + This file is a part of 
libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_DETAILS_BASE_H +#define CDSLIB_INTRUSIVE_DETAILS_BASE_H + +#include +#include +#include + +namespace cds { + +/// Intrusive containers +/** + @ingroup cds_intrusive_containers + The namespace \p cds::intrusive contains intrusive lock-free containers. + The idea comes from \p boost::intrusive library, see http://boost.org/doc/ as a good introduction to intrusive approach. 
+ The intrusive containers of libcds library is developed as close to \p boost::intrusive + + In terms of lock-free approach, the main advantage of intrusive containers is + that no memory allocation is performed to maintain container elements. + However, additional requirements are imposed for types and values that can be stored in intrusive container. + See the container documentation for details. + + \anchor cds_intrusive_hook_tag + \par Tags + Many hooks and nodes for intrusive containers contain template argument \p Tag. + This argument serves as a tag, so you can derive from more than one container's node and hence put an object in multiple intrusive containers + at the same time. An incomplete type can serve as a tag. If you specify two hooks, you must specify a different tag for each one. + Example: + \code + struct tag1; + cds::intrusive::treiber_stack::node< cds::gc::HP, tag > + \endcode + If no tag is specified the default \p cds::opt::none will be used. + + \anchor cds_intrusive_item_creating + \par Inserting items + Many intrusive and non-intrusive (standard-like) containers in the library have the member functions + that take a functor argument to initialize the inserted item after it has been successfully inserted, + for example: + \code + template + bool insert( Q& key, Func f ); + + template + std::pair update( Q& key, Func f, bool bAllowInsert = true ); + \endcode + The first member function calls \p f functor iif a new item has been inserted. The functor takes two parameter: a reference to inserted item and + \p key. + + The second member function, \p update(), allows to insert a new item to the container if \p key is not found, or to find the item with \p key and + to perform some action with it. The \p f signature is: + \code + void f( bool bNew, item_type& item, Q& key ); + \endcode + where \p bNew is a flag to indicate whether \p item is a new created node or not. 
+ + Such functions should be used with caution in multi-threaded environment + since they can cause races. The library does not synchronize access + to container's items, so many threads can access to one item simultaneously. + For example, for \p insert member function the following race is possible: + \code + // Suppose, Foo is a complex structure with int key field + SomeContainer q; + + Thread 1 Thread 2 + + q.insert( Foo(5), q.find( 5, []( Foo& item ) { + []( Foo& item ){ // access to item fields + // complex initialization ... + item.f1 = ...; }); + ... + }); + \endcode + Execute sequence: + \code + Find 5 in the container. + Key 5 is not found + Create a new item Find key 5 + with calling Foo(5) ctor + Insert the new item + The key 5 is found - + call the functor (!) + Perform complex + initialization - + call the functor + \endcode + (!): Thread 2 found the key and call its functor on incomplete initialized item. + Simultaneous access to the item also is possible. In this case Thread 1 is + initializing the item, thread 2 is reading (or writing) the item's fields. + In any case, Thread 2 can read uninitialized or incomplete initialized fields. + + \p update() member function race. Suppose, thread 1 and thread 2 perform + the + following code: + \code + q.update( 5, []( bool bNew, Foo& item, int arg ) + { + // bNew: true if the new element has been created + // false otherwise + if ( bNew ) { + // initialize item + item.f1=...; + //... + } + else { + // do some work + if ( !item.f1 ) + item.f1 = ...; + else { + //... + } + //... + } + } + ); + \endcode + Execute sequence: + \code + Thread 1 Thread 2 + key 5 not found + insert new item Foo(5) Find 5 + Key 5 found + call the functor with + bNew = false (!) + call the functor with + bNew = true + \endcode + (!): Thread 2 executes its functor on incomplete initialized item. 
+ + To protect your code from such races you can use some item-level synchronization, + for example: + \code + struct Foo { + spinlock lock; // item-level lock + bool initialized = false; // initialization flag + // other fields + // .... + }; + + q.update( 5, []( bool bNew, Foo& item, int arg ) + { + // Lock access to the item + std::unique_lock( item.lock ); + + if ( !item.initialized ) { + // initialize item + item.f1=...; + //... + item.initialized = true; // mark the item as initialized + } + else { + // do some work + if ( !item.f1 ) + item.f1 = ...; + else { + //... + } + //... + } + } + ); + \endcode + If the item-level synchronization is not suitable, you should not use any inserting member function + with post-insert functor argument. + + \anchor cds_intrusive_item_destroying + \par Destroying items + + It should be very careful when destroying an item removed from intrusive container. + In other threads the references to popped item may exists some time after removing. + To destroy the removed item in thread-safe manner you should call static function \p retire + of garbage collector you use, for example: + \code + struct destroyer { + void operator ()( my_type * p ) + { + delete p; + } + }; + + typedef cds::intrusive::TreiberStack< cds::gc::HP, my_type, cds::opt::disposer< destroyer > > stack; + stack s; + + // .... + + my_type * p = s.pop(); + + if ( p ) { + // It is wrong + // delete p; + + // It is correct + cds::gc:HP::retire< destroyer >( p ); + } + \endcode + The situation becomes even more complicated when you want store items in different intrusive containers. + In this case the best way is using reference counting: + \code + struct my_type { + ... 
+ std::atomic nRefCount; + + my_type() + : nRefCount(0) + {} + }; + + struct destroyer { + void operator ()( my_type * p ) + { + if ( --p->nRefCount == 0 ) + delete p ; // delete only after no reference pointing to p + } + }; + + typedef cds::intrusive::TreiberStack< cds::gc::HP, my_type, cds::opt::disposer< destroyer > > stack; + typedef cds::intrusive::MSQueue< cds::gc::HP, my_type, cds::opt::disposer< destroyer > > queue; + stack s; + queue q; + + my_type * v = new my_type(); + + v.nRefCount++ ; // increment counter before pushing the item to the stack + s.push(v); + + v.nRefCount++ ; // increment counter before pushing the item to the queue + q.push(v); + + // .... + + my_type * ps = s.pop(); + if ( ps ) { + // It is wrong + // delete ps; + + // It is correct + cds::gc:HP::retire< destroyer >( ps ); + } + + my_type * pq = q.pop(); + if ( pq ) { + // It is wrong + // delete pq; + + // It is correct + cds::gc:HP::retire< destroyer >( pq ); + } + \endcode + Violation of these rules may lead to a crash. + + \par Intrusive containers and Hazard Pointer-like garbage collectors + + If you develop your intrusive container based on libcds library framework, you should + take in the account the following. + The main idea of garbage collectors (GC) based on Hazard Pointer schema is protecting a shared pointer + by publishing it as a "hazard" i.e. as a pointer that is changing at the current time and cannot be + deleted at this moment. In intrusive container paradigm, the pointer to a node of the container + and the pointer to a item stored in the container are not equal in the general case. + However, any pointer to node should be castable to appropriate pointer to container's item. + In general, any item can be placed to two or more intrusive containers simultaneously, + and each of those container holds an unique pointer to its node that refers to the same item. 
+ When we protect a pointer, we want to protect an item pointer that is the invariant + for any container stored that item. In your intrusive container, instead of protecting by GC's guard a pointer to node + you should cast it to the pointer to item and then protect that item pointer. + Otherwise an unpredictable result may occur. +*/ +namespace intrusive { + + /// @defgroup cds_intrusive_containers Intrusive containers + /** @defgroup cds_intrusive_helper Helper structs for intrusive containers + @ingroup cds_intrusive_containers + */ + /** @defgroup cds_intrusive_stack Stack + @ingroup cds_intrusive_containers + */ + /** @defgroup cds_intrusive_queue Queue + @ingroup cds_intrusive_containers + */ + /** @defgroup cds_intrusive_priority_queue Priority queue + @ingroup cds_intrusive_containers + */ + /** @defgroup cds_intrusive_deque Deque + @ingroup cds_intrusive_containers + */ + /** @defgroup cds_intrusive_map Set + @ingroup cds_intrusive_containers + */ + /** @defgroup cds_intrusive_tree Tree + @ingroup cds_intrusive_containers + */ + /** @defgroup cds_intrusive_list List + @ingroup cds_intrusive_containers + */ + /** @defgroup cds_intrusive_freelist Free-list + @ingroup cds_intrusive_containers + */ + + //@cond + class iterable_list_tag + {}; + + template + struct is_iterable_list: public std::is_base_of< iterable_list_tag, List> + {}; + //@endcond + +}} // namespace cds::intrusuve + +#endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_BASE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/ellen_bintree_base.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/ellen_bintree_base.h new file mode 100644 index 0000000..49e2884 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/ellen_bintree_base.h @@ -0,0 +1,762 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code 
repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_DETAILS_ELLEN_BINTREE_BASE_H +#define CDSLIB_INTRUSIVE_DETAILS_ELLEN_BINTREE_BASE_H + +#include +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + /// EllenBinTree related declarations + namespace ellen_bintree { + //Forwards + template struct base_node; + template struct node; + template struct internal_node; + + /// Update descriptor + /** + Update descriptor is used internally for helping concurrent threads + to complete modifying operation. 
+ Usually, you should not use \p update_desc type directly until + you want to develop special free-list of update descriptor. + + Template parameters: + - \p LeafNode - leaf node type, see \ref node + - \p InternalNode - internal node type, see \ref internal_node + + @note Size of update descriptor is constant. + It does not depends of template arguments. + */ + template + struct update_desc { + //@cond + typedef LeafNode leaf_node; + typedef InternalNode internal_node; + + typedef cds::details::marked_ptr< update_desc, 3 > update_ptr; + + enum { + Clean = 0, + DFlag = 1, + IFlag = 2, + Mark = 3 + }; + + struct insert_info { + internal_node * pParent; + internal_node * pNew; + leaf_node * pLeaf; + bool bRightLeaf; + }; + struct delete_info { + internal_node * pGrandParent; + internal_node * pParent; + leaf_node * pLeaf; + update_desc * pUpdateParent; + bool bDisposeLeaf; // true if pLeaf should be disposed, false otherwise (for extract operation, RCU) + bool bRightParent; + bool bRightLeaf; + }; + + union { + insert_info iInfo; + delete_info dInfo; + }; + + update_desc * pNextRetire; // for local retired list (RCU) + + update_desc() + : pNextRetire( nullptr ) + {} + //@endcond + }; + + //@cond + struct alignas( void* ) basic_node + { + enum flags { + internal = 1, ///< set for internal node + key_infinite1 = 2, ///< set if node's key is Inf1 + key_infinite2 = 4, ///< set if node's key is Inf2 + + key_infinite = key_infinite1 | key_infinite2 ///< Cumulative infinite flags + }; + + atomics::atomic m_nFlags; ///< Internal flags + + /// Constructs leaf (bIntrenal == false) or internal (bInternal == true) node + explicit basic_node( bool bInternal ) + { + m_nFlags.store( bInternal ? 
internal: 0, atomics::memory_order_release ); + } + + /// Checks if the node is a leaf + bool is_leaf() const + { + return !is_internal(); + } + + /// Checks if the node is internal + bool is_internal() const + { + return (m_nFlags.load(atomics::memory_order_acquire) & internal) != 0; + } + + /// Returns infinite key, 0 if the node is not infinite + unsigned int infinite_key() const + { + return m_nFlags.load(atomics::memory_order_acquire) & key_infinite; + } + + /// Sets infinite key for the node (for internal use only!!!) + void infinite_key( int nInf ) + { + unsigned int nFlags = m_nFlags.load(atomics::memory_order_relaxed); + nFlags &= ~key_infinite; + switch ( nInf ) { + case 1: + nFlags |= key_infinite1; + break; + case 2: + nFlags |= key_infinite2; + break; + case 0: + break; + default: + assert( false ); + break; + } + m_nFlags.store( nFlags, atomics::memory_order_release ); + } + }; + + template + struct base_node: public basic_node + { + typedef basic_node base_class; + + typedef GC gc ; ///< Garbage collector + + /// Constructs leaf (bIntrenal == false) or internal (bInternal == true) node + explicit base_node( bool bInternal ) + : base_class( bInternal ) + {} + }; + //@endcond + + /// Ellen's binary tree leaf node + /** + Template parameters: + - \p GC - one of \ref cds_garbage_collector "garbage collector type" + - \p Tag - a \ref cds_intrusive_hook_tag "tag" + */ + template + struct node +# ifndef CDS_DOXYGEN_INVOKED + : public base_node< GC > +# endif + { + //@cond + typedef base_node< GC > base_class; + //@endcond + + typedef GC gc; ///< Garbage collector + typedef Tag tag; ///< Tag + + /// Default ctor + node() + : base_class( false ) + {} + }; + + /// Ellen's binary tree internal node + /** + Template arguments: + - \p Key - key type + - \p LeafNode - leaf node type + */ + template + struct internal_node +# ifndef CDS_DOXYGEN_INVOKED + : public base_node +# endif + { + //@cond + typedef base_node base_class; + //@endcond + + typedef Key key_type; 
///< key type + typedef LeafNode leaf_node; ///< type of leaf node + typedef update_desc< leaf_node, internal_node > update_desc_type; ///< Update descriptor + typedef typename update_desc_type::update_ptr update_ptr; ///< Marked pointer to update descriptor + + key_type m_Key; ///< Regular key + atomics::atomic m_pLeft; ///< Left subtree + atomics::atomic m_pRight; ///< Right subtree + atomics::atomic m_pUpdate; ///< Update descriptor + //@cond + atomics::atomic m_nEmptyUpdate; ///< ABA prevention for m_pUpdate, from 0..2^16 step 4 + //@endcond + + /// Default ctor + internal_node() + : base_class( true ) + , m_pLeft( nullptr ) + , m_pRight( nullptr ) + , m_pUpdate( update_ptr()) + { + m_nEmptyUpdate.store( 0, atomics::memory_order_release ); + } + + //@cond + update_ptr null_update_desc() + { + return update_ptr( reinterpret_cast( ((m_nEmptyUpdate.fetch_add(1, atomics::memory_order_relaxed) + 1 ) << 2) & 0xFFFF )); + } + + base_class * get_child( bool bRight, atomics::memory_order mo ) const + { + return bRight ? m_pRight.load( mo ) : m_pLeft.load( mo ); + } + //@endcond + }; + + /// Types of EllenBinTree node + /** + This struct declares different \p %EllenBinTree node types. + It can be useful for simplifying \p %EllenBinTree node declaration in your application. + + Template parameters: + - \p GC - one of \ref cds_garbage_collector "garbage collector type" + - \p Key - key type + - \p Tag - a \ref cds_intrusive_hook_tag "tag" + */ + template + struct node_types + { + typedef node leaf_node_type; ///< Leaf node type + typedef internal_node internal_node_type; ///< Internal node type + typedef update_desc update_desc_type; ///< Update descriptor type + }; + + //@cond + struct undefined_gc; + struct default_hook { + typedef undefined_gc gc; + typedef opt::none tag; + }; + //@endcond + + //@cond + template < typename HookType, typename... 
Options> + struct hook + { + typedef typename opt::make_options< default_hook, Options...>::type options; + typedef typename options::gc gc; + typedef typename options::tag tag; + typedef node node_type; + typedef HookType hook_type; + }; + //@endcond + + /// Base hook + /** + \p Options are: + - \p opt::gc - garbage collector + - \p opt::tag - a \ref cds_intrusive_hook_tag "tag" + */ + template < typename... Options > + struct base_hook: public hook< opt::base_hook_tag, Options... > + {}; + + /// Member hook + /** + \p MemberOffset defines offset in bytes of \ref node member into your structure. + Use \p offsetof macro to define \p MemberOffset + + \p Options are: + - \p opt::gc - garbage collector + - \p opt::tag - a \ref cds_intrusive_hook_tag "tag" + */ + template < size_t MemberOffset, typename... Options > + struct member_hook: public hook< opt::member_hook_tag, Options... > + { + //@cond + static constexpr const size_t c_nMemberOffset = MemberOffset; + //@endcond + }; + + /// Traits hook + /** + \p NodeTraits defines type traits for node. + See \ref node_traits for \p NodeTraits interface description + + \p Options are: + - opt::gc - garbage collector + - \p opt::tag - a \ref cds_intrusive_hook_tag "tag" + */ + template + struct traits_hook: public hook< opt::traits_hook_tag, Options... 
> + { + //@cond + typedef NodeTraits node_traits; + //@endcond + }; + + /// Key extracting functor option setter + template + struct key_extractor { + //@cond + template struct pack: public Base + { + typedef Type key_extractor; + }; + //@endcond + }; + + /// Update descriptor allocator option setter + template + struct update_desc_allocator { + //@cond + template struct pack: public Base + { + typedef Type update_desc_allocator; + }; + //@endcond + }; + + /// EllenBinTree internal statistics + template + struct stat { + typedef Counter event_counter ; ///< Event counter type + + event_counter m_nInternalNodeCreated ; ///< Total count of created internal node + event_counter m_nInternalNodeDeleted ; ///< Total count of deleted internal node + event_counter m_nUpdateDescCreated ; ///< Total count of created update descriptors + event_counter m_nUpdateDescDeleted ; ///< Total count of deleted update descriptors + + event_counter m_nInsertSuccess ; ///< Count of success insertion + event_counter m_nInsertFailed ; ///< Count of failed insertion + event_counter m_nInsertRetries ; ///< Count of unsuccessful retries of insertion + event_counter m_nUpdateExist ; ///< Count of \p update() call for existed node + event_counter m_nUpdateNew ; ///< Count of \p update() call for new node + event_counter m_nUpdateRetries ; ///< Count of unsuccessful retries of ensuring + event_counter m_nEraseSuccess ; ///< Count of successful call of \p erase and \p unlink + event_counter m_nEraseFailed ; ///< Count of failed call of \p erase and \p unlink + event_counter m_nEraseRetries ; ///< Count of unsuccessful retries inside erasing/unlinking + event_counter m_nFindSuccess ; ///< Count of successful \p find call + event_counter m_nFindFailed ; ///< Count of failed \p find call + event_counter m_nExtractMinSuccess ; ///< Count of successful call of \p extract_min + event_counter m_nExtractMinFailed ; ///< Count of failed call of \p extract_min + event_counter m_nExtractMinRetries ; ///< 
Count of unsuccessful retries inside \p extract_min + event_counter m_nExtractMaxSuccess ; ///< Count of successful call of \p extract_max + event_counter m_nExtractMaxFailed ; ///< Count of failed call of \p extract_max + event_counter m_nExtractMaxRetries ; ///< Count of unsuccessful retries inside \p extract_max + event_counter m_nSearchRetry ; ///< How many times the deleting node was encountered while searching + + event_counter m_nHelpInsert ; ///< The number of insert help from the other thread + event_counter m_nHelpDelete ; ///< The number of delete help from the other thread + event_counter m_nHelpMark ; ///< The number of delete help (mark phase) from the other thread + event_counter m_nHelpGuardSuccess ; ///< The number of successful guarding of update descriptor data + event_counter m_nHelpGuardFailed ; ///< The number of failed guarding of update descriptor data + + //@cond + void onInternalNodeCreated() { ++m_nInternalNodeCreated ; } + void onInternalNodeDeleted() { ++m_nInternalNodeDeleted ; } + void onUpdateDescCreated() { ++m_nUpdateDescCreated ; } + void onUpdateDescDeleted() { ++m_nUpdateDescDeleted ; } + void onInsertSuccess() { ++m_nInsertSuccess ; } + void onInsertFailed() { ++m_nInsertFailed ; } + void onInsertRetry() { ++m_nInsertRetries ; } + void onUpdateExist() { ++m_nUpdateExist ; } + void onUpdateNew() { ++m_nUpdateNew ; } + void onUpdateRetry() { ++m_nUpdateRetries ; } + void onEraseSuccess() { ++m_nEraseSuccess ; } + void onEraseFailed() { ++m_nEraseFailed ; } + void onEraseRetry() { ++m_nEraseRetries ; } + void onExtractMinSuccess() { ++m_nExtractMinSuccess ; } + void onExtractMinFailed() { ++m_nExtractMinFailed ; } + void onExtractMinRetry() { ++m_nExtractMinRetries ; } + void onExtractMaxSuccess() { ++m_nExtractMaxSuccess ; } + void onExtractMaxFailed() { ++m_nExtractMaxFailed ; } + void onExtractMaxRetry() { ++m_nExtractMaxRetries ; } + void onFindSuccess() { ++m_nFindSuccess ; } + void onFindFailed() { ++m_nFindFailed ; } + void 
onSearchRetry() { ++m_nSearchRetry ; } + void onHelpInsert() { ++m_nHelpInsert ; } + void onHelpDelete() { ++m_nHelpDelete ; } + void onHelpMark() { ++m_nHelpMark ; } + void onHelpGuardSuccess() { ++m_nHelpGuardSuccess ; } + void onHelpGuardFailed() { ++m_nHelpGuardFailed ; } + //@endcond + }; + + /// EllenBinTree empty statistics + struct empty_stat { + //@cond + void onInternalNodeCreated() const {} + void onInternalNodeDeleted() const {} + void onUpdateDescCreated() const {} + void onUpdateDescDeleted() const {} + void onInsertSuccess() const {} + void onInsertFailed() const {} + void onInsertRetry() const {} + void onUpdateExist() const {} + void onUpdateNew() const {} + void onUpdateRetry() const {} + void onEraseSuccess() const {} + void onEraseFailed() const {} + void onEraseRetry() const {} + void onExtractMinSuccess() const {} + void onExtractMinFailed() const {} + void onExtractMinRetry() const {} + void onExtractMaxSuccess() const {} + void onExtractMaxFailed() const {} + void onExtractMaxRetry() const {} + void onFindSuccess() const {} + void onFindFailed() const {} + void onSearchRetry() const {} + void onHelpInsert() const {} + void onHelpDelete() const {} + void onHelpMark() const {} + void onHelpGuardSuccess() const {} + void onHelpGuardFailed() const {} + //@endcond + }; + + /// EllenBinTree traits + struct traits + { + /// Hook used (mandatory) + /** + Possible values are: \p ellen_bintree::base_hook, \p ellen_bintree::member_hook, \p ellen_bintree::traits_hook. + */ + typedef base_hook<> hook; + + /// Key extracting functor (mandatory) + /** + You should explicit define a valid functor. + The functor has the following prototype: + \code + struct key_extractor { + void operator ()( Key& dest, T const& src ); + }; + \endcode + It should initialize \p dest key from \p src data. + The functor is used to initialize internal nodes. + */ + typedef opt::none key_extractor; + + /// Key comparison functor + /** + No default functor is provided. 
If the option is not specified, the \p less is used. + + See \p cds::opt::compare option description for functor interface. + + You should provide \p compare or \p less functor. + See \ref cds_intrusive_EllenBinTree_rcu_less "predicate requirements". + */ + typedef opt::none compare; + + /// Specifies binary predicate used for key compare. + /** + See \p cds::opt::less option description for predicate interface. + + You should provide \p compare or \p less functor. + See \ref cds_intrusive_EllenBinTree_rcu_less "predicate requirements". + */ + typedef opt::none less; + + /// Disposer + /** + The functor used for dispose removed items. Default is \p opt::v::empty_disposer. + */ + typedef opt::v::empty_disposer disposer; + + /// Item counter + /** + The type for item counter, by default it is disabled (\p atomicity::empty_item_counter). + To enable it use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter + */ + typedef atomicity::empty_item_counter item_counter; + + /// C++ memory ordering model + /** + List of available memory ordering see \p opt::memory_model + */ + typedef opt::v::relaxed_ordering memory_model; + + /// Allocator for update descriptors + /** + The allocator type is used for \p ellen_bintree::update_desc. + + Update descriptor is helping data structure with short lifetime and it is good candidate + for pooling. The number of simultaneously existing descriptors is bounded and it is + limited by number of threads working with the tree. + Therefore, a bounded lock-free container like \p cds::container::VyukovMPMCCycleQueue + is a good choice for the free-list of update descriptors, + see \p cds::memory::vyukov_queue_pool free-list implementation. + + Also notice that size of update descriptor is constant and not dependent on the type of data + stored in the tree so single free-list object can be used for several \p EllenBinTree object. 
+ */ + typedef CDS_DEFAULT_ALLOCATOR update_desc_allocator; + + /// Allocator for internal nodes + /** + The allocator type is used for \p ellen_bintree::internal_node. + */ + typedef CDS_DEFAULT_ALLOCATOR node_allocator; + + /// Internal statistics + /** + By default, internal statistics is disabled (\p ellen_bintree::empty_stat). + To enable it use \p ellen_bintree::stat. + */ + typedef empty_stat stat; + + /// Back-off strategy + typedef cds::backoff::empty back_off; + + /// RCU deadlock checking policy (only for \ref cds_intrusive_EllenBinTree_rcu "RCU-based EllenBinTree") + /** + List of available options see \p opt::rcu_check_deadlock + */ + typedef cds::opt::v::rcu_throw_deadlock rcu_check_deadlock; + }; + + /// Metafunction converting option list to EllenBinTree traits + /** + \p Options are: + - \p opt::hook - hook used. Possible values are: \p ellen_bintree::base_hook, \p ellen_bintree::member_hook, \p ellen_bintree::traits_hook. + If the option is not specified, ellen_bintree::base_hook<> is used. + - \p ellen_bintree::key_extractor - key extracting functor, mandatory option. The functor has the following prototype: + \code + struct key_extractor { + void operator ()( Key& dest, T const& src ); + }; + \endcode + It should initialize \p dest key from \p src data. The functor is used to initialize internal nodes. + - \p opt::compare - key compare functor. No default functor is provided. + If the option is not specified, \p %opt::less is used. + - \p opt::less - specifies binary predicate used for key compare. At least \p %opt::compare or \p %opt::less should be defined. + - \p opt::disposer - the functor used for dispose removed nodes. Default is \p opt::v::empty_disposer. Due the nature + of GC schema the disposer may be called asynchronously. The disposer is used only for leaf nodes. 
+ - \p opt::item_counter - the type of item counting feature, by default it is disabled (\p atomicity::empty_item_counter) + To enable it use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter + - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + - \p ellen_bintree::update_desc_allocator - an allocator of \ref ellen_bintree::update_desc "update descriptors", + default is \ref CDS_DEFAULT_ALLOCATOR. + Note that update descriptor is helping data structure with short lifetime and it is good candidate for pooling. + The number of simultaneously existing descriptors is bounded and depends on the number of threads + working with the tree and GC internals. + A bounded lock-free container like \p cds::container::VyukovMPMCCycleQueue is good candidate + for the free-list of update descriptors, see cds::memory::vyukov_queue_pool free-list implementation. + Also notice that size of update descriptor is constant and not dependent on the type of data + stored in the tree so single free-list object can be used for all \p %EllenBinTree objects. + - \p opt::node_allocator - the allocator for internal nodes. Default is \ref CDS_DEFAULT_ALLOCATOR. + - \p opt::stat - internal statistics, by default it is disabled (\p ellen_bintree::empty_stat) + To enable statistics use \p \p ellen_bintree::stat + - \p opt::backoff - back-off strategy, by default no strategy is used (\p cds::backoff::empty) + - \p opt::rcu_check_deadlock - a deadlock checking policy for RCU-based tree, default is \p opt::v::rcu_throw_deadlock + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + ,Options... 
+ >::type type; +# endif + }; + + //@cond + namespace details { + + template + struct compare + { + typedef Compare key_compare; + typedef Key key_type; + typedef T value_type; + typedef NodeTraits node_traits; + + template + int operator()( Q1 const& v1, Q2 const& v2) const + { + return key_compare()( v1, v2 ); + } + + template + int operator()( internal_node const& n1, internal_node const& n2 ) const + { + if ( n1.infinite_key()) + return n2.infinite_key() ? n1.infinite_key() - n2.infinite_key() : 1; + else if ( n2.infinite_key()) + return -1; + return operator()( n1.m_Key, n2.m_Key ); + } + + template + int operator()( internal_node const& n, Q const& v ) const + { + if ( n.infinite_key()) + return 1; + return operator()( n.m_Key, v ); + } + + template + int operator()( Q const& v, internal_node const& n ) const + { + if ( n.infinite_key()) + return -1; + return operator()( v, n.m_Key ); + } + + template + int operator()( node const& n1, node const& n2 ) const + { + if ( n1.infinite_key() != n2.infinite_key()) + return n1.infinite_key() - n2.infinite_key(); + return operator()( *node_traits::to_value_ptr( n1 ), *node_traits::to_value_ptr( n2 )); + } + + template + int operator()( node const& n, Q const& v ) const + { + if ( n.infinite_key()) + return 1; + return operator()( *node_traits::to_value_ptr( n ), v ); + } + + template + int operator()( Q const& v, node const& n ) const + { + if ( n.infinite_key()) + return -1; + return operator()( v, *node_traits::to_value_ptr( n )); + } + + template + int operator()( base_node const& n1, base_node const& n2 ) const + { + if ( n1.infinite_key() != n2.infinite_key()) + return n1.infinite_key() - n2.infinite_key(); + if ( n1.is_leaf()) { + if ( n2.is_leaf()) + return operator()( node_traits::to_leaf_node( n1 ), node_traits::to_leaf_node( n2 )); + else + return operator()( node_traits::to_leaf_node( n1 ), node_traits::to_internal_node( n2 )); + } + + if ( n2.is_leaf()) + return operator()( node_traits::to_internal_node( 
n1 ), node_traits::to_leaf_node( n2 )); + else + return operator()( node_traits::to_internal_node( n1 ), node_traits::to_internal_node( n2 )); + } + + template + int operator()( base_node const& n, Q const& v ) const + { + if ( n.infinite_key()) + return 1; + if ( n.is_leaf()) + return operator()( node_traits::to_leaf_node( n ), v ); + return operator()( node_traits::to_internal_node( n ), v ); + } + + template + int operator()( Q const& v, base_node const& n ) const + { + return -operator()( n, v ); + } + + template + int operator()( base_node const& i, internal_node const& n ) const + { + if ( i.is_leaf()) + return operator()( static_cast(i), n ); + return operator()( static_cast const&>(i), n ); + } + + template + int operator()( internal_node const& n, base_node const& i ) const + { + return -operator()( i, n ); + } + + template + int operator()( node const& n, internal_node > const& i ) const + { + if ( !n.infinite_key()) { + if ( i.infinite_key()) + return -1; + return operator()( n, i.m_Key ); + } + + if ( !i.infinite_key()) + return 1; + return int( n.infinite_key()) - int( i.infinite_key()); + } + + template + int operator()( internal_node > const& i, node const& n ) const + { + return -operator()( n, i ); + } + + }; + + } // namespace details + //@endcond + } // namespace ellen_bintree + + // Forwards + template < class GC, typename Key, typename T, class Traits = ellen_bintree::traits > + class EllenBinTree; + +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_ELLEN_BINTREE_BASE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/feldman_hashset_base.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/feldman_hashset_base.h new file mode 100644 index 0000000..002739b --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/feldman_hashset_base.h @@ -0,0 +1,699 @@ +/* + This file is a part of libcds - Concurrent Data Structures library 
+ + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_INTRUSIVE_DETAILS_FELDMAN_HASHSET_BASE_H +#define CDSLIB_INTRUSIVE_DETAILS_FELDMAN_HASHSET_BASE_H + +#include // memcmp, memcpy +#include + +#include +#include +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + /// FeldmanHashSet related definitions + /** @ingroup cds_intrusive_helper + */ + namespace feldman_hashset { + /// Hash accessor option + /** + @copydetails traits::hash_accessor + */ + template + struct hash_accessor { + //@cond + template struct pack: public Base + { + typedef Accessor hash_accessor; + }; + //@endcond + }; + + /// Hash size option + /** + @copydetails traits::hash_size + */ + template + struct hash_size { + //@cond + template struct pack: public Base + { + enum: size_t { + hash_size = Size + }; + }; + //@endcond + }; + + /// Hash splitter option + /** + @copydetails traits::hash_splitter + */ + template + struct hash_splitter { + //@cond + template struct pack: public Base + { + typedef Splitter hash_splitter; + }; + //@endcond + }; + + + /// \p FeldmanHashSet internal statistics + template + struct stat { + typedef EventCounter event_counter ; ///< Event counter type + + event_counter m_nInsertSuccess; ///< Number of success \p insert() operations + event_counter m_nInsertFailed; ///< Number of failed \p insert() operations + event_counter m_nInsertRetry; ///< Number of attempts to insert new item + event_counter m_nUpdateNew; ///< Number of new item inserted for \p update() + event_counter m_nUpdateExisting; ///< Number of existing item updates + event_counter m_nUpdateFailed; ///< Number of failed \p update() call + event_counter m_nUpdateRetry; ///< Number of attempts to update the item + event_counter m_nEraseSuccess; ///< Number of successful \p erase(), \p unlink(), \p extract() operations + event_counter m_nEraseFailed; ///< Number of failed \p erase(), \p unlink(), \p extract() operations + event_counter m_nEraseRetry; ///< Number of attempts to \p erase() an item + 
event_counter m_nFindSuccess; ///< Number of successful \p find() and \p get() operations + event_counter m_nFindFailed; ///< Number of failed \p find() and \p get() operations + + event_counter m_nExpandNodeSuccess; ///< Number of succeeded attempts converting data node to array node + event_counter m_nExpandNodeFailed; ///< Number of failed attempts converting data node to array node + event_counter m_nSlotChanged; ///< Number of array node slot changing by other thread during an operation + event_counter m_nSlotConverting; ///< Number of events when we encounter a slot while it is converting to array node + + event_counter m_nArrayNodeCount; ///< Number of array nodes + event_counter m_nHeight; ///< Current height of the tree + + //@cond + void onInsertSuccess() { ++m_nInsertSuccess; } + void onInsertFailed() { ++m_nInsertFailed; } + void onInsertRetry() { ++m_nInsertRetry; } + void onUpdateNew() { ++m_nUpdateNew; } + void onUpdateExisting() { ++m_nUpdateExisting; } + void onUpdateFailed() { ++m_nUpdateFailed; } + void onUpdateRetry() { ++m_nUpdateRetry; } + void onEraseSuccess() { ++m_nEraseSuccess; } + void onEraseFailed() { ++m_nEraseFailed; } + void onEraseRetry() { ++m_nEraseRetry; } + void onFindSuccess() { ++m_nFindSuccess; } + void onFindFailed() { ++m_nFindFailed; } + + void onExpandNodeSuccess() { ++m_nExpandNodeSuccess; } + void onExpandNodeFailed() { ++m_nExpandNodeFailed; } + void onSlotChanged() { ++m_nSlotChanged; } + void onSlotConverting() { ++m_nSlotConverting; } + void onArrayNodeCreated() { ++m_nArrayNodeCount; } + void height( size_t h ) { if (m_nHeight < h ) m_nHeight = h; } + //@endcond + }; + + /// \p FeldmanHashSet empty internal statistics + struct empty_stat { + //@cond + void onInsertSuccess() const {} + void onInsertFailed() const {} + void onInsertRetry() const {} + void onUpdateNew() const {} + void onUpdateExisting() const {} + void onUpdateFailed() const {} + void onUpdateRetry() const {} + void onEraseSuccess() const {} + void 
onEraseFailed() const {} + void onEraseRetry() const {} + void onFindSuccess() const {} + void onFindFailed() const {} + + void onExpandNodeSuccess() const {} + void onExpandNodeFailed() const {} + void onSlotChanged() const {} + void onSlotConverting() const {} + void onArrayNodeCreated() const {} + void height(size_t) const {} + //@endcond + }; + + /// \p FeldmanHashSet traits + struct traits + { + /// Mandatory functor to get hash value from data node + /** + It is most-important feature of \p FeldmanHashSet. + That functor must return a reference to fixed-sized hash value of data node. + The return value of that functor specifies the type of hash value. + + Example: + \code + typedef uint8_t hash_type[32]; // 256-bit hash type + struct foo { + hash_type hash; // 256-bit hash value + // ... other fields + }; + + // Hash accessor + struct foo_hash_accessor { + hash_type const& operator()( foo const& d ) const + { + return d.hash; + } + }; + \endcode + */ + typedef cds::opt::none hash_accessor; + + /// The size of hash value in bytes + /** + By default, the size of hash value is sizeof( hash_type ). + Sometimes it is not correct, for example, for that 6-byte struct \p static_assert will be thrown: + \code + struct key_type { + uint32_t key1; + uint16_t subkey; + }; + + static_assert( sizeof( key_type ) == 6, "Key type size mismatch" ); + \endcode + For that case you can specify \p hash_size explicitly. + + Value \p 0 means sizeof( hash_type ). + */ + static constexpr size_t const hash_size = 0; + + /// Hash splitter + /** + This trait specifies hash bit-string splitter algorithm. + By default, \p cds::algo::number_splitter is used if \p HashType is a number, + \p cds::algo::split_bitstring otherwise. + */ + typedef cds::opt::none hash_splitter; + + /// Disposer for removing data nodes + typedef cds::intrusive::opt::v::empty_disposer disposer; + + /// Hash comparing functor + /** + No default functor is provided. 
+ If the option is not specified, the \p less option is used. + */ + typedef cds::opt::none compare; + + /// Specifies binary predicate used for hash compare. + /** + If \p %less and \p %compare are not specified, \p memcmp() -like @ref bitwise_compare "bit-wise hash comparator" is used + because the hash value is treated as fixed-sized bit-string. + */ + typedef cds::opt::none less; + + /// Item counter + /** + The item counting is an important part of \p FeldmanHashSet algorithm: + the \p empty() member function depends on correct item counting. + Therefore, \p atomicity::empty_item_counter is not allowed as a type of the option. + + Default is \p atomicity::item_counter. To avoid false sharing you can aldo use \p atomicity::cache_friendly_item_counter + */ + typedef cds::atomicity::item_counter item_counter; + + /// Array node allocator + /** + Allocator for array nodes. The allocator is used for creating \p headNode and \p arrayNode when the set grows. + Default is \ref CDS_DEFAULT_ALLOCATOR + */ + typedef CDS_DEFAULT_ALLOCATOR node_allocator; + + /// C++ memory ordering model + /** + Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + */ + typedef cds::opt::v::relaxed_ordering memory_model; + + /// Back-off strategy + typedef cds::backoff::Default back_off; + + /// Internal statistics + /** + By default, internal statistics is disabled (\p feldman_hashset::empty_stat). + Use \p feldman_hashset::stat to enable it. 
+ */ + typedef empty_stat stat; + + /// RCU deadlock checking policy (only for \ref cds_intrusive_FeldmanHashSet_rcu "RCU-based FeldmanHashSet") + /** + List of available policy see \p opt::rcu_check_deadlock + */ + typedef cds::opt::v::rcu_throw_deadlock rcu_check_deadlock; + }; + + /// Metafunction converting option list to \p feldman_hashset::traits + /** + Supported \p Options are: + - \p feldman_hashset::hash_accessor - mandatory option, hash accessor functor. + @copydetails traits::hash_accessor + - \p feldman_hashset::hash_size - the size of hash value in bytes. + @copydetails traits::hash_size + - \p feldman_hashset::hash_splitter - a hash splitter algorithm + @copydetails traits::hash_splitter + - \p opt::node_allocator - array node allocator. + @copydetails traits::node_allocator + - \p opt::compare - hash comparison functor. No default functor is provided. + If the option is not specified, the \p opt::less is used. + - \p opt::less - specifies binary predicate used for hash comparison. + If the option is not specified, \p memcmp() -like bit-wise hash comparator is used + because the hash value is treated as fixed-sized bit-string. + - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used. + - \p opt::disposer - the functor used for disposing removed data node. Default is \p opt::v::empty_disposer. Due the nature + of GC schema the disposer may be called asynchronously. + - \p opt::item_counter - the type of item counting feature. + The item counting is an important part of \p FeldmanHashSet algorithm: + the \p empty() member function depends on correct item counting. + Therefore, \p atomicity::empty_item_counter is not allowed as a type of the option. + Default is \p atomicity::item_counter. To avoid false sharing you can use or \p atomicity::cache_friendly_item_counter + - \p opt::memory_model - C++ memory ordering model. 
Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + - \p opt::stat - internal statistics. By default, it is disabled (\p feldman_hashset::empty_stat). + To enable it use \p feldman_hashset::stat + - \p opt::rcu_check_deadlock - a deadlock checking policy for \ref cds_intrusive_FeldmanHashSet_rcu "RCU-based FeldmanHashSet" + Default is \p opt::v::rcu_throw_deadlock + */ + template + struct make_traits + { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + ,Options... + >::type type; +# endif + }; + + /// Bit-wise memcmp-based comparator for hash value \p T + template + struct bitwise_compare + { + /// Compares \p lhs and \p rhs + /** + Returns: + - < 0 if lhs < rhs + - 0 if lhs == rhs + - > 0 if lhs > rhs + */ + int operator()( T const& lhs, T const& rhs ) const + { + return memcmp( &lhs, &rhs, sizeof(T)); + } + }; + + /// One-level statistics, see \p FeldmanHashSet::get_level_statistics + struct level_statistics + { + size_t array_node_count; ///< Count of array node at the level + size_t node_capacity; ///< Array capacity + + size_t data_cell_count; ///< The number of data cells in all array node at this level + size_t array_cell_count; ///< The number of array cells in all array node at this level + size_t empty_cell_count; ///< The number of empty cells in all array node at this level + + //@cond + level_statistics() + : array_node_count(0) + , data_cell_count(0) + , array_cell_count(0) + , empty_cell_count(0) + {} + //@endcond + }; + + //@cond + namespace details { + template + using hash_splitter = cds::algo::split_bitstring< HashType, HashSize >; + + struct metrics { + size_t head_node_size; // power-of-two + size_t head_node_size_log; // log2( head_node_size ) + size_t array_node_size; // power-of-two 
+ size_t array_node_size_log;// log2( array_node_size ) + + static metrics make(size_t head_bits, size_t array_bits, size_t hash_size ) + { + size_t const hash_bits = hash_size * 8; + + if (array_bits < 2) + array_bits = 2; + if (head_bits < 4) + head_bits = 4; + if (head_bits > hash_bits) + head_bits = hash_bits; + if ((hash_bits - head_bits) % array_bits != 0) + head_bits += (hash_bits - head_bits) % array_bits; + + assert((hash_bits - head_bits) % array_bits == 0); + + metrics m; + m.head_node_size_log = head_bits; + m.head_node_size = size_t(1) << head_bits; + m.array_node_size_log = array_bits; + m.array_node_size = size_t(1) << array_bits; + return m; + } + }; + + } // namespace details + //@endcond + + //@cond + template + class multilevel_array + { + public: + typedef T value_type; + typedef Traits traits; + typedef typename Traits::node_allocator node_allocator; + typedef typename traits::memory_model memory_model; + typedef typename traits::back_off back_off; ///< Backoff strategy + typedef typename traits::stat stat; ///< Internal statistics type + + typedef typename traits::hash_accessor hash_accessor; + static_assert(!std::is_same< hash_accessor, cds::opt::none >::value, "hash_accessor functor must be specified"); + + /// Hash type deduced from \p hash_accessor return type + typedef typename std::decay< + typename std::remove_reference< + decltype(hash_accessor()(std::declval())) + >::type + >::type hash_type; + static_assert(!std::is_pointer::value, "hash_accessor should return a reference to hash value"); + + typedef typename cds::opt::details::make_comparator_from< + hash_type, + traits, + feldman_hashset::bitwise_compare< hash_type > + >::type hash_comparator; + + /// The size of hash_type in bytes, see \p traits::hash_size for explanation + static constexpr size_t const c_hash_size = traits::hash_size == 0 ? 
sizeof( hash_type ) : static_cast( traits::hash_size ); + + typedef typename std::conditional< + std::is_same< typename traits::hash_splitter, cds::opt::none >::value, + typename cds::algo::select_splitter< hash_type, c_hash_size >::type, + typename traits::hash_splitter + >::type hash_splitter; + + enum node_flags { + flag_array_converting = 1, ///< the cell is converting from data node to an array node + flag_array_node = 2 ///< the cell is a pointer to an array node + }; + + protected: + + typedef cds::details::marked_ptr< value_type, 3 > node_ptr; + typedef atomics::atomic< node_ptr > atomic_node_ptr; + + struct array_node { + array_node * const pParent; ///< parent array node + size_t const idxParent; ///< index in parent array node + atomic_node_ptr nodes[1]; ///< node array + + array_node(array_node * parent, size_t idx) + : pParent(parent) + , idxParent(idx) + {} + + array_node() = delete; + array_node(array_node const&) = delete; + array_node(array_node&&) = delete; + }; + + typedef cds::details::Allocator< array_node, node_allocator > cxx_array_node_allocator; + + struct traverse_data { + hash_splitter splitter; + array_node * pArr; + typename hash_splitter::uint_type nSlot; + size_t nHeight; + + traverse_data( hash_type const& hash, multilevel_array& arr ) + : splitter( hash ) + { + reset( arr ); + } + + void reset( multilevel_array& arr ) + { + splitter.reset(); + pArr = arr.head(); + nSlot = splitter.cut( static_cast( arr.metrics().head_node_size_log )); + assert( static_cast( nSlot ) < arr.metrics().head_node_size ); + nHeight = 1; + } + }; + + protected: + feldman_hashset::details::metrics const m_Metrics; + array_node * m_Head; + mutable stat m_Stat; + + public: + multilevel_array(size_t head_bits, size_t array_bits ) + : m_Metrics(feldman_hashset::details::metrics::make( head_bits, array_bits, c_hash_size )) + , m_Head( alloc_head_node()) + { + assert( hash_splitter::is_correct( static_cast( metrics().head_node_size_log ))); + assert( 
hash_splitter::is_correct( static_cast( metrics().array_node_size_log ))); + } + + ~multilevel_array() + { + destroy_tree(); + free_array_node( m_Head, head_size()); + } + + node_ptr traverse(traverse_data& pos) + { + back_off bkoff; + while (true) { + node_ptr slot = pos.pArr->nodes[pos.nSlot].load(memory_model::memory_order_acquire); + if ( slot.bits() == flag_array_node ) { + // array node, go down the tree + assert(slot.ptr() != nullptr); + assert( !pos.splitter.eos()); + pos.nSlot = pos.splitter.cut( static_cast( metrics().array_node_size_log )); + assert( static_cast( pos.nSlot ) < metrics().array_node_size ); + pos.pArr = to_array(slot.ptr()); + ++pos.nHeight; + } + else if (slot.bits() == flag_array_converting) { + // the slot is converting to array node right now + bkoff(); + stats().onSlotConverting(); + } + else { + // data node + assert(slot.bits() == 0); + return slot; + } + } // while + } + + size_t head_size() const + { + return m_Metrics.head_node_size; + } + + size_t array_node_size() const + { + return m_Metrics.array_node_size; + } + + void get_level_statistics(std::vector< feldman_hashset::level_statistics>& stat) const + { + stat.clear(); + gather_level_statistics(stat, 0, m_Head, head_size()); + } + + protected: + array_node * head() const + { + return m_Head; + } + + stat& stats() const + { + return m_Stat; + } + + feldman_hashset::details::metrics const& metrics() const + { + return m_Metrics; + } + + void destroy_tree() + { + // The function is not thread-safe. 
For use in dtor only + // Destroy all array nodes + destroy_array_nodes(m_Head, head_size()); + } + + void destroy_array_nodes(array_node * pArr, size_t nSize) + { + for (atomic_node_ptr * p = pArr->nodes, *pLast = p + nSize; p != pLast; ++p) { + node_ptr slot = p->load(memory_model::memory_order_relaxed); + if (slot.bits() == flag_array_node) { + destroy_array_nodes( to_array(slot.ptr()), array_node_size()); + free_array_node( to_array( slot.ptr()), array_node_size()); + p->store(node_ptr(), memory_model::memory_order_relaxed); + } + } + } + + static array_node * alloc_array_node(size_t nSize, array_node * pParent, size_t idxParent) + { + array_node * pNode = cxx_array_node_allocator().NewBlock(sizeof(array_node) + sizeof(atomic_node_ptr) * (nSize - 1), pParent, idxParent); + new (pNode->nodes) atomic_node_ptr[nSize]; + return pNode; + } + + array_node * alloc_head_node() const + { + return alloc_array_node(head_size(), nullptr, 0); + } + + array_node * alloc_array_node(array_node * pParent, size_t idxParent) const + { + return alloc_array_node(array_node_size(), pParent, idxParent); + } + + static void free_array_node( array_node * parr, size_t /*nSize*/ ) + { + cxx_array_node_allocator().Delete( parr, 1 ); + } + + union converter { + value_type * pData; + array_node * pArr; + + converter(value_type * p) + : pData(p) + {} + + converter(array_node * p) + : pArr(p) + {} + }; + + static array_node * to_array(value_type * p) + { + return converter(p).pArr; + } + static value_type * to_node(array_node * p) + { + return converter(p).pData; + } + + void gather_level_statistics(std::vector& stat, size_t nLevel, array_node * pArr, size_t nSize) const + { + if (stat.size() <= nLevel) { + stat.resize(nLevel + 1); + stat[nLevel].node_capacity = nSize; + } + + ++stat[nLevel].array_node_count; + for (atomic_node_ptr * p = pArr->nodes, *pLast = p + nSize; p != pLast; ++p) { + node_ptr slot = p->load(memory_model::memory_order_relaxed); + if (slot.bits()) { + 
++stat[nLevel].array_cell_count; + if (slot.bits() == flag_array_node) + gather_level_statistics(stat, nLevel + 1, to_array(slot.ptr()), array_node_size()); + } + else if (slot.ptr()) + ++stat[nLevel].data_cell_count; + else + ++stat[nLevel].empty_cell_count; + } + } + + bool expand_slot( traverse_data& pos, node_ptr current) + { + assert( !pos.splitter.eos()); + return expand_slot( pos.pArr, pos.nSlot, current, pos.splitter.bit_offset()); + } + + private: + bool expand_slot(array_node * pParent, size_t idxParent, node_ptr current, size_t nOffset) + { + assert(current.bits() == 0); + assert(current.ptr()); + + array_node * pArr = alloc_array_node(pParent, idxParent); + + node_ptr cur(current.ptr()); + atomic_node_ptr& slot = pParent->nodes[idxParent]; + if (!slot.compare_exchange_strong(cur, cur | flag_array_converting, memory_model::memory_order_release, atomics::memory_order_relaxed)) + { + stats().onExpandNodeFailed(); + free_array_node( pArr, array_node_size()); + return false; + } + + typename hash_splitter::uint_type idx = hash_splitter( hash_accessor()(*current.ptr()), nOffset ).cut( + static_cast( m_Metrics.array_node_size_log )); + pArr->nodes[idx].store(current, memory_model::memory_order_release); + + cur = cur | flag_array_converting; + CDS_VERIFY( + slot.compare_exchange_strong(cur, node_ptr(to_node(pArr), flag_array_node), memory_model::memory_order_release, atomics::memory_order_relaxed) + ); + + stats().onExpandNodeSuccess(); + stats().onArrayNodeCreated(); + return true; + } + }; + //@endcond + } // namespace feldman_hashset + + //@cond + // Forward declaration + template < class GC, typename T, class Traits = feldman_hashset::traits > + class FeldmanHashSet; + //@endcond + +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_FELDMAN_HASHSET_BASE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/iterable_list_base.h 
b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/iterable_list_base.h new file mode 100644 index 0000000..113b72d --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/iterable_list_base.h @@ -0,0 +1,292 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_INTRUSIVE_DETAILS_ITERABLE_LIST_BASE_H +#define CDSLIB_INTRUSIVE_DETAILS_ITERABLE_LIST_BASE_H + +#include +#include +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + /// \p IterableList ordered list related definitions + /** @ingroup cds_intrusive_helper + */ + namespace iterable_list { + + /// Node type + template + struct node + { + typedef T value_type; ///< Value type + typedef cds::details::marked_ptr marked_data_ptr; ///< marked pointer to the value + + atomics::atomic< node* > next; ///< pointer to next node in the list + atomics::atomic< marked_data_ptr > data; ///< pointer to user data, \p nullptr if the node is free + + //@cond + node() + { + next.store( nullptr, atomics::memory_order_release ); + data.store( marked_data_ptr(), atomics::memory_order_release ); + } + + node( value_type * pVal ) + { + next.store( nullptr, atomics::memory_order_release ); + data.store( marked_data_ptr( pVal ), atomics::memory_order_release ); + } + //@endcond + }; + + /// \p IterableList internal statistics + template + struct stat { + typedef EventCounter event_counter; ///< Event counter type + + event_counter m_nInsertSuccess; ///< Number of success \p insert() operations + event_counter m_nInsertFailed; ///< Number of failed \p insert() operations + event_counter m_nInsertRetry; ///< Number of attempts to insert new item + event_counter m_nReuseNode; ///< Number of reusing empty node when inserting/updating + event_counter m_nNodeMarkFailed; ///< Number of unsuccessful marking attempts when we try to insert new data + event_counter m_nNodeSeqBreak; ///< Number of breaking sequence events of \p prev -> \p next node when we try to insert new data + event_counter m_nNullPrevABA; ///< Number of ABA-problem for \p nullptr prev node + event_counter m_nNewNodeCreated; ///< Number of new node created when we try to insert new data + event_counter m_nUpdateNew; ///< Number of new item inserted for \p update() + 
event_counter m_nUpdateExisting; ///< Number of existing item updates + event_counter m_nUpdateFailed; ///< Number of failed \p update() call + event_counter m_nUpdateRetry; ///< Number of attempts to update the item + event_counter m_nEraseSuccess; ///< Number of successful \p erase(), \p unlink(), \p extract() operations + event_counter m_nEraseFailed; ///< Number of failed \p erase(), \p unlink(), \p extract() operations + event_counter m_nEraseRetry; ///< Number of attempts to \p erase() an item + event_counter m_nFindSuccess; ///< Number of successful \p find() and \p get() operations + event_counter m_nFindFailed; ///< Number of failed \p find() and \p get() operations + + event_counter m_nNodeCreated; ///< Number of created internal nodes + event_counter m_nNodeRemoved; ///< Number of removed internal nodes + + //@cond + void onInsertSuccess() { ++m_nInsertSuccess; } + void onInsertFailed() { ++m_nInsertFailed; } + void onInsertRetry() { ++m_nInsertRetry; } + void onReuseNode() { ++m_nReuseNode; } + void onNodeMarkFailed() { ++m_nNodeMarkFailed; } + void onNodeSeqBreak() { ++m_nNodeSeqBreak; } + void onNullPrevABA() { ++m_nNullPrevABA; } + void onNewNodeCreated() { ++m_nNewNodeCreated; } + void onUpdateNew() { ++m_nUpdateNew; } + void onUpdateExisting() { ++m_nUpdateExisting; } + void onUpdateFailed() { ++m_nUpdateFailed; } + void onUpdateRetry() { ++m_nUpdateRetry; } + void onEraseSuccess() { ++m_nEraseSuccess; } + void onEraseFailed() { ++m_nEraseFailed; } + void onEraseRetry() { ++m_nEraseRetry; } + void onFindSuccess() { ++m_nFindSuccess; } + void onFindFailed() { ++m_nFindFailed; } + + void onNodeCreated() { ++m_nNodeCreated; } + void onNodeRemoved() { ++m_nNodeRemoved; } + //@endcond + }; + + /// \p IterableList empty internal statistics + struct empty_stat { + //@cond + void onInsertSuccess() const {} + void onInsertFailed() const {} + void onInsertRetry() const {} + void onReuseNode() const {} + void onNodeMarkFailed() const {} + void 
onNodeSeqBreak() const {} + void onNullPrevABA() const {} + void onNewNodeCreated() const {} + void onUpdateNew() const {} + void onUpdateExisting() const {} + void onUpdateFailed() const {} + void onUpdateRetry() const {} + void onEraseSuccess() const {} + void onEraseFailed() const {} + void onEraseRetry() const {} + void onFindSuccess() const {} + void onFindFailed() const {} + + void onNodeCreated() const {} + void onNodeRemoved() const {} + //@endcond + }; + + //@cond + template > + struct wrapped_stat { + typedef Stat stat_type; + + wrapped_stat( stat_type& st ) + : m_stat( st ) + {} + + void onInsertSuccess() { m_stat.onInsertSuccess(); } + void onInsertFailed() { m_stat.onInsertFailed(); } + void onInsertRetry() { m_stat.onInsertRetry(); } + void onReuseNode() { m_stat.onReuseNode(); } + void onNodeMarkFailed() { m_stat.onNodeMarkFailed();} + void onNodeSeqBreak() { m_stat.onNodeSeqBreak(); } + void onNullPrevABA() { m_stat.onNullPrevABA(); } + void onNewNodeCreated() { m_stat.onNewNodeCreated();} + void onUpdateNew() { m_stat.onUpdateNew(); } + void onUpdateExisting() { m_stat.onUpdateExisting();} + void onUpdateFailed() { m_stat.onUpdateFailed(); } + void onUpdateRetry() { m_stat.onUpdateRetry(); } + void onEraseSuccess() { m_stat.onEraseSuccess(); } + void onEraseFailed() { m_stat.onEraseFailed(); } + void onEraseRetry() { m_stat.onEraseRetry(); } + void onFindSuccess() { m_stat.onFindSuccess(); } + void onFindFailed() { m_stat.onFindFailed(); } + + void onNodeCreated() { m_stat.onNodeCreated(); } + void onNodeRemoved() { m_stat.onNodeRemoved(); } + + stat_type& m_stat; + }; + //@endcond + + + /// \p IterableList traits + struct traits + { + /// Key comparison functor + /** + No default functor is provided. If the option is not specified, the \p less is used. + */ + typedef opt::none compare; + + /// Specifies binary predicate used for key compare. 
+ /** + Default is \p std::less + */ + typedef opt::none less; + + /// Node allocator + typedef CDS_DEFAULT_ALLOCATOR node_allocator; + + /// Back-off strategy + typedef cds::backoff::Default back_off; + + /// Disposer for removing items + typedef opt::v::empty_disposer disposer; + + /// Internal statistics + /** + By default, internal statistics is disabled (\p iterable_list::empty_stat). + Use \p iterable_list::stat to enable it. + */ + typedef empty_stat stat; + + /// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter or \p atomicity::cache_friendly_item_counter to enable item counting + typedef atomicity::empty_item_counter item_counter; + + /// C++ memory ordering model + /** + Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + */ + typedef opt::v::relaxed_ordering memory_model; + }; + + /// Metafunction converting option list to \p iterable_list::traits + /** + Supported \p Options are: + - \p opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the \p opt::less is used. + - \p opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - \p opt::node_allocator - node allocator, default is \p std::allocator. + - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used. + - \p opt::disposer - the functor used for disposing removed items. Default is \p opt::v::empty_disposer. Due the nature + of GC schema the disposer may be called asynchronously. + - \p opt::item_counter - the type of item counting feature. Default is disabled (\p atomicity::empty_item_counter). + To enable item counting use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter + - \p opt::stat - internal statistics. By default, it is disabled (\p iterable_list::empty_stat). 
+ To enable it use \p iterable_list::stat + - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consistent memory model). + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + ,Options... + >::type type; +# endif + }; + + + //@cond + template + struct select_stat_wrapper + { + typedef Stat stat; + typedef iterable_list::wrapped_stat wrapped_stat; + enum { + empty = false + }; + }; + + template <> + struct select_stat_wrapper< empty_stat > + { + typedef empty_stat stat; + typedef empty_stat wrapped_stat; + enum { + empty = true + }; + }; + + template + struct select_stat_wrapper< iterable_list::wrapped_stat>: public select_stat_wrapper + {}; + //@endcond + + } // namespace iterable_list + + //@cond + // Forward declaration + template < class GC, typename T, class Traits = iterable_list::traits > + class IterableList; + //@endcond + +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_ITERABLE_LIST_BASE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/lazy_list_base.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/lazy_list_base.h new file mode 100644 index 0000000..bf3731c --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/lazy_list_base.h @@ -0,0 +1,474 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that 
the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_DETAILS_LAZY_LIST_BASE_H +#define CDSLIB_INTRUSIVE_DETAILS_LAZY_LIST_BASE_H + +#include +#include +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + /// LazyList ordered list related definitions + /** @ingroup cds_intrusive_helper + */ + namespace lazy_list { + /// Lazy list node + /** + Template parameters: + - GC - garbage collector + - Lock - lock type. 
Default is \p cds::sync::spin + - Tag - a \ref cds_intrusive_hook_tag "tag" + */ + template < + class GC + ,typename Lock = cds::sync::spin + ,typename Tag = opt::none + > + struct node + { + typedef GC gc ; ///< Garbage collector + typedef Lock lock_type ; ///< Lock type + typedef Tag tag ; ///< tag + + typedef cds::details::marked_ptr marked_ptr ; ///< marked pointer + typedef typename gc::template atomic_marked_ptr< marked_ptr> atomic_marked_ptr ; ///< atomic marked pointer specific for GC + + atomic_marked_ptr m_pNext; ///< pointer to the next node in the list + logical deletion mark + mutable lock_type m_Lock; ///< Node lock + + /// Checks if node is marked + bool is_marked() const + { + return m_pNext.load(atomics::memory_order_relaxed).bits() != 0; + } + + /// Default ctor + node() + : m_pNext( nullptr ) + {} + }; + + //@cond + template + struct node_cleaner { + void operator()( Node * p ) + { + typedef typename Node::marked_ptr marked_ptr; + p->m_pNext.store( marked_ptr(), MemoryModel::memory_order_release ); + } + }; + //@endcond + + //@cond + struct undefined_gc; + struct default_hook { + typedef undefined_gc gc; + typedef opt::none tag; + typedef sync::spin lock_type; + }; + //@endcond + + //@cond + template < typename HookType, typename... Options> + struct hook + { + typedef typename opt::make_options< default_hook, Options...>::type options; + typedef typename options::gc gc; + typedef typename options::tag tag; + typedef typename options::lock_type lock_type; + typedef node node_type; + typedef HookType hook_type; + }; + //@endcond + + /// Base hook + /** + \p Options are: + - opt::gc - garbage collector + - opt::lock_type - lock type used for node locking. Default is sync::spin + - opt::tag - a \ref cds_intrusive_hook_tag "tag" + */ + template < typename... Options > + struct base_hook: public hook< opt::base_hook_tag, Options... > + {}; + + /// Member hook + /** + \p MemberOffset defines offset in bytes of \ref node member into your structure. 
+ Use \p offsetof macro to define \p MemberOffset + + \p Options are: + - opt::gc - garbage collector + - opt::lock_type - lock type used for node locking. Default is sync::spin + - opt::tag - a \ref cds_intrusive_hook_tag "tag" + */ + template < size_t MemberOffset, typename... Options > + struct member_hook: public hook< opt::member_hook_tag, Options... > + { + //@cond + static const size_t c_nMemberOffset = MemberOffset; + //@endcond + }; + + /// Traits hook + /** + \p NodeTraits defines type traits for node. + See \ref node_traits for \p NodeTraits interface description + + \p Options are: + - opt::gc - garbage collector used. + - opt::lock_type - lock type used for node locking. Default is sync::spin + - opt::tag - a \ref cds_intrusive_hook_tag "tag" + */ + template + struct traits_hook: public hook< opt::traits_hook_tag, Options... > + { + //@cond + typedef NodeTraits node_traits; + //@endcond + }; + + /// Check link + template + struct link_checker + { + //@cond + typedef Node node_type; + //@endcond + + /// Checks if the link field of node \p pNode is \p nullptr + /** + An asserting is generated if \p pNode link field is not \p nullptr + */ + static void is_empty( node_type const * pNode ) + { + assert( pNode->m_pNext.load( atomics::memory_order_relaxed ) == nullptr ); + CDS_UNUSED( pNode ); + } + }; + + //@cond + template + struct link_checker_selector; + + template + struct link_checker_selector< GC, Node, opt::never_check_link > + { + typedef intrusive::opt::v::empty_link_checker type; + }; + + template + struct link_checker_selector< GC, Node, opt::debug_check_link > + { +# ifdef _DEBUG + typedef link_checker type; +# else + typedef intrusive::opt::v::empty_link_checker type; +# endif + }; + + template + struct link_checker_selector< GC, Node, opt::always_check_link > + { + typedef link_checker type; + }; + //@endcond + + /// Metafunction for selecting appropriate link checking policy + template < typename Node, opt::link_check_type LinkType > + struct 
get_link_checker + { + //@cond + typedef typename link_checker_selector< typename Node::gc, Node, LinkType>::type type; + //@endcond + }; + + /// \p LazyList internal statistics + template + struct stat { + typedef EventCounter event_counter; ///< Event counter type + + event_counter m_nInsertSuccess; ///< Number of success \p insert() operations + event_counter m_nInsertFailed; ///< Number of failed \p insert() operations + event_counter m_nInsertRetry; ///< Number of attempts to insert new item + event_counter m_nUpdateNew; ///< Number of new item inserted for \p update() + event_counter m_nUpdateExisting; ///< Number of existing item updates + event_counter m_nUpdateFailed; ///< Number of failed \p update() call + event_counter m_nUpdateRetry; ///< Number of attempts to \p update() the item + event_counter m_nUpdateMarked; ///< Number of attempts to \p update() logically deleted (marked) items + event_counter m_nEraseSuccess; ///< Number of successful \p erase(), \p unlink(), \p extract() operations + event_counter m_nEraseFailed; ///< Number of failed \p erase(), \p unlink(), \p extract() operations + event_counter m_nEraseRetry; ///< Number of attempts to \p erase() an item + event_counter m_nFindSuccess; ///< Number of successful \p find() and \p get() operations + event_counter m_nFindFailed; ///< Number of failed \p find() and \p get() operations + + event_counter m_nValidationSuccess; ///< Number of successful validating of search result + event_counter m_nValidationFailed; ///< Number of failed validating of search result + + //@cond + void onInsertSuccess() { ++m_nInsertSuccess; } + void onInsertFailed() { ++m_nInsertFailed; } + void onInsertRetry() { ++m_nInsertRetry; } + void onUpdateNew() { ++m_nUpdateNew; } + void onUpdateExisting() { ++m_nUpdateExisting; } + void onUpdateFailed() { ++m_nUpdateFailed; } + void onUpdateRetry() { ++m_nUpdateRetry; } + void onUpdateMarked() { ++m_nUpdateMarked; } + void onEraseSuccess() { ++m_nEraseSuccess; } + void 
onEraseFailed() { ++m_nEraseFailed; } + void onEraseRetry() { ++m_nEraseRetry; } + void onFindSuccess() { ++m_nFindSuccess; } + void onFindFailed() { ++m_nFindFailed; } + + void onValidationSuccess() { ++m_nValidationSuccess; } + void onValidationFailed() { ++m_nValidationFailed; } + //@endcond + }; + + /// \p LazyList empty internal statistics + struct empty_stat { + //@cond + void onInsertSuccess() const {} + void onInsertFailed() const {} + void onInsertRetry() const {} + void onUpdateNew() const {} + void onUpdateExisting() const {} + void onUpdateFailed() const {} + void onUpdateRetry() const {} + void onUpdateMarked() const {} + void onEraseSuccess() const {} + void onEraseFailed() const {} + void onEraseRetry() const {} + void onFindSuccess() const {} + void onFindFailed() const {} + + void onValidationSuccess() const {} + void onValidationFailed() const {} + //@endcond + }; + + //@cond + template > + struct wrapped_stat { + typedef Stat stat_type; + + wrapped_stat( stat_type& st ) + : m_stat( st ) + {} + + void onInsertSuccess() { m_stat.onInsertSuccess(); } + void onInsertFailed() { m_stat.onInsertFailed(); } + void onInsertRetry() { m_stat.onInsertRetry(); } + void onUpdateNew() { m_stat.onUpdateNew(); } + void onUpdateExisting() { m_stat.onUpdateExisting(); } + void onUpdateFailed() { m_stat.onUpdateFailed(); } + void onUpdateRetry() { m_stat.onUpdateRetry(); } + void onUpdateMarked() { m_stat.onUpdateMarked(); } + void onEraseSuccess() { m_stat.onEraseSuccess(); } + void onEraseFailed() { m_stat.onEraseFailed(); } + void onEraseRetry() { m_stat.onEraseRetry(); } + void onFindSuccess() { m_stat.onFindSuccess(); } + void onFindFailed() { m_stat.onFindFailed(); } + + void onValidationSuccess() { m_stat.onValidationSuccess(); } + void onValidationFailed() { m_stat.onValidationFailed(); } + + stat_type& m_stat; + }; + //@endcond + + + /// LazyList traits + struct traits + { + /// Hook used + /** + Possible values are: \p lazy_list::base_hook, \p 
lazy_list::member_hook, \p lazy_list::traits_hook. + */ + typedef base_hook<> hook; + + /// Key comparing functor + /** + No default functor is provided. If the functor is not specified, the \p less is used. + */ + typedef opt::none compare; + + /// Specifies binary predicate used for comparing keys + /** + Default is \p std::less. + */ + typedef opt::none less; + + /// Specifies binary functor used for comparing keys for equality (for unordered list only) + /** + If \p equal_to option is not specified, \p compare is used, if \p compare is not specified, \p less is used, + if \p less is not specified, then \p std::equal_to is used. + */ + typedef opt::none equal_to; + + /// Specifies list ordering policy + /** + If \p sort is \p true, than list maintains items in sorted order, otherwise the list is unordered. + Default is \p true. + Note that if \p sort is \p false, than lookup operations scan entire list. + */ + static const bool sort = true; + + /// Back-off strategy + typedef cds::backoff::Default back_off; + + /// Disposer for removing items + typedef opt::v::empty_disposer disposer; + + /// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter or \p atomicity::cache_friendly_item_counter to enable item counting + typedef atomicity::empty_item_counter item_counter; + + /// Internal statistics + /** + By default, internal statistics is disabled (\p lazy_list::empty_stat). + Use \p lazy_list::stat to enable it. + */ + typedef empty_stat stat; + + /// Link fields checking feature + /** + Default is \p opt::debug_check_link + */ + static const opt::link_check_type link_checker = opt::debug_check_link; + + /// C++ memory ordering model + /** + Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). 
+ */ + typedef opt::v::relaxed_ordering memory_model; + + /// RCU deadlock checking policy (only for \ref cds_intrusive_LazyList_rcu "RCU-based LazyList") + /** + List of available options see \p opt::rcu_check_deadlock + */ + typedef opt::v::rcu_throw_deadlock rcu_check_deadlock; + }; + + /// Metafunction converting option list to \p lazy_list::traits + /** + Supported \p Options are: + - \p opt::hook - hook used. Possible values are: \p lazy_list::base_hook, \p lazy_list::member_hook, \p lazy_list::traits_hook. + If the option is not specified, \p %lazy_list::base_hook and \p gc::HP is used. + - \p opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the \p opt::less is used. + - \p opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - \p opt::equal_to - specifies binary functor for comparing keys for equality. This option is applicable only for unordered list. + If \p equal_to is not specified, \p compare is used, \p compare is not specified, \p less is used. + - \p opt::sort - specifies ordering policy. Default value is \p true, i.e. the list is ordered. + Note: unordering feature is not fully supported yet. + - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used. + - \p opt::disposer - the functor used for dispose removed items. Default is \p opt::v::empty_disposer. Due the nature + of GC schema the disposer may be called asynchronously. + - \p opt::link_checker - the type of node's link fields checking. Default is \p opt::debug_check_link + - \p opt::item_counter - the type of item counting feature. Default is disabled (\p atomicity::empty_item_counter). + To enable item counting use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter + - \p opt::stat - internal statistics. By default, it is disabled (\p lazy_list::empty_stat). 
+ To enable it use \p lazy_list::stat + - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + - \p opt::rcu_check_deadlock - a deadlock checking policy for \ref cds_intrusive_MichaelList_rcu "RCU-based MichaelList" + Default is \p opt::v::rcu_throw_deadlock + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + ,Options... + >::type type; +# endif + }; + + //@cond + template + struct select_stat_wrapper + { + typedef Stat stat; + typedef lazy_list::wrapped_stat wrapped_stat; + enum { + empty = false + }; + }; + + template <> + struct select_stat_wrapper< empty_stat > + { + typedef empty_stat stat; + typedef empty_stat wrapped_stat; + enum { + empty = true + }; + }; + + template + struct select_stat_wrapper< lazy_list::wrapped_stat>: public select_stat_wrapper< Stat > + {}; + //@endcond + + } // namespace lazy_list + + //@cond + // Forward declaration + template < class GC, typename T, class Traits = lazy_list::traits > + class LazyList; + //@endcond + + //@cond + template + struct is_lazy_list { + enum { + value = false + }; + }; + + template + struct is_lazy_list< LazyList< GC, T, Traits >> { + enum { + value = true + }; + }; + //@endcond + +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_LAZY_LIST_BASE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/michael_list_base.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/michael_list_base.h new file mode 100644 index 0000000..dcc96aa --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/michael_list_base.h @@ -0,0 +1,439 @@ +/* + This file is a part 
of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_INTRUSIVE_DETAILS_MICHAEL_LIST_BASE_H +#define CDSLIB_INTRUSIVE_DETAILS_MICHAEL_LIST_BASE_H + +#include +#include +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + /// MichaelList ordered list related definitions + /** @ingroup cds_intrusive_helper + */ + namespace michael_list { + /// Michael's list node + /** + Template parameters: + - \p GC - garbage collector + - \p Tag - a \ref cds_intrusive_hook_tag "tag" + */ + template + struct node + { + typedef GC gc ; ///< Garbage collector + typedef Tag tag ; ///< tag + + typedef cds::details::marked_ptr marked_ptr; ///< marked pointer + typedef typename gc::template atomic_marked_ptr atomic_marked_ptr; ///< atomic marked pointer specific for GC + + atomic_marked_ptr m_pNext ; ///< pointer to the next node in the container + + constexpr node() noexcept + : m_pNext( nullptr ) + {} + }; + + //@cond + template + struct node_cleaner { + void operator()( Node * p ) + { + typedef typename Node::marked_ptr marked_ptr; + p->m_pNext.store( marked_ptr(), MemoryModel::memory_order_release ); + } + }; + //@endcond + + //@cond + struct undefined_gc; + struct default_hook { + typedef undefined_gc gc; + typedef opt::none tag; + }; + //@endcond + + //@cond + template < typename HookType, typename... Options> + struct hook + { + typedef typename opt::make_options< default_hook, Options...>::type options; + typedef typename options::gc gc; + typedef typename options::tag tag; + typedef node node_type; + typedef HookType hook_type; + }; + //@endcond + + /// Base hook + /** + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - a \ref cds_intrusive_hook_tag "tag" + */ + template < typename... Options > + struct base_hook: public hook< opt::base_hook_tag, Options... > + {}; + + /// Member hook + /** + \p MemberOffset defines offset in bytes of \ref node member into your structure. 
+ Use \p offsetof macro to define \p MemberOffset + + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - a \ref cds_intrusive_hook_tag "tag" + */ + template < size_t MemberOffset, typename... Options > + struct member_hook: public hook< opt::member_hook_tag, Options... > + { + //@cond + static const size_t c_nMemberOffset = MemberOffset; + //@endcond + }; + + /// Traits hook + /** + \p NodeTraits defines type traits for node. + See \ref node_traits for \p NodeTraits interface description + + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - a \ref cds_intrusive_hook_tag "tag" + */ + template + struct traits_hook: public hook< opt::traits_hook_tag, Options... > + { + //@cond + typedef NodeTraits node_traits; + //@endcond + }; + + /// Checks link + template + struct link_checker + { + //@cond + typedef Node node_type; + //@endcond + + /// Checks if the link field of node \p pNode is \p nullptr + /** + An asserting is generated if \p pNode link field is not \p nullptr + */ + static void is_empty( const node_type * pNode ) + { + assert( pNode->m_pNext.load( atomics::memory_order_relaxed ) == nullptr ); + CDS_UNUSED( pNode ); + } + }; + + //@cond + template + struct link_checker_selector; + + template + struct link_checker_selector< GC, Node, opt::never_check_link > + { + typedef intrusive::opt::v::empty_link_checker type; + }; + + template + struct link_checker_selector< GC, Node, opt::debug_check_link > + { +# ifdef _DEBUG + typedef link_checker type; +# else + typedef intrusive::opt::v::empty_link_checker type; +# endif + }; + + template + struct link_checker_selector< GC, Node, opt::always_check_link > + { + typedef link_checker type; + }; + //@endcond + + /// Metafunction for selecting appropriate link checking policy + template < typename Node, opt::link_check_type LinkType > + struct get_link_checker + { + //@cond + typedef typename link_checker_selector< typename Node::gc, Node, LinkType>::type type; + //@endcond + }; + + + 
/// \p MichaelList internal statistics + template + struct stat { + typedef EventCounter event_counter; ///< Event counter type + + event_counter m_nInsertSuccess; ///< Number of success \p insert() operations + event_counter m_nInsertFailed; ///< Number of failed \p insert() operations + event_counter m_nInsertRetry; ///< Number of attempts to insert new item + event_counter m_nUpdateNew; ///< Number of new item inserted for \p update() + event_counter m_nUpdateExisting; ///< Number of existing item updates + event_counter m_nUpdateFailed; ///< Number of failed \p update() call + event_counter m_nUpdateRetry; ///< Number of attempts to \p update() the item + event_counter m_nUpdateMarked; ///< Number of attempts to \p update() logically deleted (marked) items + event_counter m_nEraseSuccess; ///< Number of successful \p erase(), \p unlink(), \p extract() operations + event_counter m_nEraseFailed; ///< Number of failed \p erase(), \p unlink(), \p extract() operations + event_counter m_nEraseRetry; ///< Number of attempts to \p erase() an item + event_counter m_nFindSuccess; ///< Number of successful \p find() and \p get() operations + event_counter m_nFindFailed; ///< Number of failed \p find() and \p get() operations + + event_counter m_nHelpingSuccess; ///< Number of successful help attempts to remove marked item during searching + event_counter m_nHelpingFailed; ///< Number if failed help attempts to remove marked item during searching + + //@cond + void onInsertSuccess() { ++m_nInsertSuccess; } + void onInsertFailed() { ++m_nInsertFailed; } + void onInsertRetry() { ++m_nInsertRetry; } + void onUpdateNew() { ++m_nUpdateNew; } + void onUpdateExisting() { ++m_nUpdateExisting; } + void onUpdateFailed() { ++m_nUpdateFailed; } + void onUpdateRetry() { ++m_nUpdateRetry; } + void onUpdateMarked() { ++m_nUpdateMarked; } + void onEraseSuccess() { ++m_nEraseSuccess; } + void onEraseFailed() { ++m_nEraseFailed; } + void onEraseRetry() { ++m_nEraseRetry; } + void 
onFindSuccess() { ++m_nFindSuccess; } + void onFindFailed() { ++m_nFindFailed; } + + void onHelpingSuccess() { ++m_nHelpingSuccess; } + void onHelpingFailed() { ++m_nHelpingFailed; } + //@endcond + }; + + /// \p MichaelList empty internal statistics + struct empty_stat { + //@cond + void onInsertSuccess() const {} + void onInsertFailed() const {} + void onInsertRetry() const {} + void onUpdateNew() const {} + void onUpdateExisting() const {} + void onUpdateFailed() const {} + void onUpdateRetry() const {} + void onUpdateMarked() const {} + void onEraseSuccess() const {} + void onEraseFailed() const {} + void onEraseRetry() const {} + void onFindSuccess() const {} + void onFindFailed() const {} + + void onHelpingSuccess() const {} + void onHelpingFailed() const {} + //@endcond + }; + + //@cond + template > + struct wrapped_stat { + typedef Stat stat_type; + + wrapped_stat( stat_type& st ) + : m_stat( st ) + {} + + void onInsertSuccess() { m_stat.onInsertSuccess(); } + void onInsertFailed() { m_stat.onInsertFailed(); } + void onInsertRetry() { m_stat.onInsertRetry(); } + void onUpdateNew() { m_stat.onUpdateNew(); } + void onUpdateExisting() { m_stat.onUpdateExisting(); } + void onUpdateFailed() { m_stat.onUpdateFailed(); } + void onUpdateRetry() { m_stat.onUpdateRetry(); } + void onUpdateMarked() { m_stat.onUpdateMarked(); } + void onEraseSuccess() { m_stat.onEraseSuccess(); } + void onEraseFailed() { m_stat.onEraseFailed(); } + void onEraseRetry() { m_stat.onEraseRetry(); } + void onFindSuccess() { m_stat.onFindSuccess(); } + void onFindFailed() { m_stat.onFindFailed(); } + + void onHelpingSuccess() { m_stat.onHelpingSuccess(); } + void onHelpingFailed() { m_stat.onHelpingFailed(); } + + stat_type& m_stat; + }; + //@endcond + + /// MichaelList traits + struct traits + { + /// Hook used + /** + Possible values are: \p michael_list::base_hook, \p michael_list::member_hook, \p michael_list::traits_hook. 
+ */ + typedef base_hook<> hook; + + /// Key comparison functor + /** + No default functor is provided. If the option is not specified, the \p less is used. + */ + typedef opt::none compare; + + /// Specifies binary predicate used for key compare. + /** + Default is \p std::less. + */ + typedef opt::none less; + + /// Back-off strategy + typedef cds::backoff::Default back_off; + + /// Disposer for removing items + typedef opt::v::empty_disposer disposer; + + /// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter or \p atomicity::cache_friendly_item_counter to enable item counting + typedef atomicity::empty_item_counter item_counter; + + /// Internal statistics + /** + By default, internal statistics is disabled (\p michael_list::empty_stat). + Use \p michael_list::stat to enable it. + */ + typedef empty_stat stat; + + /// Link fields checking feature + /** + Default is \p opt::debug_check_link + */ + static const opt::link_check_type link_checker = opt::debug_check_link; + + /// C++ memory ordering model + /** + Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + */ + typedef opt::v::relaxed_ordering memory_model; + + /// RCU deadlock checking policy (only for \ref cds_intrusive_MichaelList_rcu "RCU-based MichaelList") + /** + List of available policy see \p opt::rcu_check_deadlock + */ + typedef opt::v::rcu_throw_deadlock rcu_check_deadlock; + }; + + /// Metafunction converting option list to \p michael_list::traits + /** + Supported \p Options are: + - \p opt::hook - hook used. Possible values are: \p michael_list::base_hook, \p michael_list::member_hook, \p michael_list::traits_hook. + If the option is not specified, \p %michael_list::base_hook<> and \p gc::HP is used. + - \p opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the \p opt::less is used. 
+ - \p opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used. + - \p opt::disposer - the functor used for disposing removed items. Default is \p opt::v::empty_disposer. Due the nature + of GC schema the disposer may be called asynchronously. + - \p opt::link_checker - the type of node's link fields checking. Default is \p opt::debug_check_link + - \p opt::item_counter - the type of item counting feature. Default is disabled (\p atomicity::empty_item_counter). + To enable item counting use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter + - \p opt::stat - internal statistics. By default, it is disabled (\p michael_list::empty_stat). + To enable it use \p michael_list::stat + - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consistent memory model). + - \p opt::rcu_check_deadlock - a deadlock checking policy for \ref cds_intrusive_MichaelList_rcu "RCU-based MichaelList" + Default is \p opt::v::rcu_throw_deadlock + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + ,Options... 
+ >::type type; +# endif + }; + + + //@cond + template + struct select_stat_wrapper + { + typedef Stat stat; + typedef michael_list::wrapped_stat wrapped_stat; + enum { + empty = false + }; + }; + + template <> + struct select_stat_wrapper< empty_stat > + { + typedef empty_stat stat; + typedef empty_stat wrapped_stat; + enum { + empty = true + }; + }; + + template + struct select_stat_wrapper< michael_list::wrapped_stat>: public select_stat_wrapper< Stat > + {}; + + //@endcond + + } // namespace michael_list + + //@cond + // Forward declaration + template < class GC, typename T, class Traits = michael_list::traits > + class MichaelList; + //@endcond + + + //@cond + template + struct is_michael_list { + enum { + value = false + }; + }; + + template + struct is_michael_list< MichaelList< GC, T, Traits >> { + enum { + value = true + }; + }; + //@endcond + +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_MICHAEL_LIST_BASE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/michael_set_base.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/michael_set_base.h new file mode 100644 index 0000000..8e63cf3 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/michael_set_base.h @@ -0,0 +1,236 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_DETAILS_MICHAEL_SET_BASE_H +#define CDSLIB_INTRUSIVE_DETAILS_MICHAEL_SET_BASE_H + +#include +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + /// MichaelHashSet related definitions + /** @ingroup cds_intrusive_helper + */ + namespace michael_set { + /// MichaelHashSet traits + struct traits { + /// Hash function + /** + Hash function converts the key fields of struct \p T stored in the hash-set + into value of type \p size_t called hash value that is an index of hash table. + + This is mandatory type and has no predefined one. + */ + typedef opt::none hash; + + /// Item counter + /** + The item counting is an important part of \p MichaelHashSet algorithm: + the \p empty() member function depends on correct item counting. + You may use \p atomicity::empty_item_counter if don't need \p empty() and \p size() + member functions. 
+ + Default is \p atomicity::item_counter; to avoid false sharing you may use \p atomicity::cache_friendly_item_counter + */ + typedef cds::atomicity::item_counter item_counter; + + /// Bucket table allocator + /** + Allocator for bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR + The allocator uses only in constructor for allocating bucket table + and in destructor for destroying bucket table + */ + typedef CDS_DEFAULT_ALLOCATOR allocator; + }; + + /// Metafunction converting option list to traits struct + /** + Available \p Options: + - \p opt::hash - mandatory option, specifies hash functor. + - \p opt::item_counter - optional, specifies item counting policy. See \p traits::item_counter + for default type. + - \p opt::allocator - optional, bucket table allocator. Default is \ref CDS_DEFAULT_ALLOCATOR. + */ + template + struct make_traits { + typedef typename cds::opt::make_options< traits, Options...>::type type; ///< Metafunction result + }; + + //@cond + namespace details { + static inline size_t init_hash_bitmask( size_t nMaxItemCount, size_t nLoadFactor ) + { + if ( nLoadFactor == 0 ) + nLoadFactor = 1; + if ( nMaxItemCount == 0 ) + nMaxItemCount = 4; + const size_t nBucketCount = nMaxItemCount / nLoadFactor; + const size_t exp2 = size_t( 1 ) << cds::bitop::MSB( nBucketCount ); + + return ( exp2 < nBucketCount ? 
exp2 * 2 : exp2 ) - 1; + } + + template + struct list_iterator_selector; + + template + struct list_iterator_selector< OrderedList, false> + { + typedef OrderedList * bucket_ptr; + typedef typename OrderedList::iterator type; + }; + + template + struct list_iterator_selector< OrderedList, true> + { + typedef OrderedList const * bucket_ptr; + typedef typename OrderedList::const_iterator type; + }; + + template + class iterator + { + friend class iterator< OrderedList, !IsConst >; + + protected: + typedef OrderedList bucket_type; + typedef typename list_iterator_selector< bucket_type, IsConst>::bucket_ptr bucket_ptr; + typedef typename list_iterator_selector< bucket_type, IsConst>::type list_iterator; + + bucket_ptr m_pCurBucket; + list_iterator m_itList; + bucket_ptr m_pEndBucket; + + void next() + { + if ( m_pCurBucket < m_pEndBucket ) { + if ( ++m_itList != m_pCurBucket->end()) + return; + while ( ++m_pCurBucket < m_pEndBucket ) { + m_itList = m_pCurBucket->begin(); + if ( m_itList != m_pCurBucket->end()) + return; + } + } + m_pCurBucket = m_pEndBucket - 1; + m_itList = m_pCurBucket->end(); + } + + public: + typedef typename list_iterator::value_ptr value_ptr; + typedef typename list_iterator::value_ref value_ref; + + public: + iterator() + : m_pCurBucket( nullptr ) + , m_itList() + , m_pEndBucket( nullptr ) + {} + + iterator( list_iterator const& it, bucket_ptr pFirst, bucket_ptr pLast ) + : m_pCurBucket( pFirst ) + , m_itList( it ) + , m_pEndBucket( pLast ) + { + if ( it == pFirst->end()) + next(); + } + + iterator( iterator const& src ) + : m_pCurBucket( src.m_pCurBucket ) + , m_itList( src.m_itList ) + , m_pEndBucket( src.m_pEndBucket ) + {} + + value_ptr operator ->() const + { + assert( m_pCurBucket != nullptr ); + return m_itList.operator ->(); + } + + value_ref operator *() const + { + assert( m_pCurBucket != nullptr ); + return m_itList.operator *(); + } + + /// Pre-increment + iterator& operator ++() + { + next(); + return *this; + } + + iterator& 
operator = (const iterator& src) + { + m_pCurBucket = src.m_pCurBucket; + m_pEndBucket = src.m_pEndBucket; + m_itList = src.m_itList; + return *this; + } + + bucket_ptr bucket() const + { + return m_pCurBucket != m_pEndBucket ? m_pCurBucket : nullptr; + } + + list_iterator const& underlying_iterator() const + { + return m_itList; + } + + template + bool operator ==(iterator const& i) const + { + return m_pCurBucket == i.m_pCurBucket && m_itList == i.m_itList; + } + template + bool operator !=(iterator const& i ) const + { + return !( *this == i ); + } + }; + } + //@endcond + } // namespace michael_set + + //@cond + // Forward declarations + template + class MichaelHashSet; + //@endcond + +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_MICHAEL_SET_BASE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/node_traits.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/node_traits.h new file mode 100644 index 0000000..958ee6a --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/node_traits.h @@ -0,0 +1,195 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_DETAILS_NODE_TRAITS_H +#define CDSLIB_INTRUSIVE_DETAILS_NODE_TRAITS_H + +#include + +namespace cds { namespace intrusive { + +#ifdef CDS_DOXYGEN_INVOKED + /// Container's node traits + /** @ingroup cds_intrusive_helper + This traits is intended for converting between type \p T of value stored in the intrusive container + and container's node type \p NodeType. + + There are separate specializations for each \p Hook type. 
+ */ + template + struct node_traits + { + typedef T value_type ; ///< Value type + typedef NodeType node_type ; ///< Node type + + /// Convert value reference to node pointer + static node_type * to_node_ptr( value_type& v ); + + /// Convert value pointer to node pointer + static node_type * to_node_ptr( value_type * v ); + + /// Convert value reference to node pointer (const version) + static const node_type * to_node_ptr( value_type const& v ); + + /// Convert value pointer to node pointer (const version) + static const node_type * to_node_ptr( value_type const * v ); + + /// Convert node refernce to value pointer + static value_type * to_value_ptr( node_type& n ); + + /// Convert node pointer to value pointer + static value_type * to_value_ptr( node_type * n ); + + /// Convert node reference to value pointer (const version) + static const value_type * to_value_ptr( node_type const & n ); + + /// Convert node pointer to value pointer (const version) + static const value_type * to_value_ptr( node_type const * n ); + }; + +#else + template + struct node_traits; +#endif + + //@cond + template + struct node_traits + { + typedef T value_type; + typedef NodeType node_type; + + static node_type * to_node_ptr( value_type& v ) + { + return static_cast( &v ); + } + static node_type * to_node_ptr( value_type * v ) + { + return v ? static_cast(v) : nullptr; + } + static const node_type * to_node_ptr( const value_type& v ) + { + return static_cast( &v ); + } + static const node_type * to_node_ptr( const value_type * v ) + { + return v ? static_cast(v) : nullptr; + } + static value_type * to_value_ptr( node_type& n ) + { + return static_cast( &n ); + } + static value_type * to_value_ptr( node_type * n ) + { + return n ? static_cast(n) : nullptr; + } + static const value_type * to_value_ptr( const node_type& n ) + { + return static_cast( &n ); + } + static const value_type * to_value_ptr( const node_type * n ) + { + return n ? 
static_cast(n) : nullptr; + } + }; + + template + struct node_traits + { + typedef T value_type; + typedef NodeType node_type; + + static node_type * to_node_ptr( value_type& v ) + { + return reinterpret_cast( reinterpret_cast(&v) + Hook::c_nMemberOffset ); + } + static node_type * to_node_ptr( value_type * v ) + { + return v ? to_node_ptr( *v ) : nullptr; + } + static const node_type * to_node_ptr( const value_type& v ) + { + return reinterpret_cast( reinterpret_cast(&v) + Hook::c_nMemberOffset ); + } + static const node_type * to_node_ptr( const value_type * v ) + { + return v ? to_node_ptr( *v ) : nullptr; + } + static value_type * to_value_ptr( node_type& n ) + { + return reinterpret_cast( reinterpret_cast(&n) - Hook::c_nMemberOffset ); + } + static value_type * to_value_ptr( node_type * n ) + { + return n ? to_value_ptr( *n ) : nullptr; + } + static const value_type * to_value_ptr( const node_type& n ) + { + return reinterpret_cast( reinterpret_cast(&n) - Hook::c_nMemberOffset ); + } + static const value_type * to_value_ptr( const node_type * n ) + { + return n ? to_value_ptr( *n ) : nullptr; + } + }; + + template + struct node_traits: public Hook::node_traits + {}; + //@endcond + + /// Node traits selector metafunction + /** @ingroup cds_intrusive_helper + The metafunction selects appropriate \ref node_traits specialization based on value type \p T, node type \p NodeType, and hook type \p Hook. 
+ */ + template + struct get_node_traits + { + //@cond + typedef node_traits type; + //@endcond + }; + + //@cond + /// Functor converting container's node type to value type + //TODO: delete + template + struct node_to_value { + typename Container::value_type * operator()( typename Container::node_type * p ) const + { + typedef typename Container::node_traits node_traits; + return node_traits::to_value_ptr( p ); + } + }; + //@endcond + +}} // namespace cds::intrusuve + +#endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_NODE_TRAITS_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/raw_ptr_disposer.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/raw_ptr_disposer.h new file mode 100644 index 0000000..eda2da5 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/raw_ptr_disposer.h @@ -0,0 +1,103 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_DETAILS_RAW_PTR_DISPOSER_H +#define CDSLIB_INTRUSIVE_DETAILS_RAW_PTR_DISPOSER_H + +#include + +//@cond +namespace cds { namespace intrusive { namespace details { + + template + struct raw_ptr_disposer + { + typedef RCU gc; + typedef NodeType node_type; + typedef Disposer disposer; + + node_type * pReclaimedChain; + + raw_ptr_disposer() + : pReclaimedChain( nullptr ) + {} + + template + explicit raw_ptr_disposer( Position& pos ) + : pReclaimedChain( pos.pDelChain ) + { + pos.pDelChain = nullptr; + } + + raw_ptr_disposer( raw_ptr_disposer&& d ) + : pReclaimedChain( d.pReclaimedChain ) + { + d.pReclaimedChain = nullptr; + } + + raw_ptr_disposer( raw_ptr_disposer const& ) = delete; + + ~raw_ptr_disposer() + { + apply(); + } + + raw_ptr_disposer& combine(raw_ptr_disposer&& d) + { + if ( pReclaimedChain == nullptr ) + pReclaimedChain = d.pReclaimedChain; + else if ( d.pReclaimedChain ) { + // union reclaimed chains + node_type * pEnd = d.pReclaimedChain; + for ( ; pEnd->m_pDelChain; pEnd = pEnd->m_pDelChain ); + pEnd->m_pDelChain = pReclaimedChain; + pReclaimedChain = d.pReclaimedChain; + } + d.pReclaimedChain = nullptr; + return *this; + } + + raw_ptr_disposer& operator=(raw_ptr_disposer const& d) = delete; + raw_ptr_disposer& operator=( raw_ptr_disposer&& d ) = delete; + + void apply() + { + if ( pReclaimedChain ) { + assert( !gc::is_locked()); + disposer()( pReclaimedChain ); + pReclaimedChain = nullptr; + } + } + }; + 
+}}} // namespace cds::intrusive::details +//@endcond + +#endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_RAW_PTR_DISPOSER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/single_link_struct.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/single_link_struct.h new file mode 100644 index 0000000..014246f --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/single_link_struct.h @@ -0,0 +1,196 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_DETAILS_SINGLE_LINK_STRUCT_H +#define CDSLIB_INTRUSIVE_DETAILS_SINGLE_LINK_STRUCT_H + +#include +#include +#include + +namespace cds { namespace intrusive { + + /// Definitions common for single-linked data structures + /** @ingroup cds_intrusive_helper + */ + namespace single_link { + + /// Container's node + /** + Template parameters: + - GC - garbage collector used + - Tag - a tag used to distinguish between different implementation + */ + template + struct node + { + typedef GC gc ; ///< Garbage collector + typedef Tag tag ; ///< tag + + typedef typename gc::template atomic_ref atomic_node_ptr; ///< atomic pointer + + /// Rebind node for other template parameters + template + struct rebind { + typedef node other ; ///< Rebinding result + }; + + atomic_node_ptr m_pNext ; ///< pointer to the next node in the container + + node() noexcept + { + m_pNext.store( nullptr, atomics::memory_order_release ); + } + }; + + //@cond + struct default_hook { + typedef cds::gc::default_gc gc; + typedef opt::none tag; + }; + //@endcond + + //@cond + template < typename HookType, typename... Options> + struct hook + { + typedef typename opt::make_options< default_hook, Options...>::type options; + typedef typename options::gc gc; + typedef typename options::tag tag; + typedef node node_type; + typedef HookType hook_type; + }; + //@endcond + + /// Base hook + /** + \p Options are: + - opt::gc - garbage collector used. 
+ - opt::tag - tag + */ + template < typename... Options > + struct base_hook: public hook< opt::base_hook_tag, Options... > + {}; + + /// Member hook + /** + \p MemberOffset defines offset in bytes of \ref node member into your structure. + Use \p offsetof macro to define \p MemberOffset + + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - tag + */ + template < size_t MemberOffset, typename... Options > + struct member_hook: public hook< opt::member_hook_tag, Options... > + { + //@cond + static const size_t c_nMemberOffset = MemberOffset; + //@endcond + }; + + /// Traits hook + /** + \p NodeTraits defines type traits for node. + See \ref node_traits for \p NodeTraits interface description + + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - tag + */ + template + struct traits_hook: public hook< opt::traits_hook_tag, Options... > + { + //@cond + typedef NodeTraits node_traits; + //@endcond + }; + + /// Check link + template + struct link_checker { + //@cond + typedef Node node_type; + //@endcond + + /// Checks if the link field of node \p pNode is \p nullptr + /** + An asserting is generated if \p pNode link field is not \p nullptr + */ + static void is_empty( const node_type * pNode ) + { + assert( pNode->m_pNext.load( atomics::memory_order_relaxed ) == nullptr ); + CDS_UNUSED( pNode ); + } + }; + + //@cond + template + struct link_checker_selector; + + template + struct link_checker_selector< GC, Node, opt::never_check_link > + { + typedef intrusive::opt::v::empty_link_checker type; + }; + + template + struct link_checker_selector< GC, Node, opt::debug_check_link > + { +# ifdef _DEBUG + typedef link_checker type; +# else + typedef intrusive::opt::v::empty_link_checker type; +# endif + }; + + template + struct link_checker_selector< GC, Node, opt::always_check_link > + { + typedef link_checker type; + }; + //@endcond + + /// Metafunction for selecting appropriate link checking policy + template < typename Node, 
opt::link_check_type LinkType > + struct get_link_checker + { + //@cond + typedef typename link_checker_selector< typename Node::gc, Node, LinkType>::type type; + //@endcond + }; + + } // namespace single_link + +}} // namespace cds::intrusive + + + +#endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_SINGLE_LINK_STRUCT_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/skip_list_base.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/skip_list_base.h new file mode 100644 index 0000000..89371d5 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/skip_list_base.h @@ -0,0 +1,784 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_DETAILS_SKIP_LIST_BASE_H +#define CDSLIB_INTRUSIVE_DETAILS_SKIP_LIST_BASE_H + +#include +#include +#include +#include +#include + +namespace cds { namespace intrusive { + /// SkipListSet related definitions + /** @ingroup cds_intrusive_helper + */ + namespace skip_list { + /// The maximum possible height of any skip-list + static unsigned int const c_nHeightLimit = 32; + + /// Skip list node + /** + Template parameters: + - \p GC - garbage collector + - \p Tag - a \ref cds_intrusive_hook_tag "tag" + */ + template + class node + { + public: + typedef GC gc; ///< Garbage collector + typedef Tag tag; ///< tag + + typedef cds::details::marked_ptr marked_ptr; ///< marked pointer + typedef typename gc::template atomic_marked_ptr< marked_ptr> atomic_marked_ptr; ///< atomic marked pointer specific for GC + //@cond + typedef atomic_marked_ptr tower_item_type; + + //@endcond + + protected: + //@cond + atomic_marked_ptr m_pNext; ///< Next item in bottom-list (list at level 0) + unsigned int m_nHeight; ///< Node height (size of \p m_arrNext array). For node at level 0 the height is 1. + atomic_marked_ptr * m_arrNext; ///< Array of next items for levels 1 .. m_nHeight - 1. 
For node at level 0 \p m_arrNext is \p nullptr + atomics::atomic m_nUnlink; ///< Unlink helper + //@endcond + + public: + node() + : m_pNext( nullptr ) + , m_nHeight( 1 ) + , m_arrNext( nullptr ) + { + m_nUnlink.store( 1, atomics::memory_order_release ); + } + + + /// Constructs a node's tower of height \p nHeight + void make_tower( unsigned int nHeight, atomic_marked_ptr * nextTower ) + { + assert( nHeight > 0 ); + assert( (nHeight == 1 && nextTower == nullptr) // bottom-list node + || (nHeight > 1 && nextTower != nullptr) // node at level of more than 0 + ); + + m_arrNext = nextTower; + m_nHeight = nHeight; + m_nUnlink.store( nHeight, atomics::memory_order_release ); + } + + //@cond + atomic_marked_ptr * release_tower() + { + atomic_marked_ptr * pTower = m_arrNext; + m_arrNext = nullptr; + m_nHeight = 1; + return pTower; + } + + atomic_marked_ptr * get_tower() const + { + return m_arrNext; + } + + bool has_tower() const + { + return m_nHeight > 1; + } + //@endcond + + /// Access to element of next pointer array + atomic_marked_ptr& next( unsigned int nLevel ) + { + assert( nLevel < height()); + assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr)); + + if ( nLevel ) { + // TSan: data race between m_arrNext[ nLevel - 1 ] and make_tower() + // In fact, m_arrNext is a const array that is never changed + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &m_arrNext[ nLevel - 1 ] ); + return m_arrNext[nLevel - 1]; + } + return m_pNext; + } + + /// Access to element of next pointer array (const version) + atomic_marked_ptr const& next( unsigned int nLevel ) const + { + assert( nLevel < height()); + assert( nLevel == 0 || nLevel > 0 && m_arrNext != nullptr ); + + if ( nLevel ) { + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &m_arrNext[nLevel - 1] ); + return m_arrNext[nLevel - 1]; + } + return m_pNext; + } + + /// Access to element of next pointer array (synonym for \p next() function) + atomic_marked_ptr& operator[]( unsigned int nLevel ) + { + return next( nLevel ); + } + + /// Access to 
element of next pointer array (synonym for \p next() function) + atomic_marked_ptr const& operator[]( unsigned int nLevel ) const + { + return next( nLevel ); + } + + /// Height of the node + unsigned int height() const + { + return m_nHeight; + } + + /// Clears internal links + void clear() + { + assert( m_arrNext == nullptr ); + m_pNext.store( marked_ptr(), atomics::memory_order_release ); + } + + //@cond + bool is_cleared() const + { + return m_pNext == atomic_marked_ptr() + && m_arrNext == nullptr + && m_nHeight <= 1; + } + + bool level_unlinked( unsigned nCount = 1 ) + { + return m_nUnlink.fetch_sub( nCount, atomics::memory_order_relaxed ) == 1; + } + + bool is_upper_level( unsigned nLevel ) const + { + return m_nUnlink.load( atomics::memory_order_relaxed ) == nLevel + 1; + } + //@endcond + }; + + //@cond + struct undefined_gc; + struct default_hook { + typedef undefined_gc gc; + typedef opt::none tag; + }; + //@endcond + + //@cond + template < typename HookType, typename... Options> + struct hook + { + typedef typename opt::make_options< default_hook, Options...>::type options; + typedef typename options::gc gc; + typedef typename options::tag tag; + typedef node node_type; + typedef HookType hook_type; + }; + //@endcond + + /// Base hook + /** + \p Options are: + - \p opt::gc - garbage collector + - \p opt::tag - a \ref cds_intrusive_hook_tag "tag" + */ + template < typename... Options > + struct base_hook: public hook< opt::base_hook_tag, Options... > + {}; + + /// Member hook + /** + \p MemberOffset defines offset in bytes of \ref node member into your structure. + Use \p offsetof macro to define \p MemberOffset + + \p Options are: + - \p opt::gc - garbage collector + - \p opt::tag - a \ref cds_intrusive_hook_tag "tag" + */ + template < size_t MemberOffset, typename... Options > + struct member_hook: public hook< opt::member_hook_tag, Options... 
> + { + //@cond + static const size_t c_nMemberOffset = MemberOffset; + //@endcond + }; + + /// Traits hook + /** + \p NodeTraits defines type traits for node. + See \ref node_traits for \p NodeTraits interface description + + \p Options are: + - \p opt::gc - garbage collector + - \p opt::tag - a \ref cds_intrusive_hook_tag "tag" + */ + template + struct traits_hook: public hook< opt::traits_hook_tag, Options... > + { + //@cond + typedef NodeTraits node_traits; + //@endcond + }; + + /// Option specifying random level generator + /** + The random level generator is an important part of skip-list algorithm. + The node height in the skip-list have a probabilistic distribution + where half of the nodes that have level \p i pointers also have level i+1 pointers + (i = 0..30). + The random level generator should provide such distribution. + + The \p Type functor interface is: + \code + struct random_generator { + static unsigned int const c_nUpperBound = 32; + random_generator(); + unsigned int operator()(); + }; + \endcode + + where + - \p c_nUpperBound - constant that specifies the upper bound of random number generated. + The generator produces a number from range [0 .. c_nUpperBound) (upper bound excluded). + \p c_nUpperBound must be no more than 32. + - random_generator() - the constructor of generator object initialises the generator instance (its internal state). + - unsigned int operator()() - the main generating function. Returns random level from range [0 .. c_nUpperBound - 1] + + + Stateful generators are supported. + + Available \p Type implementations: + - \p skip_list::xor_shift + - \p skip_list::turbo + */ + template + struct random_level_generator { + //@cond + template + struct pack: public Base + { + typedef Type random_level_generator; + }; + //@endcond + }; + + /// Xor-shift random level generator + /** + The simplest of the generators described in George Marsaglia's "Xorshift RNGs" paper. 
+ This is not a high-quality generator but is acceptable for skip-list. + + The random generator should return numbers from range [0 .. MaxHeight - 1]. + + From Doug Lea's ConcurrentSkipListMap.java. + */ + template + class xor_shift { + //@cond + atomics::atomic m_nSeed; + + static_assert( MaxHeight > 1, "MaxHeight" ); + static_assert( MaxHeight <= c_nHeightLimit, "MaxHeight is too large" ); + static unsigned int const c_nBitMask = (1u << ( MaxHeight - 1 )) - 1; + //@endcond + + public: + /// The upper bound of generator's return value. The generator produces random number in range [0..c_nUpperBound) + static unsigned int const c_nUpperBound = MaxHeight; + + /// Initializes the generator instance + xor_shift() + { + m_nSeed.store( (unsigned int) cds::OS::Timer::random_seed(), atomics::memory_order_relaxed ); + } + + /// Main generator function + unsigned int operator()() + { + /* ConcurrentSkipListMap.java + private int randomLevel() { + int x = randomSeed; + x ^= x << 13; + x ^= x >>> 17; + randomSeed = x ^= x << 5; + if ((x & 0x80000001) != 0) // test highest and lowest bits + return 0; + int level = 1; + while (((x >>>= 1) & 1) != 0) ++level; + return level; + } + */ + unsigned int x = m_nSeed.load( atomics::memory_order_relaxed ); + x ^= x << 13; + x ^= x >> 17; + x ^= x << 5; + m_nSeed.store( x, atomics::memory_order_relaxed ); + unsigned int nLevel = ((x & 0x00000001) != 0) ? 0 : cds::bitop::LSB( (~(x >> 1)) & c_nBitMask ); + + assert( nLevel < c_nUpperBound ); + return nLevel; + } + }; + + /// Xor-shift random level generator, max height 32 + typedef xor_shift xorshift32; + + //@cond + // For backward compatibility + typedef xorshift32 xorshift; + //@endcond + + /// \ref xor_shift generator, max height 24 + typedef xor_shift< 24 > xorshift24; + + /// \ref xor_shift generator, max height = 16 + typedef xor_shift< 16 > xorshift16; + + /// Turbo-pascal random level generator + /** + This uses a cheap pseudo-random function that was used in Turbo Pascal. 
+ + The random generator should return numbers from range [0..31]. + + From Doug Lea's ConcurrentSkipListMap.java. + */ + template + class turbo + { + //@cond + atomics::atomic m_nSeed; + + static_assert( MaxHeight > 1, "MaxHeight" ); + static_assert( MaxHeight <= c_nHeightLimit, "MaxHeight is too large" ); + static unsigned int const c_nBitMask = (1u << ( MaxHeight - 1 )) - 1; + //@endcond + public: + /// The upper bound of generator's return value. The generator produces random number in range [0..c_nUpperBound) + static unsigned int const c_nUpperBound = MaxHeight; + + /// Initializes the generator instance + turbo() + { + m_nSeed.store( (unsigned int) cds::OS::Timer::random_seed(), atomics::memory_order_relaxed ); + } + + /// Main generator function + unsigned int operator()() + { + /* + private int randomLevel() { + int level = 0; + int r = randomSeed; + randomSeed = r * 134775813 + 1; + if (r < 0) { + while ((r <<= 1) > 0) + ++level; + } + return level; + } + */ + /* + The low bits are apparently not very random (the original used only + upper 16 bits) so we traverse from highest bit down (i.e., test + sign), thus hardly ever use lower bits. + */ + unsigned int x = m_nSeed.load( atomics::memory_order_relaxed ) * 134775813 + 1; + m_nSeed.store( x, atomics::memory_order_relaxed ); + unsigned int nLevel = ( x & 0x80000000 ) ? 
( c_nUpperBound - 1 - cds::bitop::MSBnz( (x & c_nBitMask ) | 1 )) : 0; + + assert( nLevel < c_nUpperBound ); + return nLevel; + } + }; + + /// Turbo-Pascal random level generator, max height 32 + typedef turbo turbo32; + + //@cond + // For backward compatibility + typedef turbo32 turbo_pascal; + //@endcond + + /// Turbo-Pascal generator, max height 24 + typedef turbo< 24 > turbo24; + + /// Turbo-Pascal generator, max height 16 + typedef turbo< 16 > turbo16; + + /// \p SkipListSet internal statistics + template + struct stat { + typedef EventCounter event_counter ; ///< Event counter type + + event_counter m_nNodeHeightAdd[c_nHeightLimit] ; ///< Count of added node of each height + event_counter m_nNodeHeightDel[c_nHeightLimit] ; ///< Count of deleted node of each height + event_counter m_nInsertSuccess ; ///< Count of success insertion + event_counter m_nInsertFailed ; ///< Count of failed insertion + event_counter m_nInsertRetries ; ///< Count of unsuccessful retries of insertion + event_counter m_nUpdateExist ; ///< Count of \p update() call for existed node + event_counter m_nUpdateNew ; ///< Count of \p update() call for new node + event_counter m_nUnlinkSuccess ; ///< Count of successful call of \p unlink + event_counter m_nUnlinkFailed ; ///< Count of failed call of \p unlink + event_counter m_nEraseSuccess ; ///< Count of successful call of \p erase + event_counter m_nEraseFailed ; ///< Count of failed call of \p erase + event_counter m_nEraseRetry ; ///< Count of retries while erasing node + event_counter m_nFindFastSuccess ; ///< Count of successful call of \p find and all derivatives (via fast-path) + event_counter m_nFindFastFailed ; ///< Count of failed call of \p find and all derivatives (via fast-path) + event_counter m_nFindSlowSuccess ; ///< Count of successful call of \p find and all derivatives (via slow-path) + event_counter m_nFindSlowFailed ; ///< Count of failed call of \p find and all derivatives (via slow-path) + event_counter 
m_nRenewInsertPosition ; ///< Count of renewing position events while inserting + event_counter m_nLogicDeleteWhileInsert; ///< Count of events "The node has been logically deleted while inserting" + event_counter m_nRemoveWhileInsert ; ///< Count of evnts "The node is removing while inserting" + event_counter m_nFastErase ; ///< Fast erase event counter + event_counter m_nFastExtract ; ///< Fast extract event counter + event_counter m_nSlowErase ; ///< Slow erase event counter + event_counter m_nSlowExtract ; ///< Slow extract event counter + event_counter m_nExtractSuccess ; ///< Count of successful call of \p extract + event_counter m_nExtractFailed ; ///< Count of failed call of \p extract + event_counter m_nExtractRetries ; ///< Count of retries of \p extract call + event_counter m_nExtractMinSuccess ; ///< Count of successful call of \p extract_min + event_counter m_nExtractMinFailed ; ///< Count of failed call of \p extract_min + event_counter m_nExtractMinRetries ; ///< Count of retries of \p extract_min call + event_counter m_nExtractMaxSuccess ; ///< Count of successful call of \p extract_max + event_counter m_nExtractMaxFailed ; ///< Count of failed call of \p extract_max + event_counter m_nExtractMaxRetries ; ///< Count of retries of \p extract_max call + event_counter m_nEraseWhileFind ; ///< Count of erased item while searching + event_counter m_nExtractWhileFind ; ///< Count of extracted item while searching (RCU only) + event_counter m_nMarkFailed ; ///< Count of failed node marking (logical deletion mark) + event_counter m_nEraseContention ; ///< Count of key erasing contention encountered + + //@cond + void onAddNode( unsigned int nHeight ) + { + assert( nHeight > 0 && nHeight <= sizeof(m_nNodeHeightAdd) / sizeof(m_nNodeHeightAdd[0])); + ++m_nNodeHeightAdd[nHeight - 1]; + } + void onRemoveNode( unsigned int nHeight ) + { + assert( nHeight > 0 && nHeight <= sizeof(m_nNodeHeightDel) / sizeof(m_nNodeHeightDel[0])); + ++m_nNodeHeightDel[nHeight - 1]; 
+ } + + void onInsertSuccess() { ++m_nInsertSuccess ; } + void onInsertFailed() { ++m_nInsertFailed ; } + void onInsertRetry() { ++m_nInsertRetries ; } + void onUpdateExist() { ++m_nUpdateExist ; } + void onUpdateNew() { ++m_nUpdateNew ; } + void onUnlinkSuccess() { ++m_nUnlinkSuccess ; } + void onUnlinkFailed() { ++m_nUnlinkFailed ; } + void onEraseSuccess() { ++m_nEraseSuccess ; } + void onEraseFailed() { ++m_nEraseFailed ; } + void onEraseRetry() { ++m_nEraseRetry; } + void onFindFastSuccess() { ++m_nFindFastSuccess ; } + void onFindFastFailed() { ++m_nFindFastFailed ; } + void onFindSlowSuccess() { ++m_nFindSlowSuccess ; } + void onFindSlowFailed() { ++m_nFindSlowFailed ; } + void onEraseWhileFind() { ++m_nEraseWhileFind ; } + void onExtractWhileFind() { ++m_nExtractWhileFind ; } + void onRenewInsertPosition() { ++m_nRenewInsertPosition; } + void onLogicDeleteWhileInsert() { ++m_nLogicDeleteWhileInsert; } + void onRemoveWhileInsert() { ++m_nRemoveWhileInsert; } + void onFastErase() { ++m_nFastErase; } + void onFastExtract() { ++m_nFastExtract; } + void onSlowErase() { ++m_nSlowErase; } + void onSlowExtract() { ++m_nSlowExtract; } + void onExtractSuccess() { ++m_nExtractSuccess; } + void onExtractFailed() { ++m_nExtractFailed; } + void onExtractRetry() { ++m_nExtractRetries; } + void onExtractMinSuccess() { ++m_nExtractMinSuccess; } + void onExtractMinFailed() { ++m_nExtractMinFailed; } + void onExtractMinRetry() { ++m_nExtractMinRetries; } + void onExtractMaxSuccess() { ++m_nExtractMaxSuccess; } + void onExtractMaxFailed() { ++m_nExtractMaxFailed; } + void onExtractMaxRetry() { ++m_nExtractMaxRetries; } + void onMarkFailed() { ++m_nMarkFailed; } + void onEraseContention() { ++m_nEraseContention; } + //@endcond + }; + + /// \p SkipListSet empty internal statistics + struct empty_stat { + //@cond + void onAddNode( unsigned int /*nHeight*/ ) const {} + void onRemoveNode( unsigned int /*nHeight*/ ) const {} + void onInsertSuccess() const {} + void onInsertFailed() 
const {} + void onInsertRetry() const {} + void onUpdateExist() const {} + void onUpdateNew() const {} + void onUnlinkSuccess() const {} + void onUnlinkFailed() const {} + void onEraseSuccess() const {} + void onEraseFailed() const {} + void onEraseRetry() const {} + void onFindFastSuccess() const {} + void onFindFastFailed() const {} + void onFindSlowSuccess() const {} + void onFindSlowFailed() const {} + void onEraseWhileFind() const {} + void onExtractWhileFind() const {} + void onRenewInsertPosition() const {} + void onLogicDeleteWhileInsert() const {} + void onRemoveWhileInsert() const {} + void onFastErase() const {} + void onFastExtract() const {} + void onSlowErase() const {} + void onSlowExtract() const {} + void onExtractSuccess() const {} + void onExtractFailed() const {} + void onExtractRetry() const {} + void onExtractMinSuccess() const {} + void onExtractMinFailed() const {} + void onExtractMinRetry() const {} + void onExtractMaxSuccess() const {} + void onExtractMaxFailed() const {} + void onExtractMaxRetry() const {} + void onMarkFailed() const {} + void onEraseContention() const {} + //@endcond + }; + + //@cond + // For internal use only!!! + template + struct internal_node_builder { + template + struct pack: public Base + { + typedef Type internal_node_builder; + }; + }; + //@endcond + + /// \p SkipListSet traits + struct traits + { + /// Hook used + /** + Possible values are: \p skip_list::base_hook, \p skip_list::member_hook, \p skip_list::traits_hook. + */ + typedef base_hook<> hook; + + /// Key comparison functor + /** + No default functor is provided. If the option is not specified, the \p less is used. + */ + typedef opt::none compare; + + /// specifies binary predicate used for key compare. + /** + Default is \p std::less. + */ + typedef opt::none less; + + /// Disposer + /** + The functor used for dispose removed items. Default is \p opt::v::empty_disposer. 
+ */ + typedef opt::v::empty_disposer disposer; + + /// Item counter + /** + The type for item counting feature. + By default, item counting is disabled (\p atomicity::empty_item_counter), + \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter enables it. + */ + typedef atomicity::empty_item_counter item_counter; + + /// C++ memory ordering model + /** + List of available memory ordering see \p opt::memory_model + */ + typedef opt::v::relaxed_ordering memory_model; + + /// Random level generator + /** + The random level generator is an important part of skip-list algorithm. + The node height in the skip-list have a probabilistic distribution + where half of the nodes that have level \p i pointers also have level i+1 pointers + (i = 0..30). So, the height of a node is in range [0..31]. + + See \p skip_list::random_level_generator option setter. + */ + typedef turbo32 random_level_generator; + + /// Allocator + /** + Although the skip-list is an intrusive container, + an allocator should be provided to maintain variable randomly-calculated height of the node + since the node can contain up to 32 next pointers. + The allocator specified is used to allocate an array of next pointers + for nodes which height is more than 1. + */ + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// back-off strategy + /** + If the option is not specified, the \p cds::backoff::Default is used. + */ + typedef cds::backoff::Default back_off; + + /// Internal statistics + /** + By default, internal statistics is disabled (\p skip_list::empty_stat). + Use \p skip_list::stat to enable it. + */ + typedef empty_stat stat; + + /// RCU deadlock checking policy (only for \ref cds_intrusive_SkipListSet_rcu "RCU-based SkipListSet") + /** + List of available options see \p opt::rcu_check_deadlock + */ + typedef opt::v::rcu_throw_deadlock rcu_check_deadlock; + + //@cond + // For internal use only!!! 
+ typedef opt::none internal_node_builder; + //@endcond + }; + + /// Metafunction converting option list to \p SkipListSet traits + /** + \p Options are: + - \p opt::hook - hook used. Possible values are: \p skip_list::base_hook, \p skip_list::member_hook, \p skip_list::traits_hook. + If the option is not specified, skip_list::base_hook<> and \p gc::HP is used. + - \p opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the \p opt::less is used. + - \p opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - \p opt::disposer - the functor used for dispose removed items. Default is \p opt::v::empty_disposer. Due the nature + of GC schema the disposer may be called asynchronously. + - \p opt::item_counter - the type of item counting feature. Default is disabled, i.e. \p atomicity::empty_item_counter. + To enable it use \p atomicity::item_counter or \p atomicity::cache_friendly_item_counter + - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + - \p skip_list::random_level_generator - random level generator. Can be \p skip_list::xor_shift, + \p skip_list::turbo32 (the default) or user-provided one. + See \p skip_list::random_level_generator option description for explanation. + - \p opt::allocator - although the skip-list is an intrusive container, + an allocator should be provided to maintain variable randomly-calculated height of the node + since the node can contain up to 32 next pointers. The allocator option is used to allocate an array of next pointers + for nodes which height is more than 1. Default is \ref CDS_DEFAULT_ALLOCATOR. + - \p opt::back_off - back-off strategy, default is \p cds::backoff::Default. + - \p opt::stat - internal statistics. By default, it is disabled (\p skip_list::empty_stat). 
+ To enable it use \p skip_list::stat + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + ,Options... + >::type type; +# endif + }; + + //@cond + namespace details { + template + class head_node: public Node + { + typedef Node node_type; + typename node_type::atomic_marked_ptr m_Tower[skip_list::c_nHeightLimit]; + + public: + head_node( unsigned int nHeight ) + { + for ( size_t i = 0; i < sizeof(m_Tower) / sizeof(m_Tower[0]); ++i ) + m_Tower[i].store( typename node_type::marked_ptr(), atomics::memory_order_relaxed ); + + node_type::make_tower( nHeight, m_Tower ); + } + + node_type * head() const + { + return const_cast( static_cast(this)); + } + }; + + template + struct intrusive_node_builder + { + typedef NodeType node_type; + typedef AtomicNodePtr atomic_node_ptr; + typedef Alloc allocator_type; + + typedef cds::details::Allocator< atomic_node_ptr, allocator_type > tower_allocator; + + template + static node_type * make_tower( node_type * pNode, RandomGen& gen ) + { + return make_tower( pNode, gen() + 1 ); + } + + static node_type * make_tower( node_type * pNode, unsigned int nHeight ) + { + if ( nHeight > 1 ) + pNode->make_tower( nHeight, tower_allocator().NewArray( nHeight - 1, nullptr )); + return pNode; + } + + static void dispose_tower( node_type * pNode ) + { + unsigned int nHeight = pNode->height(); + if ( nHeight > 1 ) + tower_allocator().Delete( pNode->release_tower(), nHeight ); + } + + struct node_disposer { + void operator()( node_type * pNode ) + { + dispose_tower( pNode ); + } + }; + }; + + // Forward declaration + template + class iterator; + + } // namespace details + //@endcond + + } // namespace skip_list + + // Forward declaration + template + class SkipListSet; + +}} // namespace cds::intrusive + +#endif // #ifndef 
CDSLIB_INTRUSIVE_DETAILS_SKIP_LIST_BASE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/split_list_base.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/split_list_base.h new file mode 100644 index 0000000..8f2f0ed --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/details/split_list_base.h @@ -0,0 +1,1326 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H +#define CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + /// Split-ordered list related definitions + /** @ingroup cds_intrusive_helper + */ + namespace split_list { + //@cond + struct hash_node + { + size_t m_nHash; ///< Hash value for node + + /// Default constructor + hash_node() + : m_nHash( 0 ) + { + assert( is_dummy()); + } + + /// Initializes dummy node with \p nHash value + explicit hash_node( size_t nHash ) + : m_nHash( nHash ) + { + assert( is_dummy()); + } + + /// Checks if the node is dummy node + bool is_dummy() const + { + return (m_nHash & 1) == 0; + } + }; + //@endcond + + /// Split-ordered list node + /** + Template parameter: + - \p OrderedListNode - node type for underlying ordered list + */ + template + struct node: public OrderedListNode, public hash_node + { + //@cond + typedef OrderedListNode base_class; + //@endcond + + /// Default constructor + node() + : hash_node(0) + { + assert( is_dummy()); + } + + /// Initializes dummy node with \p nHash value + explicit node( size_t nHash ) + : hash_node( nHash ) + { + assert( is_dummy()); + } + + /// Checks if the node is dummy node + bool is_dummy() const + { + return hash_node::is_dummy(); + } + }; + + //@cond + // for IterableList + template <> + struct node: public hash_node + { + // Default ctor + node() + : hash_node( 0 ) + { + assert( is_dummy()); + } + + /// Initializes dummy node with \p nHash value + explicit node( size_t nHash ) + : hash_node( nHash ) + { + assert( is_dummy()); + } + + /// Checks if the node is dummy node + bool is_dummy() const + { + return hash_node::is_dummy(); + } + }; + //@endcond + + /// \p SplitListSet internal statistics. May be used for debugging or profiling + /** + Template argument \p Counter defines type of counter, default is \p cds::atomicity::event_counter. 
+ */ + template + struct stat + { + typedef Counter counter_type; ///< Counter type + + counter_type m_nInsertSuccess; ///< Count of success inserting + counter_type m_nInsertFailed; ///< Count of failed inserting + counter_type m_nUpdateNew; ///< Count of new item created by \p ensure() member function + counter_type m_nUpdateExist; ///< Count of \p ensure() call for existing item + counter_type m_nEraseSuccess; ///< Count of success erasing of items + counter_type m_nEraseFailed; ///< Count of attempts to erase unknown item + counter_type m_nExtractSuccess; ///< Count of success extracting of items + counter_type m_nExtractFailed; ///< Count of attempts to extract unknown item + counter_type m_nFindSuccess; ///< Count of success finding + counter_type m_nFindFailed; ///< Count of failed finding + counter_type m_nHeadNodeAllocated; ///< Count of allocated head node + counter_type m_nHeadNodeFreed; ///< Count of freed head node + counter_type m_nBucketCount; ///< Current bucket count + counter_type m_nInitBucketRecursive; ///< Count of recursive bucket initialization + counter_type m_nInitBucketContention; ///< Count of bucket init contention encountered + counter_type m_nBusyWaitBucketInit; ///< Count of busy wait cycle while a bucket is initialized + counter_type m_nBucketsExhausted; ///< Count of failed bucket allocation + + //@cond + void onInsertSuccess() { ++m_nInsertSuccess; } + void onInsertFailed() { ++m_nInsertFailed; } + void onUpdateNew() { ++m_nUpdateNew; } + void onUpdateExist() { ++m_nUpdateExist; } + void onEraseSuccess() { ++m_nEraseSuccess; } + void onEraseFailed() { ++m_nEraseFailed; } + void onExtractSuccess() { ++m_nExtractSuccess; } + void onExtractFailed() { ++m_nExtractFailed; } + void onFindSuccess() { ++m_nFindSuccess; } + void onFindFailed() { ++m_nFindFailed; } + bool onFind(bool bSuccess) + { + if ( bSuccess ) + onFindSuccess(); + else + onFindFailed(); + return bSuccess; + } + void onHeadNodeAllocated() { ++m_nHeadNodeAllocated; } + 
void onHeadNodeFreed() { ++m_nHeadNodeFreed; } + void onNewBucket() { ++m_nBucketCount; } + void onRecursiveInitBucket() { ++m_nInitBucketRecursive; } + void onBucketInitContenton() { ++m_nInitBucketContention; } + void onBusyWaitBucketInit() { ++m_nBusyWaitBucketInit; } + void onBucketsExhausted() { ++m_nBucketsExhausted; } + //@endcond + }; + + /// Dummy queue statistics - no counting is performed, no overhead. Support interface like \p split_list::stat + struct empty_stat { + //@cond + void onInsertSuccess() const {} + void onInsertFailed() const {} + void onUpdateNew() const {} + void onUpdateExist() const {} + void onEraseSuccess() const {} + void onEraseFailed() const {} + void onExtractSuccess() const {} + void onExtractFailed() const {} + void onFindSuccess() const {} + void onFindFailed() const {} + bool onFind( bool bSuccess ) const { return bSuccess; } + void onHeadNodeAllocated() const {} + void onHeadNodeFreed() const {} + void onNewBucket() const {} + void onRecursiveInitBucket() const {} + void onBucketInitContenton() const {} + void onBusyWaitBucketInit() const {} + void onBucketsExhausted() const {} + //@endcond + }; + + /// Option to control bit reversal algorithm + /** + Bit reversal is a significant part of split-list. + \p Type can be one of predefined algorithm in \p cds::algo::bit_reversal namespace. + */ + template + struct bit_reversal { + //@cond + template + struct pack: public Base + { + typedef Type bit_reversal; + }; + //@endcond + }; + + /// SplitListSet traits + struct traits + { + /// Hash function + /** + Hash function converts the key fields of struct \p T stored in the split list + into hash value of type \p size_t that is an index in hash table. + By default, \p std::hash is used. + */ + typedef opt::none hash; + + /// Bit reversal algorithm + /** + Bit reversal is a significant part of split-list. 
+ There are several predefined algorithm in \p cds::algo::bit_reversal namespace, + \p cds::algo::bit_reversal::lookup is the best general purpose one. + + There are more efficient bit reversal algoritm for particular processor architecture, + for example, based on x86 SIMD/AVX instruction set, see here + */ + typedef cds::algo::bit_reversal::lookup bit_reversal; + + /// Item counter + /** + The item counting is an important part of \p SplitListSet algorithm: + the empty() member function depends on correct item counting. + Therefore, \p cds::atomicity::empty_item_counter is not allowed as a type of the option. + + Default is \p cds::atomicity::item_counter; to avoid false sharing you may use \p atomicity::cache_friendly_item_counter + */ + typedef cds::atomicity::item_counter item_counter; + + /// Bucket table allocator + /** + Allocator for bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR + */ + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// Internal statistics (by default, disabled) + /** + Possible statistics types are: \p split_list::stat (enable internal statistics), + \p split_list::empty_stat (the default, internal statistics disabled), + user-provided class that supports \p %split_list::stat interface. + */ + typedef split_list::empty_stat stat; + + + /// C++ memory ordering model + /** + Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + */ + typedef opt::v::relaxed_ordering memory_model; + + /// What type of bucket table is used + /** + \p true - use \p split_list::expandable_bucket_table that can be expanded + if the load factor of the set is exhausted. + \p false - use \p split_list::static_bucket_table that cannot be expanded + and is allocated in \p SplitListSet constructor. + + Default is \p true. 
+ */ + static const bool dynamic_bucket_table = true; + + /// Back-off strategy + typedef cds::backoff::Default back_off; + + /// Padding; default is cache-line padding + enum { + padding = cds::opt::cache_line_padding + }; + + /// Free-list of auxiliary nodes + /** + The split-list contains auxiliary nodes marked the start of buckets. + To increase performance, there is a pool of preallocated aux nodes. The part of the pool is a free-list + of aux nodes. + + Default is: + - \p cds::intrusive::FreeList - if architecture and/or compiler does not support double-width CAS primitive + - \p cds::intrusive::TaggedFreeList - if architecture and/or compiler supports double-width CAS primitive + */ + typedef FreeListImpl free_list; + }; + + /// [value-option] Split-list dynamic bucket table option + /** + The option is used to select bucket table implementation. + Possible values of \p Value are: + - \p true - select \p expandable_bucket_table + - \p false - select \p static_bucket_table + */ + template + struct dynamic_bucket_table + { + //@cond + template struct pack: public Base + { + enum { dynamic_bucket_table = Value }; + }; + //@endcond + }; + + /// Metafunction converting option list to \p split_list::traits + /** + Available \p Options: + - \p opt::hash - mandatory option, specifies hash functor. + - \p split_list::bit_reversal - bit reversal algorithm, see \p traits::bit_reversal for explanation + default is \p cds::algo::bit_reversal::lookup + - \p opt::item_counter - optional, specifies item counting policy. See \p traits::item_counter + for default type. + - \p opt::memory_model - C++ memory model for atomic operations. + Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consistent memory model). + - \p opt::allocator - optional, bucket table allocator. Default is \ref CDS_DEFAULT_ALLOCATOR. + - \p split_list::dynamic_bucket_table - use dynamic or static bucket table implementation. 
+ Dynamic bucket table expands its size up to maximum bucket count when necessary + - \p opt::back_off - back-off strategy used for spinning, default is \p cds::backoff::Default. + - \p opt::stat - internal statistics, default is \p split_list::empty_stat (disabled). + To enable internal statistics use \p split_list::stat. + - \p opt::padding - a padding to solve false-sharing issues; default is cache-line padding + - \p opt::free_list - a free-list implementation, see \p traits::free_list + */ + template + struct make_traits { + typedef typename cds::opt::make_options< traits, Options...>::type type ; ///< Result of metafunction + }; + + /// Static bucket table + /** + Non-resizeable bucket table for \p SplitListSet class. + The capacity of table (max bucket count) is defined in the constructor call. + + Template parameter: + - \p GC - garbage collector + - \p Node - node type, must be a type based on \p split_list::node + - \p Options... - options + + \p Options are: + - \p opt::allocator - allocator used to allocate bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR + - \p opt::memory_model - memory model used. Possible types are \p opt::v::sequential_consistent, \p opt::v::relaxed_ordering + - \p opt::free_list - free-list implementation; default is \p TaggedFreeList if the processor supports double-with CAS + otherwise \p FreeList. + */ + template + class static_bucket_table + { + //@cond + struct default_options + { + typedef CDS_DEFAULT_ALLOCATOR allocator; + typedef opt::v::relaxed_ordering memory_model; + typedef FreeListImpl free_list; + }; + typedef typename opt::make_options< default_options, Options... 
>::type options; + //@endcond + + public: + typedef GC gc; ///< Garbage collector + typedef Node node_type; ///< Bucket node type + typedef typename options::allocator allocator; ///< allocator + typedef typename options::memory_model memory_model; ///< Memory model for atomic operations + typedef typename options::free_list free_list; ///< Free-list + + /// Auxiliary node type + struct aux_node_type: public node_type, public free_list::node + { +# ifdef CDS_DEBUG + atomics::atomic m_busy; + + aux_node_type() + { + m_busy.store( false, atomics::memory_order_release ); + } +# endif + }; + + typedef atomics::atomic table_entry; ///< Table entry type + typedef cds::details::Allocator< table_entry, allocator > bucket_table_allocator; ///< Bucket table allocator + + protected: + //@cond + const size_t m_nLoadFactor; ///< load factor (average count of items per bucket) + const size_t m_nCapacity; ///< Bucket table capacity + table_entry * m_Table; ///< Bucket table + + typedef typename allocator::template rebind< aux_node_type >::other aux_node_allocator; + + aux_node_type* m_auxNode; ///< Array of pre-allocated auxiliary nodes + atomics::atomic m_nAuxNodeAllocated; ///< how many auxiliary node allocated + free_list m_freeList; ///< Free list + //@endcond + + protected: + //@cond + void allocate_table() + { + m_Table = bucket_table_allocator().NewArray( m_nCapacity, nullptr ); + m_auxNode = aux_node_allocator().allocate( m_nCapacity ); + } + + void destroy_table() + { + m_freeList.clear( []( typename free_list::node* ) {} ); + aux_node_allocator().deallocate( m_auxNode, m_nCapacity ); + bucket_table_allocator().Delete( m_Table, m_nCapacity ); + } + //@endcond + + public: + /// Constructs bucket table for 512K buckets. Load factor is 1. 
+ static_bucket_table() + : m_nLoadFactor(1) + , m_nCapacity( 512 * 1024 ) + , m_nAuxNodeAllocated( 0 ) + { + allocate_table(); + } + + /// Creates the table with specified size rounded up to nearest power-of-two + static_bucket_table( + size_t nItemCount, ///< Max expected item count in split-ordered list + size_t nLoadFactor ///< Load factor + ) + : m_nLoadFactor( nLoadFactor > 0 ? nLoadFactor : (size_t) 1 ) + , m_nCapacity( cds::beans::ceil2( nItemCount / m_nLoadFactor )) + , m_nAuxNodeAllocated( 0 ) + { + // m_nCapacity must be power of 2 + assert( cds::beans::is_power2( m_nCapacity )); + allocate_table(); + } + + /// Destroys bucket table + ~static_bucket_table() + { + destroy_table(); + } + + /// Returns head node of bucket \p nBucket + aux_node_type * bucket( size_t nBucket ) const + { + assert( nBucket < capacity()); + return m_Table[ nBucket ].load(memory_model::memory_order_acquire); + } + + /// Set \p pNode as a head of bucket \p nBucket + void bucket( size_t nBucket, aux_node_type * pNode ) + { + assert( nBucket < capacity()); + assert( bucket( nBucket ) == nullptr ); + + m_Table[ nBucket ].store( pNode, memory_model::memory_order_release ); + } + + /// Allocates auxiliary node; can return \p nullptr if the table exhausted + aux_node_type* alloc_aux_node() + { + if ( m_nAuxNodeAllocated.load( memory_model::memory_order_relaxed ) < capacity()) { + // alloc next free node from m_auxNode + size_t const idx = m_nAuxNodeAllocated.fetch_add( 1, memory_model::memory_order_relaxed ); + if ( idx < capacity()) { + CDS_TSAN_ANNOTATE_NEW_MEMORY( &m_auxNode[idx], sizeof( aux_node_type )); + return new( &m_auxNode[idx] ) aux_node_type(); + } + } + + // get from free-list + auto pFree = m_freeList.get(); + if ( pFree ) + return static_cast( pFree ); + + // table exhausted + return nullptr; + } + + /// Places node type to free-list + void free_aux_node( aux_node_type* p ) + { + m_freeList.put( static_cast( p )); + } + + /// Returns the capacity of the bucket table + 
size_t capacity() const + { + return m_nCapacity; + } + + /// Returns the load factor, i.e. average count of items per bucket + size_t load_factor() const + { + return m_nLoadFactor; + } + }; + + /// Expandable bucket table + /** + This bucket table can dynamically grow its capacity when necessary + up to maximum bucket count. + + Template parameter: + - \p GC - garbage collector + - \p Node - node type, must be derived from \p split_list::node + - \p Options... - options + + \p Options are: + - \p opt::allocator - allocator used to allocate bucket table. Default is \ref CDS_DEFAULT_ALLOCATOR + - \p opt::memory_model - memory model used. Possible types are \p opt::v::sequential_consistent, \p opt::v::relaxed_ordering + - \p opt::free_list - free-list implementation; default is \p TaggedFreeList if the processor supports double-with CAS + otherwise \p FreeList. + */ + template + class expandable_bucket_table + { + //@cond + struct default_options + { + typedef CDS_DEFAULT_ALLOCATOR allocator; + typedef opt::v::relaxed_ordering memory_model; + typedef FreeListImpl free_list; + }; + typedef typename opt::make_options< default_options, Options... 
>::type options; + //@endcond + + public: + typedef GC gc; ///< Garbage collector + typedef Node node_type; ///< Bucket node type + typedef typename options::allocator allocator; ///< allocator + + /// Memory model for atomic operations + typedef typename options::memory_model memory_model; + + /// Free-list + typedef typename options::free_list free_list; + + /// Auxiliary node type + struct aux_node_type: public node_type, public free_list::node + { +# ifdef CDS_DEBUG + atomics::atomic m_busy; + + aux_node_type() + { + m_busy.store( false, atomics::memory_order_release ); + } +# endif + }; + + protected: + //@cond + typedef atomics::atomic table_entry; ///< Table entry type + typedef atomics::atomic segment_type; ///< Bucket table segment type + + struct aux_node_segment { + atomics::atomic< size_t > aux_node_count; // how many aux nodes allocated from the segment + aux_node_segment* next_segment; + // aux_node_type nodes[]; + + aux_node_segment() + : next_segment( nullptr ) + { + aux_node_count.store( 0, atomics::memory_order_release ); + } + + aux_node_type* segment() + { + return reinterpret_cast( this + 1 ); + } + }; + + /// Bucket table metrics + struct metrics { + size_t nSegmentCount; ///< max count of segments in bucket table + size_t nSegmentSize; ///< the segment's capacity. The capacity must be power of two. 
+ size_t nSegmentSizeLog2; ///< log2( m_nSegmentSize ) + size_t nLoadFactor; ///< load factor + size_t nCapacity; ///< max capacity of bucket table + + metrics() + : nSegmentCount( 1024 ) + , nSegmentSize( 512 ) + , nSegmentSizeLog2( cds::beans::log2( nSegmentSize )) + , nLoadFactor( 1 ) + , nCapacity( nSegmentCount * nSegmentSize ) + {} + }; + + /// Bucket table allocator + typedef cds::details::Allocator< segment_type, allocator > bucket_table_allocator; + + /// Bucket table segment allocator + typedef cds::details::Allocator< table_entry, allocator > segment_allocator; + + // Aux node segment allocator + typedef typename allocator::template rebind::other raw_allocator; + + //@endcond + + public: + /// Constructs bucket table for 512K buckets. Load factor is 1. + expandable_bucket_table() + : m_metrics( calc_metrics( 512 * 1024, 1 )) + { + init(); + } + + /// Creates the table with specified capacity rounded up to nearest power-of-two + expandable_bucket_table( + size_t nItemCount, ///< Max expected item count in split-ordered list + size_t nLoadFactor ///< Load factor + ) + : m_metrics( calc_metrics( nItemCount, nLoadFactor )) + { + init(); + } + + /// Destroys bucket table + ~expandable_bucket_table() + { + m_freeList.clear( []( typename free_list::node* ) {} ); + + for ( auto aux_segment = m_auxNodeList.load( atomics::memory_order_relaxed ); aux_segment; ) { + auto next_segment = aux_segment->next_segment; + free_aux_segment( aux_segment ); + aux_segment = next_segment; + } + + segment_type * pSegments = m_Segments; + for ( size_t i = 0; i < m_metrics.nSegmentCount; ++i ) { + table_entry* pEntry = pSegments[i].load(memory_model::memory_order_relaxed); + if ( pEntry != nullptr ) + destroy_segment( pEntry ); + } + + destroy_table( pSegments ); + } + + /// Returns head node of the bucket \p nBucket + aux_node_type * bucket( size_t nBucket ) const + { + size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2; + assert( nSegment < m_metrics.nSegmentCount ); + + 
table_entry* pSegment = m_Segments[ nSegment ].load(memory_model::memory_order_acquire); + if ( pSegment == nullptr ) + return nullptr; // uninitialized bucket + return pSegment[ nBucket & (m_metrics.nSegmentSize - 1) ].load(memory_model::memory_order_acquire); + } + + /// Set \p pNode as a head of bucket \p nBucket + void bucket( size_t nBucket, aux_node_type * pNode ) + { + size_t nSegment = nBucket >> m_metrics.nSegmentSizeLog2; + assert( nSegment < m_metrics.nSegmentCount ); + + segment_type& segment = m_Segments[nSegment]; + if ( segment.load( memory_model::memory_order_relaxed ) == nullptr ) { + table_entry* pNewSegment = allocate_segment(); + table_entry * pNull = nullptr; + if ( !segment.compare_exchange_strong( pNull, pNewSegment, memory_model::memory_order_release, atomics::memory_order_relaxed )) + destroy_segment( pNewSegment ); + } + + assert( segment.load( atomics::memory_order_relaxed )[nBucket & (m_metrics.nSegmentSize - 1)].load( atomics::memory_order_relaxed ) == nullptr ); + segment.load(memory_model::memory_order_acquire)[ nBucket & (m_metrics.nSegmentSize - 1) ].store( pNode, memory_model::memory_order_release ); + } + + /// Allocates auxiliary node; can return \p nullptr if the table exhausted + aux_node_type* alloc_aux_node() + { + aux_node_segment* aux_segment = m_auxNodeList.load( memory_model::memory_order_acquire ); + + for ( ;; ) { + assert( aux_segment != nullptr ); + + // try to allocate from current aux segment + if ( aux_segment->aux_node_count.load( memory_model::memory_order_acquire ) < m_metrics.nSegmentSize ) { + size_t idx = aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed ); + if ( idx < m_metrics.nSegmentSize ) { + CDS_TSAN_ANNOTATE_NEW_MEMORY( aux_segment->segment() + idx, sizeof( aux_node_type )); + return new( aux_segment->segment() + idx ) aux_node_type(); + } + } + + // try allocate from free-list + auto pFree = m_freeList.get(); + if ( pFree ) + return static_cast( pFree ); + + // free-list is 
empty, current segment is full + // try to allocate new aux segment + // We can allocate more aux segments than we need but it is not a problem in this context + aux_node_segment* new_aux_segment = allocate_aux_segment(); + new_aux_segment->next_segment = aux_segment; + new_aux_segment->aux_node_count.fetch_add( 1, memory_model::memory_order_relaxed ); + + if ( m_auxNodeList.compare_exchange_strong( aux_segment, new_aux_segment, memory_model::memory_order_release, atomics::memory_order_acquire )) { + CDS_TSAN_ANNOTATE_NEW_MEMORY( new_aux_segment->segment(), sizeof( aux_node_type )); + return new( new_aux_segment->segment()) aux_node_type(); + } + + free_aux_segment( new_aux_segment ); + } + } + + /// Places auxiliary node type to free-list + void free_aux_node( aux_node_type* p ) + { + m_freeList.put( static_cast( p )); + } + + /// Returns the capacity of the bucket table + size_t capacity() const + { + return m_metrics.nCapacity; + } + + /// Returns the load factor, i.e. average count of items per bucket + size_t load_factor() const + { + return m_metrics.nLoadFactor; + } + + protected: + //@cond + metrics calc_metrics( size_t nItemCount, size_t nLoadFactor ) + { + metrics m; + + // Calculate m_nSegmentSize and m_nSegmentCount by nItemCount + m.nLoadFactor = nLoadFactor > 0 ? 
nLoadFactor : 1; + + size_t nBucketCount = ( nItemCount + m.nLoadFactor - 1 ) / m.nLoadFactor; + if ( nBucketCount <= 2 ) { + m.nSegmentCount = 1; + m.nSegmentSize = 2; + } + else if ( nBucketCount <= 1024 ) { + m.nSegmentCount = 1; + m.nSegmentSize = ((size_t)1) << beans::log2ceil( nBucketCount ); + } + else { + nBucketCount = beans::log2ceil( nBucketCount ); + m.nSegmentCount = + m.nSegmentSize = ((size_t)1) << (nBucketCount / 2); + if ( nBucketCount & 1 ) + m.nSegmentSize *= 2; + if ( m.nSegmentCount * m.nSegmentSize * m.nLoadFactor < nItemCount ) + m.nSegmentSize *= 2; + } + m.nCapacity = m.nSegmentCount * m.nSegmentSize; + m.nSegmentSizeLog2 = cds::beans::log2( m.nSegmentSize ); + assert( m.nSegmentSizeLog2 != 0 ); // + return m; + } + + segment_type * allocate_table() + { + return bucket_table_allocator().NewArray( m_metrics.nSegmentCount, nullptr ); + } + + void destroy_table( segment_type * pTable ) + { + bucket_table_allocator().Delete( pTable, m_metrics.nSegmentCount ); + } + + table_entry* allocate_segment() + { + return segment_allocator().NewArray( m_metrics.nSegmentSize, nullptr ); + } + + void destroy_segment( table_entry* pSegment ) + { + segment_allocator().Delete( pSegment, m_metrics.nSegmentSize ); + } + + aux_node_segment* allocate_aux_segment() + { + char* p = raw_allocator().allocate( sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize ); + CDS_TSAN_ANNOTATE_NEW_MEMORY( p, sizeof( aux_node_segment )); + return new(p) aux_node_segment(); + } + + void free_aux_segment( aux_node_segment* p ) + { + raw_allocator().deallocate( reinterpret_cast( p ), sizeof( aux_node_segment ) + sizeof( aux_node_type ) * m_metrics.nSegmentSize ); + } + + void init() + { + // m_nSegmentSize must be 2**N + assert( cds::beans::is_power2( m_metrics.nSegmentSize )); + assert( (((size_t)1) << m_metrics.nSegmentSizeLog2) == m_metrics.nSegmentSize ); + + // m_nSegmentCount must be 2**K + assert( cds::beans::is_power2( m_metrics.nSegmentCount )); + 
+ m_Segments = allocate_table(); + m_auxNodeList = allocate_aux_segment(); + } + //@endcond + + protected: + //@cond + metrics const m_metrics; ///< Dynamic bucket table metrics + segment_type* m_Segments; ///< bucket table - array of segments + atomics::atomic m_auxNodeList; ///< segment list of aux nodes + free_list m_freeList; ///< List of free aux nodes + //@endcond + }; + + + //@cond + namespace details { + template + struct bucket_table_selector; + + template + struct bucket_table_selector< true, GC, Node, Options...> + { + typedef expandable_bucket_table type; + }; + + template + struct bucket_table_selector< false, GC, Node, Options...> + { + typedef static_bucket_table type; + }; + + template + struct search_value_type + { + Q& val; + size_t nHash; + + search_value_type( Q& v, size_t h ) + : val( v ) + , nHash( h ) + {} + }; + + template + class ordered_list_adapter; + + template + class ordered_list_adapter< OrderedList, Traits, false > + { + typedef OrderedList native_ordered_list; + typedef Traits traits; + + typedef typename native_ordered_list::gc gc; + typedef typename native_ordered_list::key_comparator native_key_comparator; + typedef typename native_ordered_list::node_type node_type; + typedef typename native_ordered_list::value_type value_type; + typedef typename native_ordered_list::node_traits native_node_traits; + typedef typename native_ordered_list::disposer native_disposer; + + typedef split_list::node splitlist_node_type; + + struct key_compare { + int operator()( value_type const& v1, value_type const& v2 ) const + { + splitlist_node_type const * n1 = static_cast(native_node_traits::to_node_ptr( v1 )); + splitlist_node_type const * n2 = static_cast(native_node_traits::to_node_ptr( v2 )); + if ( n1->m_nHash != n2->m_nHash ) + return n1->m_nHash < n2->m_nHash ? 
-1 : 1; + + if ( n1->is_dummy()) { + assert( n2->is_dummy()); + return 0; + } + + assert( !n1->is_dummy() && !n2->is_dummy()); + + return native_key_comparator()(v1, v2); + } + + template + int operator()( value_type const& v, search_value_type const& q ) const + { + splitlist_node_type const * n = static_cast(native_node_traits::to_node_ptr( v )); + if ( n->m_nHash != q.nHash ) + return n->m_nHash < q.nHash ? -1 : 1; + + assert( !n->is_dummy()); + return native_key_comparator()(v, q.val); + } + + template + int operator()( search_value_type const& q, value_type const& v ) const + { + return -operator()( v, q ); + } + }; + + struct wrapped_disposer + { + void operator()( value_type * v ) + { + splitlist_node_type * p = static_cast(native_node_traits::to_node_ptr( v )); + if ( !p->is_dummy()) + native_disposer()(v); + } + }; + + public: + typedef node_type ordered_list_node_type; + typedef splitlist_node_type aux_node; + + struct node_traits: private native_node_traits + { + typedef native_node_traits base_class; ///< Base ordered list node type + typedef typename base_class::value_type value_type; ///< Value type + typedef typename base_class::node_type base_node_type; ///< Ordered list node type + typedef node node_type; ///< Split-list node type + + /// Convert value reference to node pointer + static node_type * to_node_ptr( value_type& v ) + { + return static_cast(base_class::to_node_ptr( v )); + } + + /// Convert value pointer to node pointer + static node_type * to_node_ptr( value_type * v ) + { + return static_cast(base_class::to_node_ptr( v )); + } + + /// Convert value reference to node pointer (const version) + static node_type const * to_node_ptr( value_type const& v ) + { + return static_cast(base_class::to_node_ptr( v )); + } + + /// Convert value pointer to node pointer (const version) + static node_type const * to_node_ptr( value_type const * v ) + { + return static_cast(base_class::to_node_ptr( v )); + } + + /// Convert node reference to value 
pointer + static value_type * to_value_ptr( node_type& n ) + { + return base_class::to_value_ptr( static_cast(n)); + } + + /// Convert node pointer to value pointer + static value_type * to_value_ptr( node_type * n ) + { + return base_class::to_value_ptr( static_cast(n)); + } + + /// Convert node reference to value pointer (const version) + static const value_type * to_value_ptr( node_type const & n ) + { + return base_class::to_value_ptr( static_cast(n)); + } + + /// Convert node pointer to value pointer (const version) + static const value_type * to_value_ptr( node_type const * n ) + { + return base_class::to_value_ptr( static_cast(n)); + } + }; + + template + struct make_compare_from_less: public cds::opt::details::make_comparator_from_less + { + typedef cds::opt::details::make_comparator_from_less base_class; + + template + int operator()( value_type const& v, search_value_type const& q ) const + { + splitlist_node_type const * n = static_cast(native_node_traits::to_node_ptr( v )); + if ( n->m_nHash != q.nHash ) + return n->m_nHash < q.nHash ? -1 : 1; + + assert( !n->is_dummy()); + return base_class()(v, q.val); + } + + template + int operator()( search_value_type const& q, value_type const& v ) const + { + splitlist_node_type const * n = static_cast(native_node_traits::to_node_ptr( v )); + if ( n->m_nHash != q.nHash ) + return q.nHash < n->m_nHash ? -1 : 1; + + assert( !n->is_dummy()); + return base_class()(q.val, v); + } + + int operator()( value_type const& lhs, value_type const& rhs ) const + { + splitlist_node_type const * n1 = static_cast(native_node_traits::to_node_ptr( lhs )); + splitlist_node_type const * n2 = static_cast(native_node_traits::to_node_ptr( rhs )); + if ( n1->m_nHash != n2->m_nHash ) + return n1->m_nHash < n2->m_nHash ? 
-1 : 1; + + if ( n1->is_dummy()) { + assert( n2->is_dummy()); + return 0; + } + + assert( !n1->is_dummy() && !n2->is_dummy()); + + return native_key_comparator()( lhs, rhs ); + } + }; + + typedef typename native_ordered_list::template rebind_traits< + opt::compare< key_compare > + , opt::disposer< wrapped_disposer > + , opt::boundary_node_type< splitlist_node_type > + >::type result; + }; + + template + class ordered_list_adapter< OrderedList, Traits, true > + { + typedef OrderedList native_ordered_list; + typedef Traits traits; + + typedef typename native_ordered_list::gc gc; + typedef typename native_ordered_list::key_comparator native_key_comparator; + typedef typename native_ordered_list::value_type value_type; + typedef typename native_ordered_list::disposer native_disposer; + + struct key_compare { + int operator()( value_type const& v1, value_type const& v2 ) const + { + hash_node const& n1 = static_cast( v1 ); + hash_node const& n2 = static_cast( v2 ); + if ( n1.m_nHash != n2.m_nHash ) + return n1.m_nHash < n2.m_nHash ? -1 : 1; + + if ( n1.is_dummy()) { + assert( n2.is_dummy()); + return 0; + } + + assert( !n1.is_dummy() && !n2.is_dummy()); + + return native_key_comparator()(v1, v2); + } + + template + int operator()( value_type const& v, search_value_type const& q ) const + { + hash_node const& n = static_cast( v ); + if ( n.m_nHash != q.nHash ) + return n.m_nHash < q.nHash ? 
-1 : 1; + + assert( !n.is_dummy()); + return native_key_comparator()(v, q.val); + } + + template + int operator()( search_value_type const& q, value_type const& v ) const + { + return -operator()( v, q ); + } + }; + + struct wrapped_disposer + { + void operator()( value_type * v ) + { + if ( !static_cast( v )->is_dummy()) + native_disposer()( v ); + } + }; + + public: + typedef void ordered_list_node_type; + + struct aux_node: public native_ordered_list::node_type, public hash_node + { + aux_node() + { + typedef typename native_ordered_list::node_type list_node_type; + + list_node_type::data.store( typename list_node_type::marked_data_ptr( + static_cast( static_cast( this ))), + atomics::memory_order_release + ); + } + }; + + struct node_traits + { + static hash_node * to_node_ptr( value_type& v ) + { + return static_cast( &v ); + } + + static hash_node * to_node_ptr( value_type * v ) + { + return static_cast( v ); + } + + static hash_node const * to_node_ptr( value_type const& v ) + { + return static_cast( &v ); + } + + static hash_node const * to_node_ptr( value_type const * v ) + { + return static_cast( v ); + } + }; + + template + struct make_compare_from_less: public cds::opt::details::make_comparator_from_less + { + typedef cds::opt::details::make_comparator_from_less base_class; + + template + int operator()( value_type const& v, search_value_type const& q ) const + { + hash_node const& n = static_cast( v ); + if ( n.m_nHash != q.nHash ) + return n.m_nHash < q.nHash ? -1 : 1; + + assert( !n.is_dummy()); + return base_class()(v, q.val); + } + + template + int operator()( search_value_type const& q, value_type const& v ) const + { + hash_node const& n = static_cast( v ); + if ( n.m_nHash != q.nHash ) + return q.nHash < n.m_nHash ? 
-1 : 1; + + assert( !n.is_dummy()); + return base_class()(q.val, v); + } + + int operator()( value_type const& lhs, value_type const& rhs ) const + { + hash_node const& n1 = static_cast( lhs ); + hash_node const& n2 = static_cast( rhs ); + if ( n1.m_nHash != n2.m_nHash ) + return n1.m_nHash < n2.m_nHash ? -1 : 1; + + if ( n1.is_dummy()) { + assert( n2.is_dummy()); + return 0; + } + + assert( !n1.is_dummy() && !n2.is_dummy()); + + return base_class()( lhs, rhs ); + } + }; + + typedef typename native_ordered_list::template rebind_traits< + opt::compare< key_compare > + , opt::disposer< wrapped_disposer > + , opt::boundary_node_type< aux_node > + >::type result; + }; + + template + using rebind_list_traits = ordered_list_adapter< OrderedList, Traits, is_iterable_list::value >; + + template + struct select_list_iterator; + + template + struct select_list_iterator + { + typedef typename OrderedList::iterator type; + }; + + template + struct select_list_iterator + { + typedef typename OrderedList::const_iterator type; + }; + + template + class iterator_type + { + typedef OrderedList ordered_list_type; + friend class iterator_type ; + + protected: + typedef typename select_list_iterator::type list_iterator; + typedef NodeTraits node_traits; + + private: + list_iterator m_itCur; + list_iterator m_itEnd; + + public: + typedef typename list_iterator::value_ptr value_ptr; + typedef typename list_iterator::value_ref value_ref; + + public: + iterator_type() + {} + + iterator_type( iterator_type const& src ) + : m_itCur( src.m_itCur ) + , m_itEnd( src.m_itEnd ) + {} + + // This ctor should be protected... 
+ iterator_type( list_iterator itCur, list_iterator itEnd ) + : m_itCur( itCur ) + , m_itEnd( itEnd ) + { + // skip dummy nodes + while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy()) + ++m_itCur; + } + + value_ptr operator ->() const + { + return m_itCur.operator->(); + } + + value_ref operator *() const + { + return m_itCur.operator*(); + } + + /// Pre-increment + iterator_type& operator ++() + { + if ( m_itCur != m_itEnd ) { + do { + ++m_itCur; + } while ( m_itCur != m_itEnd && node_traits::to_node_ptr( *m_itCur )->is_dummy()); + } + return *this; + } + + iterator_type& operator = (iterator_type const& src) + { + m_itCur = src.m_itCur; + m_itEnd = src.m_itEnd; + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return m_itCur == i.m_itCur; + } + template + bool operator !=(iterator_type const& i ) const + { + return m_itCur != i.m_itCur; + } + + protected: + list_iterator const& underlying_iterator() const + { + return m_itCur; + } + }; + } // namespace details + //@endcond + + //@cond + // Helper functions + template + static inline size_t regular_hash( size_t nHash ) + { + return static_cast( BitReversalAlgo()( cds::details::size_t_cast( nHash ))) | size_t(1); + } + + template + static inline size_t dummy_hash( size_t nHash ) + { + return static_cast( BitReversalAlgo()( cds::details::size_t_cast( nHash ))) & ~size_t(1); + } + //@endcond + + } // namespace split_list + + //@cond + // Forward declaration + template + class SplitListSet; + //@endcond + +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_DETAILS_SPLIT_LIST_BASE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/ellen_bintree_dhp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/ellen_bintree_dhp.h new file mode 100644 index 0000000..c527e90 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/ellen_bintree_dhp.h @@ -0,0 +1,37 @@ +/* + This 
file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_INTRUSIVE_ELLEN_BINTREE_DHP_H +#define CDSLIB_INTRUSIVE_ELLEN_BINTREE_DHP_H + +#include +#include + +#endif // #ifndef CDSLIB_INTRUSIVE_ELLEN_BINTREE_DHP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/ellen_bintree_hp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/ellen_bintree_hp.h new file mode 100644 index 0000000..e5dbed2 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/ellen_bintree_hp.h @@ -0,0 +1,37 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_ELLEN_BINTREE_HP_H +#define CDSLIB_INTRUSIVE_ELLEN_BINTREE_HP_H + +#include +#include + +#endif // #ifndef CDSLIB_INTRUSIVE_ELLEN_BINTREE_HP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/ellen_bintree_rcu.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/ellen_bintree_rcu.h new file mode 100644 index 0000000..22d8e3f --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/ellen_bintree_rcu.h @@ -0,0 +1,2015 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_ELLEN_BINTREE_RCU_H +#define CDSLIB_INTRUSIVE_ELLEN_BINTREE_RCU_H + +#include +#include +#include +#include +#include +#include + +namespace cds { namespace intrusive { + //@cond + namespace ellen_bintree { + + template + struct base_node >: public basic_node + { + typedef basic_node base_class; + + base_node * m_pNextRetired; + + typedef cds::urcu::gc gc ; ///< Garbage collector + + /// Constructs leaf (bIntrenal == false) or internal (bInternal == true) node + explicit base_node( bool bInternal ) + : basic_node( bInternal ) + , m_pNextRetired( nullptr ) + {} + }; + + } // namespace ellen_bintree + //@endcond + + /// Ellen's et al binary search tree (RCU specialization) + /** @ingroup cds_intrusive_map + @ingroup cds_intrusive_tree + @anchor cds_intrusive_EllenBinTree_rcu + + Source: + - [2010] F.Ellen, P.Fatourou, E.Ruppert, F.van Breugel "Non-blocking Binary Search Tree" + + %EllenBinTree is an unbalanced leaf-oriented binary search tree that implements the set + abstract data type. Nodes maintains child pointers but not parent pointers. + Every internal node has exactly two children, and all data of type \p T currently in + the tree are stored in the leaves. 
Internal nodes of the tree are used to direct \p find + operation along the path to the correct leaf. The keys (of \p Key type) stored in internal nodes + may or may not be in the set. \p Key type is a subset of \p T type. + There should be exactly defined a key extracting functor for converting object of type \p T to + object of type \p Key. + + Due to \p extract_min and \p extract_max member functions the \p %EllenBinTree can act as + a priority queue. In this case you should provide unique compound key, for example, + the priority value plus some uniformly distributed random value. + + @attention Recall the tree is unbalanced. The complexity of operations is O(log N) + for uniformly distributed random keys, but in the worst case the complexity is O(N). + + @note In the current implementation we do not use helping technique described in the original paper. + Instead of helping, when a thread encounters a concurrent operation it just spins waiting for + the operation done. Such solution allows greatly simplify the implementation of tree. + + Template arguments: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p Key - key type, a subset of \p T + - \p T - type to be stored in tree's leaf nodes. The type must be based on \p ellen_bintree::node + (for \p ellen_bintree::base_hook) or it must have a member of type \p ellen_bintree::node + (for \p ellen_bintree::member_hook). + - \p Traits - tree traits, default is \p ellen_bintree::traits + It is possible to declare option-based tree with \p ellen_bintree::make_traits metafunction + instead of \p Traits template argument. + + @note Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. 
+ + @anchor cds_intrusive_EllenBinTree_rcu_less + Predicate requirements + + \p Traits::less, \p Traits::compare and other predicates using with member fuctions should accept at least parameters + of type \p T and \p Key in any combination. + For example, for \p Foo struct with \p std::string key field the appropiate \p less functor is: + \code + struct Foo: public cds::intrusive::ellen_bintree::node< ... > + { + std::string m_strKey; + ... + }; + + struct less { + bool operator()( Foo const& v1, Foo const& v2 ) const + { return v1.m_strKey < v2.m_strKey ; } + + bool operator()( Foo const& v, std::string const& s ) const + { return v.m_strKey < s ; } + + bool operator()( std::string const& s, Foo const& v ) const + { return s < v.m_strKey ; } + + // Support comparing std::string and char const * + bool operator()( std::string const& s, char const * p ) const + { return s.compare(p) < 0 ; } + + bool operator()( Foo const& v, char const * p ) const + { return v.m_strKey.compare(p) < 0 ; } + + bool operator()( char const * p, std::string const& s ) const + { return s.compare(p) > 0; } + + bool operator()( char const * p, Foo const& v ) const + { return v.m_strKey.compare(p) > 0; } + }; + \endcode + + @anchor cds_intrusive_EllenBinTree_usage + Usage + + Suppose we have the following Foo struct with string key type: + \code + struct Foo { + std::string m_strKey ; // The key + //... // other non-key data + }; + \endcode + + We want to utilize RCU-based \p %cds::intrusive::EllenBinTree set for \p Foo data. + We may use base hook or member hook. Consider base hook variant. + First, we need deriving \p Foo struct from \p cds::intrusive::ellen_bintree::node: + \code + #include + #include + + // RCU type we use + typedef cds::urcu::gc< cds::urcu::general_buffered<> > gpb_rcu; + + struct Foo: public cds::intrusive:ellen_bintree::node< gpb_rcu > + { + std::string m_strKey ; // The key + //... 
// other non-key data + }; + \endcode + + Second, we need to implement auxiliary structures and functors: + - key extractor functor for extracting the key from \p Foo object. + Such functor is necessary because the tree internal nodes store the keys. + - \p less predicate. We want our set should accept \p std::string + and char const * parameters for searching, so our \p less + predicate will not be trivial, see below. + - item counting feature: we want our set's \p size() member function + returns actual item count. + + \code + // Key extractor functor + struct my_key_extractor + { + void operator ()( std::string& key, Foo const& src ) const + { + key = src.m_strKey; + } + }; + + // Less predicate + struct my_less { + bool operator()( Foo const& v1, Foo const& v2 ) const + { return v1.m_strKey < v2.m_strKey ; } + + bool operator()( Foo const& v, std::string const& s ) const + { return v.m_strKey < s ; } + + bool operator()( std::string const& s, Foo const& v ) const + { return s < v.m_strKey ; } + + // Support comparing std::string and char const * + bool operator()( std::string const& s, char const * p ) const + { return s.compare(p) < 0 ; } + + bool operator()( Foo const& v, char const * p ) const + { return v.m_strKey.compare(p) < 0 ; } + + bool operator()( char const * p, std::string const& s ) const + { return s.compare(p) > 0; } + + bool operator()( char const * p, Foo const& v ) const + { return v.m_strKey.compare(p) > 0; } + }; + + // Tree traits for our set + // It is necessary to specify only those typedefs that differ from + // cds::intrusive::ellen_bintree::traits defaults. 
+ struct set_traits: public cds::intrusive::ellen_bintree::traits + { + typedef cds::intrusive::ellen_bintree::base_hook< cds::opt::gc > > hook; + typedef my_key_extractor key_extractor; + typedef my_less less; + typedef cds::atomicity::item_counter item_counter; + }; + \endcode + + Now we declare \p %EllenBinTree set and use it: + \code + typedef cds::intrusive::EllenBinTree< gpb_rcu, std::string, Foo, set_traits > set_type; + + set_type theSet; + // ... + \endcode + + Instead of declaring \p set_traits type traits we can use option-based syntax with + \p ellen_bintree::make_traits metafunction, for example: + \code + typedef cds::intrusive::EllenBinTree< gpb_rcu, std::string, Foo, + typename cds::intrusive::ellen_bintree::make_traits< + cds::opt::hook< cds::intrusive::ellen_bintree::base_hook< cds::opt::gc > > + ,cds::intrusive::ellen_bintree::key_extractor< my_key_extractor > + ,cds::opt::less< my_less > + ,cds::opt::item_counter< cds::atomicity::item_counter > + >::type + > set_type2; + \endcode + + Functionally, \p set_type and \p set_type2 are equivalent. + + Member-hooked tree + + Sometimes, we cannot use base hook, for example, when the \p Foo structure is external. + In such case we can use member hook feature. + \code + #include + #include + + // Struct Foo is external and its declaration cannot be modified. + struct Foo { + std::string m_strKey ; // The key + //... 
// other non-key data + }; + + // RCU type we use + typedef cds::urcu::gc< cds::urcu::general_buffered<> > gpb_rcu; + + // Foo wrapper + struct MyFoo + { + Foo m_foo; + cds::intrusive:ellen_bintree::node< gpb_rcu > set_hook; // member hook + }; + + // Key extractor functor + struct member_key_extractor + { + void operator ()( std::string& key, MyFoo const& src ) const + { + key = src.m_foo.m_strKey; + } + }; + + // Less predicate + struct member_less { + bool operator()( MyFoo const& v1, MyFoo const& v2 ) const + { return v1.m_foo.m_strKey < v2.m_foo.m_strKey ; } + + bool operator()( MyFoo const& v, std::string const& s ) const + { return v.m_foo.m_strKey < s ; } + + bool operator()( std::string const& s, MyFoo const& v ) const + { return s < v.m_foo.m_strKey ; } + + // Support comparing std::string and char const * + bool operator()( std::string const& s, char const * p ) const + { return s.compare(p) < 0 ; } + + bool operator()( MyFoo const& v, char const * p ) const + { return v.m_foo.m_strKey.compare(p) < 0 ; } + + bool operator()( char const * p, std::string const& s ) const + { return s.compare(p) > 0; } + + bool operator()( char const * p, MyFoo const& v ) const + { return v.m_foo.m_strKey.compare(p) > 0; } + }; + + // Tree traits for our member-based set + struct member_set_traits: public cds::intrusive::ellen_bintree::traits + { + cds::intrusive::ellen_bintree::member_hook< offsetof(MyFoo, set_hook), cds::opt::gc > > hook; + typedef member_key_extractor key_extractor; + typedef member_less less; + typedef cds::atomicity::item_counter item_counter; + }; + + // Tree containing MyFoo objects + typedef cds::intrusive::EllenBinTree< gpb_rcu, std::string, MyFoo, member_set_traits > member_set_type; + + member_set_type theMemberSet; + \endcode + + Multiple containers + + Sometimes we need that our \p Foo struct should be used in several different containers. 
+ Suppose, \p Foo struct has two key fields: + \code + struct Foo { + std::string m_strKey ; // string key + int m_nKey ; // int key + //... // other non-key data fields + }; + \endcode + + We want to build two intrusive \p %EllenBinTree sets: one indexed on \p Foo::m_strKey field, + another indexed on \p Foo::m_nKey field. To decide such case we should use a tag option for + tree's hook: + \code + #include + #include + + // RCU type we use + typedef cds::urcu::gc< cds::urcu::general_buffered<> > gpb_rcu; + + // Declare tag structs + struct int_tag ; // int key tag + struct string_tag ; // string key tag + + // Foo struct is derived from two ellen_bintree::node class + // with different tags + struct Foo + : public cds::intrusive::ellen_bintree::node< gpb_rcu, cds::opt::tag< string_tag >> + , public cds::intrusive::ellen_bintree::node< gpb_rcu, cds::opt::tag< int_tag >> + { + std::string m_strKey ; // string key + int m_nKey ; // int key + //... // other non-key data fields + }; + + // String key extractor functor + struct string_key_extractor + { + void operator ()( std::string& key, Foo const& src ) const + { + key = src.m_strKey; + } + }; + + // Int key extractor functor + struct int_key_extractor + { + void operator ()( int& key, Foo const& src ) const + { + key = src.m_nKey; + } + }; + + // String less predicate + struct string_less { + bool operator()( Foo const& v1, Foo const& v2 ) const + { return v1.m_strKey < v2.m_strKey ; } + + bool operator()( Foo const& v, std::string const& s ) const + { return v.m_strKey < s ; } + + bool operator()( std::string const& s, Foo const& v ) const + { return s < v.m_strKey ; } + + // Support comparing std::string and char const * + bool operator()( std::string const& s, char const * p ) const + { return s.compare(p) < 0 ; } + + bool operator()( Foo const& v, char const * p ) const + { return v.m_strKey.compare(p) < 0 ; } + + bool operator()( char const * p, std::string const& s ) const + { return s.compare(p) > 0; } + + 
bool operator()( char const * p, Foo const& v ) const + { return v.m_strKey.compare(p) > 0; } + }; + + // Int less predicate + struct int_less { + bool operator()( Foo const& v1, Foo const& v2 ) const + { return v1.m_nKey < v2.m_nKey ; } + + bool operator()( Foo const& v, int n ) const + { return v.m_nKey < n ; } + + bool operator()( int n, Foo const& v ) const + { return n < v.m_nKey ; } + }; + + // Type traits for string-indexed set + struct string_set_traits: public cds::intrusive::ellen_bintree::traits + { + typedef cds::intrusive::ellen_bintree::base_hook< cds::opt::gc >, cds::opt::tag< string_tag > > hook; + typedef string_key_extractor key_extractor; + typedef string_less less; + typedef cds::atomicity::item_counter item_counter; + }; + + // Type traits for int-indexed set + struct int_set_traits: public cds::intrusive::ellen_bintree::traits + { + typedef cds::intrusive::ellen_bintree::base_hook< cds::opt::gc >, cds::opt::tag< int_tag > > hook; + typedef int_key_extractor key_extractor; + typedef int_less less; + typedef cds::atomicity::item_counter item_counter; + }; + + // Declare string-indexed set + typedef cds::intrusive::EllenBinTree< gpb_rcu, std::string, Foo, string_set_traits > string_set_type; + string_set_type theStringSet; + + // Declare int-indexed set + typedef cds::intrusive::EllenBinTree< gpb_rcu, int, Foo, int_set_traits > int_set_type; + int_set_type theIntSet; + + // Now we can use theStringSet and theIntSet in our program + // ... 
+ \endcode + */ + template < class RCU, + typename Key, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = ellen_bintree::traits +#else + class Traits +#endif + > + class EllenBinTree< cds::urcu::gc, Key, T, Traits > + { + public: + typedef cds::urcu::gc gc; ///< RCU Garbage collector + typedef Key key_type; ///< type of a key stored in internal nodes; key is a part of \p value_type + typedef T value_type; ///< type of value stored in the binary tree + typedef Traits traits; ///< Traits template parameter + + typedef typename traits::hook hook; ///< hook type + typedef typename hook::node_type node_type; ///< node type + + typedef typename traits::disposer disposer; ///< leaf node disposer + typedef typename traits::back_off back_off; ///< back-off strategy + + protected: + //@cond + typedef ellen_bintree::base_node< gc > tree_node; ///< Base type of tree node + typedef node_type leaf_node; ///< Leaf node type + typedef ellen_bintree::internal_node< key_type, leaf_node > internal_node; ///< Internal node type + typedef ellen_bintree::update_desc< leaf_node, internal_node> update_desc; ///< Update descriptor + typedef typename update_desc::update_ptr update_ptr; ///< Marked pointer to update descriptor + //@endcond + + public: + using exempt_ptr = cds::urcu::exempt_ptr< gc, value_type, value_type, disposer, void >; ///< pointer to extracted node + + public: +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< Node traits +# else + typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; + struct node_traits: public get_node_traits< value_type, node_type, hook>::type + { + static internal_node const& to_internal_node( tree_node const& n ) + { + assert( n.is_internal()); + return static_cast( n ); + } + + static leaf_node const& to_leaf_node( 
tree_node const& n ) + { + assert( n.is_leaf()); + return static_cast( n ); + } + }; +# endif + + typedef typename traits::item_counter item_counter; ///< Item counting policy used + typedef typename traits::memory_model memory_model; ///< Memory ordering. See \p cds::opt::memory_model option + typedef typename traits::stat stat; ///< internal statistics type + typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy + typedef typename traits::key_extractor key_extractor; ///< key extracting functor + + typedef typename traits::node_allocator node_allocator; ///< Internal node allocator + typedef typename traits::update_desc_allocator update_desc_allocator; ///< Update descriptor allocator + + typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock + + static constexpr const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions do not require external locking + + protected: + //@cond + typedef ellen_bintree::details::compare< key_type, value_type, key_comparator, node_traits > node_compare; + + typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock > check_deadlock_policy; + + typedef cds::details::Allocator< internal_node, node_allocator > cxx_node_allocator; + typedef cds::details::Allocator< update_desc, update_desc_allocator > cxx_update_desc_allocator; + + struct search_result { + internal_node * pGrandParent; + internal_node * pParent; + leaf_node * pLeaf; + update_ptr updParent; + update_ptr updGrandParent; + bool bRightLeaf ; // true if pLeaf is right child of pParent, false otherwise + bool bRightParent ; // true if pParent is right child of pGrandParent, false otherwise + + search_result() + :pGrandParent( nullptr ) + , pParent( nullptr ) + , pLeaf( nullptr ) + ,bRightLeaf( false ) + ,bRightParent( false ) + {} + }; + //@endcond + + protected: + //@cond + internal_node m_Root; ///< Tree root node (key= Infinite2) + leaf_node m_LeafInf1; + leaf_node m_LeafInf2; + 
//@endcond + + item_counter m_ItemCounter; ///< item counter + mutable stat m_Stat; ///< internal statistics + + protected: + //@cond + static void free_leaf_node( value_type* p ) + { + disposer()( p ); + } + static void free_leaf_node_void( void* p ) + { + free_leaf_node( reinterpret_cast( p )); + } + + internal_node * alloc_internal_node() const + { + m_Stat.onInternalNodeCreated(); + internal_node * pNode = cxx_node_allocator().New(); + //pNode->clean(); + return pNode; + } + + static void free_internal_node( internal_node* pNode ) + { + cxx_node_allocator().Delete( pNode ); + } + static void free_internal_node_void( void* pNode ) + { + free_internal_node( reinterpret_cast( pNode )); + } + + struct internal_node_deleter { + void operator()( internal_node * p) const + { + free_internal_node( p ); + } + }; + + typedef std::unique_ptr< internal_node, internal_node_deleter> unique_internal_node_ptr; + + update_desc * alloc_update_desc() const + { + m_Stat.onUpdateDescCreated(); + return cxx_update_desc_allocator().New(); + } + + static void free_update_desc( update_desc* pDesc ) + { + cxx_update_desc_allocator().Delete( pDesc ); + } + static void free_update_desc_void( void* pDesc ) + { + free_update_desc( reinterpret_cast( pDesc )); + } + + class retired_list + { + update_desc * pUpdateHead; + tree_node * pNodeHead; + + private: + class forward_iterator + { + update_desc * m_pUpdate; + tree_node * m_pNode; + + public: + forward_iterator( retired_list const& l ) + : m_pUpdate( l.pUpdateHead ) + , m_pNode( l.pNodeHead ) + {} + + forward_iterator() + : m_pUpdate( nullptr ) + , m_pNode( nullptr ) + {} + + cds::urcu::retired_ptr operator *() + { + if ( m_pUpdate ) { + return cds::urcu::retired_ptr( reinterpret_cast( m_pUpdate ), free_update_desc_void ); + } + if ( m_pNode ) { + if ( m_pNode->is_leaf()) { + return cds::urcu::retired_ptr( reinterpret_cast( node_traits::to_value_ptr( static_cast( m_pNode ))), + free_leaf_node_void ); + } + else { + return 
cds::urcu::retired_ptr( reinterpret_cast( static_cast( m_pNode )), + free_internal_node_void ); + } + } + return cds::urcu::retired_ptr( nullptr, free_update_desc_void ); + } + + void operator ++() + { + if ( m_pUpdate ) { + m_pUpdate = m_pUpdate->pNextRetire; + return; + } + if ( m_pNode ) + m_pNode = m_pNode->m_pNextRetired; + } + + friend bool operator ==( forward_iterator const& i1, forward_iterator const& i2 ) + { + return i1.m_pUpdate == i2.m_pUpdate && i1.m_pNode == i2.m_pNode; + } + friend bool operator !=( forward_iterator const& i1, forward_iterator const& i2 ) + { + return !( i1 == i2 ); + } + }; + + public: + retired_list() + : pUpdateHead( nullptr ) + , pNodeHead( nullptr ) + {} + + ~retired_list() + { + gc::batch_retire( forward_iterator(*this), forward_iterator()); + } + + void push( update_desc * p ) + { + p->pNextRetire = pUpdateHead; + pUpdateHead = p; + } + + void push( tree_node * p ) + { + p->m_pNextRetired = pNodeHead; + pNodeHead = p; + } + }; + + void retire_node( tree_node * pNode, retired_list& rl ) const + { + if ( pNode->is_leaf()) { + assert( static_cast( pNode ) != &m_LeafInf1 ); + assert( static_cast( pNode ) != &m_LeafInf2 ); + } + else { + assert( static_cast( pNode ) != &m_Root ); + m_Stat.onInternalNodeDeleted(); + } + rl.push( pNode ); + } + + void retire_update_desc( update_desc * p, retired_list& rl, bool bDirect ) const + { + m_Stat.onUpdateDescDeleted(); + if ( bDirect ) + free_update_desc( p ); + else + rl.push( p ); + } + + void make_empty_tree() + { + m_Root.infinite_key( 2 ); + m_LeafInf1.infinite_key( 1 ); + m_LeafInf2.infinite_key( 2 ); + m_Root.m_pLeft.store( &m_LeafInf1, memory_model::memory_order_relaxed ); + m_Root.m_pRight.store( &m_LeafInf2, memory_model::memory_order_release ); + } + //@endcond + + public: + /// Default constructor + EllenBinTree() + { + static_assert( !std::is_same< key_extractor, opt::none >::value, "The key extractor option must be specified" ); + make_empty_tree(); + } + + /// Clears the tree 
+ ~EllenBinTree() + { + unsafe_clear(); + } + + /// Inserts new node + /** + The function inserts \p val in the tree if it does not contain + an item with key equal to \p val. + + The function applies RCU lock internally. + + Returns \p true if \p val is placed into the set, \p false otherwise. + */ + bool insert( value_type& val ) + { + return insert( val, []( value_type& ) {} ); + } + + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. + + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the tree + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this tree's item by concurrent threads. + The user-defined functor is called only if the inserting is success. + + RCU \p synchronize method can be called. RCU should not be locked. 
+ */ + template + bool insert( value_type& val, Func f ) + { + check_deadlock_policy::check(); + + unique_internal_node_ptr pNewInternal; + retired_list updRetire; + back_off bkoff; + + { + rcu_lock l; + + search_result res; + for ( ;; ) { + if ( search( res, val, node_compare())) { + if ( pNewInternal.get()) + m_Stat.onInternalNodeDeleted() ; // unique_internal_node_ptr deletes internal node + m_Stat.onInsertFailed(); + return false; + } + + if ( res.updGrandParent.bits() == update_desc::Clean && res.updParent.bits() == update_desc::Clean ) { + if ( !pNewInternal.get()) + pNewInternal.reset( alloc_internal_node()); + + if ( try_insert( val, pNewInternal.get(), res, updRetire )) { + f( val ); + pNewInternal.release() ; // internal node is linked into the tree and should not be deleted + break; + } + } + else + help( res.updParent, updRetire ); + + bkoff(); + m_Stat.onInsertRetry(); + } + } + + ++m_ItemCounter; + m_Stat.onInsertSuccess(); + + return true; + } + + /// Updates the node + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val is not found in the set, then \p val is inserted into the set + iff \p bAllowInsert is \p true. + Otherwise, the functor \p func is called with item found. + The functor \p func signature is: + \code + void func( bool bNew, value_type& item, value_type& val ); + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p %update() function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refer to the same thing. + + The functor can change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + RCU \p synchronize method can be called. RCU should not be locked. 
+ + Returns std::pair where \p first is \p true if operation is successful, + i.e. the node has been inserted or updated, + \p second is \p true if new item has been added or \p false if the item with \p key + already exists. + + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + std::pair update( value_type& val, Func func, bool bAllowInsert = true ) + { + check_deadlock_policy::check(); + + unique_internal_node_ptr pNewInternal; + retired_list updRetire; + back_off bkoff; + + { + rcu_lock l; + + search_result res; + for ( ;; ) { + if ( search( res, val, node_compare())) { + func( false, *node_traits::to_value_ptr( res.pLeaf ), val ); + if ( pNewInternal.get()) + m_Stat.onInternalNodeDeleted() ; // unique_internal_node_ptr deletes internal node + m_Stat.onUpdateExist(); + return std::make_pair( true, false ); + } + + if ( res.updGrandParent.bits() == update_desc::Clean && res.updParent.bits() == update_desc::Clean ) { + if ( !bAllowInsert ) + return std::make_pair( false, false ); + + if ( !pNewInternal.get()) + pNewInternal.reset( alloc_internal_node()); + + if ( try_insert( val, pNewInternal.get(), res, updRetire )) { + func( true, val, val ); + pNewInternal.release() ; // internal node is linked into the tree and should not be deleted + break; + } + } + else + help( res.updParent, updRetire ); + + bkoff(); + m_Stat.onUpdateRetry(); + } + } + + ++m_ItemCounter; + m_Stat.onUpdateNew(); + + return std::make_pair( true, true ); + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( value_type& val, Func func ) + { + return update( val, func, true ); + } + //@endcond + + /// Unlinks the item \p val from the tree + /** + The function searches the item \p val in the tree and unlink it from the tree + if it is found and is equal to \p val. + + Difference between \p erase() and \p %unlink() functions: \p %erase() finds a key + and deletes the item found. 
\p %unlink() finds an item by key and deletes it + only if \p val is an item of the tree, i.e. the pointer to item found + is equal to &val . + + RCU \p synchronize method can be called. RCU should not be locked. + + The \ref disposer specified in \p Traits class template parameter is called + by garbage collector \p GC asynchronously. + + The function returns \p true if success and \p false otherwise. + */ + bool unlink( value_type& val ) + { + return erase_( val, node_compare(), + []( value_type const& v, leaf_node const& n ) -> bool { return &v == node_traits::to_value_ptr( n ); }, + [](value_type const&) {} ); + } + + /// Deletes the item from the tree + /** \anchor cds_intrusive_EllenBinTree_rcu_erase + The function searches an item with key equal to \p key in the tree, + unlinks it from the tree, and returns \p true. + If the item with key equal to \p key is not found the function return \p false. + + Note the \p Traits::less and/or \p Traits::compare predicate should accept a parameter of type \p Q + that can be not the same as \p value_type. + + RCU \p synchronize method can be called. RCU should not be locked. + */ + template + bool erase( const Q& key ) + { + return erase_( key, node_compare(), + []( Q const&, leaf_node const& ) -> bool { return true; }, + [](value_type const&) {} ); + } + + /// Delete the item from the tree with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_EllenBinTree_rcu_erase "erase(Q const&)" + but \p pred predicate is used for key comparing. + \p Less has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_rcu_less + "Predicate requirements". + \p pred must imply the same element order as the comparator used for building the tree. 
+ */ + template + bool erase_with( const Q& key, Less pred ) + { + CDS_UNUSED( pred ); + typedef ellen_bintree::details::compare< + key_type, + value_type, + opt::details::make_comparator_from_less, + node_traits + > compare_functor; + + return erase_( key, compare_functor(), + []( Q const&, leaf_node const& ) -> bool { return true; }, + [](value_type const&) {} ); + } + + /// Deletes the item from the tree + /** \anchor cds_intrusive_EllenBinTree_rcu_erase_func + The function searches an item with key equal to \p key in the tree, + call \p f functor with item found, unlinks it from the tree, and returns \p true. + The \ref disposer specified in \p Traits class template parameter is called + by garbage collector \p GC asynchronously. + + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + + If the item with key equal to \p key is not found the function return \p false. + + Note the \p Traits::less and/or \p Traits::compare predicate should accept a parameter of type \p Q + that can be not the same as \p value_type. + + RCU \p synchronize method can be called. RCU should not be locked. + */ + template + bool erase( Q const& key, Func f ) + { + return erase_( key, node_compare(), + []( Q const&, leaf_node const& ) -> bool { return true; }, + f ); + } + + /// Delete the item from the tree with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_EllenBinTree_rcu_erase_func "erase(Q const&, Func)" + but \p pred predicate is used for key comparing. + \p Less has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_rcu_less + "Predicate requirements". + \p pred must imply the same element order as the comparator used for building the tree. 
+ */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + typedef ellen_bintree::details::compare< + key_type, + value_type, + opt::details::make_comparator_from_less, + node_traits + > compare_functor; + + return erase_( key, compare_functor(), + []( Q const&, leaf_node const& ) -> bool { return true; }, + f ); + } + + /// Extracts an item with minimal key from the tree + /** + The function searches an item with minimal key, unlinks it, and returns + \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the leftmost item. + If the tree is empty the function returns empty \p exempt_ptr. + + @note Due the concurrent nature of the tree, the function extracts nearly minimum key. + It means that the function gets leftmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. + So, the function returns the item with minimum key at the moment of tree traversing. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not call the disposer for the item found. + The disposer will be implicitly invoked when the returned object is destroyed or when + its \p release() member function is called. + */ + exempt_ptr extract_min() + { + return exempt_ptr( extract_min_()); + } + + /// Extracts an item with maximal key from the tree + /** + The function searches an item with maximal key, unlinks it, and returns + \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the rightmost item. + If the tree is empty the function returns empty \p exempt_ptr. + + @note Due the concurrent nature of the tree, the function extracts nearly maximal key. + It means that the function gets rightmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key great than rightmost item's key. + So, the function returns the item with maximum key at the moment of tree traversing. 
+ + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not call the disposer for the item found. + The disposer will be implicitly invoked when the returned object is destroyed or when + its \p release() member function is called. + */ + exempt_ptr extract_max() + { + return exempt_ptr( extract_max_()); + } + + /// Extracts an item from the tree + /** \anchor cds_intrusive_EllenBinTree_rcu_extract + The function searches an item with key equal to \p key in the tree, + unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to an item found. + If the item with the key equal to \p key is not found the function returns empty \p exempt_ptr. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not call the disposer for the item found. + The disposer will be implicitly invoked when the returned object is destroyed or when + its \p release() member function is called. + */ + template + exempt_ptr extract( Q const& key ) + { + return exempt_ptr( extract_( key, node_compare())); + } + + /// Extracts an item from the set using \p pred for searching + /** + The function is an analog of \p extract(Q const&) but \p pred is used for key compare. + \p Less has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_rcu_less + "predicate requirements". + \p pred must imply the same element order as the comparator used for building the tree. + */ + template + exempt_ptr extract_with( Q const& key, Less pred ) + { + return exempt_ptr( extract_with_( key, pred )); + } + + /// Checks whether the set contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + The function applies RCU lock internally. 
+ */ + template + bool contains( Q const& key ) const + { + rcu_lock l; + search_result res; + if ( search( res, key, node_compare())) { + m_Stat.onFindSuccess(); + return true; + } + + m_Stat.onFindFailed(); + return false; + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( Q const& key ) const + { + return contains( key ); + } + //@endcond + + /// Checks whether the set contains \p key using \p pred predicate for searching + /** + The function is similar to contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_rcu_less + "Predicate requirements". + \p Less must imply the same element order as the comparator used for building the set. + \p pred should accept arguments of type \p Q, \p key_type, \p value_type in any combination. + */ + template + bool contains( Q const& key, Less pred ) const + { + CDS_UNUSED( pred ); + typedef ellen_bintree::details::compare< + key_type, + value_type, + opt::details::make_comparator_from_less, + node_traits + > compare_functor; + + rcu_lock l; + search_result res; + if ( search( res, key, compare_functor())) { + m_Stat.onFindSuccess(); + return true; + } + m_Stat.onFindFailed(); + return false; + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( Q const& key, Less pred ) const + { + return contains( key, pred ); + } + //@endcond + + /// Finds the key \p key + /** @anchor cds_intrusive_EllenBinTree_rcu_find_func + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. + + The functor can change non-key fields of \p item. 
Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the tree \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The function applies RCU lock internally. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( Q& key, Func f ) const + { + return find_( key, f ); + } + //@cond + template + bool find( Q const& key, Func f ) const + { + return find_( key, f ); + } + //@endcond + + /// Finds the key \p key with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_EllenBinTree_rcu_find_func "find(Q&, Func)" + but \p pred is used for key comparison. + \p Less functor has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_rcu_less + "Predicate requirements". + \p pred must imply the same element order as the comparator used for building the tree. + */ + template + bool find_with( Q& key, Less pred, Func f ) const + { + return find_with_( key, pred, f ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) const + { + return find_with_( key, pred, f ); + } + //@endcond + + /// Finds \p key and return the item found + /** \anchor cds_intrusive_EllenBinTree_rcu_get + The function searches the item with key equal to \p key and returns the pointer to item found. + If \p key is not found it returns \p nullptr. + + RCU should be locked before call the function. + Returned pointer is valid while RCU is locked. + */ + template + value_type * get( Q const& key ) const + { + return get_( key, node_compare()); + } + + /// Finds \p key with \p pred predicate and return the item found + /** + The function is an analog of \ref cds_intrusive_EllenBinTree_rcu_get "get(Q const&)" + but \p pred is used for comparing the keys. 
+ + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the tree. + */ + template + value_type * get_with( Q const& key, Less pred ) const + { + CDS_UNUSED( pred ); + typedef ellen_bintree::details::compare< + key_type, + value_type, + opt::details::make_comparator_from_less, + node_traits + > compare_functor; + + return get_( key, compare_functor()); + } + + /// Checks if the tree is empty + bool empty() const + { + return m_Root.m_pLeft.load( memory_model::memory_order_relaxed )->is_leaf(); + } + + /// Clears the tree (thread safe, not atomic) + /** + The function unlink all items from the tree. + The function is thread safe but not atomic: in multi-threaded environment with parallel insertions + this sequence + \code + set.clear(); + assert( set.empty()); + \endcode + the assertion could be raised. + + For each leaf the \ref disposer will be called after unlinking. + + RCU \p synchronize method can be called. RCU should not be locked. + */ + void clear() + { + for ( exempt_ptr ep = extract_min(); !ep.empty(); ep = extract_min()) + ep.release(); + } + + /// Clears the tree (not thread safe) + /** + This function is not thread safe and may be called only when no other thread deals with the tree. + The function is used in the tree destructor. 
+ */ + void unsafe_clear() + { + rcu_lock l; + + while ( true ) { + internal_node * pParent = nullptr; + internal_node * pGrandParent = nullptr; + tree_node * pLeaf = const_cast( &m_Root ); + + // Get leftmost leaf + while ( pLeaf->is_internal()) { + pGrandParent = pParent; + pParent = static_cast( pLeaf ); + pLeaf = pParent->m_pLeft.load( memory_model::memory_order_relaxed ); + } + + if ( pLeaf->infinite_key()) { + // The tree is empty + return; + } + + // Remove leftmost leaf and its parent node + assert( pGrandParent ); + assert( pParent ); + assert( pLeaf->is_leaf()); + + pGrandParent->m_pLeft.store( pParent->m_pRight.load( memory_model::memory_order_relaxed ), memory_model::memory_order_relaxed ); + free_leaf_node( node_traits::to_value_ptr( static_cast( pLeaf ))); + free_internal_node( pParent ); + } + } + + /// Returns item count in the tree + /** + Only leaf nodes containing user data are counted. + + The value returned depends on item counter type provided by \p Traits template parameter. + If it is \p atomicity::empty_item_counter this function always returns 0. + + The function is not suitable for checking the tree emptiness, use \p empty() + member function for that. + */ + size_t size() const + { + return m_ItemCounter; + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + /// Checks internal consistency (not atomic, not thread-safe) + /** + The debugging function to check internal consistency of the tree. 
+ */ + bool check_consistency() const + { + return check_consistency( &m_Root ); + } + + protected: + //@cond + + bool check_consistency( internal_node const * pRoot ) const + { + tree_node * pLeft = pRoot->m_pLeft.load( atomics::memory_order_relaxed ); + tree_node * pRight = pRoot->m_pRight.load( atomics::memory_order_relaxed ); + assert( pLeft ); + assert( pRight ); + + if ( node_compare()( *pLeft, *pRoot ) < 0 + && node_compare()( *pRoot, *pRight ) <= 0 + && node_compare()( *pLeft, *pRight ) < 0 ) + { + bool bRet = true; + if ( pLeft->is_internal()) + bRet = check_consistency( static_cast( pLeft )); + assert( bRet ); + + if ( bRet && pRight->is_internal()) + bRet = bRet && check_consistency( static_cast( pRight )); + assert( bRet ); + + return bRet; + } + return false; + } + + void help( update_ptr /*pUpdate*/, retired_list& /*rl*/ ) + { + /* + switch ( pUpdate.bits()) { + case update_desc::IFlag: + help_insert( pUpdate.ptr()); + m_Stat.onHelpInsert(); + break; + case update_desc::DFlag: + //help_delete( pUpdate.ptr(), rl ); + //m_Stat.onHelpDelete(); + break; + case update_desc::Mark: + //help_marked( pUpdate.ptr()); + //m_Stat.onHelpMark(); + break; + } + */ + } + + void help_insert( update_desc * pOp ) + { + assert( gc::is_locked()); + + tree_node * pLeaf = static_cast( pOp->iInfo.pLeaf ); + if ( pOp->iInfo.bRightLeaf ) { + pOp->iInfo.pParent->m_pRight.compare_exchange_strong( pLeaf, static_cast( pOp->iInfo.pNew ), + memory_model::memory_order_release, atomics::memory_order_relaxed ); + } + else { + pOp->iInfo.pParent->m_pLeft.compare_exchange_strong( pLeaf, static_cast( pOp->iInfo.pNew ), + memory_model::memory_order_release, atomics::memory_order_relaxed ); + } + + update_ptr cur( pOp, update_desc::IFlag ); + pOp->iInfo.pParent->m_pUpdate.compare_exchange_strong( cur, pOp->iInfo.pParent->null_update_desc(), + memory_model::memory_order_release, atomics::memory_order_relaxed ); + } + + bool check_delete_precondition( search_result& res ) + { + assert( 
res.pGrandParent != nullptr ); + + return + static_cast( res.pGrandParent->get_child( res.bRightParent, memory_model::memory_order_relaxed )) == res.pParent + && static_cast( res.pParent->get_child( res.bRightLeaf, memory_model::memory_order_relaxed )) == res.pLeaf; + } + + bool help_delete( update_desc * pOp, retired_list& rl ) + { + assert( gc::is_locked()); + + update_ptr pUpdate( pOp->dInfo.pUpdateParent ); + update_ptr pMark( pOp, update_desc::Mark ); + if ( pOp->dInfo.pParent->m_pUpdate.compare_exchange_strong( pUpdate, pMark, + memory_model::memory_order_acquire, atomics::memory_order_relaxed )) + { + help_marked( pOp ); + retire_node( pOp->dInfo.pParent, rl ); + // For extract operations the leaf should NOT be disposed + if ( pOp->dInfo.bDisposeLeaf ) + retire_node( pOp->dInfo.pLeaf, rl ); + retire_update_desc( pOp, rl, false ); + + return true; + } + else if ( pUpdate == pMark ) { + // some other thread is processing help_marked() + help_marked( pOp ); + m_Stat.onHelpMark(); + return true; + } + else { + // pUpdate has been changed by CAS + help( pUpdate, rl ); + + // Undo grandparent dInfo + update_ptr pDel( pOp, update_desc::DFlag ); + if ( pOp->dInfo.pGrandParent->m_pUpdate.compare_exchange_strong( pDel, pOp->dInfo.pGrandParent->null_update_desc(), + memory_model::memory_order_release, atomics::memory_order_relaxed )) + { + retire_update_desc( pOp, rl, false ); + } + return false; + } + } + + void help_marked( update_desc * pOp ) + { + assert( gc::is_locked()); + + tree_node * p = pOp->dInfo.pParent; + if ( pOp->dInfo.bRightParent ) { + pOp->dInfo.pGrandParent->m_pRight.compare_exchange_strong( p, + pOp->dInfo.pParent->get_child( !pOp->dInfo.bRightLeaf, memory_model::memory_order_acquire ), + memory_model::memory_order_release, atomics::memory_order_relaxed ); + } + else { + pOp->dInfo.pGrandParent->m_pLeft.compare_exchange_strong( p, + pOp->dInfo.pParent->get_child( !pOp->dInfo.bRightLeaf, memory_model::memory_order_acquire ), + 
memory_model::memory_order_release, atomics::memory_order_relaxed ); + } + + update_ptr upd( pOp, update_desc::DFlag ); + pOp->dInfo.pGrandParent->m_pUpdate.compare_exchange_strong( upd, pOp->dInfo.pGrandParent->null_update_desc(), + memory_model::memory_order_release, atomics::memory_order_relaxed ); + } + + template + bool search( search_result& res, KeyValue const& key, Compare cmp ) const + { + assert( gc::is_locked()); + + internal_node * pParent; + internal_node * pGrandParent = nullptr; + tree_node * pLeaf; + update_ptr updParent; + update_ptr updGrandParent; + bool bRightLeaf; + bool bRightParent = false; + + int nCmp = 0; + + retry: + pParent = nullptr; + pLeaf = const_cast( &m_Root ); + updParent = nullptr; + bRightLeaf = false; + while ( pLeaf->is_internal()) { + pGrandParent = pParent; + pParent = static_cast( pLeaf ); + bRightParent = bRightLeaf; + updGrandParent = updParent; + updParent = pParent->m_pUpdate.load( memory_model::memory_order_acquire ); + + switch ( updParent.bits()) { + case update_desc::DFlag: + case update_desc::Mark: + m_Stat.onSearchRetry(); + goto retry; + } + + nCmp = cmp( key, *pParent ); + bRightLeaf = nCmp >= 0; + pLeaf = pParent->get_child( nCmp >= 0, memory_model::memory_order_acquire ); + } + + assert( pLeaf->is_leaf()); + nCmp = cmp( key, *static_cast(pLeaf)); + + res.pGrandParent = pGrandParent; + res.pParent = pParent; + res.pLeaf = static_cast( pLeaf ); + res.updParent = updParent; + res.updGrandParent = updGrandParent; + res.bRightParent = bRightParent; + res.bRightLeaf = bRightLeaf; + + return nCmp == 0; + } + + bool search_min( search_result& res ) const + { + assert( gc::is_locked()); + + internal_node * pParent; + internal_node * pGrandParent = nullptr; + tree_node * pLeaf; + update_ptr updParent; + update_ptr updGrandParent; + + retry: + pParent = nullptr; + pLeaf = const_cast( &m_Root ); + while ( pLeaf->is_internal()) { + pGrandParent = pParent; + pParent = static_cast( pLeaf ); + updGrandParent = updParent; + 
updParent = pParent->m_pUpdate.load( memory_model::memory_order_acquire ); + + switch ( updParent.bits()) { + case update_desc::DFlag: + case update_desc::Mark: + m_Stat.onSearchRetry(); + goto retry; + } + + pLeaf = pParent->m_pLeft.load( memory_model::memory_order_acquire ); + } + + if ( pLeaf->infinite_key()) + return false; + + res.pGrandParent = pGrandParent; + res.pParent = pParent; + assert( pLeaf->is_leaf()); + res.pLeaf = static_cast( pLeaf ); + res.updParent = updParent; + res.updGrandParent = updGrandParent; + res.bRightParent = false; + res.bRightLeaf = false; + + return true; + } + + bool search_max( search_result& res ) const + { + assert( gc::is_locked()); + + internal_node * pParent; + internal_node * pGrandParent = nullptr; + tree_node * pLeaf; + update_ptr updParent; + update_ptr updGrandParent; + bool bRightLeaf; + bool bRightParent = false; + + retry: + pParent = nullptr; + pLeaf = const_cast( &m_Root ); + bRightLeaf = false; + while ( pLeaf->is_internal()) { + pGrandParent = pParent; + pParent = static_cast( pLeaf ); + bRightParent = bRightLeaf; + updGrandParent = updParent; + updParent = pParent->m_pUpdate.load( memory_model::memory_order_acquire ); + + switch ( updParent.bits()) { + case update_desc::DFlag: + case update_desc::Mark: + m_Stat.onSearchRetry(); + goto retry; + } + + bRightLeaf = !pParent->infinite_key(); + pLeaf = pParent->get_child( bRightLeaf, memory_model::memory_order_acquire ); + } + + if ( pLeaf->infinite_key()) + return false; + + res.pGrandParent = pGrandParent; + res.pParent = pParent; + assert( pLeaf->is_leaf()); + res.pLeaf = static_cast( pLeaf ); + res.updParent = updParent; + res.updGrandParent = updGrandParent; + res.bRightParent = bRightParent; + res.bRightLeaf = bRightLeaf; + + return true; + } + + template + bool erase_( Q const& val, Compare cmp, Equal eq, Func f ) + { + check_deadlock_policy::check(); + + retired_list updRetire; + update_desc * pOp = nullptr; + search_result res; + back_off bkoff; + + { + 
rcu_lock l; + for ( ;; ) { + if ( !search( res, val, cmp ) || !eq( val, *res.pLeaf )) { + if ( pOp ) + retire_update_desc( pOp, updRetire, false ); + m_Stat.onEraseFailed(); + return false; + } + + if ( res.updGrandParent.bits() != update_desc::Clean ) + help( res.updGrandParent, updRetire ); + else if ( res.updParent.bits() != update_desc::Clean ) + help( res.updParent, updRetire ); + else { + if ( !pOp ) + pOp = alloc_update_desc(); + if ( check_delete_precondition( res )) { + pOp->dInfo.pGrandParent = res.pGrandParent; + pOp->dInfo.pParent = res.pParent; + pOp->dInfo.pLeaf = res.pLeaf; + pOp->dInfo.bDisposeLeaf = true; + pOp->dInfo.pUpdateParent = res.updParent.ptr(); + pOp->dInfo.bRightParent = res.bRightParent; + pOp->dInfo.bRightLeaf = res.bRightLeaf; + + update_ptr updGP( res.updGrandParent.ptr()); + if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), + memory_model::memory_order_acq_rel, atomics::memory_order_acquire )) + { + if ( help_delete( pOp, updRetire )) { + // res.pLeaf is not deleted yet since RCU is blocked + f( *node_traits::to_value_ptr( res.pLeaf )); + break; + } + pOp = nullptr; + } + else { + // updGP has been changed by CAS + help( updGP, updRetire ); + } + } + } + + bkoff(); + m_Stat.onEraseRetry(); + } + } + + --m_ItemCounter; + m_Stat.onEraseSuccess(); + return true; + } + + template + value_type * extract_with_( Q const& val, Less /*pred*/ ) + { + typedef ellen_bintree::details::compare< + key_type, + value_type, + opt::details::make_comparator_from_less, + node_traits + > compare_functor; + + return extract_( val, compare_functor()); + } + + template + value_type * extract_( Q const& val, Compare cmp ) + { + check_deadlock_policy::check(); + + retired_list updRetire; + update_desc * pOp = nullptr; + search_result res; + back_off bkoff; + value_type * pResult; + + { + rcu_lock l; + for ( ;; ) { + if ( !search( res, val, cmp )) { + if ( pOp ) + retire_update_desc( pOp, updRetire, false 
); + m_Stat.onEraseFailed(); + return nullptr; + } + + if ( res.updGrandParent.bits() != update_desc::Clean ) + help( res.updGrandParent, updRetire ); + else if ( res.updParent.bits() != update_desc::Clean ) + help( res.updParent, updRetire ); + else { + if ( !pOp ) + pOp = alloc_update_desc(); + if ( check_delete_precondition( res )) { + pOp->dInfo.pGrandParent = res.pGrandParent; + pOp->dInfo.pParent = res.pParent; + pOp->dInfo.pLeaf = res.pLeaf; + pOp->dInfo.bDisposeLeaf = false; + pOp->dInfo.pUpdateParent = res.updParent.ptr(); + pOp->dInfo.bRightParent = res.bRightParent; + pOp->dInfo.bRightLeaf = res.bRightLeaf; + + update_ptr updGP( res.updGrandParent.ptr()); + if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), + memory_model::memory_order_acq_rel, atomics::memory_order_acquire )) + { + if ( help_delete( pOp, updRetire )) { + pResult = node_traits::to_value_ptr( res.pLeaf ); + break; + } + pOp = nullptr; + } + else { + // updGP has been changed by CAS + help( updGP, updRetire ); + } + } + } + + bkoff(); + m_Stat.onEraseRetry(); + } + } + + --m_ItemCounter; + m_Stat.onEraseSuccess(); + return pResult; + } + + + value_type * extract_max_() + { + check_deadlock_policy::check(); + + retired_list updRetire; + update_desc * pOp = nullptr; + search_result res; + back_off bkoff; + value_type * pResult; + + { + rcu_lock l; + for ( ;; ) { + if ( !search_max( res )) { + // Tree is empty + if ( pOp ) + retire_update_desc( pOp, updRetire, false ); + m_Stat.onExtractMaxFailed(); + return nullptr; + } + + if ( res.updGrandParent.bits() != update_desc::Clean ) + help( res.updGrandParent, updRetire ); + else if ( res.updParent.bits() != update_desc::Clean ) + help( res.updParent, updRetire ); + else { + if ( !pOp ) + pOp = alloc_update_desc(); + if ( check_delete_precondition( res )) { + pOp->dInfo.pGrandParent = res.pGrandParent; + pOp->dInfo.pParent = res.pParent; + pOp->dInfo.pLeaf = res.pLeaf; + pOp->dInfo.bDisposeLeaf 
= false; + pOp->dInfo.pUpdateParent = res.updParent.ptr(); + pOp->dInfo.bRightParent = res.bRightParent; + pOp->dInfo.bRightLeaf = res.bRightLeaf; + + update_ptr updGP( res.updGrandParent.ptr()); + if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), + memory_model::memory_order_acq_rel, atomics::memory_order_acquire )) + { + if ( help_delete( pOp, updRetire )) { + pResult = node_traits::to_value_ptr( res.pLeaf ); + break; + } + pOp = nullptr; + } + else { + // updGP has been changed by CAS + help( updGP, updRetire ); + } + } + } + + bkoff(); + m_Stat.onExtractMaxRetry(); + } + } + + --m_ItemCounter; + m_Stat.onExtractMaxSuccess(); + return pResult; + } + + value_type * extract_min_() + { + check_deadlock_policy::check(); + + retired_list updRetire; + update_desc * pOp = nullptr; + search_result res; + back_off bkoff; + value_type * pResult; + + { + rcu_lock l; + for ( ;; ) { + if ( !search_min( res )) { + // Tree is empty + if ( pOp ) + retire_update_desc( pOp, updRetire, false ); + m_Stat.onExtractMinFailed(); + return nullptr; + } + + if ( res.updGrandParent.bits() != update_desc::Clean ) + help( res.updGrandParent, updRetire ); + else if ( res.updParent.bits() != update_desc::Clean ) + help( res.updParent, updRetire ); + else { + if ( !pOp ) + pOp = alloc_update_desc(); + if ( check_delete_precondition( res )) { + pOp->dInfo.pGrandParent = res.pGrandParent; + pOp->dInfo.pParent = res.pParent; + pOp->dInfo.pLeaf = res.pLeaf; + pOp->dInfo.bDisposeLeaf = false; + pOp->dInfo.pUpdateParent = res.updParent.ptr(); + pOp->dInfo.bRightParent = res.bRightParent; + pOp->dInfo.bRightLeaf = res.bRightLeaf; + + update_ptr updGP( res.updGrandParent.ptr()); + if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), + memory_model::memory_order_acq_rel, atomics::memory_order_acquire )) + { + if ( help_delete( pOp, updRetire )) { + pResult = node_traits::to_value_ptr( res.pLeaf ); + 
break; + } + pOp = nullptr; + } + else { + // updGP has been changed by CAS + help( updGP, updRetire ); + } + } + } + + bkoff(); + m_Stat.onExtractMinRetry(); + } + } + + --m_ItemCounter; + m_Stat.onExtractMinSuccess(); + return pResult; + } + + template + bool find_with_( Q& val, Less /*pred*/, Func f ) const + { + typedef ellen_bintree::details::compare< + key_type, + value_type, + opt::details::make_comparator_from_less, + node_traits + > compare_functor; + + rcu_lock l; + search_result res; + if ( search( res, val, compare_functor())) { + assert( res.pLeaf ); + f( *node_traits::to_value_ptr( res.pLeaf ), val ); + + m_Stat.onFindSuccess(); + return true; + } + + m_Stat.onFindFailed(); + return false; + } + + template + bool find_( Q& key, Func f ) const + { + rcu_lock l; + search_result res; + if ( search( res, key, node_compare())) { + assert( res.pLeaf ); + f( *node_traits::to_value_ptr( res.pLeaf ), key ); + + m_Stat.onFindSuccess(); + return true; + } + + m_Stat.onFindFailed(); + return false; + } + + template + value_type * get_( Q const& key, Compare cmp ) const + { + assert( gc::is_locked()); + + search_result res; + if ( search( res, key, cmp )) { + m_Stat.onFindSuccess(); + return node_traits::to_value_ptr( res.pLeaf ); + } + + m_Stat.onFindFailed(); + return nullptr; + } + + + bool try_insert( value_type& val, internal_node * pNewInternal, search_result& res, retired_list& updRetire ) + { + assert( gc::is_locked()); + assert( res.updParent.bits() == update_desc::Clean ); + + // check search result + if ( static_cast( res.pParent->get_child( res.bRightLeaf, memory_model::memory_order_relaxed )) == res.pLeaf ) { + leaf_node * pNewLeaf = node_traits::to_node_ptr( val ); + + int nCmp = node_compare()( val, *res.pLeaf ); + if ( nCmp < 0 ) { + if ( res.pGrandParent ) { + pNewInternal->infinite_key( 0 ); + key_extractor()( pNewInternal->m_Key, *node_traits::to_value_ptr( res.pLeaf )); + assert( !res.pLeaf->infinite_key()); + } + else { + assert( 
res.pLeaf->infinite_key() == tree_node::key_infinite1 ); + pNewInternal->infinite_key( 1 ); + } + pNewInternal->m_pLeft.store( static_cast(pNewLeaf), memory_model::memory_order_relaxed ); + pNewInternal->m_pRight.store( static_cast(res.pLeaf), memory_model::memory_order_relaxed ); + } + else { + assert( !res.pLeaf->is_internal()); + pNewInternal->infinite_key( 0 ); + + key_extractor()( pNewInternal->m_Key, val ); + pNewInternal->m_pLeft.store( static_cast(res.pLeaf), memory_model::memory_order_relaxed ); + pNewInternal->m_pRight.store( static_cast(pNewLeaf), memory_model::memory_order_relaxed ); + assert( !res.pLeaf->infinite_key()); + } + + update_desc * pOp = alloc_update_desc(); + + pOp->iInfo.pParent = res.pParent; + pOp->iInfo.pNew = pNewInternal; + pOp->iInfo.pLeaf = res.pLeaf; + pOp->iInfo.bRightLeaf = res.bRightLeaf; + + update_ptr updCur( res.updParent.ptr()); + if ( res.pParent->m_pUpdate.compare_exchange_strong( updCur, update_ptr( pOp, update_desc::IFlag ), + memory_model::memory_order_acq_rel, atomics::memory_order_acquire )) + { + // do insert + help_insert( pOp ); + retire_update_desc( pOp, updRetire, false ); + return true; + } + else { + // updCur has been updated by CAS + help( updCur, updRetire ); + retire_update_desc( pOp, updRetire, true ); + } + } + return false; + } + + //@endcond + }; + +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_ELLEN_BINTREE_RCU_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/fcqueue.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/fcqueue.h new file mode 100644 index 0000000..4997a00 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/fcqueue.h @@ -0,0 +1,411 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + 
Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_INTRUSIVE_FCQUEUE_H +#define CDSLIB_INTRUSIVE_FCQUEUE_H + +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + /// \p FCQueue related definitions + namespace fcqueue { + + /// \p FCQueue internal statistics + template + struct stat: public cds::algo::flat_combining::stat + { + typedef cds::algo::flat_combining::stat flat_combining_stat; ///< Flat-combining statistics + typedef typename flat_combining_stat::counter_type counter_type; ///< Counter type + + counter_type m_nEnqueue ; ///< Count of push operations + counter_type m_nDequeue ; ///< Count of success pop operations + counter_type m_nFailedDeq ; ///< Count of failed pop operations (pop from empty queue) + counter_type m_nCollided ; ///< How many pairs of push/pop were collided, if elimination is enabled + + //@cond + void onEnqueue() { ++m_nEnqueue; } + void onDequeue( bool bFailed ) { if ( bFailed ) ++m_nFailedDeq; else ++m_nDequeue; } + void onCollide() { ++m_nCollided; } + //@endcond + }; + + /// FCQueue dummy statistics, no overhead + struct empty_stat: public cds::algo::flat_combining::empty_stat + { + //@cond + void onEnqueue() {} + void onDequeue(bool) {} + void onCollide() {} + //@endcond + }; + + /// \p FCQueue type traits + struct traits: public cds::algo::flat_combining::traits + { + typedef cds::intrusive::opt::v::empty_disposer disposer ; ///< Disposer to erase removed elements. Used only in \p FCQueue::clear() function + typedef empty_stat stat; ///< Internal statistics + static constexpr const bool enable_elimination = false; ///< Enable \ref cds_elimination_description "elimination" + }; + + /// Metafunction converting option list to traits + /** + \p Options are: + - any \p cds::algo::flat_combining::make_traits options + - \p opt::disposer - the functor used to dispose removed items. Default is \p opt::intrusive::v::empty_disposer. + This option is used only in \p FCQueue::clear() function. 
+ - \p opt::stat - internal statistics, possible type: \p fcqueue::stat, \p fcqueue::empty_stat (the default) + - \p opt::enable_elimination - enable/disable operation \ref cds_elimination_description "elimination" + By default, the elimination is disabled (\p false) + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + ,Options... + >::type type; +# endif + }; + } // namespace fcqueue + + /// Flat-combining intrusive queue + /** + @ingroup cds_intrusive_queue + @ingroup cds_flat_combining_intrusive + + \ref cds_flat_combining_description "Flat combining" sequential intrusive queue. + + Template parameters: + - \p T - a value type stored in the queue + - \p Container - sequential intrusive container with \p push_back and \p pop_front functions. + Default is \p boost::intrusive::list + - \p Traits - type traits of flat combining, default is \p fcqueue::traits. + \p fcqueue::make_traits metafunction can be used to construct \p %fcqueue::traits specialization + */ + template + ,typename Traits = fcqueue::traits + > + class FCQueue +#ifndef CDS_DOXYGEN_INVOKED + : public cds::algo::flat_combining::container +#endif + { + public: + typedef T value_type; ///< Value type + typedef Container container_type; ///< Sequential container type + typedef Traits traits; ///< Queue traits + + typedef typename traits::disposer disposer; ///< The disposer functor. 
The disposer is used only in \ref clear() function + typedef typename traits::stat stat; ///< Internal statistics type + static constexpr const bool c_bEliminationEnabled = traits::enable_elimination; ///< \p true if elimination is enabled + + protected: + //@cond + /// Queue operation IDs + enum fc_operation { + op_enq = cds::algo::flat_combining::req_Operation, ///< Enqueue + op_deq, ///< Dequeue + op_clear, ///< Clear + op_clear_and_dispose ///< Clear and dispose + }; + + /// Flat combining publication list record + struct fc_record: public cds::algo::flat_combining::publication_record + { + value_type * pVal; ///< Value to enqueue or dequeue + bool bEmpty; ///< \p true if the queue is empty + }; + //@endcond + + /// Flat combining kernel + typedef cds::algo::flat_combining::kernel< fc_record, traits > fc_kernel; + + protected: + //@cond + mutable fc_kernel m_FlatCombining; + container_type m_Queue; + //@endcond + + public: + /// Initializes empty queue object + FCQueue() + {} + + /// Initializes empty queue object and gives flat combining parameters + FCQueue( + unsigned int nCompactFactor ///< Flat combining: publication list compacting factor + ,unsigned int nCombinePassCount ///< Flat combining: number of combining passes for combiner thread + ) + : m_FlatCombining( nCompactFactor, nCombinePassCount ) + {} + + /// Inserts a new element at the end of the queue + /** + The function always returns \p true. 
+ */ + bool enqueue( value_type& val ) + { + auto pRec = m_FlatCombining.acquire_record(); + pRec->pVal = &val; + + constexpr_if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_enq, pRec, *this ); + else + m_FlatCombining.combine( op_enq, pRec, *this ); + + assert( pRec->is_done()); + m_FlatCombining.release_record( pRec ); + m_FlatCombining.internal_statistics().onEnqueue(); + return true; + } + + /// Inserts a new element at the end of the queue (a synonym for \ref enqueue) + bool push( value_type& val ) + { + return enqueue( val ); + } + + /// Removes the next element from the queue + /** + If the queue is empty the function returns \p nullptr + */ + value_type * dequeue() + { + auto pRec = m_FlatCombining.acquire_record(); + pRec->pVal = nullptr; + + constexpr_if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_deq, pRec, *this ); + else + m_FlatCombining.combine( op_deq, pRec, *this ); + + assert( pRec->is_done()); + m_FlatCombining.release_record( pRec ); + + m_FlatCombining.internal_statistics().onDequeue( pRec->bEmpty ); + return pRec->pVal; + } + + /// Removes the next element from the queue (a synonym for \ref dequeue) + value_type * pop() + { + return dequeue(); + } + + /// Clears the queue + /** + If \p bDispose is \p true, the disposer provided in \p Traits class' template parameter + will be called for each removed element. + */ + void clear( bool bDispose = false ) + { + auto pRec = m_FlatCombining.acquire_record(); + + constexpr_if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( bDispose ? op_clear_and_dispose : op_clear, pRec, *this ); + else + m_FlatCombining.combine( bDispose ? op_clear_and_dispose : op_clear, pRec, *this ); + + assert( pRec->is_done()); + m_FlatCombining.release_record( pRec ); + } + + /// Exclusive access to underlying queue object + /** + The functor \p f can do any operation with underlying \p container_type in exclusive mode. + For example, you can iterate over the queue. 
+ \p Func signature is: + \code + void f( container_type& queue ); + \endcode + */ + template + void apply( Func f ) + { + auto& queue = m_Queue; + m_FlatCombining.invoke_exclusive( [&queue, &f]() { f( queue ); } ); + } + + /// Exclusive access to underlying queue object + /** + The functor \p f can do any operation with underlying \p container_type in exclusive mode. + For example, you can iterate over the queue. + \p Func signature is: + \code + void f( container_type const& queue ); + \endcode + */ + template + void apply( Func f ) const + { + auto const& queue = m_Queue; + m_FlatCombining.invoke_exclusive( [&queue, &f]() { f( queue ); } ); + } + + /// Returns the number of elements in the queue. + /** + Note that size() == 0 is not mean that the queue is empty because + combining record can be in process. + To check emptiness use \ref empty function. + */ + size_t size() const + { + return m_Queue.size(); + } + + /// Checks if the queue is empty + /** + If the combining is in process the function waits while it is done. + */ + bool empty() const + { + bool bRet = false; + auto const& queue = m_Queue; + m_FlatCombining.invoke_exclusive([&queue, &bRet]() { bRet = queue.empty(); }); + return bRet; + } + + /// Internal statistics + stat const& statistics() const + { + return m_FlatCombining.statistics(); + } + + public: // flat combining cooperation, not for direct use! + //@cond + /// Flat combining supporting function. Do not call it directly! + /** + The function is called by \ref cds::algo::flat_combining::kernel "flat combining kernel" + object if the current thread becomes a combiner. Invocation of the function means that + the queue should perform an action recorded in \p pRec. 
+ */ + void fc_apply( fc_record * pRec ) + { + assert( pRec ); + + // this function is called under FC mutex, so switch TSan off + // All TSan warnings are false positive + //CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN; + + switch ( pRec->op()) { + case op_enq: + assert( pRec->pVal ); + m_Queue.push_back( *(pRec->pVal )); + break; + case op_deq: + pRec->bEmpty = m_Queue.empty(); + if ( !pRec->bEmpty ) { + pRec->pVal = &m_Queue.front(); + m_Queue.pop_front(); + } + break; + case op_clear: + m_Queue.clear(); + break; + case op_clear_and_dispose: + m_Queue.clear_and_dispose( disposer()); + break; + default: + assert(false); + break; + } + //CDS_TSAN_ANNOTATE_IGNORE_RW_END; + } + + /// Batch-processing flat combining + void fc_process( typename fc_kernel::iterator itBegin, typename fc_kernel::iterator itEnd ) + { + // this function is called under FC mutex, so switch TSan off + // All TSan warnings are false positive + //CDS_TSAN_ANNOTATE_IGNORE_RW_BEGIN; + + typedef typename fc_kernel::iterator fc_iterator; + for ( fc_iterator it = itBegin, itPrev = itEnd; it != itEnd; ++it ) { + switch ( it->op( atomics::memory_order_acquire )) { + case op_enq: + case op_deq: + if ( m_Queue.empty()) { + if ( itPrev != itEnd && collide( *itPrev, *it )) + itPrev = itEnd; + else + itPrev = it; + } + break; + } + } + //CDS_TSAN_ANNOTATE_IGNORE_RW_END; + } + //@endcond + + private: + //@cond + bool collide( fc_record& rec1, fc_record& rec2 ) + { + assert( m_Queue.empty()); + + switch ( rec1.op()) { + case op_enq: + if ( rec2.op() == op_deq ) { + assert(rec1.pVal); + rec2.pVal = rec1.pVal; + rec2.bEmpty = false; + m_FlatCombining.operation_done( rec1 ); + m_FlatCombining.operation_done( rec2 ); + m_FlatCombining.internal_statistics().onCollide(); + return true; + } + break; + case op_deq: + if ( rec2.op() == op_enq ) { + assert(rec2.pVal); + rec1.pVal = rec2.pVal; + rec1.bEmpty = false; + m_FlatCombining.operation_done( rec1 ); + m_FlatCombining.operation_done( rec2 ); + 
m_FlatCombining.internal_statistics().onCollide(); + return true; + } + break; + } + return false; + } + //@endcond + }; + +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_FCQUEUE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/fcstack.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/fcstack.h new file mode 100644 index 0000000..87aca2b --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/fcstack.h @@ -0,0 +1,384 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_FCSTACK_H +#define CDSLIB_INTRUSIVE_FCSTACK_H + +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + /// FCStack related definitions + namespace fcstack { + + /// FCStack internal statistics + template + struct stat: public cds::algo::flat_combining::stat + { + typedef cds::algo::flat_combining::stat flat_combining_stat; ///< Flat-combining statistics + typedef typename flat_combining_stat::counter_type counter_type; ///< Counter type + + counter_type m_nPush ; ///< Count of push operations + counter_type m_nPop ; ///< Count of success pop operations + counter_type m_nFailedPop; ///< Count of failed pop operations (pop from empty stack) + counter_type m_nCollided ; ///< How many pairs of push/pop were collided, if elimination is enabled + + //@cond + void onPush() { ++m_nPush; } + void onPop( bool bFailed ) { if ( bFailed ) ++m_nFailedPop; else ++m_nPop; } + void onCollide() { ++m_nCollided; } + //@endcond + }; + + /// FCStack dummy statistics, no overhead + struct empty_stat: public cds::algo::flat_combining::empty_stat + { + //@cond + void onPush() {} + void onPop(bool) {} + void onCollide() {} + //@endcond + }; + + /// FCStack type traits + struct traits: public cds::algo::flat_combining::traits + { + typedef cds::intrusive::opt::v::empty_disposer disposer ; ///< Disposer to erase removed elements. 
Used only in \p FCStack::clear() function + typedef empty_stat stat; ///< Internal statistics + static constexpr const bool enable_elimination = false; ///< Enable \ref cds_elimination_description "elimination" + }; + + /// Metafunction converting option list to traits + /** + \p Options are: + - any \p cds::algo::flat_combining::make_traits options + - \p opt::disposer - the functor used for dispose removed items. Default is \p opt::intrusive::v::empty_disposer. + This option is used only in \p FCStack::clear() function. + - \p opt::stat - internal statistics, possible type: \p fcstack::stat, \p fcstack::empty_stat (the default) + - \p opt::enable_elimination - enable/disable operation \ref cds_elimination_description "elimination" + By default, the elimination is disabled. + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + ,Options... + >::type type; +# endif + }; + + } // namespace fcstack + + /// Flat-combining intrusive stack + /** + @ingroup cds_intrusive_stack + @ingroup cds_flat_combining_intrusive + + \ref cds_flat_combining_description "Flat combining" sequential intrusive stack. + + Template parameters: + - \p T - a value type stored in the stack + - \p Container - sequential intrusive container with \p push_front and \p pop_front functions. + Possible containers are \p boost::intrusive::slist (the default), \p boost::inrtrusive::list + - \p Traits - type traits of flat combining, default is \p fcstack::traits. 
+ \p fcstack::make_traits metafunction can be used to construct specialized \p %traits + */ + template + ,typename Traits = fcstack::traits + > + class FCStack +#ifndef CDS_DOXYGEN_INVOKED + : public cds::algo::flat_combining::container +#endif + { + public: + typedef T value_type; ///< Value type + typedef Container container_type; ///< Sequential container type + typedef Traits traits; ///< Stack traits + + typedef typename traits::disposer disposer; ///< The disposer functor. The disposer is used only in \ref clear() function + typedef typename traits::stat stat; ///< Internal statistics type + static constexpr const bool c_bEliminationEnabled = traits::enable_elimination; ///< \p true if elimination is enabled + + protected: + //@cond + /// Stack operation IDs + enum fc_operation { + op_push = cds::algo::flat_combining::req_Operation, ///< Push + op_pop, ///< Pop + op_clear, ///< Clear + op_clear_and_dispose ///< Clear and dispose + }; + + /// Flat combining publication list record + struct fc_record: public cds::algo::flat_combining::publication_record + { + value_type * pVal; ///< Value to push or pop + bool bEmpty; ///< \p true if the stack is empty + }; + //@endcond + + /// Flat combining kernel + typedef cds::algo::flat_combining::kernel< fc_record, traits > fc_kernel; + + protected: + //@cond + mutable fc_kernel m_FlatCombining; + container_type m_Stack; + //@endcond + + public: + /// Initializes empty stack object + FCStack() + {} + + /// Initializes empty stack object and gives flat combining parameters + FCStack( + unsigned int nCompactFactor ///< Flat combining: publication list compacting factor + ,unsigned int nCombinePassCount ///< Flat combining: number of combining passes for combiner thread + ) + : m_FlatCombining( nCompactFactor, nCombinePassCount ) + {} + + /// Inserts a new element at the top of stack + /** + The content of the new element initialized to a copy of \p val. 
+ */ + bool push( value_type& val ) + { + auto pRec = m_FlatCombining.acquire_record(); + pRec->pVal = &val; + + constexpr_if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_push, pRec, *this ); + else + m_FlatCombining.combine( op_push, pRec, *this ); + + assert( pRec->is_done()); + m_FlatCombining.release_record( pRec ); + m_FlatCombining.internal_statistics().onPush(); + return true; + } + + /// Removes the element on top of the stack + value_type * pop() + { + auto pRec = m_FlatCombining.acquire_record(); + pRec->pVal = nullptr; + + constexpr_if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( op_pop, pRec, *this ); + else + m_FlatCombining.combine( op_pop, pRec, *this ); + + assert( pRec->is_done()); + m_FlatCombining.release_record( pRec ); + + m_FlatCombining.internal_statistics().onPop( pRec->bEmpty ); + return pRec->pVal; + } + + /// Clears the stack + /** + If \p bDispose is \p true, the disposer provided in \p Traits class' template parameter + will be called for each removed element. + */ + void clear( bool bDispose = false ) + { + auto pRec = m_FlatCombining.acquire_record(); + + constexpr_if ( c_bEliminationEnabled ) + m_FlatCombining.batch_combine( bDispose ? op_clear_and_dispose : op_clear, pRec, *this ); + else + m_FlatCombining.combine( bDispose ? op_clear_and_dispose : op_clear, pRec, *this ); + + assert( pRec->is_done()); + m_FlatCombining.release_record( pRec ); + } + + /// Exclusive access to underlying stack object + /** + The functor \p f can do any operation with underlying \p container_type in exclusive mode. + For example, you can iterate over the stack. 
+ \p Func signature is: + \code + void f( container_type& stack ); + \endcode + */ + template + void apply( Func f ) + { + auto& stack = m_Stack; + m_FlatCombining.invoke_exclusive( [&stack, &f]() { f( stack ); } ); + } + + /// Exclusive access to underlying stack object + /** + The functor \p f can do any operation with underlying \p container_type in exclusive mode. + For example, you can iterate over the stack. + \p Func signature is: + \code + void f( container_type const& stack ); + \endcode + */ + template + void apply( Func f ) const + { + auto const& stack = m_Stack; + m_FlatCombining.invoke_exclusive( [&stack, &f]() { f( stack ); } ); + } + + /// Returns the number of elements in the stack. + /** + Note that size() == 0 is not mean that the stack is empty because + combining record can be in process. + To check emptiness use \ref empty function. + */ + size_t size() const + { + return m_Stack.size(); + } + + /// Checks if the stack is empty + /** + If the combining is in process the function waits while it is done. + */ + bool empty() const + { + bool bRet = false; + auto const& stack = m_Stack; + m_FlatCombining.invoke_exclusive( [&stack, &bRet]() { bRet = stack.empty(); } ); + return bRet; + } + + /// Internal statistics + stat const& statistics() const + { + return m_FlatCombining.statistics(); + } + + public: // flat combining cooperation, not for direct use! + //@cond + /// Flat combining supporting function. Do not call it directly! + /** + The function is called by \ref cds::algo::flat_combining::kernel "flat combining kernel" + object if the current thread becomes a combiner. Invocation of the function means that + the stack should perform an action recorded in \p pRec. 
+ */ + void fc_apply( fc_record* pRec ) + { + assert( pRec ); + + switch ( pRec->op()) { + case op_push: + assert( pRec->pVal ); + m_Stack.push_front( *(pRec->pVal )); + break; + case op_pop: + pRec->bEmpty = m_Stack.empty(); + if ( !pRec->bEmpty ) { + pRec->pVal = &m_Stack.front(); + m_Stack.pop_front(); + } + break; + case op_clear: + m_Stack.clear(); + break; + case op_clear_and_dispose: + m_Stack.clear_and_dispose( disposer()); + break; + default: + assert(false); + break; + } + } + + /// Batch-processing flat combining + void fc_process( typename fc_kernel::iterator itBegin, typename fc_kernel::iterator itEnd ) + { + typedef typename fc_kernel::iterator fc_iterator; + for ( fc_iterator it = itBegin, itPrev = itEnd; it != itEnd; ++it ) { + switch ( it->op( atomics::memory_order_acquire )) { + case op_push: + case op_pop: + if ( itPrev != itEnd && collide( *itPrev, *it )) + itPrev = itEnd; + else + itPrev = it; + break; + } + } + } + //@endcond + + private: + //@cond + bool collide( fc_record& rec1, fc_record& rec2 ) + { + switch ( rec1.op()) { + case op_push: + if ( rec2.op() == op_pop ) { + assert(rec1.pVal); + rec2.pVal = rec1.pVal; + rec2.bEmpty = false; + m_FlatCombining.operation_done( rec1 ); + m_FlatCombining.operation_done( rec2 ); + m_FlatCombining.internal_statistics().onCollide(); + return true; + } + break; + case op_pop: + if ( rec2.op() == op_push ) { + assert(rec2.pVal); + rec1.pVal = rec2.pVal; + rec1.bEmpty = false; + m_FlatCombining.operation_done( rec1 ); + m_FlatCombining.operation_done( rec2 ); + m_FlatCombining.internal_statistics().onCollide(); + return true; + } + break; + } + return false; + } + //@endcond + + }; + +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_FCSTACK_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/feldman_hashset_dhp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/feldman_hashset_dhp.h new file mode 100644 index 0000000..a5a4300 --- /dev/null 
+++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/feldman_hashset_dhp.h @@ -0,0 +1,37 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_INTRUSIVE_FELDMAN_HASHSET_DHP_H +#define CDSLIB_INTRUSIVE_FELDMAN_HASHSET_DHP_H + +#include +#include + +#endif // #ifndef CDSLIB_INTRUSIVE_FELDMAN_HASHSET_DHP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/feldman_hashset_hp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/feldman_hashset_hp.h new file mode 100644 index 0000000..a9894a6 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/feldman_hashset_hp.h @@ -0,0 +1,37 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_FELDMAN_HASHSET_HP_H +#define CDSLIB_INTRUSIVE_FELDMAN_HASHSET_HP_H + +#include +#include + +#endif // #ifndef CDSLIB_INTRUSIVE_FELDMAN_HASHSET_HP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/feldman_hashset_rcu.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/feldman_hashset_rcu.h new file mode 100644 index 0000000..9edd8f4 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/feldman_hashset_rcu.h @@ -0,0 +1,1243 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_FELDMAN_HASHSET_RCU_H +#define CDSLIB_INTRUSIVE_FELDMAN_HASHSET_RCU_H + +#include // std::ref +#include // std::iterator_traits +#include + +#include +#include +#include +#include +#include + + +namespace cds { namespace intrusive { + /// Intrusive hash set based on multi-level array, \ref cds_urcu_desc "RCU" specialization + /** @ingroup cds_intrusive_map + @anchor cds_intrusive_FeldmanHashSet_rcu + + Source: + - [2013] Steven Feldman, Pierre LaBorde, Damian Dechev "Concurrent Multi-level Arrays: + Wait-free Extensible Hash Maps" + + See algorithm short description @ref cds_intrusive_FeldmanHashSet_hp "here" + + @note Two important things you should keep in mind when you're using \p %FeldmanHashSet: + - all keys must be fixed-size. It means that you cannot use \p std::string as a key for \p %FeldmanHashSet. + Instead, for the strings you should use well-known hashing algorithms like SHA1, SHA2, + MurmurHash, CityHash + or its successor FarmHash and so on, which + converts variable-length strings to fixed-length bit-strings, and use that hash as a key in \p %FeldmanHashSet. + - \p %FeldmanHashSet uses a perfect hashing. 
It means that if two different keys, for example, of type \p std::string, + have identical hash then you cannot insert both that keys in the set. \p %FeldmanHashSet does not maintain the key, + it maintains its fixed-size hash value. + + Template parameters: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p T - a value type to be stored in the set + - \p Traits - type traits, the structure based on \p feldman_hashset::traits or result of \p feldman_hashset::make_traits metafunction. + \p Traits is the mandatory argument because it has one mandatory type - an @ref feldman_hashset::traits::hash_accessor "accessor" + to hash value of \p T. The set algorithm does not calculate that hash value. + + @note Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. + + The set supports @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional thread-safe iterators" + + with some restrictions. 
+ */ + template < + class RCU, + class T, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = feldman_hashset::traits +#else + class Traits +#endif + > + class FeldmanHashSet< cds::urcu::gc< RCU >, T, Traits >: protected feldman_hashset::multilevel_array + { + //@cond + typedef feldman_hashset::multilevel_array base_class; + //@endcond + + public: + typedef cds::urcu::gc< RCU > gc; ///< RCU garbage collector + typedef T value_type; ///< type of value stored in the set + typedef Traits traits; ///< Traits template parameter + + typedef typename traits::hash_accessor hash_accessor; ///< Hash accessor functor + typedef typename base_class::hash_type hash_type; ///< Hash type deduced from \p hash_accessor return type + typedef typename traits::disposer disposer; ///< data node disposer + typedef typename base_class::hash_comparator hash_comparator; ///< hash compare functor based on \p traits::compare and \p traits::less options + + typedef typename traits::item_counter item_counter; ///< Item counter type + typedef typename traits::node_allocator node_allocator; ///< Array node allocator + typedef typename traits::memory_model memory_model; ///< Memory model + typedef typename traits::back_off back_off; ///< Backoff strategy + typedef typename traits::stat stat; ///< Internal statistics type + typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy + typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock + static constexpr const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions does not require external locking + + using exempt_ptr = cds::urcu::exempt_ptr< gc, value_type, value_type, disposer, void >; ///< pointer to extracted node + + /// The size of hash_type in bytes, see \p feldman_hashset::traits::hash_size for explanation + static constexpr size_t const c_hash_size = base_class::c_hash_size; + + //@cond + typedef feldman_hashset::level_statistics level_statistics; + //@endcond + + protected: + 
//@cond + typedef typename base_class::node_ptr node_ptr; + typedef typename base_class::atomic_node_ptr atomic_node_ptr; + typedef typename base_class::array_node array_node; + typedef typename base_class::traverse_data traverse_data; + + using base_class::to_array; + using base_class::to_node; + using base_class::stats; + using base_class::head; + using base_class::metrics; + + typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy; + //@endcond + + private: + //@cond + item_counter m_ItemCounter; ///< Item counter + //@endcond + + public: + /// Creates empty set + /** + @param head_bits - 2head_bits specifies the size of head array, minimum is 4. + @param array_bits - 2array_bits specifies the size of array node, minimum is 2. + + Equation for \p head_bits and \p array_bits: + \code + sizeof(hash_type) * 8 == head_bits + N * array_bits + \endcode + where \p N is multi-level array depth. + */ + FeldmanHashSet(size_t head_bits = 8, size_t array_bits = 4) + : base_class(head_bits, array_bits) + {} + + /// Destructs the set and frees all data + ~FeldmanHashSet() + { + clear(); + } + + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with that hash. + + Returns \p true if \p val is placed into the set, \p false otherwise. + + The function locks RCU internally. + */ + bool insert( value_type& val ) + { + return insert( val, [](value_type&) {} ); + } + + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. + + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. + + The user-defined functor is called only if the inserting is success. + + The function locks RCU internally. 
+ @warning See \ref cds_intrusive_item_creating "insert item troubleshooting". + */ + template + bool insert( value_type& val, Func f ) + { + hash_type const& hash = hash_accessor()( val ); + traverse_data pos( hash, *this ); + hash_comparator cmp; + + while (true) { + rcu_lock rcuLock; + + node_ptr slot = base_class::traverse( pos ); + assert(slot.bits() == 0); + + if ( pos.pArr->nodes[pos.nSlot].load(memory_model::memory_order_acquire) == slot) { + if (slot.ptr()) { + if ( cmp( hash, hash_accessor()(*slot.ptr())) == 0 ) { + // the item with that hash value already exists + stats().onInsertFailed(); + return false; + } + + // the slot must be expanded + base_class::expand_slot( pos, slot ); + } + else { + // the slot is empty, try to insert data node + node_ptr pNull; + if ( pos.pArr->nodes[pos.nSlot].compare_exchange_strong(pNull, node_ptr(&val), memory_model::memory_order_release, atomics::memory_order_relaxed)) + { + // the new data node has been inserted + f(val); + ++m_ItemCounter; + stats().onInsertSuccess(); + stats().height( pos.nHeight ); + return true; + } + + // insert failed - slot has been changed by another thread + // retry inserting + stats().onInsertRetry(); + } + } + else + stats().onSlotChanged(); + } + } + + /// Updates the node + /** + Performs inserting or updating the item with hash value equal to \p val. + - If hash value is found then existing item is replaced with \p val, old item is disposed + with \p Traits::disposer. Note that the disposer is called by \p GC asynchronously. + The function returns std::pair + - If hash value is not found and \p bInsert is \p true then \p val is inserted, + the function returns std::pair + - If hash value is not found and \p bInsert is \p false then the set is unchanged, + the function returns std::pair + + Returns std::pair where \p first is \p true if operation is successful + (i.e. 
the item has been inserted or updated), + \p second is \p true if new item has been added or \p false if the set contains that hash. + + The function locks RCU internally. + */ + std::pair update( value_type& val, bool bInsert = true ) + { + return do_update(val, [](value_type&, value_type *) {}, bInsert ); + } + + /// Unlinks the item \p val from the set + /** + The function searches the item \p val in the set and unlink it + if it is found and its address is equal to &val. + + The function returns \p true if success and \p false otherwise. + + RCU should not be locked. The function locks RCU internally. + */ + bool unlink( value_type const& val ) + { + check_deadlock_policy::check(); + + auto pred = [&val](value_type const& item) -> bool { return &item == &val; }; + value_type * p; + { + rcu_lock rcuLock; + p = do_erase( hash_accessor()( val ), std::ref( pred )); + } + if ( p ) { + gc::template retire_ptr( p ); + return true; + } + return false; + } + + /// Deletes the item from the set + /** + The function searches \p hash in the set, + unlinks the item found, and returns \p true. + If that item is not found the function returns \p false. + + The \ref disposer specified in \p Traits is called by garbage collector \p GC asynchronously. + + RCU should not be locked. The function locks RCU internally. + */ + bool erase( hash_type const& hash ) + { + return erase(hash, [](value_type const&) {} ); + } + + /// Deletes the item from the set + /** + The function searches \p hash in the set, + call \p f functor with item found, and unlinks it from the set. + The \ref disposer specified in \p Traits is called + by garbage collector \p GC asynchronously. + + The \p Func interface is + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + If \p hash is not found the function returns \p false. + + RCU should not be locked. The function locks RCU internally. 
+ */ + template + bool erase( hash_type const& hash, Func f ) + { + check_deadlock_policy::check(); + + value_type * p; + + { + rcu_lock rcuLock; + p = do_erase( hash, []( value_type const&) -> bool { return true; } ); + } + + // p is guarded by HP + if ( p ) { + f( *p ); + gc::template retire_ptr(p); + return true; + } + return false; + } + + /// Extracts the item with specified \p hash + /** + The function searches \p hash in the set, + unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. + If the item with key equal to \p key is not found the function returns an empty \p exempt_ptr. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not call the disposer for the item found. + The disposer will be implicitly invoked when the returned object is destroyed or when + its \p release() member function is called. + Example: + \code + typedef cds::intrusive::FeldmanHashSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > set_type; + set_type theSet; + // ... + + typename set_type::exempt_ptr ep( theSet.extract( 5 )); + if ( ep ) { + // Deal with ep + //... + + // Dispose returned item. + ep.release(); + } + \endcode + */ + exempt_ptr extract( hash_type const& hash ) + { + check_deadlock_policy::check(); + + value_type * p; + { + rcu_lock rcuLock; + p = do_erase( hash, []( value_type const&) -> bool {return true;} ); + } + return exempt_ptr( p ); + } + + /// Finds an item by it's \p hash + /** + The function searches the item by \p hash and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during the functor is executing. + The functor does not serialize simultaneous access to the set's \p item. 
If such access is + possible you must provide your own synchronization schema on item level to prevent unsafe item modifications. + + The function returns \p true if \p hash is found, \p false otherwise. + + The function applies RCU lock internally. + */ + template + bool find( hash_type const& hash, Func f ) + { + rcu_lock rcuLock; + + value_type * p = search( hash ); + if ( p ) { + f( *p ); + return true; + } + return false; + } + + /// Checks whether the set contains \p hash + /** + The function searches the item by its \p hash + and returns \p true if it is found, or \p false otherwise. + + The function applies RCU lock internally. + */ + bool contains( hash_type const& hash ) + { + return find( hash, [](value_type&) {} ); + } + + /// Finds an item by it's \p hash and returns the item found + /** + The function searches the item by its \p hash + and returns the pointer to the item found. + If \p hash is not found the function returns \p nullptr. + + RCU should be locked before the function invocation. + Returned pointer is valid only while RCU is locked. + + Usage: + \code + typedef cds::intrusive::FeldmanHashSet< your_template_params > my_set; + my_set theSet; + // ... + { + // lock RCU + my_set::rcu_lock; + + foo * p = theSet.get( 5 ); + if ( p ) { + // Deal with p + //... + } + } + \endcode + */ + value_type * get( hash_type const& hash ) + { + assert( gc::is_locked()); + return search( hash ); + } + + /// Clears the set (non-atomic) + /** + The function unlink all data node from the set. + The function is not atomic but is thread-safe. + After \p %clear() the set may not be empty because another threads may insert items. + + For each item the \p disposer is called after unlinking. + */ + void clear() + { + clear_array( head(), head_size()); + } + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. + Thus, the correct item counting feature is an important part of the set implementation. 
+ */ + bool empty() const + { + return size() == 0; + } + + /// Returns item count in the set + size_t size() const + { + return m_ItemCounter; + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return stats(); + } + + /// Returns the size of head node + using base_class::head_size; + + /// Returns the size of the array node + using base_class::array_node_size; + + /// Collects tree level statistics into \p stat + /** + The function traverses the set and collects statistics for each level of the tree + into \p feldman_hashset::level_statistics struct. The element of \p stat[i] + represents statistics for level \p i, level 0 is head array. + The function is thread-safe and may be called in multi-threaded environment. + + Result can be useful for estimating efficiency of hash functor you use. + */ + void get_level_statistics(std::vector& stat) const + { + base_class::get_level_statistics(stat); + } + + protected: + //@cond + class iterator_base + { + friend class FeldmanHashSet; + + protected: + array_node * m_pNode; ///< current array node + size_t m_idx; ///< current position in m_pNode + value_type * m_pValue; ///< current value + FeldmanHashSet const* m_set; ///< Hash set + + public: + iterator_base() noexcept + : m_pNode(nullptr) + , m_idx(0) + , m_pValue(nullptr) + , m_set(nullptr) + {} + + iterator_base(iterator_base const& rhs) noexcept + : m_pNode(rhs.m_pNode) + , m_idx(rhs.m_idx) + , m_pValue(rhs.m_pValue) + , m_set(rhs.m_set) + {} + + iterator_base& operator=(iterator_base const& rhs) noexcept + { + m_pNode = rhs.m_pNode; + m_idx = rhs.m_idx; + m_pValue = rhs.m_pValue; + m_set = rhs.m_set; + return *this; + } + + iterator_base& operator++() + { + forward(); + return *this; + } + + iterator_base& operator--() + { + backward(); + return *this; + } + + bool operator ==(iterator_base const& rhs) const noexcept + { + return m_pNode == rhs.m_pNode && m_idx == rhs.m_idx && m_set == rhs.m_set; + } + + bool operator 
!=(iterator_base const& rhs) const noexcept + { + return !(*this == rhs); + } + + protected: + iterator_base(FeldmanHashSet const& set, array_node * pNode, size_t idx, bool) + : m_pNode(pNode) + , m_idx(idx) + , m_pValue(nullptr) + , m_set(&set) + {} + + iterator_base(FeldmanHashSet const& set, array_node * pNode, size_t idx) + : m_pNode(pNode) + , m_idx(idx) + , m_pValue(nullptr) + , m_set(&set) + { + forward(); + } + + value_type * pointer() const noexcept + { + return m_pValue; + } + + void forward() + { + assert(m_set != nullptr); + assert(m_pNode != nullptr); + + size_t const arrayNodeSize = m_set->array_node_size(); + size_t const headSize = m_set->head_size(); + array_node * pNode = m_pNode; + size_t idx = m_idx + 1; + size_t nodeSize = m_pNode->pParent ? arrayNodeSize : headSize; + + for (;;) { + if (idx < nodeSize) { + node_ptr slot = pNode->nodes[idx].load(memory_model::memory_order_acquire); + if (slot.bits() == base_class::flag_array_node ) { + // array node, go down the tree + assert(slot.ptr() != nullptr); + pNode = to_array(slot.ptr()); + idx = 0; + nodeSize = arrayNodeSize; + } + else if (slot.bits() == base_class::flag_array_converting ) { + // the slot is converting to array node right now - skip the node + ++idx; + } + else { + if (slot.ptr()) { + // data node + m_pNode = pNode; + m_idx = idx; + m_pValue = slot.ptr(); + return; + } + ++idx; + } + } + else { + // up to parent node + if (pNode->pParent) { + idx = pNode->idxParent + 1; + pNode = pNode->pParent; + nodeSize = pNode->pParent ? 
arrayNodeSize : headSize; + } + else { + // end() + assert(pNode == m_set->head()); + assert(idx == headSize); + m_pNode = pNode; + m_idx = idx; + m_pValue = nullptr; + return; + } + } + } + } + + void backward() + { + assert(m_set != nullptr); + assert(m_pNode != nullptr); + + size_t const arrayNodeSize = m_set->array_node_size(); + size_t const headSize = m_set->head_size(); + size_t const endIdx = size_t(0) - 1; + + array_node * pNode = m_pNode; + size_t idx = m_idx - 1; + size_t nodeSize = m_pNode->pParent ? arrayNodeSize : headSize; + + for (;;) { + if (idx != endIdx) { + node_ptr slot = pNode->nodes[idx].load(memory_model::memory_order_acquire); + if (slot.bits() == base_class::flag_array_node ) { + // array node, go down the tree + assert(slot.ptr() != nullptr); + pNode = to_array(slot.ptr()); + nodeSize = arrayNodeSize; + idx = nodeSize - 1; + } + else if (slot.bits() == base_class::flag_array_converting ) { + // the slot is converting to array node right now - skip the node + --idx; + } + else { + if (slot.ptr()) { + // data node + m_pNode = pNode; + m_idx = idx; + m_pValue = slot.ptr(); + return; + } + --idx; + } + } + else { + // up to parent node + if (pNode->pParent) { + idx = pNode->idxParent - 1; + pNode = pNode->pParent; + nodeSize = pNode->pParent ? 
arrayNodeSize : headSize; + } + else { + // rend() + assert(pNode == m_set->head()); + assert(idx == endIdx); + m_pNode = pNode; + m_idx = idx; + m_pValue = nullptr; + return; + } + } + } + } + }; + + template + Iterator init_begin() const + { + return Iterator(*this, head(), size_t(0) - 1); + } + + template + Iterator init_end() const + { + return Iterator(*this, head(), head_size(), false); + } + + template + Iterator init_rbegin() const + { + return Iterator(*this, head(), head_size()); + } + + template + Iterator init_rend() const + { + return Iterator(*this, head(), size_t(0) - 1, false); + } + + /// Bidirectional iterator class + template + class bidirectional_iterator : protected iterator_base + { + friend class FeldmanHashSet; + + protected: + static constexpr bool const c_bConstantIterator = IsConst; + + public: + typedef typename std::conditional< IsConst, value_type const*, value_type*>::type value_ptr; ///< Value pointer + typedef typename std::conditional< IsConst, value_type const&, value_type&>::type value_ref; ///< Value reference + + public: + bidirectional_iterator() noexcept + {} + + bidirectional_iterator(bidirectional_iterator const& rhs) noexcept + : iterator_base(rhs) + {} + + bidirectional_iterator& operator=(bidirectional_iterator const& rhs) noexcept + { + iterator_base::operator=(rhs); + return *this; + } + + bidirectional_iterator& operator++() + { + iterator_base::operator++(); + return *this; + } + + bidirectional_iterator& operator--() + { + iterator_base::operator--(); + return *this; + } + + value_ptr operator ->() const noexcept + { + return iterator_base::pointer(); + } + + value_ref operator *() const noexcept + { + value_ptr p = iterator_base::pointer(); + assert(p); + return *p; + } + + template + bool operator ==(bidirectional_iterator const& rhs) const noexcept + { + return iterator_base::operator==(rhs); + } + + template + bool operator !=(bidirectional_iterator const& rhs) const noexcept + { + return !(*this == rhs); + } + 
+ protected: + bidirectional_iterator(FeldmanHashSet& set, array_node * pNode, size_t idx, bool) + : iterator_base(set, pNode, idx, false) + {} + + bidirectional_iterator(FeldmanHashSet& set, array_node * pNode, size_t idx) + : iterator_base(set, pNode, idx) + {} + }; + + /// Reverse bidirectional iterator + template + class reverse_bidirectional_iterator : public iterator_base + { + friend class FeldmanHashSet; + + public: + typedef typename std::conditional< IsConst, value_type const*, value_type*>::type value_ptr; ///< Value pointer + typedef typename std::conditional< IsConst, value_type const&, value_type&>::type value_ref; ///< Value reference + + public: + reverse_bidirectional_iterator() noexcept + : iterator_base() + {} + + reverse_bidirectional_iterator(reverse_bidirectional_iterator const& rhs) noexcept + : iterator_base(rhs) + {} + + reverse_bidirectional_iterator& operator=(reverse_bidirectional_iterator const& rhs) noexcept + { + iterator_base::operator=(rhs); + return *this; + } + + reverse_bidirectional_iterator& operator++() + { + iterator_base::operator--(); + return *this; + } + + reverse_bidirectional_iterator& operator--() + { + iterator_base::operator++(); + return *this; + } + + value_ptr operator ->() const noexcept + { + return iterator_base::pointer(); + } + + value_ref operator *() const noexcept + { + value_ptr p = iterator_base::pointer(); + assert(p); + return *p; + } + + template + bool operator ==(reverse_bidirectional_iterator const& rhs) const + { + return iterator_base::operator==(rhs); + } + + template + bool operator !=(reverse_bidirectional_iterator const& rhs) + { + return !(*this == rhs); + } + + private: + reverse_bidirectional_iterator(FeldmanHashSet& set, array_node * pNode, size_t idx, bool) + : iterator_base(set, pNode, idx, false) + {} + + reverse_bidirectional_iterator(FeldmanHashSet& set, array_node * pNode, size_t idx) + : iterator_base(set, pNode, idx, false) + { + iterator_base::backward(); + } + }; + //@endcond + 
+ public: +#ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined iterator; ///< @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional iterator" type + typedef implementation_defined const_iterator; ///< @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional const iterator" type + typedef implementation_defined reverse_iterator; ///< @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional reverse iterator" type + typedef implementation_defined const_reverse_iterator; ///< @ref cds_intrusive_FeldmanHashSet_rcu_iterators "bidirectional reverse const iterator" type +#else + typedef bidirectional_iterator iterator; + typedef bidirectional_iterator const_iterator; + typedef reverse_bidirectional_iterator reverse_iterator; + typedef reverse_bidirectional_iterator const_reverse_iterator; +#endif + + ///@name Thread-safe iterators + /** @anchor cds_intrusive_FeldmanHashSet_rcu_iterators + The set supports thread-safe iterators: you may iterate over the set in multi-threaded environment + under explicit RCU lock. + + RCU lock requirement means that inserting or searching is allowed for iterating thread + but you must not erase the items from the set because erasing under RCU lock can lead + to a deadlock. However, another thread can call \p erase() safely while your thread is iterating. + + A typical example is: + \code + struct foo { + uint32_t hash; + // ... other fields + uint32_t payload; // only for example + }; + struct set_traits: cds::intrusive::feldman_hashset::traits + { + struct hash_accessor { + uint32_t operator()( foo const& src ) const + { + retur src.hash; + } + }; + }; + + typedef cds::urcu::gc< cds::urcu::general_buffered<>> rcu; + typedef cds::intrusive::FeldmanHashSet< rcu, foo, set_traits > set_type; + + set_type s; + + // ... + + // iterate over the set + { + // lock the RCU. + typename set_type::rcu_lock l; // scoped RCU lock + + // traverse the set + for ( auto i = s.begin(); i != s.end(); ++i ) { + // deal with i. 
Remember, erasing is prohibited here! + i->payload++; + } + } // at this point RCU lock is released + \endcode + + Each iterator object supports the common interface: + - dereference operators: + @code + value_type [const] * operator ->() noexcept + value_type [const] & operator *() noexcept + @endcode + - pre-increment and pre-decrement. Post-operators is not supported + - equality operators == and !=. + Iterators are equal iff they point to the same cell of the same array node. + Note that for two iterators \p it1 and \p it2 the condition it1 == it2 + does not entail &(*it1) == &(*it2) : welcome to concurrent containers + + @note It is possible the item can be iterated more that once, for example, if an iterator points to the item + in an array node that is being splitted. + */ + ///@{ + + /// Returns an iterator to the beginning of the set + iterator begin() + { + return iterator(*this, head(), size_t(0) - 1); + } + + /// Returns an const iterator to the beginning of the set + const_iterator begin() const + { + return const_iterator(*this, head(), size_t(0) - 1); + } + + /// Returns an const iterator to the beginning of the set + const_iterator cbegin() + { + return const_iterator(*this, head(), size_t(0) - 1); + } + + /// Returns an iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior. + iterator end() + { + return iterator(*this, head(), head_size(), false); + } + + /// Returns a const iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior. + const_iterator end() const + { + return const_iterator(*this, head(), head_size(), false); + } + + /// Returns a const iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior. 
+ const_iterator cend() + { + return const_iterator(*this, head(), head_size(), false); + } + + /// Returns a reverse iterator to the first element of the reversed set + reverse_iterator rbegin() + { + return reverse_iterator(*this, head(), head_size()); + } + + /// Returns a const reverse iterator to the first element of the reversed set + const_reverse_iterator rbegin() const + { + return const_reverse_iterator(*this, head(), head_size()); + } + + /// Returns a const reverse iterator to the first element of the reversed set + const_reverse_iterator crbegin() + { + return const_reverse_iterator(*this, head(), head_size()); + } + + /// Returns a reverse iterator to the element following the last element of the reversed set + /** + It corresponds to the element preceding the first element of the non-reversed container. + This element acts as a placeholder, attempting to access it results in undefined behavior. + */ + reverse_iterator rend() + { + return reverse_iterator(*this, head(), size_t(0) - 1, false); + } + + /// Returns a const reverse iterator to the element following the last element of the reversed set + /** + It corresponds to the element preceding the first element of the non-reversed container. + This element acts as a placeholder, attempting to access it results in undefined behavior. + */ + const_reverse_iterator rend() const + { + return const_reverse_iterator(*this, head(), size_t(0) - 1, false); + } + + /// Returns a const reverse iterator to the element following the last element of the reversed set + /** + It corresponds to the element preceding the first element of the non-reversed container. + This element acts as a placeholder, attempting to access it results in undefined behavior. 
+ */ + const_reverse_iterator crend() + { + return const_reverse_iterator(*this, head(), size_t(0) - 1, false); + } + ///@} + + protected: + //@cond + template + std::pair do_update(value_type& val, Func f, bool bInsert = true) + { + hash_type const& hash = hash_accessor()(val); + traverse_data pos( hash, *this ); + hash_comparator cmp; + value_type * pOld; + + while ( true ) { + rcu_lock rcuLock; + + node_ptr slot = base_class::traverse( pos ); + assert(slot.bits() == 0); + + pOld = nullptr; + if ( pos.pArr->nodes[pos.nSlot].load(memory_model::memory_order_acquire) == slot) { + if ( slot.ptr()) { + if ( cmp( hash, hash_accessor()(*slot.ptr())) == 0 ) { + // the item with that hash value already exists + // Replace it with val + if ( slot.ptr() == &val ) { + stats().onUpdateExisting(); + return std::make_pair(true, false); + } + + if ( pos.pArr->nodes[pos.nSlot].compare_exchange_strong(slot, node_ptr(&val), memory_model::memory_order_release, atomics::memory_order_relaxed)) { + // slot can be disposed + f( val, slot.ptr()); + pOld = slot.ptr(); + stats().onUpdateExisting(); + goto update_existing_done; + } + + stats().onUpdateRetry(); + } + else { + if ( bInsert ) { + // the slot must be expanded + base_class::expand_slot( pos, slot ); + } + else { + stats().onUpdateFailed(); + return std::make_pair(false, false); + } + } + } + else { + // the slot is empty, try to insert data node + if (bInsert) { + node_ptr pNull; + if ( pos.pArr->nodes[pos.nSlot].compare_exchange_strong(pNull, node_ptr(&val), memory_model::memory_order_release, atomics::memory_order_relaxed)) + { + // the new data node has been inserted + f(val, nullptr); + ++m_ItemCounter; + stats().onUpdateNew(); + stats().height( pos.nHeight ); + return std::make_pair(true, true); + } + } + else { + stats().onUpdateFailed(); + return std::make_pair(false, false); + } + + // insert failed - slot has been changed by another thread + // retry updating + stats().onUpdateRetry(); + } + } + else + 
stats().onSlotChanged(); + } // while + + // update success + // retire_ptr must be called only outside of RCU lock + update_existing_done: + if (pOld) + gc::template retire_ptr(pOld); + return std::make_pair(true, false); + } + + template + value_type * do_erase( hash_type const& hash, Predicate pred) + { + assert(gc::is_locked()); + traverse_data pos( hash, *this ); + hash_comparator cmp; + + while ( true ) { + node_ptr slot = base_class::traverse( pos ); + assert( slot.bits() == 0 ); + + if ( pos.pArr->nodes[pos.nSlot].load( memory_model::memory_order_acquire ) == slot ) { + if ( slot.ptr()) { + if ( cmp( hash, hash_accessor()(*slot.ptr())) == 0 && pred( *slot.ptr())) { + // item found - replace it with nullptr + if ( pos.pArr->nodes[pos.nSlot].compare_exchange_strong( slot, node_ptr( nullptr ), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { + --m_ItemCounter; + stats().onEraseSuccess(); + + return slot.ptr(); + } + stats().onEraseRetry(); + continue; + } + stats().onEraseFailed(); + return nullptr; + } + else { + // the slot is empty + stats().onEraseFailed(); + return nullptr; + } + } + else + stats().onSlotChanged(); + } + } + + value_type * search(hash_type const& hash ) + { + assert( gc::is_locked()); + traverse_data pos( hash, *this ); + hash_comparator cmp; + + while ( true ) { + node_ptr slot = base_class::traverse( pos ); + assert( slot.bits() == 0 ); + + if ( pos.pArr->nodes[pos.nSlot].load( memory_model::memory_order_acquire ) != slot ) { + // slot value has been changed - retry + stats().onSlotChanged(); + continue; + } + else if ( slot.ptr() && cmp( hash, hash_accessor()(*slot.ptr())) == 0 ) { + // item found + stats().onFindSuccess(); + return slot.ptr(); + } + stats().onFindFailed(); + return nullptr; + } + } + + //@endcond + + private: + //@cond + void clear_array(array_node * pArrNode, size_t nSize) + { + back_off bkoff; + + + for (atomic_node_ptr * pArr = pArrNode->nodes, *pLast = pArr + nSize; pArr != pLast; ++pArr) { + 
while (true) { + node_ptr slot = pArr->load(memory_model::memory_order_acquire); + if (slot.bits() == base_class::flag_array_node ) { + // array node, go down the tree + assert(slot.ptr() != nullptr); + clear_array(to_array(slot.ptr()), array_node_size()); + break; + } + else if (slot.bits() == base_class::flag_array_converting ) { + // the slot is converting to array node right now + while ((slot = pArr->load(memory_model::memory_order_acquire)).bits() == base_class::flag_array_converting ) { + bkoff(); + stats().onSlotConverting(); + } + bkoff.reset(); + + assert(slot.ptr() != nullptr); + assert(slot.bits() == base_class::flag_array_node ); + clear_array(to_array(slot.ptr()), array_node_size()); + break; + } + else { + // data node + if (pArr->compare_exchange_strong(slot, node_ptr(), memory_model::memory_order_acquire, atomics::memory_order_relaxed)) { + if (slot.ptr()) { + gc::template retire_ptr(slot.ptr()); + --m_ItemCounter; + stats().onEraseSuccess(); + } + break; + } + } + } + } + } + //@endcond + }; + +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_FELDMAN_HASHSET_RCU_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/free_list.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/free_list.h new file mode 100644 index 0000000..e7e8862 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/free_list.h @@ -0,0 +1,246 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_FREE_LIST_H +#define CDSLIB_INTRUSIVE_FREE_LIST_H + +#include + +namespace cds { namespace intrusive { + + /// Lock-free free list + /** @ingroup cds_intrusive_freelist + + Free list is a helper class intended for reusing objects instead of freeing them completely; + this avoids the overhead of \p malloc(), and also avoids its worst-case behavior of taking an operating system lock. + So, the free list can be considered as a specialized allocator for objects of some type. + + The algorithm is taken from this article. + The algo does not require any SMR like Hazard Pointer to prevent ABA problem. + + There is \ref TaggedFreeList "tagged pointers" variant of free list for processors with double-width CAS support. 
+ + \b How to use + \code + #include + + // Your struct should be derived from FreeList::node + struct Foo: public cds::intrusive::FreeList::node + { + // Foo fields + }; + + // Simplified Foo allocator + class FooAllocator + { + public: + // free-list clear() must be explicitly called before destroying the free-list object + ~FooAllocator() + { + m_FreeList.clear( []( freelist_node * p ) { delete static_cast( p ); }); + } + + Foo * alloc() + { + freelist_node * p = m_FreeList.get(); + if ( p ) + return static_cast( p ); + return new Foo; + }; + + void dealloc( Foo * p ) + { + m_FreeList.put( static_cast( p )); + }; + + private: + typedef cds::intrusive::FreeList::node freelist_node; + cds::intrusive::FreeList m_FreeList; + }; + \endcode + */ + class FreeList + { + public: + /// Free list node + struct node { + //@cond + atomics::atomic m_freeListRefs; + atomics::atomic m_freeListNext; + + node() + : m_freeListRefs( 0 ) + { + m_freeListNext.store( nullptr, atomics::memory_order_release ); + } + //@endcond + }; + + public: + /// Creates empty free list + FreeList() + : m_Head( nullptr ) + {} + + /// Destroys the free list. Free-list must be empty. + /** + @warning dtor does not free elements of the list. + To free elements you should manually call \p clear() with an appropriate disposer. + */ + ~FreeList() + { + assert( empty()); + } + + /// Puts \p pNode to the free list + void put( node * pNode ) + { + // We know that the should-be-on-freelist bit is 0 at this point, so it's safe to + // set it using a fetch_add + if ( pNode->m_freeListRefs.fetch_add( c_ShouldBeOnFreeList, atomics::memory_order_release ) == 0 ) { + // Oh look! We were the last ones referencing this node, and we know + // we want to add it to the free list, so let's do it! + add_knowing_refcount_is_zero( pNode ); + } + } + + /// Gets a node from the free list. 
If the list is empty, returns \p nullptr + node * get() + { + auto head = m_Head.load( atomics::memory_order_acquire ); + while ( head != nullptr ) { + auto prevHead = head; + auto refs = head->m_freeListRefs.load( atomics::memory_order_relaxed ); + + if ( cds_unlikely( (refs & c_RefsMask) == 0 || !head->m_freeListRefs.compare_exchange_strong( refs, refs + 1, + atomics::memory_order_acquire, atomics::memory_order_relaxed ))) + { + head = m_Head.load( atomics::memory_order_acquire ); + continue; + } + + // Good, reference count has been incremented (it wasn't at zero), which means + // we can read the next and not worry about it changing between now and the time + // we do the CAS + node * next = head->m_freeListNext.load( atomics::memory_order_relaxed ); + if ( cds_likely( m_Head.compare_exchange_strong( head, next, atomics::memory_order_acquire, atomics::memory_order_relaxed ))) { + // Yay, got the node. This means it was on the list, which means + // shouldBeOnFreeList must be false no matter the refcount (because + // nobody else knows it's been taken off yet, it can't have been put back on). + assert( (head->m_freeListRefs.load( atomics::memory_order_relaxed ) & c_ShouldBeOnFreeList) == 0 ); + + // Decrease refcount twice, once for our ref, and once for the list's ref + head->m_freeListRefs.fetch_sub( 2, atomics::memory_order_relaxed ); + + return head; + } + + // OK, the head must have changed on us, but we still need to decrease the refcount we + // increased + refs = prevHead->m_freeListRefs.fetch_sub( 1, atomics::memory_order_acq_rel ); + if ( refs == c_ShouldBeOnFreeList + 1 ) + add_knowing_refcount_is_zero( prevHead ); + } + + return nullptr; + } + + /// Checks whether the free list is empty + bool empty() const + { + return m_Head.load( atomics::memory_order_relaxed ) == nullptr; + } + + /// Clears the free list (not atomic) + /** + For each element \p disp disposer is called to free memory. 
+ The \p Disposer interface: + \code + struct disposer + { + void operator()( FreeList::node * node ); + }; + \endcode + + This method must be explicitly called before the free list destructor. + */ + template + void clear( Disposer disp ) + { + node * head = m_Head.load( atomics::memory_order_relaxed ); + m_Head.store( nullptr, atomics::memory_order_relaxed ); + while ( head ) { + node * next = head->m_freeListNext.load( atomics::memory_order_relaxed ); + disp( head ); + head = next; + } + } + + private: + //@cond + void add_knowing_refcount_is_zero( node * pNode ) + { + // Since the refcount is zero, and nobody can increase it once it's zero (except us, and we + // run only one copy of this method per node at a time, i.e. the single thread case), then we + // know we can safely change the next pointer of the node; however, once the refcount is back + // above zero, then other threads could increase it (happens under heavy contention, when the + // refcount goes to zero in between a load and a refcount increment of a node in try_get, then + // back up to something non-zero, then the refcount increment is done by the other thread) -- + // so, if the CAS to add the node to the actual list fails, decrease the refcount and leave + // the add operation to the next thread who puts the refcount back at zero (which could be us, + // hence the loop). 
+ node * head = m_Head.load( atomics::memory_order_relaxed ); + while ( true ) { + pNode->m_freeListNext.store( head, atomics::memory_order_relaxed ); + pNode->m_freeListRefs.store( 1, atomics::memory_order_release ); + if ( cds_unlikely( !m_Head.compare_exchange_strong( head, pNode, atomics::memory_order_release, atomics::memory_order_relaxed ))) { + // Hmm, the add failed, but we can only try again when the refcount goes back to zero + if ( pNode->m_freeListRefs.fetch_add( c_ShouldBeOnFreeList - 1, atomics::memory_order_release ) == 1 ) + continue; + } + return; + } + } + //@endcond + + private: + //@cond + static constexpr uint32_t const c_RefsMask = 0x7FFFFFFF; + static constexpr uint32_t const c_ShouldBeOnFreeList = 0x80000000; + + // Implemented like a stack, but where node order doesn't matter (nodes are + // inserted out of order under contention) + atomics::atomic m_Head; + //@endcond + }; + +}} // namespace cds::intrusive + +#endif // CDSLIB_INTRUSIVE_FREE_LIST_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/free_list_cached.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/free_list_cached.h new file mode 100644 index 0000000..3fd3b09 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/free_list_cached.h @@ -0,0 +1,192 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_FREE_LIST_CACHED_H +#define CDSLIB_INTRUSIVE_FREE_LIST_CACHED_H + +#include +#include +#include +#include + +#include +#include + +namespace cds { namespace intrusive { + + /// Cached free list + /** @ingroup cds_intrusive_freelist + + The class that is a wrapper over other \p FreeList contains a small cache of free elements. + Before placing a new item into underlying \p FreeList the cached free-list tryes + to put that item into the cache if its corresponding slot is empty. The slot is calculated by + current thread id: + \code + int slot = std::hash()( std::this_thread::get_id()) & (CacheSize - 1); + \endcode + + When getting the free-list checks the corresponding cache slot. If it is not empty, its + contents is returned. + + In some cases such simple algorithm significantly reduces \p FreeList contention. 
+ + Template parameters: + - \p FreeList - a free-list implementation: \p FreeList, \p TaggedFreeList + - \p CacheSize - size of cache, a small power-of-two number, default is 16 + - \p Padding - padding of cache elements for solving false sharing, default is \p cds::c_nCacheLineSize + */ + template + class CachedFreeList + { + public: + typedef FreeList free_list_type; ///< Undelying free-list type + typedef typename free_list_type::node node; ///< Free-list node + + static size_t const c_cache_size = CacheSize; ///< Cache size + static unsigned const c_padding = Padding; ///< Cache element padding + + static_assert( c_cache_size >= 4, "Cache size is too small" ); + static_assert( (c_cache_size & (c_cache_size - 1)) == 0, "CacheSize must be power of two" ); + static_assert( (c_padding & (c_padding - 1)) == 0, "Padding must be power-of-two"); + + public: + /// Creates empty free list + CachedFreeList() + { + for ( auto& i: m_cache ) + i.store( nullptr, atomics::memory_order_relaxed ); + } + + /// Destroys the free list. Free-list must be empty. + /** + @warning dtor does not free elements of the list. + To free elements you should manually call \p clear() with an appropriate disposer. + */ + ~CachedFreeList() + { + assert( empty()); + } + + /// Puts \p pNode to the free list + void put( node* pNode ) + { + // try to put into free cell of cache + node* expect = nullptr; + if ( m_cache[ get_hash() ].compare_exchange_weak( expect, pNode, atomics::memory_order_release, atomics::memory_order_relaxed )) + return; + + // cache cell is not empty - use free-list + m_freeList.put( pNode ); + } + + /// Gets a node from the free list. 
If the list is empty, returns \p nullptr + node * get() + { + // try get from cache + atomics::atomic& cell = m_cache[ get_hash() ]; + node* p = cell.load( atomics::memory_order_relaxed ); + if ( p && cell.compare_exchange_weak( p, nullptr, atomics::memory_order_acquire, atomics::memory_order_relaxed )) + return p; + + // try read from free-list + p = m_freeList.get(); + if ( p ) + return p; + + // iterate the cache + for ( auto& item : m_cache ) { + p = item.load( atomics::memory_order_relaxed ); + if ( p && item.compare_exchange_weak( p, nullptr, atomics::memory_order_acquire, atomics::memory_order_relaxed )) + return p; + } + + return m_freeList.get(); + } + + /// Checks whether the free list is empty + bool empty() const + { + if ( !m_freeList.empty()) + return false; + + for ( auto& cell : m_cache ) { + node* p = cell.load( atomics::memory_order_relaxed ); + if ( p ) + return false; + } + + return true; + } + + /// Clears the free list (not atomic) + /** + For each element \p disp disposer is called to free memory. + The \p Disposer interface: + \code + struct disposer + { + void operator()( FreeList::node * node ); + }; + \endcode + + This method must be explicitly called before the free list destructor. 
+ */ + template + void clear( Disposer disp ) + { + m_freeList.clear( disp ); + for ( auto& cell : m_cache ) { + node* p = cell.load( atomics::memory_order_relaxed ); + if ( p ) { + disp( p ); + cell.store( nullptr, atomics::memory_order_relaxed ); + } + } + } + + private: + //@cond + size_t get_hash() + { + return std::hash()( std::this_thread::get_id()) & (c_cache_size - 1); + } + //@endcond + private: + //@cond + typedef typename cds::details::type_padding< atomics::atomic, c_padding >::type array_item; + array_item m_cache[ c_cache_size ]; + free_list_type m_freeList; + //@endcond + }; + +}} // namespace cds::intrusive +//@endcond + +#endif // CDSLIB_INTRUSIVE_FREE_LIST_CACHED_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/free_list_selector.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/free_list_selector.h new file mode 100644 index 0000000..10b7cc9 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/free_list_selector.h @@ -0,0 +1,54 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_FREE_LIST_SELECTOR_H +#define CDSLIB_INTRUSIVE_FREE_LIST_SELECTOR_H + +#include + +#ifdef CDS_DCAS_SUPPORT +# include +#else +# include +#endif + +//@cond +namespace cds { namespace intrusive { + +#ifdef CDS_DCAS_SUPPORT + typedef TaggedFreeList FreeListImpl; +#else + typedef FreeList FreeListImpl; +#endif + +}} // namespace cds::intrusive +//@endcond + +#endif // CDSLIB_INTRUSIVE_FREE_LIST_SELECTOR_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/free_list_tagged.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/free_list_tagged.h new file mode 100644 index 0000000..b9ecd4e --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/free_list_tagged.h @@ -0,0 +1,205 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source 
code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_FREE_LIST_TAGGED_H +#define CDSLIB_INTRUSIVE_FREE_LIST_TAGGED_H + +#include + +namespace cds { namespace intrusive { + + /// Lock-free free list based on tagged pointers (required double-width CAS) + /** @ingroup cds_intrusive_freelist + + This variant of \p FreeList is intended for processor architectures that support double-width CAS. + It uses tagged pointer technique to solve ABA problem. 
+ + \b How to use + \code + #include + + // Your struct should be derived from TaggedFreeList::node + struct Foo: public cds::intrusive::TaggedFreeList::node + { + // Foo fields + }; + + // Simplified Foo allocator + class FooAllocator + { + public: + // free-list clear() must be explicitly called before destroying the free-list object + ~FooAllocator() + { + m_FreeList.clear( []( freelist_node * p ) { delete static_cast( p ); }); + } + + Foo * alloc() + { + freelist_node * p = m_FreeList.get(); + if ( p ) + return static_cast( p ); + return new Foo; + }; + + void dealloc( Foo * p ) + { + m_FreeList.put( static_cast( p )); + }; + + private: + typedef cds::intrusive::TaggedFreeList::node freelist_node; + cds::intrusive::TaggedFreeList m_FreeList; + }; + \endcode + */ + class TaggedFreeList + { + public: + struct node { + //@cond + atomics::atomic m_freeListNext; + + node() + { + m_freeListNext.store( nullptr, atomics::memory_order_release ); + } + //@endcond + }; + + private: + //@cond + struct tagged_ptr + { + node * ptr; + uintptr_t tag; + + tagged_ptr() + : ptr( nullptr ) + , tag( 0 ) + {} + + tagged_ptr( node* p ) + : ptr( p ) + , tag( 0 ) + {} + }; + + static_assert(sizeof( tagged_ptr ) == sizeof( void * ) * 2, "sizeof( tagged_ptr ) violation"); + //@endcond + + public: + /// Creates empty free-list + TaggedFreeList() + : m_Head( tagged_ptr()) + { + // Your platform must support double-width CAS + assert( m_Head.is_lock_free()); + } + + /// Destroys the free list. Free-list must be empty. + /** + @warning dtor does not free elements of the list. + To free elements you should manually call \p clear() with an appropriate disposer. 
+ */ + ~TaggedFreeList() + { + assert( empty()); + } + + /// Puts \p pNode to the free list + void put( node * pNode ) + { + assert( m_Head.is_lock_free()); + + tagged_ptr currentHead = m_Head.load( atomics::memory_order_relaxed ); + tagged_ptr newHead = { pNode }; + do { + newHead.tag = currentHead.tag + 1; + pNode->m_freeListNext.store( currentHead.ptr, atomics::memory_order_relaxed ); + CDS_TSAN_ANNOTATE_HAPPENS_BEFORE( &pNode->m_freeListNext ); + } while ( cds_unlikely( !m_Head.compare_exchange_weak( currentHead, newHead, atomics::memory_order_release, atomics::memory_order_acquire ))); + } + + /// Gets a node from the free list. If the list is empty, returns \p nullptr + node * get() + { + tagged_ptr currentHead = m_Head.load( atomics::memory_order_acquire ); + tagged_ptr newHead; + while ( currentHead.ptr != nullptr ) { + CDS_TSAN_ANNOTATE_HAPPENS_AFTER( ¤tHead.ptr->m_freeListNext ); + newHead.ptr = currentHead.ptr->m_freeListNext.load( atomics::memory_order_relaxed ); + newHead.tag = currentHead.tag + 1; + if ( cds_likely( m_Head.compare_exchange_weak( currentHead, newHead, atomics::memory_order_release, atomics::memory_order_acquire ))) + break; + } + return currentHead.ptr; + } + + /// Checks whether the free list is empty + bool empty() const + { + return m_Head.load( atomics::memory_order_relaxed ).ptr == nullptr; + } + + /// Clears the free list (not atomic) + /** + For each element \p disp disposer is called to free memory. + The \p Disposer interface: + \code + struct disposer + { + void operator()( FreeList::node * node ); + }; + \endcode + + This method must be explicitly called before the free list destructor. 
+ */ + template + void clear( Disposer disp ) + { + node * head = m_Head.load( atomics::memory_order_relaxed ).ptr; + m_Head.store( { nullptr }, atomics::memory_order_relaxed ); + while ( head ) { + node * next = head->m_freeListNext.load( atomics::memory_order_relaxed ); + disp( head ); + head = next; + } + } + + private: + //@cond + atomics::atomic m_Head; + //@endcond + }; + +}} // namespace cds::intrusive + +#endif // CDSLIB_INTRUSIVE_FREE_LIST_TAGGED_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/impl/ellen_bintree.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/impl/ellen_bintree.h new file mode 100644 index 0000000..2bab032 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/impl/ellen_bintree.h @@ -0,0 +1,1597 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_IMPL_ELLEN_BINTREE_H +#define CDSLIB_INTRUSIVE_IMPL_ELLEN_BINTREE_H + +#include +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + /// Ellen's et al binary search tree + /** @ingroup cds_intrusive_map + @ingroup cds_intrusive_tree + @anchor cds_intrusive_EllenBinTree + + Source: + - [2010] F.Ellen, P.Fatourou, E.Ruppert, F.van Breugel "Non-blocking Binary Search Tree" + + %EllenBinTree is an unbalanced leaf-oriented binary search tree that implements the set + abstract data type. Nodes maintains child pointers but not parent pointers. + Every internal node has exactly two children, and all data of type \p T currently in + the tree are stored in the leaves. Internal nodes of the tree are used to direct \p find() + operation along the path to the correct leaf. The keys (of \p Key type) stored in internal nodes + may or may not be in the set. \p Key type is a subset of \p T type. + There should be exactly defined a key extracting functor for converting object of type \p T to + object of type \p Key. + + Due to \p extract_min() and \p extract_max() member functions the \p %EllenBinTree can act as + a priority queue. In this case you should provide unique compound key, for example, + the priority value plus some uniformly distributed random value. + + @note In the current implementation we do not use helping technique described in the original paper. 
+ In Hazard Pointer schema the helping is too complicated and does not give any observable benefits. + Instead of helping, when a thread encounters a concurrent operation it just spins waiting for + the operation done. Such solution allows greatly simplify implementation of the tree. + + @attention Recall the tree is unbalanced. The complexity of operations is O(log N) + for uniformly distributed random keys, but in the worst case the complexity is O(N). + + @note Do not include header file explicitly. + There are header file for each GC type: + - - for Hazard Pointer GC \p cds::gc::HP + - - for Dynamic Hazard Pointer GC \p cds::gc::DHP + - - for RCU (see \ref cds_intrusive_EllenBinTree_rcu "RCU-based EllenBinTree") + + Template arguments : + - \p GC - garbage collector, possible types are cds::gc::HP, cds::gc::DHP. + - \p Key - key type, a subset of \p T + - \p T - type to be stored in tree's leaf nodes. The type must be based on \p ellen_bintree::node + (for \p ellen_bintree::base_hook) or it must have a member of type \p ellen_bintree::node + (for \p ellen_bintree::member_hook). + - \p Traits - tree traits, default is \p ellen_bintree::traits + It is possible to declare option-based tree with \p ellen_bintree::make_traits metafunction + instead of \p Traits template argument. + + @anchor cds_intrusive_EllenBinTree_less + Predicate requirements + + \p Traits::less, \p Traits::compare and other predicates using with member fuctions should accept at least parameters + of type \p T and \p Key in any combination. + For example, for \p Foo struct with \p std::string key field the appropiate \p less functor is: + \code + struct Foo: public cds::intrusive::ellen_bintree::node< ... > + { + std::string m_strKey; + ... 
+ }; + + struct less { + bool operator()( Foo const& v1, Foo const& v2 ) const + { return v1.m_strKey < v2.m_strKey ; } + + bool operator()( Foo const& v, std::string const& s ) const + { return v.m_strKey < s ; } + + bool operator()( std::string const& s, Foo const& v ) const + { return s < v.m_strKey ; } + + // Support comparing std::string and char const * + bool operator()( std::string const& s, char const * p ) const + { return s.compare(p) < 0 ; } + + bool operator()( Foo const& v, char const * p ) const + { return v.m_strKey.compare(p) < 0 ; } + + bool operator()( char const * p, std::string const& s ) const + { return s.compare(p) > 0; } + + bool operator()( char const * p, Foo const& v ) const + { return v.m_strKey.compare(p) > 0; } + }; + \endcode + + Usage examples see \ref cds_intrusive_EllenBinTree_usage "here" + */ + template < class GC, + typename Key, + typename T, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = ellen_bintree::traits +#else + class Traits +#endif + > + class EllenBinTree + { + public: + typedef GC gc; ///< Garbage collector + typedef Key key_type; ///< type of a key to be stored in internal nodes; key is a part of \p value_type + typedef T value_type; ///< type of value stored in the binary tree + typedef Traits traits; ///< Traits template parameter + + typedef typename traits::hook hook; ///< hook type + typedef typename hook::node_type node_type; ///< node type + typedef typename traits::disposer disposer; ///< leaf node disposer + typedef typename traits::back_off back_off; ///< back-off strategy + + typedef typename gc::template guarded_ptr< value_type > guarded_ptr; ///< Guarded pointer + + protected: + //@cond + typedef ellen_bintree::base_node< gc > tree_node; ///< Base type of tree node + typedef node_type leaf_node; ///< Leaf node type + typedef ellen_bintree::node_types< gc, key_type, typename leaf_node::tag > node_factory; + typedef typename node_factory::internal_node_type internal_node; ///< Internal node type + typedef 
typename node_factory::update_desc_type update_desc; ///< Update descriptor + typedef typename update_desc::update_ptr update_ptr; ///< Marked pointer to update descriptor + //@endcond + + public: +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator; ///< key compare functor based on \p Traits::compare and \p Traits::less + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< Node traits +# else + typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; + struct node_traits: public get_node_traits< value_type, node_type, hook>::type + { + static internal_node const& to_internal_node( tree_node const& n ) + { + assert( n.is_internal()); + return static_cast( n ); + } + + static leaf_node const& to_leaf_node( tree_node const& n ) + { + assert( n.is_leaf()); + return static_cast( n ); + } + }; +# endif + + typedef typename traits::item_counter item_counter; ///< Item counting policy + typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model + typedef typename traits::stat stat; ///< internal statistics type + typedef typename traits::key_extractor key_extractor; ///< key extracting functor + + typedef typename traits::node_allocator node_allocator; ///< Allocator for internal node + typedef typename traits::update_desc_allocator update_desc_allocator; ///< Update descriptor allocator + + static constexpr const size_t c_nHazardPtrCount = 9; ///< Count of hazard pointer required for the algorithm + + protected: + //@cond + typedef ellen_bintree::details::compare< key_type, value_type, key_comparator, node_traits > node_compare; + + typedef cds::details::Allocator< internal_node, node_allocator > cxx_node_allocator; + typedef cds::details::Allocator< update_desc, update_desc_allocator > cxx_update_desc_allocator; + + struct search_result { + enum guard_index { + Guard_GrandParent, + Guard_Parent, + Guard_Leaf, + Guard_updGrandParent, 
+ Guard_updParent, + Guard_temporary, + + // end of guard indices + guard_count + }; + + typedef typename gc::template GuardArray< guard_count > guard_array; + guard_array guards; + + internal_node * pGrandParent; + internal_node * pParent; + leaf_node * pLeaf; + update_ptr updParent; + update_ptr updGrandParent; + bool bRightLeaf; // true if pLeaf is right child of pParent, false otherwise + bool bRightParent; // true if pParent is right child of pGrandParent, false otherwise + + search_result() + :pGrandParent( nullptr ) + ,pParent( nullptr ) + ,pLeaf( nullptr ) + ,bRightLeaf( false ) + ,bRightParent( false ) + {} + }; + //@endcond + + protected: + //@cond + internal_node m_Root; ///< Tree root node (key= Infinite2) + leaf_node m_LeafInf1; ///< Infinite leaf 1 (key= Infinite1) + leaf_node m_LeafInf2; ///< Infinite leaf 2 (key= Infinite2) + //@endcond + + item_counter m_ItemCounter; ///< item counter + mutable stat m_Stat; ///< internal statistics + + protected: + //@cond + static void free_leaf_node( void* p ) + { + disposer()( reinterpret_cast( p )); + } + + internal_node * alloc_internal_node() const + { + m_Stat.onInternalNodeCreated(); + internal_node * pNode = cxx_node_allocator().New(); + return pNode; + } + + static void free_internal_node( void* pNode ) + { + cxx_node_allocator().Delete( reinterpret_cast( pNode )); + } + + struct internal_node_deleter { + void operator()( internal_node* p) const + { + cxx_node_allocator().Delete( p ); + } + }; + + typedef std::unique_ptr< internal_node, internal_node_deleter> unique_internal_node_ptr; + + update_desc * alloc_update_desc() const + { + m_Stat.onUpdateDescCreated(); + return cxx_update_desc_allocator().New(); + } + + static void free_update_desc( void* pDesc ) + { + cxx_update_desc_allocator().Delete( reinterpret_cast( pDesc )); + } + + void retire_node( tree_node * pNode ) const + { + if ( pNode->is_leaf()) { + assert( static_cast( pNode ) != &m_LeafInf1 ); + assert( static_cast( pNode ) != &m_LeafInf2 ); + 
+ gc::template retire( node_traits::to_value_ptr( static_cast( pNode )), free_leaf_node ); + } + else { + assert( static_cast( pNode ) != &m_Root ); + m_Stat.onInternalNodeDeleted(); + + gc::template retire( static_cast( pNode ), free_internal_node ); + } + } + + void retire_update_desc( update_desc * p ) const + { + m_Stat.onUpdateDescDeleted(); + gc::template retire( p, free_update_desc ); + } + + void make_empty_tree() + { + m_Root.infinite_key( 2 ); + m_LeafInf1.infinite_key( 1 ); + m_LeafInf2.infinite_key( 2 ); + m_Root.m_pLeft.store( &m_LeafInf1, memory_model::memory_order_relaxed ); + m_Root.m_pRight.store( &m_LeafInf2, memory_model::memory_order_release ); + } + //@endcond + + public: + /// Default constructor + EllenBinTree() + { + static_assert( !std::is_same< key_extractor, opt::none >::value, "The key extractor option must be specified" ); + make_empty_tree(); + } + + /// Clears the tree + ~EllenBinTree() + { + unsafe_clear(); + } + + /// Inserts new node + /** + The function inserts \p val in the tree if it does not contain + an item with key equal to \p val. + + Returns \p true if \p val is placed into the tree, \p false otherwise. + */ + bool insert( value_type& val ) + { + return insert( val, []( value_type& ) {} ); + } + + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. + + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the tree + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this tree's item by concurrent threads. + The user-defined functor is called only if the inserting is success. 
+ */ + template + bool insert( value_type& val, Func f ) + { + typename gc::Guard guardInsert; + guardInsert.assign( &val ); + + unique_internal_node_ptr pNewInternal; + search_result res; + back_off bkoff; + + for ( ;; ) { + if ( search( res, val, node_compare())) { + if ( pNewInternal.get()) + m_Stat.onInternalNodeDeleted() ; // unique_internal_node_ptr deletes internal node + m_Stat.onInsertFailed(); + return false; + } + + if ( res.updGrandParent.bits() == update_desc::Clean && res.updParent.bits() == update_desc::Clean ) { + + if ( !pNewInternal.get()) + pNewInternal.reset( alloc_internal_node()); + + if ( try_insert( val, pNewInternal.get(), res )) { + f( val ); + pNewInternal.release(); // internal node is linked into the tree and should not be deleted + break; + } + } + + bkoff(); + m_Stat.onInsertRetry(); + } + + ++m_ItemCounter; + m_Stat.onInsertSuccess(); + return true; + } + + /// Updates the node + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val is not found in the set, then \p val is inserted into the set + iff \p bAllowInsert is \p true. + Otherwise, the functor \p func is called with item found. + The functor \p func signature is: + \code + void func( bool bNew, value_type& item, value_type& val ); + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p %update() function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refer to the same thing. + + The functor can change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + Returns std::pair where \p first is \p true if operation is successful, + i.e. 
the node has been inserted or updated, + \p second is \p true if new item has been added or \p false if the item with \p key + already exists. + + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + std::pair update( value_type& val, Func func, bool bAllowInsert = true ) + { + typename gc::Guard guardInsert; + guardInsert.assign( &val ); + + unique_internal_node_ptr pNewInternal; + search_result res; + back_off bkoff; + + for ( ;; ) { + if ( search( res, val, node_compare())) { + func( false, *node_traits::to_value_ptr( res.pLeaf ), val ); + if ( pNewInternal.get()) + m_Stat.onInternalNodeDeleted() ; // unique_internal_node_ptr deletes internal node + m_Stat.onUpdateExist(); + return std::make_pair( true, false ); + } + + if ( res.updGrandParent.bits() == update_desc::Clean && res.updParent.bits() == update_desc::Clean ) { + if ( !bAllowInsert ) + return std::make_pair( false, false ); + + if ( !pNewInternal.get()) + pNewInternal.reset( alloc_internal_node()); + + if ( try_insert( val, pNewInternal.get(), res )) { + func( true, val, val ); + pNewInternal.release() ; // internal node has been linked into the tree and should not be deleted + break; + } + } + + bkoff(); + m_Stat.onUpdateRetry(); + } + + ++m_ItemCounter; + m_Stat.onUpdateNew(); + return std::make_pair( true, true ); + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( value_type& val, Func func ) + { + return update( val, func, true ); + } + //@endcond + + /// Unlinks the item \p val from the tree + /** + The function searches the item \p val in the tree and unlink it from the tree + if it is found and is equal to \p val. + + Difference between \ref erase and \p unlink functions: \p erase finds a key + and deletes the item found. \p unlink finds an item by key and deletes it + only if \p val is a node, i.e. the pointer to item found is equal to &val . 
+ + The \p disposer specified in \p Traits class template parameter is called + by garbage collector \p GC asynchronously. + + The function returns \p true if success and \p false otherwise. + */ + bool unlink( value_type& val ) + { + return erase_( val, node_compare(), + []( value_type const& v, leaf_node const& n ) -> bool { return &v == node_traits::to_value_ptr( n ); }, + [](value_type const&) {} ); + } + + /// Deletes the item from the tree + /** \anchor cds_intrusive_EllenBinTree_erase + The function searches an item with key equal to \p key in the tree, + unlinks it from the tree, and returns \p true. + If the item with key equal to \p key is not found the function return \p false. + + Note the \p Traits::less and/or \p Traits::compare predicate should accept a parameter of type \p Q + that can be not the same as \p value_type. + */ + template + bool erase( const Q& key ) + { + return erase_( key, node_compare(), + []( Q const&, leaf_node const& ) -> bool { return true; }, + [](value_type const&) {} ); + } + + /// Delete the item from the tree with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_EllenBinTree_erase "erase(Q const&)" + but \p pred predicate is used for key comparing. + \p Less has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_less + "Predicate requirements". + \p pred must imply the same element order as the comparator used for building the tree. 
+ */ + template + bool erase_with( const Q& key, Less pred ) + { + CDS_UNUSED( pred ); + typedef ellen_bintree::details::compare< + key_type, + value_type, + opt::details::make_comparator_from_less, + node_traits + > compare_functor; + + return erase_( key, compare_functor(), + []( Q const&, leaf_node const& ) -> bool { return true; }, + [](value_type const&) {} ); + } + + /// Deletes the item from the tree + /** \anchor cds_intrusive_EllenBinTree_erase_func + The function searches an item with key equal to \p key in the tree, + call \p f functor with item found, unlinks it from the tree, and returns \p true. + The \ref disposer specified in \p Traits class template parameter is called + by garbage collector \p GC asynchronously. + + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + + If the item with key equal to \p key is not found the function return \p false. + + Note the \p Traits::less and/or \p Traits::compare predicate should accept a parameter of type \p Q + that can be not the same as \p value_type. + */ + template + bool erase( Q const& key, Func f ) + { + return erase_( key, node_compare(), + []( Q const&, leaf_node const& ) -> bool { return true; }, + f ); + } + + /// Delete the item from the tree with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_EllenBinTree_erase_func "erase(Q const&, Func)" + but \p pred predicate is used for key comparing. + \p Less has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_less + "Predicate requirements". + \p pred must imply the same element order as the comparator used for building the tree. 
+ */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + typedef ellen_bintree::details::compare< + key_type, + value_type, + opt::details::make_comparator_from_less, + node_traits + > compare_functor; + + return erase_( key, compare_functor(), + []( Q const&, leaf_node const& ) -> bool { return true; }, + f ); + } + + /// Extracts an item with minimal key from the tree + /** + The function searches an item with minimal key, unlinks it, and returns a guarded pointer to an item found. + If the tree is empty the function returns an empty guarded pointer. + + @note Due the concurrent nature of the tree, the function extracts nearly minimum key. + It means that the function gets leftmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. + So, the function returns the item with minimum key at the moment of tree traversing. + + The returned \p guarded_ptr prevents disposer invocation for returned item, + see \p cds::gc::guarded_ptr for explanation. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + */ + guarded_ptr extract_min() + { + return extract_min_(); + } + + /// Extracts an item with maximal key from the tree + /** + The function searches an item with maximal key, unlinks it, and returns a guarded pointer to an item found. + If the tree is empty the function returns an empty \p guarded_ptr. + + @note Due the concurrent nature of the tree, the function extracts nearly maximal key. + It means that the function gets rightmost leaf of the tree and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key great than rightmost item's key. + So, the function returns the item with maximal key at the moment of tree traversing. + + The returned \p guarded_ptr prevents disposer invocation for returned item, + see cds::gc::guarded_ptr for explanation. 
+ @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + */ + guarded_ptr extract_max() + { + return extract_max_(); + } + + /// Extracts an item from the tree + /** \anchor cds_intrusive_EllenBinTree_extract + The function searches an item with key equal to \p key in the tree, + unlinks it, and returns a guarded pointer to an item found. + If the item is not found the function returns an empty \p guarded_ptr. + + \p guarded_ptr prevents disposer invocation for returned item, + see cds::gc::guarded_ptr for explanation. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + */ + template + guarded_ptr extract( Q const& key ) + { + return extract_( key ); + } + + /// Extracts an item from the tree using \p pred for searching + /** + The function is an analog of \ref cds_intrusive_EllenBinTree_extract "extract(Q const&)" + but \p pred is used for key compare. + \p Less has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_less + "Predicate requirements". + \p pred must imply the same element order as the comparator used for building the tree. + */ + template + guarded_ptr extract_with( Q const& key, Less pred ) + { + return extract_with_( key, pred ); + } + + /// Checks whether the set contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + */ + template + bool contains( Q const& key ) const + { + search_result res; + if ( search( res, key, node_compare())) { + m_Stat.onFindSuccess(); + return true; + } + + m_Stat.onFindFailed(); + return false; + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( Q const& key ) const + { + return contains( key ); + } + //@endcond + + /// Checks whether the set contains \p key using \p pred predicate for searching + /** + The function is similar to contains( key ) but \p pred is used for key comparing. 
+ \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool contains( Q const& key, Less pred ) const + { + CDS_UNUSED( pred ); + typedef ellen_bintree::details::compare< + key_type, + value_type, + opt::details::make_comparator_from_less, + node_traits + > compare_functor; + + search_result res; + if ( search( res, key, compare_functor())) { + m_Stat.onFindSuccess(); + return true; + } + m_Stat.onFindFailed(); + return false; + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( Q const& key, Less pred ) const + { + return contains( key, pred ); + } + //@endcond + + /// Finds the key \p key + /** @anchor cds_intrusive_EllenBinTree_find_func + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. + + The functor can change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the tree \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( Q& key, Func f ) const + { + return find_( key, f ); + } + //@cond + template + bool find( Q const& key, Func f ) const + { + return find_( key, f ); + } + //@endcond + + /// Finds the key \p key with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_EllenBinTree_find_func "find(Q&, Func)" + but \p pred is used for key comparison. 
+ \p Less functor has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_less + "Predicate requirements". + \p pred must imply the same element order as the comparator used for building the tree. + */ + template + bool find_with( Q& key, Less pred, Func f ) const + { + return find_with_( key, pred, f ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) const + { + return find_with_( key, pred, f ); + } + //@endcond + + /// Finds \p key and returns the item found + /** @anchor cds_intrusive_EllenBinTree_get + The function searches the item with key equal to \p key and returns the item found as \p guarded_ptr object. + The function returns an empty guarded pointer is \p key is not found. + + \p guarded_ptr prevents disposer invocation for returned item, + see \p cds::gc::guarded_ptr for explanation. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + */ + template + guarded_ptr get( Q const& key ) const + { + return get_( key ); + } + + /// Finds \p key with predicate \p pred and returns the item found + /** + The function is an analog of \ref cds_intrusive_EllenBinTree_get "get(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less and should meet \ref cds_intrusive_EllenBinTree_less + "Predicate requirements". + \p pred must imply the same element order as the comparator used for building the tree. + */ + template + guarded_ptr get_with( Q const& key, Less pred ) const + { + return get_with_( key, pred ); + } + + /// Checks if the tree is empty + bool empty() const + { + return m_Root.m_pLeft.load( memory_model::memory_order_relaxed )->is_leaf(); + } + + /// Clears the tree (thread safe, not atomic) + /** + The function unlink all items from the tree. 
+ The function is thread safe but not atomic: in multi-threaded environment with parallel insertions + this sequence + \code + tree.clear(); + assert( tree.empty()); + \endcode + the assertion could be raised. + + For each leaf the \p disposer will be called after unlinking. + */ + void clear() + { + guarded_ptr gp; + do { + gp = extract_min(); + } while ( gp ); + } + + /// Clears the tree (not thread safe) + /** + This function is not thread safe and may be called only when no other thread deals with the tree. + The function is used in the tree destructor. + */ + void unsafe_clear() + { + while ( true ) { + internal_node * pParent = nullptr; + internal_node * pGrandParent = nullptr; + tree_node * pLeaf = const_cast( &m_Root ); + + // Get leftmost leaf + while ( pLeaf->is_internal()) { + pGrandParent = pParent; + pParent = static_cast( pLeaf ); + pLeaf = pParent->m_pLeft.load( memory_model::memory_order_relaxed ); + } + + if ( pLeaf->infinite_key()) { + // The tree is empty + return; + } + + // Remove leftmost leaf and its parent node + assert( pGrandParent ); + assert( pParent ); + assert( pLeaf->is_leaf()); + + pGrandParent->m_pLeft.store( pParent->m_pRight.load( memory_model::memory_order_relaxed ), memory_model::memory_order_relaxed ); + free_leaf_node( node_traits::to_value_ptr( static_cast( pLeaf ))); + free_internal_node( pParent ); + } + } + + /// Returns item count in the tree + /** + Only leaf nodes containing user data are counted. + + The value returned depends on item counter type provided by \p Traits template parameter. + If it is \p atomicity::empty_item_counter this function always returns 0. + The function is not suitable for checking the tree emptiness, use \p empty() + member function for this purpose. 
+ */ + size_t size() const + { + return m_ItemCounter; + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + /// Checks internal consistency (not atomic, not thread-safe) + /** + The debugging function to check internal consistency of the tree. + */ + bool check_consistency() const + { + return check_consistency( &m_Root ); + } + + protected: + //@cond + + bool check_consistency( internal_node const * pRoot ) const + { + tree_node * pLeft = pRoot->m_pLeft.load( atomics::memory_order_relaxed ); + tree_node * pRight = pRoot->m_pRight.load( atomics::memory_order_relaxed ); + assert( pLeft ); + assert( pRight ); + + if ( node_compare()( *pLeft, *pRoot ) < 0 + && node_compare()( *pRoot, *pRight ) <= 0 + && node_compare()( *pLeft, *pRight ) < 0 ) + { + bool bRet = true; + if ( pLeft->is_internal()) + bRet = check_consistency( static_cast( pLeft )); + assert( bRet ); + + if ( bRet && pRight->is_internal()) + bRet = bRet && check_consistency( static_cast( pRight )); + assert( bRet ); + + return bRet; + } + return false; + } + + tree_node * protect_child_node( search_result& res, internal_node * pParent, bool bRight, update_ptr updParent ) const + { + retry: + tree_node * p = bRight + ? res.guards.protect( search_result::Guard_Leaf, pParent->m_pRight, + []( tree_node * pn ) -> internal_node* { return static_cast(pn);}) + : res.guards.protect( search_result::Guard_Leaf, pParent->m_pLeft, + []( tree_node * pn ) -> internal_node* { return static_cast(pn);}); + + // If we use member hook, data node pointer != internal node pointer + // So, we need protect the child twice: as internal node and as data node + // and then analyze what kind of node we have + tree_node * pVal = bRight + ? 
res.guards.protect( search_result::Guard_temporary, pParent->m_pRight, + []( tree_node * pn ) -> value_type* { return node_traits::to_value_ptr( static_cast(pn));} ) + : res.guards.protect( search_result::Guard_temporary, pParent->m_pLeft, + []( tree_node * pn ) -> value_type* { return node_traits::to_value_ptr( static_cast(pn));} ); + + // child node is guarded + // See whether pParent->m_pUpdate has not been changed + if ( pParent->m_pUpdate.load( memory_model::memory_order_acquire ) != updParent ) { + // update has been changed - returns nullptr as a flag to search retry + return nullptr; + } + + if ( p != pVal ) + goto retry; + + if ( p && p->is_leaf()) + res.guards.assign( search_result::Guard_Leaf, node_traits::to_value_ptr( static_cast( p ))); + + res.guards.clear( search_result::Guard_temporary ); + + return p; + } + + static update_ptr search_protect_update( search_result& res, atomics::atomic const& src ) + { + return res.guards.protect( search_result::Guard_updParent, src, [](update_ptr p) -> update_desc* { return p.ptr(); }); + } + + template + bool search( search_result& res, KeyValue const& key, Compare cmp ) const + { + internal_node * pParent; + internal_node * pGrandParent = nullptr; + update_ptr updParent; + update_ptr updGrandParent; + bool bRightLeaf; + bool bRightParent = false; + + int nCmp = 0; + + retry: + pParent = nullptr; + //pGrandParent = nullptr; + updParent = nullptr; + bRightLeaf = false; + tree_node * pLeaf = const_cast( &m_Root ); + while ( pLeaf->is_internal()) { + res.guards.copy( search_result::Guard_GrandParent, search_result::Guard_Parent ); + pGrandParent = pParent; + res.guards.copy( search_result::Guard_Parent, search_result::Guard_Leaf ); + pParent = static_cast( pLeaf ); + bRightParent = bRightLeaf; + res.guards.copy( search_result::Guard_updGrandParent, search_result::Guard_updParent ); + updGrandParent = updParent; + + updParent = search_protect_update( res, pParent->m_pUpdate ); + + switch ( updParent.bits()) { + case 
update_desc::DFlag: + case update_desc::Mark: + m_Stat.onSearchRetry(); + goto retry; + } + + nCmp = cmp( key, *pParent ); + bRightLeaf = nCmp >= 0; + + pLeaf = protect_child_node( res, pParent, bRightLeaf, updParent ); + if ( !pLeaf ) { + m_Stat.onSearchRetry(); + goto retry; + } + } + + assert( pLeaf->is_leaf()); + nCmp = cmp( key, *static_cast(pLeaf)); + + res.pGrandParent = pGrandParent; + res.pParent = pParent; + res.pLeaf = static_cast( pLeaf ); + res.updParent = updParent; + res.updGrandParent = updGrandParent; + res.bRightParent = bRightParent; + res.bRightLeaf = bRightLeaf; + + return nCmp == 0; + } + + bool search_min( search_result& res ) const + { + internal_node * pParent; + internal_node * pGrandParent; + update_ptr updParent; + update_ptr updGrandParent; + + retry: + pParent = nullptr; + pGrandParent = nullptr; + updParent = nullptr; + tree_node * pLeaf = const_cast( &m_Root ); + while ( pLeaf->is_internal()) { + res.guards.copy( search_result::Guard_GrandParent, search_result::Guard_Parent ); + pGrandParent = pParent; + res.guards.copy( search_result::Guard_Parent, search_result::Guard_Leaf ); + pParent = static_cast( pLeaf ); + res.guards.copy( search_result::Guard_updGrandParent, search_result::Guard_updParent ); + updGrandParent = updParent; + + updParent = search_protect_update( res, pParent->m_pUpdate ); + + switch ( updParent.bits()) { + case update_desc::DFlag: + case update_desc::Mark: + m_Stat.onSearchRetry(); + goto retry; + } + + pLeaf = protect_child_node( res, pParent, false, updParent ); + if ( !pLeaf ) { + m_Stat.onSearchRetry(); + goto retry; + } + } + + if ( pLeaf->infinite_key()) + return false; + + res.pGrandParent = pGrandParent; + res.pParent = pParent; + assert( pLeaf->is_leaf()); + res.pLeaf = static_cast( pLeaf ); + res.updParent = updParent; + res.updGrandParent = updGrandParent; + res.bRightParent = false; + res.bRightLeaf = false; + + return true; + } + + bool search_max( search_result& res ) const + { + internal_node * 
pParent; + internal_node * pGrandParent; + update_ptr updParent; + update_ptr updGrandParent; + bool bRightLeaf; + bool bRightParent = false; + + retry: + pParent = nullptr; + pGrandParent = nullptr; + updParent = nullptr; + bRightLeaf = false; + tree_node * pLeaf = const_cast( &m_Root ); + while ( pLeaf->is_internal()) { + res.guards.copy( search_result::Guard_GrandParent, search_result::Guard_Parent ); + pGrandParent = pParent; + res.guards.copy( search_result::Guard_Parent, search_result::Guard_Leaf ); + pParent = static_cast( pLeaf ); + bRightParent = bRightLeaf; + res.guards.copy( search_result::Guard_updGrandParent, search_result::Guard_updParent ); + updGrandParent = updParent; + + updParent = search_protect_update( res, pParent->m_pUpdate ); + + switch ( updParent.bits()) { + case update_desc::DFlag: + case update_desc::Mark: + m_Stat.onSearchRetry(); + goto retry; + } + + bRightLeaf = !pParent->infinite_key(); + pLeaf = protect_child_node( res, pParent, bRightLeaf, updParent ); + if ( !pLeaf ) { + m_Stat.onSearchRetry(); + goto retry; + } + } + + if ( pLeaf->infinite_key()) + return false; + + res.pGrandParent = pGrandParent; + res.pParent = pParent; + assert( pLeaf->is_leaf()); + res.pLeaf = static_cast( pLeaf ); + res.updParent = updParent; + res.updGrandParent = updGrandParent; + res.bRightParent = bRightParent; + res.bRightLeaf = bRightLeaf; + + return true; + } + + /* + void help( update_ptr pUpdate ) + { + // pUpdate must be guarded! 
+ switch ( pUpdate.bits()) { + case update_desc::IFlag: + help_insert( pUpdate.ptr()); + m_Stat.onHelpInsert(); + break; + case update_desc::DFlag: + help_delete( pUpdate.ptr()); + m_Stat.onHelpDelete(); + break; + case update_desc::Mark: + //m_Stat.onHelpMark(); + //help_marked( pUpdate.ptr()); + break; + } + } + */ + + void help_insert( update_desc * pOp ) + { + // pOp must be guarded + + tree_node * pLeaf = static_cast( pOp->iInfo.pLeaf ); + if ( pOp->iInfo.bRightLeaf ) { + CDS_VERIFY( pOp->iInfo.pParent->m_pRight.compare_exchange_strong( pLeaf, static_cast( pOp->iInfo.pNew ), + memory_model::memory_order_release, atomics::memory_order_relaxed )); + } + else { + CDS_VERIFY( pOp->iInfo.pParent->m_pLeft.compare_exchange_strong( pLeaf, static_cast( pOp->iInfo.pNew ), + memory_model::memory_order_release, atomics::memory_order_relaxed )); + } + + // Unflag parent + update_ptr cur( pOp, update_desc::IFlag ); + CDS_VERIFY( pOp->iInfo.pParent->m_pUpdate.compare_exchange_strong( cur, pOp->iInfo.pParent->null_update_desc(), + memory_model::memory_order_release, atomics::memory_order_relaxed )); + } + + bool check_delete_precondition( search_result& res ) const + { + // precondition: all member of res must be guarded + + assert( res.pGrandParent != nullptr ); + + return static_cast(res.pGrandParent->get_child( res.bRightParent, memory_model::memory_order_relaxed )) == res.pParent + && static_cast( res.pParent->get_child( res.bRightLeaf, memory_model::memory_order_relaxed )) == res.pLeaf; + } + + bool help_delete( update_desc * pOp ) + { + // precondition: pOp must be guarded + + update_ptr pUpdate( pOp->dInfo.pUpdateParent ); + update_ptr pMark( pOp, update_desc::Mark ); + if ( pOp->dInfo.pParent->m_pUpdate.compare_exchange_strong( pUpdate, pMark, // * + memory_model::memory_order_acquire, atomics::memory_order_relaxed )) + { + help_marked( pOp ); + + retire_node( pOp->dInfo.pParent ); + retire_node( pOp->dInfo.pLeaf ); + retire_update_desc( pOp ); + return true; + } + 
else if ( pUpdate == pMark ) { + // some other thread is processing help_marked() + help_marked( pOp ); + m_Stat.onHelpMark(); + return true; + } + else { + // Undo grandparent dInfo + update_ptr pDel( pOp, update_desc::DFlag ); + if ( pOp->dInfo.pGrandParent->m_pUpdate.compare_exchange_strong( pDel, pOp->dInfo.pGrandParent->null_update_desc(), + memory_model::memory_order_release, atomics::memory_order_relaxed )) + { + retire_update_desc( pOp ); + } + return false; + } + } + + static tree_node * protect_sibling( typename gc::Guard& guard, atomics::atomic& sibling ) + { + tree_node * pSibling = guard.protect( sibling, [](tree_node * p) -> internal_node* { return static_cast(p); } ); + if ( pSibling->is_leaf()) + guard.assign( node_traits::to_value_ptr( static_cast( pSibling ))); + return pSibling; + } + + void help_marked( update_desc * pOp ) + { + // precondition: pOp must be guarded + + tree_node * pParent = pOp->dInfo.pParent; + + typename gc::Guard guard; + tree_node * pOpposite = protect_sibling( guard, pOp->dInfo.bRightLeaf ? 
pOp->dInfo.pParent->m_pLeft : pOp->dInfo.pParent->m_pRight ); + + if ( pOp->dInfo.bRightParent ) { + CDS_VERIFY( pOp->dInfo.pGrandParent->m_pRight.compare_exchange_strong( pParent, pOpposite, + memory_model::memory_order_release, atomics::memory_order_relaxed )); + } + else { + CDS_VERIFY( pOp->dInfo.pGrandParent->m_pLeft.compare_exchange_strong( pParent, pOpposite, + memory_model::memory_order_release, atomics::memory_order_relaxed )); + } + + update_ptr upd( pOp, update_desc::DFlag ); + CDS_VERIFY( pOp->dInfo.pGrandParent->m_pUpdate.compare_exchange_strong( upd, pOp->dInfo.pGrandParent->null_update_desc(), + memory_model::memory_order_release, atomics::memory_order_relaxed )); + } + + bool try_insert( value_type& val, internal_node * pNewInternal, search_result& res ) + { + assert( res.updParent.bits() == update_desc::Clean ); + assert( res.pLeaf->is_leaf()); + + // check search result + if ( res.pParent->get_child( res.bRightLeaf, memory_model::memory_order_acquire ) == res.pLeaf ) { + leaf_node * pNewLeaf = node_traits::to_node_ptr( val ); + + int nCmp = node_compare()(val, *res.pLeaf); + if ( nCmp < 0 ) { + if ( res.pGrandParent ) { + assert( !res.pLeaf->infinite_key()); + pNewInternal->infinite_key( 0 ); + key_extractor()(pNewInternal->m_Key, *node_traits::to_value_ptr( res.pLeaf )); + } + else { + assert( res.pLeaf->infinite_key() == tree_node::key_infinite1 ); + pNewInternal->infinite_key( 1 ); + } + pNewInternal->m_pLeft.store( static_cast(pNewLeaf), memory_model::memory_order_relaxed ); + pNewInternal->m_pRight.store( static_cast(res.pLeaf), memory_model::memory_order_relaxed ); + } + else { + assert( !res.pLeaf->is_internal()); + + pNewInternal->infinite_key( 0 ); + key_extractor()(pNewInternal->m_Key, val); + pNewInternal->m_pLeft.store( static_cast(res.pLeaf), memory_model::memory_order_relaxed ); + pNewInternal->m_pRight.store( static_cast(pNewLeaf), memory_model::memory_order_relaxed ); + assert( !res.pLeaf->infinite_key()); + } + + typename 
gc::Guard guard; + update_desc * pOp = alloc_update_desc(); + guard.assign( pOp ); + + pOp->iInfo.pParent = res.pParent; + pOp->iInfo.pNew = pNewInternal; + pOp->iInfo.pLeaf = res.pLeaf; + pOp->iInfo.bRightLeaf = res.bRightLeaf; + + update_ptr updCur( res.updParent.ptr()); + if ( res.pParent->m_pUpdate.compare_exchange_strong( updCur, update_ptr( pOp, update_desc::IFlag ), + memory_model::memory_order_acq_rel, atomics::memory_order_relaxed )) { + // do insert + help_insert( pOp ); + retire_update_desc( pOp ); + return true; + } + else { + m_Stat.onUpdateDescDeleted(); + free_update_desc( pOp ); + } + } + + return false; + } + + template + bool erase_( Q const& val, Compare cmp, Equal eq, Func f ) + { + update_desc * pOp = nullptr; + search_result res; + back_off bkoff; + + for ( ;; ) { + if ( !search( res, val, cmp ) || !eq( val, *res.pLeaf )) { + if ( pOp ) + retire_update_desc( pOp ); + m_Stat.onEraseFailed(); + return false; + } + + if ( res.updGrandParent.bits() == update_desc::Clean && res.updParent.bits() == update_desc::Clean ) { + if ( !pOp ) + pOp = alloc_update_desc(); + if ( check_delete_precondition( res )) { + typename gc::Guard guard; + guard.assign( pOp ); + + pOp->dInfo.pGrandParent = res.pGrandParent; + pOp->dInfo.pParent = res.pParent; + pOp->dInfo.pLeaf = res.pLeaf; + pOp->dInfo.pUpdateParent = res.updParent.ptr(); + pOp->dInfo.bRightParent = res.bRightParent; + pOp->dInfo.bRightLeaf = res.bRightLeaf; + + update_ptr updGP( res.updGrandParent.ptr()); + if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), + memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { + if ( help_delete( pOp )) { + // res.pLeaf is not deleted yet since it is guarded + f( *node_traits::to_value_ptr( res.pLeaf )); + break; + } + pOp = nullptr; + } + } + } + + bkoff(); + m_Stat.onEraseRetry(); + } + + --m_ItemCounter; + m_Stat.onEraseSuccess(); + return true; + } + + template + guarded_ptr extract_item( Q 
const& key, Compare cmp ) + { + update_desc * pOp = nullptr; + search_result res; + back_off bkoff; + + for ( ;; ) { + if ( !search( res, key, cmp )) { + if ( pOp ) + retire_update_desc( pOp ); + m_Stat.onEraseFailed(); + return guarded_ptr(); + } + + if ( res.updGrandParent.bits() == update_desc::Clean && res.updParent.bits() == update_desc::Clean ) { + if ( !pOp ) + pOp = alloc_update_desc(); + if ( check_delete_precondition( res )) { + typename gc::Guard guard; + guard.assign( pOp ); + + pOp->dInfo.pGrandParent = res.pGrandParent; + pOp->dInfo.pParent = res.pParent; + pOp->dInfo.pLeaf = res.pLeaf; + pOp->dInfo.pUpdateParent = res.updParent.ptr(); + pOp->dInfo.bRightParent = res.bRightParent; + pOp->dInfo.bRightLeaf = res.bRightLeaf; + + update_ptr updGP( res.updGrandParent.ptr()); + if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), + memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { + if ( help_delete( pOp )) + break; + pOp = nullptr; + } + } + } + + bkoff(); + m_Stat.onEraseRetry(); + } + + --m_ItemCounter; + m_Stat.onEraseSuccess(); + return guarded_ptr( res.guards.release( search_result::Guard_Leaf )); + } + + template + guarded_ptr extract_( Q const& key ) + { + return extract_item( key, node_compare()); + } + + template + guarded_ptr extract_with_( Q const& key, Less /*pred*/ ) + { + typedef ellen_bintree::details::compare< + key_type, + value_type, + opt::details::make_comparator_from_less, + node_traits + > compare_functor; + + return extract_item( key, compare_functor()); + } + + guarded_ptr extract_max_() + { + update_desc * pOp = nullptr; + search_result res; + back_off bkoff; + + for ( ;; ) { + if ( !search_max( res )) { + // Tree is empty + if ( pOp ) + retire_update_desc( pOp ); + m_Stat.onExtractMaxFailed(); + return guarded_ptr(); + } + + if ( res.updGrandParent.bits() == update_desc::Clean && res.updParent.bits() == update_desc::Clean ) { + if ( !pOp ) + pOp = 
alloc_update_desc(); + if ( check_delete_precondition( res )) { + typename gc::Guard guard; + guard.assign( pOp ); + + pOp->dInfo.pGrandParent = res.pGrandParent; + pOp->dInfo.pParent = res.pParent; + pOp->dInfo.pLeaf = res.pLeaf; + pOp->dInfo.pUpdateParent = res.updParent.ptr(); + pOp->dInfo.bRightParent = res.bRightParent; + pOp->dInfo.bRightLeaf = res.bRightLeaf; + + update_ptr updGP( res.updGrandParent.ptr()); + if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), + memory_model::memory_order_acquire, atomics::memory_order_relaxed )) + { + if ( help_delete( pOp )) + break; + pOp = nullptr; + } + } + } + + bkoff(); + m_Stat.onExtractMaxRetry(); + } + + --m_ItemCounter; + m_Stat.onExtractMaxSuccess(); + return guarded_ptr( res.guards.release( search_result::Guard_Leaf )); + } + + guarded_ptr extract_min_() + { + update_desc * pOp = nullptr; + search_result res; + back_off bkoff; + + for ( ;; ) { + if ( !search_min( res )) { + // Tree is empty + if ( pOp ) + retire_update_desc( pOp ); + m_Stat.onExtractMinFailed(); + return guarded_ptr(); + } + + if ( res.updGrandParent.bits() == update_desc::Clean && res.updParent.bits() == update_desc::Clean ) { + if ( !pOp ) + pOp = alloc_update_desc(); + if ( check_delete_precondition( res )) { + typename gc::Guard guard; + guard.assign( pOp ); + + pOp->dInfo.pGrandParent = res.pGrandParent; + pOp->dInfo.pParent = res.pParent; + pOp->dInfo.pLeaf = res.pLeaf; + pOp->dInfo.pUpdateParent = res.updParent.ptr(); + pOp->dInfo.bRightParent = res.bRightParent; + pOp->dInfo.bRightLeaf = res.bRightLeaf; + + update_ptr updGP( res.updGrandParent.ptr()); + if ( res.pGrandParent->m_pUpdate.compare_exchange_strong( updGP, update_ptr( pOp, update_desc::DFlag ), + memory_model::memory_order_acquire, atomics::memory_order_relaxed )) + { + if ( help_delete( pOp )) + break; + pOp = nullptr; + } + } + } + + bkoff(); + m_Stat.onExtractMinRetry(); + } + + --m_ItemCounter; + 
m_Stat.onExtractMinSuccess(); + return guarded_ptr( res.guards.release( search_result::Guard_Leaf )); + } + + template + bool find_( Q& val, Func f ) const + { + search_result res; + if ( search( res, val, node_compare())) { + assert( res.pLeaf ); + f( *node_traits::to_value_ptr( res.pLeaf ), val ); + + m_Stat.onFindSuccess(); + return true; + } + + m_Stat.onFindFailed(); + return false; + } + + template + bool find_with_( Q& val, Less /*pred*/, Func f ) const + { + typedef ellen_bintree::details::compare< + key_type, + value_type, + opt::details::make_comparator_from_less, + node_traits + > compare_functor; + + search_result res; + if ( search( res, val, compare_functor())) { + assert( res.pLeaf ); + f( *node_traits::to_value_ptr( res.pLeaf ), val ); + + m_Stat.onFindSuccess(); + return true; + } + + m_Stat.onFindFailed(); + return false; + } + + template + guarded_ptr get_( Q const& val ) const + { + search_result res; + if ( search( res, val, node_compare())) { + assert( res.pLeaf ); + m_Stat.onFindSuccess(); + return guarded_ptr( res.guards.release( search_result::Guard_Leaf )); + } + + m_Stat.onFindFailed(); + return guarded_ptr(); + } + + template + guarded_ptr get_with_( Q const& val, Less pred ) const + { + CDS_UNUSED( pred ); + + typedef ellen_bintree::details::compare< + key_type, + value_type, + opt::details::make_comparator_from_less, + node_traits + > compare_functor; + + search_result res; + if ( search( res, val, compare_functor())) { + assert( res.pLeaf ); + m_Stat.onFindSuccess(); + return guarded_ptr( res.guards.release( search_result::Guard_Leaf )); + } + + m_Stat.onFindFailed(); + return guarded_ptr(); + + } + + //@endcond + }; + +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_IMPL_ELLEN_BINTREE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/impl/feldman_hashset.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/impl/feldman_hashset.h new file mode 100644 index 
0000000..2f1f4cc --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/impl/feldman_hashset.h @@ -0,0 +1,1263 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_INTRUSIVE_IMPL_FELDMAN_HASHSET_H +#define CDSLIB_INTRUSIVE_IMPL_FELDMAN_HASHSET_H + +#include // std::ref +#include // std::iterator_traits +#include + +#include +#include + +namespace cds { namespace intrusive { + /// Intrusive hash set based on multi-level array + /** @ingroup cds_intrusive_map + @anchor cds_intrusive_FeldmanHashSet_hp + + Source: + - [2013] Steven Feldman, Pierre LaBorde, Damian Dechev "Concurrent Multi-level Arrays: + Wait-free Extensible Hash Maps" + + [From the paper] The hardest problem encountered while developing a parallel hash map is how to perform + a global resize, the process of redistributing the elements in a hash map that occurs when adding new + buckets. The negative impact of blocking synchronization is multiplied during a global resize, because all + threads will be forced to wait on the thread that is performing the involved process of resizing the hash map + and redistributing the elements. \p %FeldmanHashSet implementation avoids global resizes through new array + allocation. By allowing concurrent expansion this structure is free from the overhead of an explicit resize, + which facilitates concurrent operations. + + The presented design includes dynamic hashing, the use of sub-arrays within the hash map data structure; + which, in combination with perfect hashing, means that each element has a unique final, as well as current, position. + It is important to note that the perfect hash function required by our hash map is trivial to realize as + any hash function that permutes the bits of the key is suitable. This is possible because of our approach + to the hash function; we require that it produces hash values that are equal in size to that of the key. + We know that if we expand the hash map a fixed number of times there can be no collision as duplicate keys + are not provided for in the standard semantics of a hash map. 
+ + \p %FeldmanHashSet is a multi-level array which has a structure similar to a tree: + @image html feldman_hashset.png + The multi-level array differs from a tree in that each position on the tree could hold an array of nodes or a single node. + A position that holds a single node is a \p dataNode which holds the hash value of a key and the value that is associated + with that key; it is a simple struct holding two variables. A \p dataNode in the multi-level array could be marked. + A \p markedDataNode refers to a pointer to a \p dataNode that has been bitmarked at the least significant bit (LSB) + of the pointer to the node. This signifies that this \p dataNode is contended. An expansion must occur at this node; + any thread that sees this \p markedDataNode will try to replace it with an \p arrayNode; which is a position that holds + an array of nodes. The pointer to an \p arrayNode is differentiated from that of a pointer to a \p dataNode by a bitmark + on the second-least significant bit. + + \p %FeldmanHashSet multi-level array is similar to a tree in that we keep a pointer to the root, which is a memory array + called \p head. The length of the \p head memory array is unique, whereas every other \p arrayNode has a uniform length; + a normal \p arrayNode has a fixed power-of-two length equal to the binary logarithm of a variable called \p arrayLength. + The maximum depth of the tree, \p maxDepth, is the maximum number of pointers that must be followed to reach any node. + We define \p currentDepth as the number of memory arrays that we need to traverse to reach the \p arrayNode on which + we need to operate; this is initially one, because of \p head. + + That approach to the structure of the hash set uses an extensible hashing scheme; the hash value is treated as a bit + string and rehash incrementally. + + @note Two important things you should keep in mind when you're using \p %FeldmanHashSet: + - all keys must be fixed-size. 
It means that you cannot use \p std::string as a key for \p %FeldmanHashSet. + Instead, for the strings you should use well-known hashing algorithms like SHA1, SHA2, + MurmurHash, CityHash + or its successor FarmHash and so on, which + converts variable-length strings to fixed-length bit-strings, and use that hash as a key in \p %FeldmanHashSet. + - \p %FeldmanHashSet uses a perfect hashing. It means that if two different keys, for example, of type \p std::string, + have identical hash then you cannot insert both that keys in the set. \p %FeldmanHashSet does not maintain the key, + it maintains its fixed-size hash value. + + The set supports @ref cds_intrusive_FeldmanHashSet_iterators "bidirectional thread-safe iterators". + + Template parameters: + - \p GC - safe memory reclamation schema. Can be \p gc::HP, \p gc::DHP or one of \ref cds_urcu_type "RCU type" + - \p T - a value type to be stored in the set + - \p Traits - type traits, the structure based on \p feldman_hashset::traits or result of \p feldman_hashset::make_traits metafunction. + \p Traits is the mandatory argument because it has one mandatory type - an @ref feldman_hashset::traits::hash_accessor "accessor" + to hash value of \p T. The set algorithm does not calculate that hash value. + + There are several specializations of \p %FeldmanHashSet for each \p GC. You should include: + - for \p gc::HP garbage collector + - for \p gc::DHP garbage collector + - for \ref cds_intrusive_FeldmanHashSet_rcu "RCU type". RCU specialization + has a slightly different interface. 
+ */ + template < + class GC + ,typename T +#ifdef CDS_DOXYGEN_INVOKED + ,typename Traits = feldman_hashset::traits +#else + ,typename Traits +#endif + > + class FeldmanHashSet: protected feldman_hashset::multilevel_array + { + //@cond + typedef feldman_hashset::multilevel_array base_class; + //@endcond + + public: + typedef GC gc; ///< Garbage collector + typedef T value_type; ///< type of value stored in the set + typedef Traits traits; ///< Traits template parameter, see \p feldman_hashset::traits + + typedef typename traits::hash_accessor hash_accessor; ///< Hash accessor functor + typedef typename base_class::hash_type hash_type; ///< Hash type deduced from \p hash_accessor return type + typedef typename traits::disposer disposer; ///< data node disposer + typedef typename base_class::hash_comparator hash_comparator; ///< hash compare functor based on \p traits::compare and \p traits::less options + + typedef typename traits::item_counter item_counter; ///< Item counter type + typedef typename traits::node_allocator node_allocator; ///< Array node allocator + typedef typename traits::memory_model memory_model; ///< Memory model + typedef typename traits::back_off back_off; ///< Backoff strategy + typedef typename traits::stat stat; ///< Internal statistics type + + typedef typename gc::template guarded_ptr< value_type > guarded_ptr; ///< Guarded pointer + + /// Count of hazard pointers required + static constexpr size_t const c_nHazardPtrCount = 2; + + /// The size of hash_type in bytes, see \p feldman_hashset::traits::hash_size for explanation + static constexpr size_t const c_hash_size = base_class::c_hash_size; + + /// Level statistics + typedef feldman_hashset::level_statistics level_statistics; + + protected: + //@cond + typedef typename base_class::node_ptr node_ptr; + typedef typename base_class::atomic_node_ptr atomic_node_ptr; + typedef typename base_class::array_node array_node; + typedef typename base_class::traverse_data traverse_data; + + using 
base_class::to_array; + using base_class::to_node; + using base_class::stats; + using base_class::head; + using base_class::metrics; + //@endcond + + protected: + //@cond + class iterator_base + { + friend class FeldmanHashSet; + + protected: + array_node * m_pNode; ///< current array node + size_t m_idx; ///< current position in m_pNode + typename gc::Guard m_guard; ///< HP guard + FeldmanHashSet const* m_set; ///< Hash set + + public: + iterator_base() noexcept + : m_pNode( nullptr ) + , m_idx( 0 ) + , m_set( nullptr ) + {} + + iterator_base( iterator_base const& rhs ) noexcept + : m_pNode( rhs.m_pNode ) + , m_idx( rhs.m_idx ) + , m_set( rhs.m_set ) + { + m_guard.copy( rhs.m_guard ); + } + + iterator_base& operator=( iterator_base const& rhs ) noexcept + { + m_pNode = rhs.m_pNode; + m_idx = rhs.m_idx; + m_set = rhs.m_set; + m_guard.copy( rhs.m_guard ); + return *this; + } + + iterator_base& operator++() + { + forward(); + return *this; + } + + iterator_base& operator--() + { + backward(); + return *this; + } + + void release() + { + m_guard.clear(); + } + + bool operator ==( iterator_base const& rhs ) const noexcept + { + return m_pNode == rhs.m_pNode && m_idx == rhs.m_idx && m_set == rhs.m_set; + } + + bool operator !=( iterator_base const& rhs ) const noexcept + { + return !( *this == rhs ); + } + + protected: + iterator_base( FeldmanHashSet const& set, array_node * pNode, size_t idx, bool ) + : m_pNode( pNode ) + , m_idx( idx ) + , m_set( &set ) + {} + + iterator_base( FeldmanHashSet const& set, array_node * pNode, size_t idx ) + : m_pNode( pNode ) + , m_idx( idx ) + , m_set( &set ) + { + forward(); + } + + value_type * pointer() const noexcept + { + return m_guard.template get(); + } + + void forward() + { + assert( m_set != nullptr ); + assert( m_pNode != nullptr ); + + size_t const arrayNodeSize = m_set->array_node_size(); + size_t const headSize = m_set->head_size(); + array_node * pNode = m_pNode; + size_t idx = m_idx + 1; + size_t nodeSize = 
m_pNode->pParent? arrayNodeSize : headSize; + + for ( ;; ) { + if ( idx < nodeSize ) { + node_ptr slot = pNode->nodes[idx].load( memory_model::memory_order_acquire ); + if ( slot.bits() == base_class::flag_array_node ) { + // array node, go down the tree + assert( slot.ptr() != nullptr ); + pNode = to_array( slot.ptr()); + idx = 0; + nodeSize = arrayNodeSize; + } + else if ( slot.bits() == base_class::flag_array_converting ) { + // the slot is converting to array node right now - skip the node + ++idx; + } + else { + if ( slot.ptr()) { + // data node + if ( m_guard.protect( pNode->nodes[idx], []( node_ptr p ) -> value_type* { return p.ptr(); }) == slot ) { + m_pNode = pNode; + m_idx = idx; + return; + } + } + ++idx; + } + } + else { + // up to parent node + if ( pNode->pParent ) { + idx = pNode->idxParent + 1; + pNode = pNode->pParent; + nodeSize = pNode->pParent ? arrayNodeSize : headSize; + } + else { + // end() + assert( pNode == m_set->head()); + assert( idx == headSize ); + m_pNode = pNode; + m_idx = idx; + return; + } + } + } + } + + void backward() + { + assert( m_set != nullptr ); + assert( m_pNode != nullptr ); + + size_t const arrayNodeSize = m_set->array_node_size(); + size_t const headSize = m_set->head_size(); + size_t const endIdx = size_t(0) - 1; + + array_node * pNode = m_pNode; + size_t idx = m_idx - 1; + size_t nodeSize = m_pNode->pParent? 
arrayNodeSize : headSize; + + for ( ;; ) { + if ( idx != endIdx ) { + node_ptr slot = pNode->nodes[idx].load( memory_model::memory_order_acquire ); + if ( slot.bits() == base_class::flag_array_node ) { + // array node, go down the tree + assert( slot.ptr() != nullptr ); + pNode = to_array( slot.ptr()); + nodeSize = arrayNodeSize; + idx = nodeSize - 1; + } + else if ( slot.bits() == base_class::flag_array_converting ) { + // the slot is converting to array node right now - skip the node + --idx; + } + else { + if ( slot.ptr()) { + // data node + if ( m_guard.protect( pNode->nodes[idx], []( node_ptr p ) -> value_type* { return p.ptr(); }) == slot ) { + m_pNode = pNode; + m_idx = idx; + return; + } + } + --idx; + } + } + else { + // up to parent node + if ( pNode->pParent ) { + idx = pNode->idxParent - 1; + pNode = pNode->pParent; + nodeSize = pNode->pParent ? arrayNodeSize : headSize; + } + else { + // rend() + assert( pNode == m_set->head()); + assert( idx == endIdx ); + m_pNode = pNode; + m_idx = idx; + return; + } + } + } + } + }; + + template + Iterator init_begin() const + { + return Iterator( *this, head(), size_t(0) - 1 ); + } + + template + Iterator init_end() const + { + return Iterator( *this, head(), head_size(), false ); + } + + template + Iterator init_rbegin() const + { + return Iterator( *this, head(), head_size()); + } + + template + Iterator init_rend() const + { + return Iterator( *this, head(), size_t(0) - 1, false ); + } + + /// Bidirectional iterator class + template + class bidirectional_iterator: protected iterator_base + { + friend class FeldmanHashSet; + + protected: + static constexpr bool const c_bConstantIterator = IsConst; + + public: + typedef typename std::conditional< IsConst, value_type const*, value_type*>::type value_ptr; ///< Value pointer + typedef typename std::conditional< IsConst, value_type const&, value_type&>::type value_ref; ///< Value reference + + public: + bidirectional_iterator() noexcept + {} + + 
bidirectional_iterator( bidirectional_iterator const& rhs ) noexcept + : iterator_base( rhs ) + {} + + bidirectional_iterator& operator=( bidirectional_iterator const& rhs ) noexcept + { + iterator_base::operator=( rhs ); + return *this; + } + + bidirectional_iterator& operator++() + { + iterator_base::operator++(); + return *this; + } + + bidirectional_iterator& operator--() + { + iterator_base::operator--(); + return *this; + } + + value_ptr operator ->() const noexcept + { + return iterator_base::pointer(); + } + + value_ref operator *() const noexcept + { + value_ptr p = iterator_base::pointer(); + assert( p ); + return *p; + } + + void release() + { + iterator_base::release(); + } + + template + bool operator ==( bidirectional_iterator const& rhs ) const noexcept + { + return iterator_base::operator==( rhs ); + } + + template + bool operator !=( bidirectional_iterator const& rhs ) const noexcept + { + return !( *this == rhs ); + } + + protected: + bidirectional_iterator( FeldmanHashSet& set, array_node * pNode, size_t idx, bool ) + : iterator_base( set, pNode, idx, false ) + {} + + bidirectional_iterator( FeldmanHashSet& set, array_node * pNode, size_t idx ) + : iterator_base( set, pNode, idx ) + {} + }; + + /// Reverse bidirectional iterator + template + class reverse_bidirectional_iterator : public iterator_base + { + friend class FeldmanHashSet; + + public: + typedef typename std::conditional< IsConst, value_type const*, value_type*>::type value_ptr; ///< Value pointer + typedef typename std::conditional< IsConst, value_type const&, value_type&>::type value_ref; ///< Value reference + + public: + reverse_bidirectional_iterator() noexcept + : iterator_base() + {} + + reverse_bidirectional_iterator( reverse_bidirectional_iterator const& rhs ) noexcept + : iterator_base( rhs ) + {} + + reverse_bidirectional_iterator& operator=( reverse_bidirectional_iterator const& rhs) noexcept + { + iterator_base::operator=( rhs ); + return *this; + } + + 
reverse_bidirectional_iterator& operator++() + { + iterator_base::operator--(); + return *this; + } + + reverse_bidirectional_iterator& operator--() + { + iterator_base::operator++(); + return *this; + } + + value_ptr operator ->() const noexcept + { + return iterator_base::pointer(); + } + + value_ref operator *() const noexcept + { + value_ptr p = iterator_base::pointer(); + assert( p ); + return *p; + } + + void release() + { + iterator_base::release(); + } + + template + bool operator ==( reverse_bidirectional_iterator const& rhs ) const + { + return iterator_base::operator==( rhs ); + } + + template + bool operator !=( reverse_bidirectional_iterator const& rhs ) + { + return !( *this == rhs ); + } + + private: + reverse_bidirectional_iterator( FeldmanHashSet& set, array_node * pNode, size_t idx, bool ) + : iterator_base( set, pNode, idx, false ) + {} + + reverse_bidirectional_iterator( FeldmanHashSet& set, array_node * pNode, size_t idx ) + : iterator_base( set, pNode, idx, false ) + { + iterator_base::backward(); + } + }; + //@endcond + + public: +#ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined iterator; ///< @ref cds_intrusive_FeldmanHashSet_iterators "bidirectional iterator" type + typedef implementation_defined const_iterator; ///< @ref cds_intrusive_FeldmanHashSet_iterators "bidirectional const iterator" type + typedef implementation_defined reverse_iterator; ///< @ref cds_intrusive_FeldmanHashSet_iterators "bidirectional reverse iterator" type + typedef implementation_defined const_reverse_iterator; ///< @ref cds_intrusive_FeldmanHashSet_iterators "bidirectional reverse const iterator" type +#else + typedef bidirectional_iterator iterator; + typedef bidirectional_iterator const_iterator; + typedef reverse_bidirectional_iterator reverse_iterator; + typedef reverse_bidirectional_iterator const_reverse_iterator; +#endif + + private: + //@cond + item_counter m_ItemCounter; ///< Item counter + //@endcond + + public: + /// Creates empty set + /** + 
@param head_bits - 2head_bits specifies the size of head array, minimum is 4. + @param array_bits - 2array_bits specifies the size of array node, minimum is 2. + + Equation for \p head_bits and \p array_bits: + \code + sizeof( hash_type ) * 8 == head_bits + N * array_bits + \endcode + where \p N is multi-level array depth. + */ + FeldmanHashSet( size_t head_bits = 8, size_t array_bits = 4 ) + : base_class( head_bits, array_bits ) + {} + + /// Destructs the set and frees all data + ~FeldmanHashSet() + { + clear(); + } + + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with that hash. + + Returns \p true if \p val is placed into the set, \p false otherwise. + */ + bool insert( value_type& val ) + { + return insert( val, []( value_type& ) {} ); + } + + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. + + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. + + The user-defined functor is called only if the inserting is success. + + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting". 
+ */ + template + bool insert( value_type& val, Func f ) + { + hash_type const& hash = hash_accessor()( val ); + traverse_data pos( hash, *this ); + hash_comparator cmp; + typename gc::template GuardArray<2> guards; + + guards.assign( 1, &val ); + while ( true ) { + node_ptr slot = base_class::traverse( pos ); + assert( slot.bits() == 0 ); + + // protect data node by hazard pointer + if ( guards.protect( 0, pos.pArr->nodes[pos.nSlot], []( node_ptr p ) -> value_type* { return p.ptr(); }) != slot ) { + // slot value has been changed - retry + stats().onSlotChanged(); + } + else if ( slot.ptr()) { + if ( cmp( hash, hash_accessor()( *slot.ptr())) == 0 ) { + // the item with that hash value already exists + stats().onInsertFailed(); + return false; + } + + if ( !pos.splitter.eos()) { + // the slot must be expanded + base_class::expand_slot( pos, slot ); + } + else + return false; + } + else { + // the slot is empty, try to insert data node + node_ptr pNull; + if ( pos.pArr->nodes[pos.nSlot].compare_exchange_strong( pNull, node_ptr( &val ), memory_model::memory_order_release, atomics::memory_order_relaxed )) + { + // the new data node has been inserted + f( val ); + ++m_ItemCounter; + stats().onInsertSuccess(); + stats().height( pos.nHeight ); + return true; + } + + // insert failed - slot has been changed by another thread + // retry inserting + stats().onInsertRetry(); + } + } + } + + /// Updates the node + /** + Performs inserting or updating the item with hash value equal to \p val. + - If hash value is found then existing item is replaced with \p val, old item is disposed + with \p Traits::disposer. Note that the disposer is called by \p GC asynchronously. 
+ The function returns std::pair + - If hash value is not found and \p bInsert is \p true then \p val is inserted, + the function returns std::pair + - If hash value is not found and \p bInsert is \p false then the set is unchanged, + the function returns std::pair + + Returns std::pair where \p first is \p true if operation is successful + (i.e. the item has been inserted or updated), + \p second is \p true if new item has been added or \p false if the set contains that hash. + */ + std::pair update( value_type& val, bool bInsert = true ) + { + return do_update( val, []( value_type&, value_type* ) {}, bInsert ); + } + + /// Unlinks the item \p val from the set + /** + The function searches the item \p val in the set and unlink it + if it is found and its address is equal to &val. + + The function returns \p true if success and \p false otherwise. + */ + bool unlink( value_type const& val ) + { + typename gc::Guard guard; + auto pred = [&val]( value_type const& item ) -> bool { return &item == &val; }; + value_type * p = do_erase( hash_accessor()( val ), guard, std::ref( pred )); + return p != nullptr; + } + + /// Deletes the item from the set + /** + The function searches \p hash in the set, + unlinks the item found, and returns \p true. + If that item is not found the function returns \p false. + + The \ref disposer specified in \p Traits is called by garbage collector \p GC asynchronously. + */ + bool erase( hash_type const& hash ) + { + return erase( hash, []( value_type const& ) {} ); + } + + /// Deletes the item from the set + /** + The function searches \p hash in the set, + call \p f functor with item found, and unlinks it from the set. + The \ref disposer specified in \p Traits is called + by garbage collector \p GC asynchronously. + + The \p Func interface is + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + + If \p hash is not found the function returns \p false. 
+ */ + template + bool erase( hash_type const& hash, Func f ) + { + typename gc::Guard guard; + value_type * p = do_erase( hash, guard, []( value_type const&) -> bool {return true; } ); + + // p is guarded by HP + if ( p ) { + f( *p ); + return true; + } + return false; + } + + /// Deletes the item pointed by iterator \p iter + /** + Returns \p true if the operation is successful, \p false otherwise. + + The function does not invalidate the iterator, it remains valid and can be used for further traversing. + */ + bool erase_at( iterator const& iter ) + { + return do_erase_at( iter ); + } + //@cond + bool erase_at( reverse_iterator const& iter ) + { + return do_erase_at( iter ); + } + //@endcond + + /// Extracts the item with specified \p hash + /** + The function searches \p hash in the set, + unlinks it from the set, and returns an guarded pointer to the item extracted. + If \p hash is not found the function returns an empty guarded pointer. + + The \p disposer specified in \p Traits class' template parameter is called automatically + by garbage collector \p GC when returned \ref guarded_ptr object to be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::intrusive::FeldmanHashSet< your_template_args > my_set; + my_set theSet; + // ... + { + my_set::guarded_ptr gp( theSet.extract( 5 )); + if ( gp ) { + // Deal with gp + // ... + } + // Destructor of gp releases internal HP guard + } + \endcode + */ + guarded_ptr extract( hash_type const& hash ) + { + typename gc::Guard guard; + if ( do_erase( hash, guard, []( value_type const&) -> bool {return true;} )) + return guarded_ptr( std::move( guard )); + return guarded_ptr(); + } + + /// Finds an item by it's \p hash + /** + The function searches the item by \p hash and calls the functor \p f for item found. 
+ The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item found. + + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during the functor is executing. + The functor does not serialize simultaneous access to the set's \p item. If such access is + possible you must provide your own synchronization schema on item level to prevent unsafe item modifications. + + The function returns \p true if \p hash is found, \p false otherwise. + */ + template + bool find( hash_type const& hash, Func f ) + { + typename gc::Guard guard; + value_type * p = search( hash, guard ); + + // p is guarded by HP + if ( p ) { + f( *p ); + return true; + } + return false; + } + + /// Checks whether the set contains \p hash + /** + The function searches the item by its \p hash + and returns \p true if it is found, or \p false otherwise. + */ + bool contains( hash_type const& hash ) + { + return find( hash, []( value_type& ) {} ); + } + + /// Finds an item by it's \p hash and returns the item found + /** + The function searches the item by its \p hash + and returns the guarded pointer to the item found. + If \p hash is not found the function returns an empty \p guarded_ptr. + + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::intrusive::FeldmanHashSet< your_template_params > my_set; + my_set theSet; + // ... + { + my_set::guarded_ptr gp( theSet.get( 5 )); + if ( theSet.get( 5 )) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + */ + guarded_ptr get( hash_type const& hash ) + { + typename gc::Guard guard; + if ( search( hash, guard )) + return guarded_ptr( std::move( guard )); + return guarded_ptr(); + } + + /// Clears the set (non-atomic) + /** + The function unlink all data node from the set. 
+ The function is not atomic but is thread-safe. + After \p %clear() the set may not be empty because another threads may insert items. + + For each item the \p disposer is called after unlinking. + */ + void clear() + { + clear_array( head(), head_size()); + } + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. + Thus, the correct item counting feature is an important part of the set implementation. + */ + bool empty() const + { + return size() == 0; + } + + /// Returns item count in the set + size_t size() const + { + return m_ItemCounter; + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return stats(); + } + + /// Returns the size of head node + using base_class::head_size; + + /// Returns the size of the array node + using base_class::array_node_size; + + /// Collects tree level statistics into \p stat + /** + The function traverses the set and collects statistics for each level of the tree + into \p feldman_hashset::level_statistics struct. The element of \p stat[i] + represents statistics for level \p i, level 0 is head array. + The function is thread-safe and may be called in multi-threaded environment. + + Result can be useful for estimating efficiency of hash functor you use. + */ + void get_level_statistics( std::vector< feldman_hashset::level_statistics>& stat ) const + { + base_class::get_level_statistics( stat ); + } + + public: + ///@name Thread-safe iterators + /** @anchor cds_intrusive_FeldmanHashSet_iterators + The set supports thread-safe iterators: you may iterate over the set in multi-threaded environment. + It is guaranteed that the iterators will remain valid even if another thread deletes the node the iterator points to: + Hazard Pointer embedded into the iterator object protects the node from physical reclamation. 
+ + @note Since the iterator object contains hazard pointer that is a thread-local resource, + the iterator should not be passed to another thread. + + Each iterator object supports the common interface: + - dereference operators: + @code + value_type [const] * operator ->() noexcept + value_type [const] & operator *() noexcept + @endcode + - pre-increment and pre-decrement. Post-operators is not supported + - equality operators == and !=. + Iterators are equal iff they point to the same cell of the same array node. + Note that for two iterators \p it1 and \p it2, the conditon it1 == it2 + does not entail &(*it1) == &(*it2) : welcome to concurrent containers + - helper member function \p release() that clears internal hazard pointer. + After \p release() the iterator points to \p nullptr but it still remain valid: further iterating is possible. + + During iteration you may safely erase any item from the set; + @ref erase_at() function call doesn't invalidate any iterator. + If some iterator points to the item to be erased, that item is not deleted immediately + but only after that iterator will be advanced forward or backward. + + @note It is possible the item can be iterated more that once, for example, if an iterator points to the item + in array node that is being splitted. + */ + ///@{ + + /// Returns an iterator to the beginning of the set + iterator begin() + { + return iterator( *this, head(), size_t(0) - 1 ); + } + + /// Returns an const iterator to the beginning of the set + const_iterator begin() const + { + return const_iterator( *this, head(), size_t(0) - 1 ); + } + + /// Returns an const iterator to the beginning of the set + const_iterator cbegin() + { + return const_iterator( *this, head(), size_t(0) - 1 ); + } + + /// Returns an iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior. 
+ iterator end() + { + return iterator( *this, head(), head_size(), false ); + } + + /// Returns a const iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior. + const_iterator end() const + { + return const_iterator( *this, head(), head_size(), false ); + } + + /// Returns a const iterator to the element following the last element of the set. This element acts as a placeholder; attempting to access it results in undefined behavior. + const_iterator cend() + { + return const_iterator( *this, head(), head_size(), false ); + } + + /// Returns a reverse iterator to the first element of the reversed set + reverse_iterator rbegin() + { + return reverse_iterator( *this, head(), head_size()); + } + + /// Returns a const reverse iterator to the first element of the reversed set + const_reverse_iterator rbegin() const + { + return const_reverse_iterator( *this, head(), head_size()); + } + + /// Returns a const reverse iterator to the first element of the reversed set + const_reverse_iterator crbegin() + { + return const_reverse_iterator( *this, head(), head_size()); + } + + /// Returns a reverse iterator to the element following the last element of the reversed set + /** + It corresponds to the element preceding the first element of the non-reversed container. + This element acts as a placeholder, attempting to access it results in undefined behavior. + */ + reverse_iterator rend() + { + return reverse_iterator( *this, head(), size_t(0) - 1, false ); + } + + /// Returns a const reverse iterator to the element following the last element of the reversed set + /** + It corresponds to the element preceding the first element of the non-reversed container. + This element acts as a placeholder, attempting to access it results in undefined behavior. 
+ */ + const_reverse_iterator rend() const + { + return const_reverse_iterator( *this, head(), size_t(0) - 1, false ); + } + + /// Returns a const reverse iterator to the element following the last element of the reversed set + /** + It corresponds to the element preceding the first element of the non-reversed container. + This element acts as a placeholder, attempting to access it results in undefined behavior. + */ + const_reverse_iterator crend() + { + return const_reverse_iterator( *this, head(), size_t(0) - 1, false ); + } + ///@} + + private: + //@cond + void clear_array( array_node * pArrNode, size_t nSize ) + { + back_off bkoff; + + for ( atomic_node_ptr * pArr = pArrNode->nodes, *pLast = pArr + nSize; pArr != pLast; ++pArr ) { + while ( true ) { + node_ptr slot = pArr->load( memory_model::memory_order_acquire ); + if ( slot.bits() == base_class::flag_array_node ) { + // array node, go down the tree + assert( slot.ptr() != nullptr ); + clear_array( to_array( slot.ptr()), array_node_size()); + break; + } + else if ( slot.bits() == base_class::flag_array_converting ) { + // the slot is converting to array node right now + while (( slot = pArr->load( memory_model::memory_order_acquire )).bits() == base_class::flag_array_converting ) { + bkoff(); + stats().onSlotConverting(); + } + bkoff.reset(); + + assert( slot.ptr() != nullptr ); + assert( slot.bits() == base_class::flag_array_node ); + clear_array( to_array( slot.ptr()), array_node_size()); + break; + } + else { + // data node + if ( pArr->compare_exchange_strong( slot, node_ptr(), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { + if ( slot.ptr()) { + gc::template retire( slot.ptr()); + --m_ItemCounter; + stats().onEraseSuccess(); + } + break; + } + } + } + } + } + //@endcond + + protected: + //@cond + value_type * search( hash_type const& hash, typename gc::Guard& guard ) + { + traverse_data pos( hash, *this ); + hash_comparator cmp; + + while ( true ) { + node_ptr slot = 
base_class::traverse( pos ); + assert( slot.bits() == 0 ); + + // protect data node by hazard pointer + if ( guard.protect( pos.pArr->nodes[pos.nSlot], []( node_ptr p ) -> value_type* { return p.ptr(); }) != slot) { + // slot value has been changed - retry + stats().onSlotChanged(); + continue; + } + else if ( slot.ptr() && cmp( hash, hash_accessor()( *slot.ptr())) == 0 ) { + // item found + stats().onFindSuccess(); + return slot.ptr(); + } + stats().onFindFailed(); + return nullptr; + } + } + + template + value_type * do_erase( hash_type const& hash, typename gc::Guard& guard, Predicate pred ) + { + traverse_data pos( hash, *this ); + hash_comparator cmp; + while ( true ) { + node_ptr slot = base_class::traverse( pos ); + assert( slot.bits() == 0 ); + + // protect data node by hazard pointer + if ( guard.protect( pos.pArr->nodes[pos.nSlot], []( node_ptr p ) -> value_type* { return p.ptr(); }) != slot ) { + // slot value has been changed - retry + stats().onSlotChanged(); + } + else if ( slot.ptr()) { + if ( cmp( hash, hash_accessor()( *slot.ptr())) == 0 && pred( *slot.ptr())) { + // item found - replace it with nullptr + if ( pos.pArr->nodes[pos.nSlot].compare_exchange_strong( slot, node_ptr( nullptr ), memory_model::memory_order_acquire, atomics::memory_order_relaxed)) { + // slot is guarded by HP + gc::template retire( slot.ptr()); + --m_ItemCounter; + stats().onEraseSuccess(); + + return slot.ptr(); + } + stats().onEraseRetry(); + continue; + } + stats().onEraseFailed(); + return nullptr; + } + else { + // the slot is empty + stats().onEraseFailed(); + return nullptr; + } + } + } + + bool do_erase_at( iterator_base const& iter ) + { + if ( iter.m_set != this ) + return false; + if ( iter.m_pNode == head()) { + if ( iter.m_idx >= head_size()) + return false; + } + else if ( iter.m_idx >= array_node_size()) + return false; + + for (;;) { + node_ptr slot = iter.m_pNode->nodes[iter.m_idx].load( memory_model::memory_order_acquire ); + if ( slot.bits() == 0 && 
slot.ptr() == iter.pointer()) { + if ( iter.m_pNode->nodes[iter.m_idx].compare_exchange_strong( slot, node_ptr( nullptr ), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { + // the item is guarded by iterator, so we may retire it safely + gc::template retire( slot.ptr()); + --m_ItemCounter; + stats().onEraseSuccess(); + return true; + } + } + else + return false; + } + } + + template + std::pair do_update( value_type& val, Func f, bool bInsert = true ) + { + hash_type const& hash = hash_accessor()( val ); + traverse_data pos( hash, *this ); + hash_comparator cmp; + typename gc::template GuardArray<2> guards; + + guards.assign( 1, &val ); + while ( true ) { + node_ptr slot = base_class::traverse( pos ); + assert( slot.bits() == 0 ); + + // protect data node by hazard pointer + if ( guards.protect( 0, pos.pArr->nodes[pos.nSlot], []( node_ptr p ) -> value_type* { return p.ptr(); }) != slot ) { + // slot value has been changed - retry + stats().onSlotChanged(); + } + else if ( slot.ptr()) { + if ( cmp( hash, hash_accessor()( *slot.ptr())) == 0 ) { + // the item with that hash value already exists + // Replace it with val + if ( slot.ptr() == &val ) { + stats().onUpdateExisting(); + return std::make_pair( true, false ); + } + + if ( pos.pArr->nodes[pos.nSlot].compare_exchange_strong( slot, node_ptr( &val ), memory_model::memory_order_release, atomics::memory_order_relaxed )) { + // slot can be disposed + f( val, slot.ptr()); + gc::template retire( slot.ptr()); + stats().onUpdateExisting(); + return std::make_pair( true, false ); + } + + stats().onUpdateRetry(); + continue; + } + + if ( bInsert ) { + if ( !pos.splitter.eos()) { + // the slot must be expanded + base_class::expand_slot( pos, slot ); + } + else + return std::make_pair( false, false ); + } + else { + stats().onUpdateFailed(); + return std::make_pair( false, false ); + } + } + else { + // the slot is empty, try to insert data node + if ( bInsert ) { + node_ptr pNull; + if ( 
pos.pArr->nodes[pos.nSlot].compare_exchange_strong( pNull, node_ptr( &val ), memory_model::memory_order_release, atomics::memory_order_relaxed )) + { + // the new data node has been inserted + f( val, nullptr ); + ++m_ItemCounter; + stats().onUpdateNew(); + stats().height( pos.nHeight ); + return std::make_pair( true, true ); + } + } + else { + stats().onUpdateFailed(); + return std::make_pair( false, false ); + } + + // insert failed - slot has been changed by another thread + // retry updating + stats().onUpdateRetry(); + } + } // while + } + //@endcond + }; +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_IMPL_FELDMAN_HASHSET_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/impl/iterable_list.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/impl/iterable_list.h new file mode 100644 index 0000000..1962c7a --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/impl/iterable_list.h @@ -0,0 +1,1467 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_IMPL_ITERABLE_LIST_H +#define CDSLIB_INTRUSIVE_IMPL_ITERABLE_LIST_H + +#include +#include + +namespace cds { namespace intrusive { + + /// Iterable lock-free ordered single-linked list + /** @ingroup cds_intrusive_list + \anchor cds_intrusive_IterableList_hp + + This non-blocking list implementation supports thread-safe iterators; + searching and removing are lock-free, inserting is non-blocking because it + uses a light-weight synchronization based on marked pointers. + + Unlike \p cds::intrusive::MichaelList the iterable list does not require + any hook in \p T to be stored in the list. + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + Iterable list is suitable for almost append-only hash table because the list doesn't delete + its internal node when erasing a key but it is marked them as empty to be reused in the future. + However, plenty of empty nodes degrades performance. + Separation of internal nodes and user data implies the need for an allocator for internal node + so the iterable list is not fully intrusive. Nevertheless, if you need thread-safe iterator, + the iterable list is good choice. 
+ + The complexity of searching is O(N). + + Template arguments: + - \p GC - Garbage collector used. + - \p T - type to be stored in the list. + - \p Traits - type traits, default is \p iterable_list::traits. It is possible to declare option-based + list with \p cds::intrusive::iterable_list::make_traits metafunction: + For example, the following traits-based declaration of \p gc::HP iterable list + \code + #include + // Declare item stored in your list + struct foo + { + int nKey; + // .... other data + }; + + // Declare comparator for the item + struct my_compare { + int operator()( foo const& i1, foo const& i2 ) const + { + return i1.nKey - i2.nKey; + } + }; + + // Declare traits + struct my_traits: public cds::intrusive::iterable_list::traits + { + typedef my_compare compare; + }; + + // Declare list + typedef cds::intrusive::IterableList< cds::gc::HP, foo, my_traits > list_type; + \endcode + is equivalent for the following option-based list + \code + #include + + // foo struct and my_compare are the same + + // Declare option-based list + typedef cds::intrusive::IterableList< cds::gc::HP, foo, + typename cds::intrusive::iterable_list::make_traits< + cds::intrusive::opt::compare< my_compare > // item comparator option + >::type + > option_list_type; + \endcode + + \par Usage + There are different specializations of this template for each garbage collecting schema. 
+ You should select GC you want and include appropriate .h-file: + - for \p gc::HP: + - for \p gc::DHP: + */ + template < + class GC + ,typename T +#ifdef CDS_DOXYGEN_INVOKED + ,class Traits = iterable_list::traits +#else + ,class Traits +#endif + > + class IterableList +#ifndef CDS_DOXYGEN_INVOKED + : public iterable_list_tag +#endif + { + public: + typedef T value_type; ///< type of value stored in the list + typedef Traits traits; ///< Traits template parameter + + typedef iterable_list::node< value_type > node_type; ///< node type + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator ; ///< key comparison functor based on opt::compare and opt::less option setter. +# else + typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; +# endif + + typedef typename traits::disposer disposer; ///< disposer for \p value_type + + typedef GC gc; ///< Garbage collector + typedef typename traits::back_off back_off; ///< back-off strategy + typedef typename traits::item_counter item_counter; ///< Item counting policy used + typedef typename traits::memory_model memory_model; ///< Memory ordering. 
See \p cds::opt::memory_model option + typedef typename traits::node_allocator node_allocator; ///< Node allocator + typedef typename traits::stat stat; ///< Internal statistics + + typedef typename gc::template guarded_ptr< value_type > guarded_ptr; ///< Guarded pointer + + static constexpr const size_t c_nHazardPtrCount = 4; ///< Count of hazard pointer required for the algorithm + + //@cond + // Rebind traits (split-list support) + template + struct rebind_traits { + typedef IterableList< + gc + , value_type + , typename cds::opt::make_options< traits, Options...>::type + > type; + }; + + // Stat selector + template + using select_stat_wrapper = iterable_list::select_stat_wrapper< Stat >; + //@endcond + + protected: + //@cond + typedef atomics::atomic< node_type* > atomic_node_ptr; ///< Atomic node pointer + typedef atomic_node_ptr auxiliary_head; ///< Auxiliary head type (for split-list support) + typedef typename node_type::marked_data_ptr marked_data_ptr; + + node_type m_Head; + node_type m_Tail; + + item_counter m_ItemCounter; ///< Item counter + mutable stat m_Stat; ///< Internal statistics + + typedef cds::details::Allocator< node_type, node_allocator > cxx_node_allocator; + + /// Position pointer for item search + struct position { + node_type const* pHead; + node_type * pPrev; ///< Previous node + node_type * pCur; ///< Current node + + value_type * pFound; ///< Value of \p pCur->data, valid only if data found + + typename gc::Guard guard; ///< guard for \p pFound + }; + + struct insert_position: public position + { + value_type * pPrevVal; ///< Value of \p pPrev->data, can be \p nullptr + typename gc::Guard prevGuard; ///< guard for \p pPrevVal + }; + //@endcond + + protected: + //@cond + template + class iterator_type + { + friend class IterableList; + + protected: + node_type* m_pNode; + typename gc::Guard m_Guard; // data guard + + void next() + { + for ( node_type* p = m_pNode->next.load( memory_model::memory_order_relaxed ); p != m_pNode; p = 
p->next.load( memory_model::memory_order_relaxed )) + { + m_pNode = p; + if ( m_Guard.protect( p->data, []( marked_data_ptr ptr ) { return ptr.ptr(); }).ptr()) + return; + } + m_Guard.clear(); + } + + explicit iterator_type( node_type* pNode ) + : m_pNode( pNode ) + { + if ( !m_Guard.protect( pNode->data, []( marked_data_ptr p ) { return p.ptr(); }).ptr()) + next(); + } + + iterator_type( node_type* pNode, value_type* pVal ) + : m_pNode( pNode ) + { + if ( m_pNode ) { + assert( pVal != nullptr ); + m_Guard.assign( pVal ); + } + } + + value_type* data() const + { + return m_Guard.template get(); + } + + public: + typedef typename cds::details::make_const_type::pointer value_ptr; + typedef typename cds::details::make_const_type::reference value_ref; + + iterator_type() + : m_pNode( nullptr ) + {} + + iterator_type( iterator_type const& src ) + : m_pNode( src.m_pNode ) + { + m_Guard.copy( src.m_Guard ); + } + + value_ptr operator ->() const + { + return data(); + //return m_Guard.template get(); + } + + value_ref operator *() const + { + assert( m_Guard.get_native() != nullptr ); + return *data(); + //return *m_Guard.template get(); + } + + /// Pre-increment + iterator_type& operator ++() + { + next(); + return *this; + } + + iterator_type& operator = (iterator_type const& src) + { + m_pNode = src.m_pNode; + m_Guard.copy( src.m_Guard ); + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return m_pNode == i.m_pNode; + } + template + bool operator !=(iterator_type const& i ) const + { + return !( *this == i ); + } + }; + //@endcond + + public: + ///@name Thread-safe forward iterators + //@{ + /// Forward iterator + /** + The forward iterator for iterable list has some features: + - it has no post-increment operator + - to protect the value, the iterator contains a GC-specific guard. 
+ For some GC (like as \p gc::HP), a guard is a limited resource per thread, so an exception (or assertion) "no free guard" + may be thrown if the limit of guard count per thread is exceeded. + - The iterator cannot be moved across thread boundary since it contains thread-private GC's guard. + - Iterator is thread-safe: even if the element the iterator points to is removed, the iterator stays valid because + it contains the guard keeping the value from to be recycled. + + The iterator interface: + \code + class iterator { + public: + // Default constructor + iterator(); + + // Copy construtor + iterator( iterator const& src ); + + // Dereference operator + value_type * operator ->() const; + + // Dereference operator + value_type& operator *() const; + + // Preincrement operator + iterator& operator ++(); + + // Assignment operator + iterator& operator = (iterator const& src); + + // Equality operators + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + + @note For two iterators pointed to the same element the value can be different; + this code + \code + if ( it1 == it2 ) + assert( &(*it1) == &(*it2)); + \endcode + can throw assertion. The point is that the iterator stores the value of element which can be modified later by other thread. + The guard inside the iterator prevents recycling that value so the iterator's value remains valid even after changing. + Other iterator may observe modified value of the element. 
+ */ + typedef iterator_type iterator; + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( &m_Head ); + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + Internally, end returning value equals to \p nullptr. + + The returned value can be used only to control reaching the end of the list. + For empty list begin() == end() + */ + iterator end() + { + return iterator( &m_Tail ); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator cbegin() const + { + return const_iterator( const_cast( &m_Head )); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator begin() const + { + return const_iterator( const_cast( &m_Head )); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator end() const + { + return const_iterator( const_cast( &m_Tail )); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator cend() const + { + return const_iterator( const_cast( &m_Tail )); + } + //@} + + public: + /// Default constructor initializes empty list + IterableList() + { + init_list(); + } + + //@cond + template >::value >> + explicit IterableList( Stat& st ) + : m_Stat( st ) + { + init_list(); + } + //@endcond + + /// Destroys the list object + ~IterableList() + { + destroy(); + } + + /// Inserts new node + /** + The function inserts \p val into the list if the list does not contain + an item with key equal to \p val. 
+ + Returns \p true if \p val has been linked to the list, \p false otherwise. + */ + bool insert( value_type& val ) + { + return insert_at( &m_Head, val ); + } + + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. + + The function allows to split new item creating into two part: + - create item with key only + - insert new item into the list + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this list's item by concurrent threads. + The user-defined functor is called only if the inserting is success. + + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + bool insert( value_type& val, Func f ) + { + return insert_at( &m_Head, val, f ); + } + + /// Updates the node + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val is not found in the list, then \p val is inserted + iff \p bInsert is \p true. + Otherwise, the current element is changed to \p val, the element will be retired later + by call \p Traits::disposer. + The functor \p func is called after inserting or replacing, it signature is: + \code + void func( value_type& val, value_type * old ); + \endcode + where + - \p val - argument \p val passed into the \p %update() function + - \p old - old value that will be retired. If new item has been inserted then \p old is \p nullptr. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if \p val has been added or \p false if the item with that key + already in the list. 
+ */ + template + std::pair update( value_type& val, Func func, bool bInsert = true ) + { + return update_at( &m_Head, val, func, bInsert ); + } + + /// Insert or update + /** + The operation performs inserting or updating data with lock-free manner. + + If the item \p val is not found in the list, then \p val is inserted + iff \p bInsert is \p true. + Otherwise, the current element is changed to \p val, the old element will be retired later + by call \p Traits::disposer. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if \p val has been added or \p false if the item with that key + already in the list. + */ + std::pair upsert( value_type& val, bool bInsert = true ) + { + return upsert_at( &m_Head, val, bInsert ); + } + + /// Unlinks the item \p val from the list + /** + The function searches the item \p val in the list and unlinks it from the list + if it is found and it is equal to \p val. + + Difference between \p erase() and \p %unlink(): \p %erase() finds a key + and deletes the item found. \p %unlink() finds an item by key and deletes it + only if \p val is an item of the list, i.e. the pointer to item found + is equal to &val . + + \p disposer specified in \p Traits is called for deleted item. + + The function returns \p true if success and \p false otherwise. + */ + bool unlink( value_type& val ) + { + return unlink_at( &m_Head, val ); + } + + /// Deletes the item from the list + /** \anchor cds_intrusive_IterableList_hp_erase_val + The function searches an item with key equal to \p key in the list, + unlinks it from the list, and returns \p true. + If \p key is not found the function return \p false. + + \p disposer specified in \p Traits is called for deleted item. 
+ */ + template + bool erase( Q const& key ) + { + return erase_at( &m_Head, key, key_comparator()); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_IterableList_hp_erase_val "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + + \p disposer specified in \p Traits is called for deleted item. + */ + template + bool erase_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return erase_at( &m_Head, key, cds::opt::details::make_comparator_from_less()); + } + + /// Deletes the item from the list + /** \anchor cds_intrusive_IterableList_hp_erase_func + The function searches an item with key equal to \p key in the list, + call \p func functor with item found, unlinks it from the list, and returns \p true. + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + If \p key is not found the function return \p false, \p func is not called. + + \p disposer specified in \p Traits is called for deleted item. + */ + template + bool erase( Q const& key, Func func ) + { + return erase_at( &m_Head, key, key_comparator(), func ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_IterableList_hp_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + + \p disposer specified in \p Traits is called for deleted item. 
+ */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return erase_at( &m_Head, key, cds::opt::details::make_comparator_from_less(), f ); + } + + /// Deletes the item pointed by iterator \p iter + /** + Returns \p true if the operation is successful, \p false otherwise. + The function can return \p false if the node the iterator points to has already been deleted + by other thread. + + The function does not invalidate the iterator, it remains valid and can be used for further traversing. + */ + bool erase_at( iterator const& iter ) + { + assert( iter != end()); + + marked_data_ptr val( iter.data()); + if ( iter.m_pNode->data.compare_exchange_strong( val, marked_data_ptr(), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { + --m_ItemCounter; + retire_data( val.ptr()); + m_Stat.onEraseSuccess(); + return true; + } + return false; + } + + /// Extracts the item from the list with specified \p key + /** \anchor cds_intrusive_IterableList_hp_extract + The function searches an item with key equal to \p key, + unlinks it from the list, and returns it as \p guarded_ptr. + If \p key is not found returns an empty guarded pointer. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + The \ref disposer specified in \p Traits class template parameter is called automatically + by garbage collector \p GC when returned \ref guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::intrusive::IterableList< cds::gc::HP, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp( theList.extract( 5 )); + if ( gp ) { + // Deal with gp + // ... 
+ } + // Destructor of gp releases internal HP guard + } + \endcode + */ + template + guarded_ptr extract( Q const& key ) + { + return extract_at( &m_Head, key, key_comparator()); + } + + /// Extracts the item using compare functor \p pred + /** + The function is an analog of \ref cds_intrusive_IterableList_hp_extract "extract(Q const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + guarded_ptr extract_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return extract_at( &m_Head, key, cds::opt::details::make_comparator_from_less()); + } + + /// Finds \p key in the list + /** \anchor cds_intrusive_IterableList_hp_find_func + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the \p %find() function argument. + + The functor may change non-key fields of \p item. Note that the function is only guarantee + that \p item cannot be disposed during functor is executing. + The function does not serialize simultaneous access to the \p item. If such access is + possible you must provide your own synchronization schema to keep out unsafe item modifications. + + The function returns \p true if \p val is found, \p false otherwise. 
+ */ + template + bool find( Q& key, Func f ) const + { + return find_at( &m_Head, key, key_comparator(), f ); + } + //@cond + template + bool find( Q const& key, Func f ) const + { + return find_at( &m_Head, key, key_comparator(), f ); + } + //@endcond + + /// Finds \p key in the list and returns iterator pointed to the item found + /** + If \p key is not found the function returns \p end(). + */ + template + iterator find( Q const& key ) const + { + return find_iterator_at( &m_Head, key, key_comparator()); + } + + /// Finds the \p key using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_IterableList_hp_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q& key, Less pred, Func f ) const + { + CDS_UNUSED( pred ); + return find_at( &m_Head, key, cds::opt::details::make_comparator_from_less(), f ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) const + { + CDS_UNUSED( pred ); + return find_at( &m_Head, key, cds::opt::details::make_comparator_from_less(), f ); + } + //@endcond + + /// Finds \p key in the list using \p pred predicate for searching and returns iterator pointed to the item found + /** + The function is an analog of \p find(Q&) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + + If \p key is not found the function returns \p end(). 
+ */ + template + iterator find_with( Q const& key, Less pred ) const + { + CDS_UNUSED( pred ); + return find_iterator_at( &m_Head, key, cds::opt::details::make_comparator_from_less()); + } + + /// Checks whether the list contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + */ + template + bool contains( Q const& key ) const + { + return find_at( &m_Head, key, key_comparator()); + } + + /// Checks whether the list contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the list. + */ + template + bool contains( Q const& key, Less pred ) const + { + CDS_UNUSED( pred ); + return find_at( &m_Head, key, cds::opt::details::make_comparator_from_less()); + } + + /// Finds the \p key and return the item found + /** \anchor cds_intrusive_IterableList_hp_get + The function searches the item with key equal to \p key + and returns it as \p guarded_ptr. + If \p key is not found the function returns an empty guarded pointer. + + The \ref disposer specified in \p Traits class template parameter is called + by garbage collector \p GC automatically when returned \ref guarded_ptr object + will be destroyed or released. + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::intrusive::IterableList< cds::gc::HP, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp(theList.get( 5 )); + if ( gp ) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + + Note the compare functor specified for \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. 
+ */ + template + guarded_ptr get( Q const& key ) const + { + return get_at( &m_Head, key, key_comparator()); + } + + /// Finds the \p key and return the item found + /** + The function is an analog of \ref cds_intrusive_IterableList_hp_get "get( Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + guarded_ptr get_with( Q const& key, Less pred ) const + { + CDS_UNUSED( pred ); + return get_at( &m_Head, key, cds::opt::details::make_comparator_from_less()); + } + + /// Clears the list (thread safe, not atomic) + void clear() + { + position pos; + pos.pPrev = nullptr; + for ( pos.pCur = m_Head.next.load( memory_model::memory_order_relaxed ); pos.pCur != pos.pPrev; pos.pCur = pos.pCur->next.load( memory_model::memory_order_relaxed )) { + while ( true ) { + pos.pFound = pos.guard.protect( pos.pCur->data, []( marked_data_ptr p ) { return p.ptr(); }).ptr(); + if ( !pos.pFound ) + break; + if ( cds_likely( unlink_data( pos ))) { + --m_ItemCounter; + break; + } + } + pos.pPrev = pos.pCur; + } + } + + /// Checks if the list is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. + Thus, if you need to use \p %empty() you should provide appropriate (non-empty) \p iterable_list::traits::item_counter + feature. + */ + bool empty() const + { + return size() == 0; + } + + /// Returns list's item count + /** + The value returned depends on item counter provided by \p iterable_list::traits::item_counter. For \p atomicity::empty_item_counter, + this function always returns 0. 
+ */ + size_t size() const + { + return m_ItemCounter.value(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + protected: + //@cond + + // split-list support + bool insert_aux_node( node_type * pNode ) + { + return insert_aux_node( &m_Head, pNode ); + } + + // split-list support + bool insert_aux_node( node_type* pHead, node_type * pNode ) + { + assert( pNode != nullptr ); + assert( pNode->data.load( memory_model::memory_order_relaxed ) != nullptr ); + + insert_position pos; + + while ( true ) { + if ( inserting_search( pHead, *pNode->data.load(memory_model::memory_order_relaxed).ptr(), pos, key_comparator())) { + m_Stat.onInsertFailed(); + return false; + } + + if ( link_aux_node( pNode, pos, pHead )) { + ++m_ItemCounter; + m_Stat.onInsertSuccess(); + return true; + } + + m_Stat.onInsertRetry(); + } + } + + bool insert_at( node_type* pHead, value_type& val ) + { + insert_position pos; + + while ( true ) { + if ( inserting_search( pHead, val, pos, key_comparator())) { + m_Stat.onInsertFailed(); + return false; + } + + if ( link_data( &val, pos, pHead )) { + ++m_ItemCounter; + m_Stat.onInsertSuccess(); + return true; + } + + m_Stat.onInsertRetry(); + } + } + + template + bool insert_at( node_type* pHead, value_type& val, Func f ) + { + insert_position pos; + + typename gc::Guard guard; + guard.assign( &val ); + + while ( true ) { + if ( inserting_search( pHead, val, pos, key_comparator())) { + m_Stat.onInsertFailed(); + return false; + } + + if ( link_data( &val, pos, pHead )) { + f( val ); + ++m_ItemCounter; + m_Stat.onInsertSuccess(); + return true; + } + + m_Stat.onInsertRetry(); + } + } + + template + std::pair update_at( node_type* pHead, value_type& val, Func func, bool bInsert ) + { + insert_position pos; + + typename gc::Guard guard; + guard.assign( &val ); + + while ( true ) { + if ( inserting_search( pHead, val, pos, key_comparator())) { + // try to replace pCur->data with val + 
assert( pos.pFound != nullptr ); + assert( key_comparator()(*pos.pFound, val) == 0 ); + + marked_data_ptr pFound( pos.pFound ); + if ( cds_likely( pos.pCur->data.compare_exchange_strong( pFound, marked_data_ptr( &val ), + memory_model::memory_order_release, atomics::memory_order_relaxed ))) + { + if ( pos.pFound != &val ) { + retire_data( pos.pFound ); + func( val, pos.pFound ); + } + m_Stat.onUpdateExisting(); + return std::make_pair( true, false ); + } + } + else { + if ( !bInsert ) { + m_Stat.onUpdateFailed(); + return std::make_pair( false, false ); + } + + if ( link_data( &val, pos, pHead )) { + func( val, static_cast( nullptr )); + ++m_ItemCounter; + m_Stat.onUpdateNew(); + return std::make_pair( true, true ); + } + } + + m_Stat.onUpdateRetry(); + } + } + + std::pair upsert_at( node_type* pHead, value_type& val, bool bInsert ) + { + return update_at( pHead, val, []( value_type&, value_type* ) {}, bInsert ); + } + + bool unlink_at( node_type* pHead, value_type& val ) + { + position pos; + + back_off bkoff; + while ( search( pHead, val, pos, key_comparator())) { + if ( pos.pFound == &val ) { + if ( unlink_data( pos )) { + --m_ItemCounter; + m_Stat.onEraseSuccess(); + return true; + } + else + bkoff(); + } + else + break; + + m_Stat.onEraseRetry(); + } + + m_Stat.onEraseFailed(); + return false; + } + + template + bool erase_at( node_type* pHead, Q const& val, Compare cmp, Func f, position& pos ) + { + back_off bkoff; + while ( search( pHead, val, pos, cmp )) { + if ( unlink_data( pos )) { + f( *pos.pFound ); + --m_ItemCounter; + m_Stat.onEraseSuccess(); + return true; + } + else + bkoff(); + + m_Stat.onEraseRetry(); + } + + m_Stat.onEraseFailed(); + return false; + } + + template + bool erase_at( node_type* pHead, Q const& val, Compare cmp, Func f ) + { + position pos; + return erase_at( pHead, val, cmp, f, pos ); + } + + template + bool erase_at( node_type* pHead, Q const& val, Compare cmp ) + { + position pos; + return erase_at( pHead, val, cmp, [](value_type 
const&){}, pos ); + } + + template + guarded_ptr extract_at( node_type* pHead, Q const& val, Compare cmp ) + { + position pos; + back_off bkoff; + while ( search( pHead, val, pos, cmp )) { + if ( unlink_data( pos )) { + --m_ItemCounter; + m_Stat.onEraseSuccess(); + assert( pos.pFound != nullptr ); + return guarded_ptr( std::move( pos.guard )); + } + else + bkoff(); + + m_Stat.onEraseRetry(); + } + + m_Stat.onEraseFailed(); + return guarded_ptr(); + } + + template + bool find_at( node_type const* pHead, Q const& val, Compare cmp ) const + { + position pos; + if ( search( pHead, val, pos, cmp )) { + m_Stat.onFindSuccess(); + return true; + } + + m_Stat.onFindFailed(); + return false; + } + + template + bool find_at( node_type const* pHead, Q& val, Compare cmp, Func f ) const + { + position pos; + if ( search( pHead, val, pos, cmp )) { + assert( pos.pFound != nullptr ); + f( *pos.pFound, val ); + m_Stat.onFindSuccess(); + return true; + } + + m_Stat.onFindFailed(); + return false; + } + + template + iterator find_iterator_at( node_type const* pHead, Q const& val, Compare cmp ) const + { + position pos; + if ( search( pHead, val, pos, cmp )) { + assert( pos.pCur != nullptr ); + assert( pos.pFound != nullptr ); + m_Stat.onFindSuccess(); + return iterator( pos.pCur, pos.pFound ); + } + + m_Stat.onFindFailed(); + return iterator( const_cast( &m_Tail )); + } + + template + guarded_ptr get_at( node_type const* pHead, Q const& val, Compare cmp ) const + { + position pos; + if ( search( pHead, val, pos, cmp )) { + m_Stat.onFindSuccess(); + return guarded_ptr( std::move( pos.guard )); + } + + m_Stat.onFindFailed(); + return guarded_ptr(); + } + + node_type* head() + { + return &m_Head; + } + + node_type const* head() const + { + return &m_Head; + } + //@endcond + + protected: + //@cond + template + bool search( node_type const* pHead, Q const& val, position& pos, Compare cmp ) const + { + pos.pHead = pHead; + node_type* pPrev = const_cast( pHead ); + + while ( true ) { + 
node_type * pCur = pPrev->next.load( memory_model::memory_order_relaxed ); + + if ( pCur == pCur->next.load( memory_model::memory_order_acquire )) { + // end-of-list + pos.pPrev = pPrev; + pos.pCur = pCur; + pos.pFound = nullptr; + return false; + } + + value_type * pVal = pos.guard.protect( pCur->data, + []( marked_data_ptr p ) -> value_type* + { + return p.ptr(); + }).ptr(); + + if ( pVal ) { + int const nCmp = cmp( *pVal, val ); + if ( nCmp >= 0 ) { + pos.pPrev = pPrev; + pos.pCur = pCur; + pos.pFound = pVal; + return nCmp == 0; + } + } + + pPrev = pCur; + } + } + + template + bool inserting_search( node_type const* pHead, Q const& val, insert_position& pos, Compare cmp ) const + { + pos.pHead = pHead; + node_type* pPrev = const_cast(pHead); + value_type* pPrevVal = pPrev->data.load( memory_model::memory_order_relaxed ).ptr(); + + while ( true ) { + node_type * pCur = pPrev->next.load( memory_model::memory_order_relaxed ); + + if ( pCur == pCur->next.load( memory_model::memory_order_acquire )) { + // end-of-list + pos.pPrev = pPrev; + pos.pCur = pCur; + pos.pFound = nullptr; + pos.pPrevVal = pPrevVal; + return false; + } + + value_type * pVal = pos.guard.protect( pCur->data, + []( marked_data_ptr p ) -> value_type* + { + return p.ptr(); + } ).ptr(); + + if ( pVal ) { + int const nCmp = cmp( *pVal, val ); + if ( nCmp >= 0 ) { + pos.pPrev = pPrev; + pos.pCur = pCur; + pos.pFound = pVal; + pos.pPrevVal = pPrevVal; + return nCmp == 0; + } + } + + pPrev = pCur; + pPrevVal = pVal; + pos.prevGuard.copy( pos.guard ); + } + } + + // split-list support + template + void destroy( Predicate pred ) + { + node_type * pNode = m_Head.next.load( memory_model::memory_order_relaxed ); + while ( pNode != pNode->next.load( memory_model::memory_order_relaxed )) { + value_type * pVal = pNode->data.load( memory_model::memory_order_relaxed ).ptr(); + node_type * pNext = pNode->next.load( memory_model::memory_order_relaxed ); + bool const is_regular_node = !pVal || pred( pVal ); + if ( 
is_regular_node ) { + if ( pVal ) + retire_data( pVal ); + delete_node( pNode ); + } + pNode = pNext; + } + + m_Head.next.store( &m_Tail, memory_model::memory_order_relaxed ); + } + //@endcond + + private: + //@cond + void init_list() + { + m_Head.next.store( &m_Tail, memory_model::memory_order_relaxed ); + // end-of-list mark: node.next == node + m_Tail.next.store( &m_Tail, memory_model::memory_order_release ); + } + + node_type * alloc_node( value_type * pVal ) + { + m_Stat.onNodeCreated(); + return cxx_node_allocator().New( pVal ); + } + + void delete_node( node_type * pNode ) + { + m_Stat.onNodeRemoved(); + cxx_node_allocator().Delete( pNode ); + } + + static void retire_data( value_type * pVal ) + { + assert( pVal != nullptr ); + gc::template retire( pVal ); + } + + void destroy() + { + node_type * pNode = m_Head.next.load( memory_model::memory_order_relaxed ); + while ( pNode != pNode->next.load( memory_model::memory_order_relaxed )) { + value_type * pVal = pNode->data.load( memory_model::memory_order_relaxed ).ptr(); + if ( pVal ) + retire_data( pVal ); + node_type * pNext = pNode->next.load( memory_model::memory_order_relaxed ); + delete_node( pNode ); + pNode = pNext; + } + } + + bool link_data( value_type* pVal, insert_position& pos, node_type* pHead ) + { + assert( pos.pPrev != nullptr ); + assert( pos.pCur != nullptr ); + + // We need pos.pCur data should be unchanged, otherwise ordering violation can be possible + // if current thread will be preempted and another thread will delete pos.pCur data + // and then set it to another. 
+ // To prevent this we mark pos.pCur data as undeletable by setting LSB + marked_data_ptr valCur( pos.pFound ); + if ( !pos.pCur->data.compare_exchange_strong( valCur, valCur | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { + // oops, pos.pCur data has been changed or another thread is setting pos.pPrev data + m_Stat.onNodeMarkFailed(); + return false; + } + + marked_data_ptr valPrev( pos.pPrevVal ); + if ( !pos.pPrev->data.compare_exchange_strong( valPrev, valPrev | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { + pos.pCur->data.store( valCur, memory_model::memory_order_relaxed ); + + m_Stat.onNodeMarkFailed(); + return false; + } + + // checks if link pPrev -> pCur is broken + if ( pos.pPrev->next.load( memory_model::memory_order_acquire ) != pos.pCur ) { + // sequence pPrev - pCur is broken + pos.pPrev->data.store( valPrev, memory_model::memory_order_relaxed ); + pos.pCur->data.store( valCur, memory_model::memory_order_relaxed ); + + m_Stat.onNodeSeqBreak(); + return false; + } + + if ( pos.pPrevVal == nullptr ) { + // Check ABA-problem for prev + // There is a possibility that the current thread was preempted + // on entry of this function. Other threads can link data to prev + // and then remove it. 
As a result, the order of items may be changed + if ( find_prev( pHead, *pVal ) != pos.pPrev ) { + pos.pPrev->data.store( valPrev, memory_model::memory_order_relaxed ); + pos.pCur->data.store( valCur, memory_model::memory_order_relaxed ); + + m_Stat.onNullPrevABA(); + return false; + } + } + + if ( pos.pPrev != pos.pHead && pos.pPrevVal == nullptr ) { + // reuse pPrev + + // Set pos.pPrev data if it is null + valPrev |= 1; + bool result = pos.pPrev->data.compare_exchange_strong( valPrev, marked_data_ptr( pVal ), + memory_model::memory_order_release, atomics::memory_order_relaxed ); + + // Clears data marks + pos.pCur->data.store( valCur, memory_model::memory_order_relaxed ); + + if ( result ) { + m_Stat.onReuseNode(); + return result; + } + } + else { + // insert new node between pos.pPrev and pos.pCur + node_type * pNode = alloc_node( pVal ); + pNode->next.store( pos.pCur, memory_model::memory_order_relaxed ); + + bool result = pos.pPrev->next.compare_exchange_strong( pos.pCur, pNode, memory_model::memory_order_release, atomics::memory_order_relaxed ); + + // Clears data marks + pos.pPrev->data.store( valPrev, memory_model::memory_order_relaxed ); + pos.pCur->data.store( valCur, memory_model::memory_order_relaxed ); + + if ( result ) { + m_Stat.onNewNodeCreated(); + return result; + } + + delete_node( pNode ); + } + + return false; + } + + // split-list support + bool link_aux_node( node_type * pNode, insert_position& pos, node_type* pHead ) + { + assert( pos.pPrev != nullptr ); + assert( pos.pCur != nullptr ); + + // We need pos.pCur data should be unchanged, otherwise ordering violation can be possible + // if current thread will be preempted and another thread will delete pos.pCur data + // and then set it to another. 
+ // To prevent this we mark pos.pCur data as undeletable by setting LSB + marked_data_ptr valCur( pos.pFound ); + if ( !pos.pCur->data.compare_exchange_strong( valCur, valCur | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { + // oops, pos.pCur data has been changed or another thread is setting pos.pPrev data + m_Stat.onNodeMarkFailed(); + return false; + } + + marked_data_ptr valPrev( pos.pPrevVal ); + if ( !pos.pPrev->data.compare_exchange_strong( valPrev, valPrev | 1, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { + pos.pCur->data.store( valCur, memory_model::memory_order_relaxed ); + m_Stat.onNodeMarkFailed(); + return false; + } + + // checks if link pPrev -> pCur is broken + if ( pos.pPrev->next.load( memory_model::memory_order_acquire ) != pos.pCur ) { + // sequence pPrev - pCur is broken + pos.pPrev->data.store( valPrev, memory_model::memory_order_relaxed ); + pos.pCur->data.store( valCur, memory_model::memory_order_relaxed ); + m_Stat.onNodeSeqBreak(); + return false; + } + + if ( pos.pPrevVal == nullptr ) { + // Check ABA-problem for prev + // There is a possibility that the current thread was preempted + // on entry of this function. Other threads can insert (link) an item to prev + // and then remove it. 
As a result, the order of items may be changed + if ( find_prev( pHead, *pNode->data.load( memory_model::memory_order_relaxed ).ptr()) != pos.pPrev ) { + pos.pPrev->data.store( valPrev, memory_model::memory_order_relaxed ); + pos.pCur->data.store( valCur, memory_model::memory_order_relaxed ); + + m_Stat.onNullPrevABA(); + return false; + } + } + + // insert new node between pos.pPrev and pos.pCur + pNode->next.store( pos.pCur, memory_model::memory_order_relaxed ); + + bool result = pos.pPrev->next.compare_exchange_strong( pos.pCur, pNode, memory_model::memory_order_release, atomics::memory_order_relaxed ); + + // Clears data marks + pos.pPrev->data.store( valPrev, memory_model::memory_order_relaxed ); + pos.pCur->data.store( valCur, memory_model::memory_order_relaxed ); + + return result; + } + + static bool unlink_data( position& pos ) + { + assert( pos.pCur != nullptr ); + assert( pos.pFound != nullptr ); + + marked_data_ptr val( pos.pFound ); + if ( pos.pCur->data.compare_exchange_strong( val, marked_data_ptr(), memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { + retire_data( pos.pFound ); + return true; + } + return false; + } + + template + node_type* find_prev( node_type const* pHead, Q const& val ) const + { + node_type* pPrev = const_cast(pHead); + typename gc::Guard guard; + key_comparator cmp; + + while ( true ) { + node_type * pCur = pPrev->next.load( memory_model::memory_order_relaxed ); + + if ( pCur == pCur->next.load( memory_model::memory_order_acquire )) { + // end-of-list + return pPrev; + } + + value_type * pVal = guard.protect( pCur->data, + []( marked_data_ptr p ) -> value_type* + { + return p.ptr(); + } ).ptr(); + + if ( pVal && cmp( *pVal, val ) >= 0 ) + return pPrev; + + pPrev = pCur; + } + } + //@endcond + }; +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_IMPL_ITERABLE_LIST_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/impl/lazy_list.h 
b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/impl/lazy_list.h new file mode 100644 index 0000000..c0f7323 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/impl/lazy_list.h @@ -0,0 +1,1273 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_INTRUSIVE_IMPL_LAZY_LIST_H +#define CDSLIB_INTRUSIVE_IMPL_LAZY_LIST_H + +#include // unique_lock +#include + +namespace cds { namespace intrusive { + + /// Lazy ordered single-linked list + /** @ingroup cds_intrusive_list + \anchor cds_intrusive_LazyList_hp + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N). + + Source: + - [2005] Steve Heller, Maurice Herlihy, Victor Luchangco, Mark Moir, William N. Scherer III, and Nir Shavit + "A Lazy Concurrent List-Based Set Algorithm" + + The lazy list is based on an optimistic locking scheme for inserts and removes, + eliminating the need to use the equivalent of an atomically markable + reference. It also has a novel wait-free membership \p find operation + that does not need to perform cleanup operations and is more efficient. + + Template arguments: + - \p GC - Garbage collector used. Note the \p GC must be the same as the GC used for item type \p T (see lazy_list::node). + - \p T - type to be stored in the list. The type must be based on lazy_list::node (for lazy_list::base_hook) + or it must have a member of type lazy_list::node (for lazy_list::member_hook). + - \p Traits - type traits. See lazy_list::traits for explanation. + It is possible to declare option-based list with cds::intrusive::lazy_list::make_traits metafunction instead of \p Traits template + argument. For example, the following traits-based declaration of \p gc::HP lazy list + \code + #include + // Declare item stored in your list + struct item: public cds::intrusive::lazy_list::node< cds::gc::HP > + { ... }; + + // Declare comparator for the item + struct my_compare { ... 
} + + // Declare traits + struct my_traits: public cds::intrusive::lazy_list::traits + { + typedef cds::intrusive::lazy_list::base_hook< cds::opt::gc< cds::gc::HP > > hook; + typedef my_compare compare; + }; + + // Declare traits-based list + typedef cds::intrusive::LazyList< cds::gc::HP, item, my_traits > traits_based_list; + \endcode + is equivalent for the following option-based list + \code + #include + + // item struct and my_compare are the same + + // Declare option-based list + typedef cds::intrusive::LazyList< cds::gc::HP, item, + typename cds::intrusive::lazy_list::make_traits< + cds::intrusive::opt::hook< cds::intrusive::lazy_list::base_hook< cds::opt::gc< cds::gc::HP > > > // hook option + ,cds::intrusive::opt::compare< my_compare > // item comparator option + >::type + > option_based_list; + \endcode + + \par Usage + There are different specializations of this template for each garbage collecting schema used. + You should select GC needed and include appropriate .h-file: + - for gc::HP: \code #include \endcode + - for gc::DHP: \code #include \endcode + - for gc::nogc: \code #include \endcode + - for \ref cds_urcu_type "RCU" - see \ref cds_intrusive_LazyList_rcu "LazyList RCU specialization" + + Then, you should incorporate lazy_list::node into your struct \p T and provide + appropriate \p lazy_list::traits::hook in your \p Traits template parameters. Usually, for \p Traits + a struct based on \p lazy_list::traits should be defined. + + Example for gc::DHP and base hook: + \code + // Include GC-related lazy list specialization + #include + + // Data stored in lazy list + struct my_data: public cds::intrusive::lazy_list::node< cds::gc::DHP > + { + // key field + std::string strKey; + + // other data + // ... 
+ }; + + // my_data comparing functor + struct compare { + int operator()( const my_data& d1, const my_data& d2 ) + { + return d1.strKey.compare( d2.strKey ); + } + + int operator()( const my_data& d, const std::string& s ) + { + return d.strKey.compare(s); + } + + int operator()( const std::string& s, const my_data& d ) + { + return s.compare( d.strKey ); + } + }; + + // Declare traits + struct my_traits: public cds::intrusive::lazy_list::traits + { + typedef cds::intrusive::lazy_list::base_hook< cds::opt::gc< cds::gc::DHP > > hook; + typedef my_data_cmp compare; + }; + + // Declare list type + typedef cds::intrusive::LazyList< cds::gc::DHP, my_data, my_traits > traits_based_list; + \endcode + + Equivalent option-based code: + \code + // GC-related specialization + #include + + struct my_data { + // see above + }; + struct compare { + // see above + }; + + // Declare option-based list + typedef cds::intrusive::LazyList< cds::gc::DHP + ,my_data + , typename cds::intrusive::lazy_list::make_traits< + cds::intrusive::opt::hook< cds::intrusive::lazy_list::base_hook< cds::opt::gc< cds::gc::DHP > > > + ,cds::intrusive::opt::compare< my_data_cmp > + >::type + > option_based_list; + + \endcode + */ + template < + class GC + ,typename T +#ifdef CDS_DOXYGEN_INVOKED + ,class Traits = lazy_list::traits +#else + ,class Traits +#endif + > + class LazyList + { + public: + typedef GC gc; ///< Garbage collector + typedef T value_type; ///< type of value stored in the list + typedef Traits traits; ///< Traits template parameter + + typedef typename traits::hook hook; ///< hook type + typedef typename hook::node_type node_type; ///< node type + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator; ///< key comparison functor based on opt::compare and opt::less option setter. 
+# else + typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; +# endif + + typedef typename traits::disposer disposer; ///< disposer + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits + typedef typename lazy_list::get_link_checker< node_type, traits::link_checker >::type link_checker; ///< link checker + + typedef typename traits::back_off back_off; ///< back-off strategy + typedef typename traits::item_counter item_counter; ///< Item counting policy used + typedef typename traits::memory_model memory_model; ///< C++ memory ordering (see \p lazy_list::traits::memory_model) + typedef typename traits::stat stat; ///< Internal statistics + + static_assert((std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type"); + + typedef typename gc::template guarded_ptr< value_type > guarded_ptr; ///< Guarded pointer + + static constexpr const size_t c_nHazardPtrCount = 4; ///< Count of hazard pointer required for the algorithm + + //@cond + // Rebind traits (split-list support) + template + struct rebind_traits { + typedef LazyList< + gc + , value_type + , typename cds::opt::make_options< traits, Options...>::type + > type; + }; + + // Stat selector + template + using select_stat_wrapper = lazy_list::select_stat_wrapper< Stat >; + //@endcond + + protected: + typedef typename node_type::marked_ptr marked_node_ptr; ///< Node marked pointer + typedef node_type * auxiliary_head; ///< Auxiliary head type (for split-list support) + + protected: + //@cond + node_type m_Head; + node_type m_Tail; + + item_counter m_ItemCounter; + stat m_Stat; ///< Internal statistics + + struct clean_disposer { + void operator()( value_type * p ) + { + lazy_list::node_cleaner()( node_traits::to_node_ptr( p )); + disposer()( p ); + } + }; + + /// Position pointer for item search + struct position { + node_type * pPred; ///< Previous node + node_type * pCur; ///< Current node + 
+ typename gc::template GuardArray<2> guards; ///< Guards array + + enum { + guard_prev_item, + guard_current_item + }; + + /// Locks nodes \p pPred and \p pCur + void lock() + { + pPred->m_Lock.lock(); + pCur->m_Lock.lock(); + } + + /// Unlocks nodes \p pPred and \p pCur + void unlock() + { + pCur->m_Lock.unlock(); + pPred->m_Lock.unlock(); + } + }; + + typedef std::unique_lock< position > scoped_position_lock; + //@endcond + + protected: + //@cond + void link_node( node_type * pNode, node_type * pPred, node_type * pCur ) + { + assert( pPred->m_pNext.load(memory_model::memory_order_relaxed).ptr() == pCur ); + link_checker::is_empty( pNode ); + + pNode->m_pNext.store( marked_node_ptr(pCur), memory_model::memory_order_release ); + pPred->m_pNext.store( marked_node_ptr(pNode), memory_model::memory_order_release ); + } + + void unlink_node( node_type * pPred, node_type * pCur, node_type * pHead ) + { + assert( pPred->m_pNext.load(memory_model::memory_order_relaxed).ptr() == pCur ); + + node_type * pNext = pCur->m_pNext.load(memory_model::memory_order_relaxed).ptr(); + pCur->m_pNext.store( marked_node_ptr( pHead, 1 ), memory_model::memory_order_release ); // logical removal + back-link for search + pPred->m_pNext.store( marked_node_ptr( pNext ), memory_model::memory_order_release); // physically deleting + } + + void retire_node( node_type * pNode ) + { + assert( pNode != nullptr ); + gc::template retire( node_traits::to_value_ptr( *pNode )); + } + //@endcond + + protected: + //@cond + template + class iterator_type + { + friend class LazyList; + + protected: + value_type * m_pNode; + typename gc::Guard m_Guard; + + void next() + { + assert( m_pNode != nullptr ); + + if ( m_pNode ) { + typename gc::Guard g; + node_type * pCur = node_traits::to_node_ptr( m_pNode ); + if ( pCur->m_pNext.load( memory_model::memory_order_relaxed ).ptr() != nullptr ) { // if pCur is not tail node + node_type * pNext; + do { + pNext = 
pCur->m_pNext.load(memory_model::memory_order_relaxed).ptr(); + g.assign( node_traits::to_value_ptr( pNext )); + } while ( pNext != pCur->m_pNext.load(memory_model::memory_order_relaxed).ptr()); + + m_pNode = m_Guard.assign( g.template get()); + } + } + } + + void skip_deleted() + { + if ( m_pNode != nullptr ) { + typename gc::Guard g; + node_type * pNode = node_traits::to_node_ptr( m_pNode ); + + // Dummy tail node could not be marked + while ( pNode->is_marked()) { + node_type * p = pNode->m_pNext.load(memory_model::memory_order_relaxed).ptr(); + g.assign( node_traits::to_value_ptr( p )); + if ( p == pNode->m_pNext.load(memory_model::memory_order_relaxed).ptr()) + pNode = p; + } + if ( pNode != node_traits::to_node_ptr( m_pNode )) + m_pNode = m_Guard.assign( g.template get()); + } + } + + iterator_type( node_type * pNode ) + { + m_pNode = m_Guard.assign( node_traits::to_value_ptr( pNode )); + skip_deleted(); + } + + public: + typedef typename cds::details::make_const_type::pointer value_ptr; + typedef typename cds::details::make_const_type::reference value_ref; + + iterator_type() + : m_pNode( nullptr ) + {} + + iterator_type( iterator_type const& src ) + { + if ( src.m_pNode ) { + m_pNode = m_Guard.assign( src.m_pNode ); + } + else + m_pNode = nullptr; + } + + value_ptr operator ->() const + { + return m_pNode; + } + + value_ref operator *() const + { + assert( m_pNode != nullptr ); + return *m_pNode; + } + + /// Pre-increment + iterator_type& operator ++() + { + next(); + skip_deleted(); + return *this; + } + + iterator_type& operator = (iterator_type const& src) + { + m_pNode = src.m_pNode; + m_Guard.assign( m_pNode ); + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return m_pNode == i.m_pNode; + } + template + bool operator !=(iterator_type const& i ) const + { + return m_pNode != i.m_pNode; + } + }; + //@endcond + + public: + ///@name Forward iterators (only for debugging purpose) + //@{ + /// Forward iterator + /** + 
The forward iterator for lazy list has some features: + - it has no post-increment operator + - to protect the value, the iterator contains a GC-specific guard + another guard is required locally for increment operator. + For some GC (\p gc::HP), a guard is limited resource per thread, so an exception (or assertion) "no free guard" + may be thrown if a limit of guard count per thread is exceeded. + - The iterator cannot be moved across thread boundary since it contains GC's guard that is thread-private GC data. + - Iterator ensures thread-safety even if you delete the item that iterator points to. However, in case of concurrent + deleting operations it is no guarantee that you iterate all item in the list. + Moreover, a crash is possible when you try to iterate the next element that has been deleted by concurrent thread. + + @warning Use this iterator on the concurrent container for debugging purpose only. + */ + typedef iterator_type iterator; + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + iterator it( &m_Head ); + ++it ; // skip dummy head + return it; + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + + The returned value can be used only to control reaching the end of the list. 
+ For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator( &m_Tail ); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator begin() const + { + return get_const_begin(); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator cbegin() const + { + return get_const_begin(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator end() const + { + return get_const_end(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator cend() const + { + return get_const_end(); + } + //@} + + private: + //@cond + const_iterator get_const_begin() const + { + const_iterator it( const_cast( &m_Head )); + ++it ; // skip dummy head + return it; + } + const_iterator get_const_end() const + { + return const_iterator( const_cast(&m_Tail)); + } + //@endcond + + public: + /// Default constructor initializes empty list + LazyList() + { + m_Head.m_pNext.store( marked_node_ptr( &m_Tail ), memory_model::memory_order_relaxed ); + } + + //@cond + template >::value >> + explicit LazyList( Stat& st ) + : m_Stat( st ) + { + m_Head.m_pNext.store( marked_node_ptr( &m_Tail ), memory_model::memory_order_relaxed ); + } + //@endcond + + /// Destroys the list object + ~LazyList() + { + clear(); + assert( m_Head.m_pNext.load( memory_model::memory_order_relaxed ).ptr() == &m_Tail ); + m_Head.m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed ); + } + + /// Inserts new node + /** + The function inserts \p val in the list if the list does not contain + an item with key equal to \p val. + + Returns \p true if \p val is linked into the list, \p false otherwise. + */ + bool insert( value_type& val ) + { + return insert_at( &m_Head, val ); + } + + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. 
+ + The function allows to split new item creating into two part: + - create item with key only + - insert new item into the list + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. + While the functor \p f is called the item \p val is locked so + the functor has an exclusive access to the item. + The user-defined functor is called only if the inserting is success. + */ + template + bool insert( value_type& val, Func f ) + { + return insert_at( &m_Head, val, f ); + } + + /// Updates the item + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val not found in the list, then \p val is inserted into the list + iff \p bAllowInsert is \p true. + Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + struct functor { + void operator()( bool bNew, value_type& item, value_type& val ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + - \p val - argument \p val passed into the \p update() function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refer to the same thing. + + The functor may change non-key fields of the \p item. + While the functor \p f is working the item \p item is locked, + so \p func has exclusive access to the item. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if new item has been added or \p false if the item with \p key + already is in the list. + + The function makes RCU lock internally. 
+ */ + template + std::pair update( value_type& val, Func func, bool bAllowInsert = true ) + { + return update_at( &m_Head, val, func, bAllowInsert ); + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( value_type& val, Func func ) + { + return update( val, func, true ); + } + //@endcond + + /// Unlinks the item \p val from the list + /** + The function searches the item \p val in the list and unlink it from the list + if it is found and it is equal to \p val. + + Difference between \ref erase and \p unlink functions: \p erase finds a key + and deletes the item found. \p unlink finds an item by key and deletes it + only if \p val is an item of that list, i.e. the pointer to item found + is equal to &val . + + The function returns \p true if success and \p false otherwise. + + \p disposer specified in \p Traits is called for unlinked item. + */ + bool unlink( value_type& val ) + { + return unlink_at( &m_Head, val ); + } + + /// Deletes the item from the list + /** \anchor cds_intrusive_LazyList_hp_erase_val + The function searches an item with key equal to \p key in the list, + unlinks it from the list, and returns \p true. + If the item with the key equal to \p key is not found the function return \p false. + + \p disposer specified in \p Traits is called for deleted item. + */ + template + bool erase( Q const& key ) + { + return erase_at( &m_Head, key, key_comparator()); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_LazyList_hp_erase_val "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + + \p disposer specified in \p Traits is called for deleted item. 
+ */ + template + bool erase_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return erase_at( &m_Head, key, cds::opt::details::make_comparator_from_less()); + } + + /// Deletes the item from the list + /** \anchor cds_intrusive_LazyList_hp_erase_func + The function searches an item with key equal to \p key in the list, + call \p func functor with item found, unlinks it from the list, and returns \p true. + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + + If \p key is not found the function return \p false. + + \p disposer specified in \p Traits is called for deleted item. + */ + template + bool erase( const Q& key, Func func ) + { + return erase_at( &m_Head, key, key_comparator(), func ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_LazyList_hp_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + + \p disposer specified in \p Traits is called for deleted item. + */ + template + bool erase_with( const Q& key, Less pred, Func func ) + { + CDS_UNUSED( pred ); + return erase_at( &m_Head, key, cds::opt::details::make_comparator_from_less(), func ); + } + + /// Extracts the item from the list with specified \p key + /** \anchor cds_intrusive_LazyList_hp_extract + The function searches an item with key equal to \p key, + unlinks it from the list, and returns it as \p guarded_ptr. + If \p key is not found the function returns an empty guarded pointer. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. 
+ + The \ref disposer specified in \p Traits class template parameter is called automatically + by garbage collector \p GC specified in class' template parameters when returned \p guarded_ptr object + will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::intrusive::LazyList< cds::gc::HP, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp( theList.extract( 5 )); + // Deal with gp + // ... + + // Destructor of gp releases internal HP guard + } + \endcode + */ + template + guarded_ptr extract( Q const& key ) + { + return extract_at( &m_Head, key, key_comparator()); + } + + /// Extracts the item from the list with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_LazyList_hp_extract "extract(Q const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + guarded_ptr extract_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return extract_at( &m_Head, key, cds::opt::details::make_comparator_from_less()); + } + + /// Finds the key \p key + /** \anchor cds_intrusive_LazyList_hp_find + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. + + The functor may change non-key fields of \p item. + While the functor \p f is calling the item \p item is locked. + + The function returns \p true if \p key is found, \p false otherwise. 
+ */ + template + bool find( Q& key, Func f ) + { + return find_at( &m_Head, key, key_comparator(), f ); + } + //@cond + template + bool find( Q const& key, Func f ) + { + return find_at( &m_Head, key, key_comparator(), f ); + } + //@endcond + + /// Finds the key \p key using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_LazyList_hp_find "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return find_at( &m_Head, key, cds::opt::details::make_comparator_from_less(), f ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return find_at( &m_Head, key, cds::opt::details::make_comparator_from_less(), f ); + } + //@endcond + + /// Checks whether the list contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + */ + template + bool contains( Q const& key ) + { + return find_at( &m_Head, key, key_comparator()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( Q const& key ) + { + return contains( key ); + } + //@cond + + /// Checks whether the map contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the list. 
+ */ + template + bool contains( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return find_at( &m_Head, key, cds::opt::details::make_comparator_from_less()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Finds \p key and return the item found + /** \anchor cds_intrusive_LazyList_hp_get + The function searches the item with key equal to \p key + and returns an guarded pointer to it. + If \p key is not found the function returns an empty guarded pointer. + + The \ref disposer specified in \p Traits class template parameter is called + by garbage collector \p GC automatically when returned \p guarded_ptr object + will be destroyed or released. + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::intrusive::LazyList< cds::gc::HP, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp(theList.get( 5 )); + if ( gp ) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + + Note the compare functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + guarded_ptr get( Q const& key ) + { + return get_at( &m_Head, key, key_comparator()); + } + + /// Finds \p key and return the item found + /** + The function is an analog of \ref cds_intrusive_LazyList_hp_get "get( Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. 
+ */ + template + guarded_ptr get_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return get_at( &m_Head, key, cds::opt::details::make_comparator_from_less()); + } + + /// Clears the list + void clear() + { + typename gc::Guard guard; + marked_node_ptr h; + while ( !empty()) { + h = m_Head.m_pNext.load( memory_model::memory_order_relaxed ); + guard.assign( node_traits::to_value_ptr( h.ptr())); + if ( m_Head.m_pNext.load(memory_model::memory_order_acquire) == h ) { + m_Head.m_Lock.lock(); + h->m_Lock.lock(); + + unlink_node( &m_Head, h.ptr(), &m_Head ); + --m_ItemCounter; + + h->m_Lock.unlock(); + m_Head.m_Lock.unlock(); + + retire_node( h.ptr()) ; // free node + } + } + } + + /// Checks if the list is empty + bool empty() const + { + return m_Head.m_pNext.load( memory_model::memory_order_relaxed ).ptr() == &m_Tail; + } + + /// Returns list's item count + /** + The value returned depends on item counter provided by \p Traits. For \p atomicity::empty_item_counter, + this function always returns 0. + + @note Even if you use real item counter and it returns 0, this fact does not mean that the list + is empty. To check list emptiness use \p empty() method. + */ + size_t size() const + { + return m_ItemCounter.value(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + protected: + //@cond + // split-list support + bool insert_aux_node( node_type * pNode ) + { + return insert_aux_node( &m_Head, pNode ); + } + + // split-list support + bool insert_aux_node( node_type * pHead, node_type * pNode ) + { + assert( pNode != nullptr ); + + // Hack: convert node_type to value_type. + // In principle, auxiliary node cannot be reducible to value_type + // We assume that internal comparator can correctly distinguish aux and regular node. 
+ return insert_at( pHead, *node_traits::to_value_ptr( pNode )); + } + + bool insert_at( node_type * pHead, value_type& val ) + { + position pos; + key_comparator cmp; + + while ( true ) { + search( pHead, val, pos, key_comparator()); + { + scoped_position_lock alp( pos ); + if ( validate( pos.pPred, pos.pCur )) { + if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { + // failed: key already in list + m_Stat.onInsertFailed(); + return false; + } + else { + link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur ); + break; + } + } + } + + m_Stat.onInsertRetry(); + } + + ++m_ItemCounter; + m_Stat.onInsertSuccess(); + return true; + } + + template + bool insert_at( node_type * pHead, value_type& val, Func f ) + { + position pos; + key_comparator cmp; + + while ( true ) { + search( pHead, val, pos, key_comparator()); + { + scoped_position_lock alp( pos ); + if ( validate( pos.pPred, pos.pCur )) { + if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { + // failed: key already in list + m_Stat.onInsertFailed(); + return false; + } + else { + link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur ); + f( val ); + break; + } + } + } + + m_Stat.onInsertRetry(); + } + + ++m_ItemCounter; + m_Stat.onInsertSuccess(); + return true; + } + + template + std::pair update_at( node_type * pHead, value_type& val, Func func, bool bAllowInsert ) + { + position pos; + key_comparator cmp; + + while ( true ) { + search( pHead, val, pos, key_comparator()); + { + scoped_position_lock alp( pos ); + if ( validate( pos.pPred, pos.pCur )) { + if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { + // key already in the list + + func( false, *node_traits::to_value_ptr( *pos.pCur ) , val ); + m_Stat.onUpdateExisting(); + return std::make_pair( true, false ); + } + else { + // new key + if ( !bAllowInsert ) { + m_Stat.onUpdateFailed(); + return std::make_pair( false, false 
); + } + + link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur ); + func( true, val, val ); + break; + } + } + } + + m_Stat.onUpdateRetry(); + } + + ++m_ItemCounter; + m_Stat.onUpdateNew(); + return std::make_pair( true, true ); + } + + bool unlink_at( node_type * pHead, value_type& val ) + { + position pos; + key_comparator cmp; + + while ( true ) { + search( pHead, val, pos, key_comparator()); + { + int nResult = 0; + { + scoped_position_lock alp( pos ); + if ( validate( pos.pPred, pos.pCur )) { + if ( pos.pCur != &m_Tail + && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 + && node_traits::to_value_ptr( pos.pCur ) == &val ) + { + // item found + unlink_node( pos.pPred, pos.pCur, pHead ); + nResult = 1; + } + else + nResult = -1; + } + } + + if ( nResult ) { + if ( nResult > 0 ) { + --m_ItemCounter; + retire_node( pos.pCur ); + m_Stat.onEraseSuccess(); + return true; + } + + m_Stat.onEraseFailed(); + return false; + } + } + + m_Stat.onEraseRetry(); + } + } + + template + bool erase_at( node_type * pHead, const Q& val, Compare cmp, Func f, position& pos ) + { + while ( true ) { + search( pHead, val, pos, cmp ); + { + int nResult = 0; + { + scoped_position_lock alp( pos ); + if ( validate( pos.pPred, pos.pCur )) { + if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { + // key found + unlink_node( pos.pPred, pos.pCur, pHead ); + f( *node_traits::to_value_ptr( *pos.pCur )); + nResult = 1; + } + else { + nResult = -1; + } + } + } + if ( nResult ) { + if ( nResult > 0 ) { + --m_ItemCounter; + retire_node( pos.pCur ); + m_Stat.onEraseSuccess(); + return true; + } + + m_Stat.onEraseFailed(); + return false; + } + } + + m_Stat.onEraseRetry(); + } + } + + template + bool erase_at( node_type * pHead, const Q& val, Compare cmp, Func f ) + { + position pos; + return erase_at( pHead, val, cmp, f, pos ); + } + + template + bool erase_at( node_type * pHead, const Q& val, Compare cmp ) + { + position pos; + return 
erase_at( pHead, val, cmp, [](value_type const &){}, pos ); + } + + template + guarded_ptr extract_at( node_type * pHead, const Q& val, Compare cmp ) + { + position pos; + if ( erase_at( pHead, val, cmp, [](value_type const &){}, pos )) + return guarded_ptr( pos.guards.release( position::guard_current_item )); + return guarded_ptr(); + } + + template + bool find_at( node_type * pHead, Q& val, Compare cmp, Func f ) + { + position pos; + + search( pHead, val, pos, cmp ); + if ( pos.pCur != &m_Tail ) { + std::unique_lock< typename node_type::lock_type> al( pos.pCur->m_Lock ); + if ( !pos.pCur->is_marked() + && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) + { + f( *node_traits::to_value_ptr( *pos.pCur ), val ); + m_Stat.onFindSuccess(); + return true; + } + } + + m_Stat.onFindFailed(); + return false; + } + + template + bool find_at( node_type * pHead, Q const& val, Compare cmp ) + { + position pos; + + search( pHead, val, pos, cmp ); + if ( pos.pCur != &m_Tail && !pos.pCur->is_marked() && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { + m_Stat.onFindSuccess(); + return true; + } + + m_Stat.onFindFailed(); + return false; + } + + template + guarded_ptr get_at( node_type * pHead, Q const& val, Compare cmp ) + { + position pos; + + search( pHead, val, pos, cmp ); + if ( pos.pCur != &m_Tail + && !pos.pCur->is_marked() + && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) + { + m_Stat.onFindSuccess(); + return guarded_ptr( pos.guards.release( position::guard_current_item )); + } + + m_Stat.onFindFailed(); + return guarded_ptr(); + } + + // split-list support + template + void destroy( Predicate /*pred*/ ) + { + clear(); + } + + //@endcond + + protected: + //@cond + template + void search( node_type * pHead, const Q& key, position& pos, Compare cmp ) + { + node_type const* pTail = &m_Tail; + + marked_node_ptr pCur( pHead ); + marked_node_ptr pPrev( pHead ); + + while ( pCur.ptr() != pTail ) { + if ( pCur.ptr() != pHead ) { + if ( cmp( 
*node_traits::to_value_ptr( *pCur.ptr()), key ) >= 0 ) + break; + } + + pos.guards.copy( position::guard_prev_item, position::guard_current_item ); + pPrev = pCur; + + pCur = pos.guards.protect( position::guard_current_item, pPrev->m_pNext, + []( marked_node_ptr p ) { return node_traits::to_value_ptr( p.ptr()); } + ); + assert( pCur.ptr() != nullptr ); + if ( pCur.bits()) + pPrev = pCur = pHead; + } + + pos.pCur = pCur.ptr(); + pos.pPred = pPrev.ptr(); + } + + bool validate( node_type * pPred, node_type * pCur ) noexcept + { + if ( validate_link( pPred, pCur )) { + m_Stat.onValidationSuccess(); + return true; + } + + m_Stat.onValidationFailed(); + return false; + } + + static bool validate_link( node_type * pPred, node_type * pCur ) noexcept + { + return !pPred->is_marked() + && !pCur->is_marked() + && pPred->m_pNext.load(memory_model::memory_order_relaxed) == pCur; + } + + //@endcond + }; +}} // namespace cds::intrusive + +#endif // CDSLIB_INTRUSIVE_IMPL_LAZY_LIST_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/impl/michael_list.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/impl/michael_list.h new file mode 100644 index 0000000..856daa2 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/impl/michael_list.h @@ -0,0 +1,1256 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_IMPL_MICHAEL_LIST_H +#define CDSLIB_INTRUSIVE_IMPL_MICHAEL_LIST_H + +#include +#include + +namespace cds { namespace intrusive { + + /// Michael's lock-free ordered single-linked list + /** @ingroup cds_intrusive_list + \anchor cds_intrusive_MichaelList_hp + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N). + + Source: + - [2002] Maged Michael "High performance dynamic lock-free hash tables and list-based sets" + + Template arguments: + - \p GC - Garbage collector used. Note the \p GC must be the same as the GC used for item type \p T (see \p michael_list::node). + - \p T - type to be stored in the list. The type must be based on \p michael_list::node (for \p michael_list::base_hook) + or it must have a member of type \p michael_list::node (for \p michael_list::member_hook). + - \p Traits - type traits, default is \p michael_list::traits. 
It is possible to declare option-based + list with \p cds::intrusive::michael_list::make_traits metafunction: + For example, the following traits-based declaration of \p gc::HP Michael's list + \code + #include + // Declare item stored in your list + struct item: public cds::intrusive::michael_list::node< cds::gc::HP > + { + int nKey; + // .... other data + }; + + // Declare comparator for the item + struct my_compare { + int operator()( item const& i1, item const& i2 ) const + { + return i1.nKey - i2.nKey; + } + }; + + // Declare traits + struct my_traits: public cds::intrusive::michael_list::traits + { + typedef cds::intrusive::michael_list::base_hook< cds::opt::gc< cds::gc::HP > > hook; + typedef my_compare compare; + }; + + // Declare traits-based list + typedef cds::intrusive::MichaelList< cds::gc::HP, item, my_traits > traits_based_list; + \endcode + is equivalent for the following option-based list + \code + #include + + // item struct and my_compare are the same + + // Declare option-based list + typedef cds::intrusive::MichaelList< cds::gc::HP, item, + typename cds::intrusive::michael_list::make_traits< + cds::intrusive::opt::hook< cds::intrusive::michael_list::base_hook< cds::opt::gc< cds::gc::HP > > > // hook option + ,cds::intrusive::opt::compare< my_compare > // item comparator option + >::type + > option_based_list; + \endcode + + \par Usage + There are different specializations of this template for each garbage collecting schema. + You should select GC needed and include appropriate .h-file: + - for \p gc::HP: + - for \p gc::DHP: + - for \ref cds_urcu_gc "RCU type" - see \ref cds_intrusive_MichaelList_rcu "RCU-based MichaelList" + - for \p gc::nogc: + See \ref cds_intrusive_MichaelList_nogc "non-GC MichaelList" + + Then, you should incorporate \p michael_list::node into your struct \p T and provide + appropriate \p michael_list::traits::hook in your \p Traits template parameters. 
Usually, for \p Traits you + define a struct based on \p michael_list::traits. + + Example for \p gc::DHP and base hook: + \code + // Include GC-related Michael's list specialization + #include + + // Data stored in Michael's list + struct my_data: public cds::intrusive::michael_list::node< cds::gc::DHP > + { + // key field + std::string strKey; + + // other data + // ... + }; + + // my_data comparing functor + struct my_data_cmp { + int operator()( const my_data& d1, const my_data& d2 ) + { + return d1.strKey.compare( d2.strKey ); + } + + int operator()( const my_data& d, const std::string& s ) + { + return d.strKey.compare(s); + } + + int operator()( const std::string& s, const my_data& d ) + { + return s.compare( d.strKey ); + } + }; + + + // Declare traits + struct my_traits: public cds::intrusive::michael_list::traits + { + typedef cds::intrusive::michael_list::base_hook< cds::opt::gc< cds::gc::DHP > > hook; + typedef my_data_cmp compare; + }; + + // Declare list type + typedef cds::intrusive::MichaelList< cds::gc::DHP, my_data, my_traits > traits_based_list; + \endcode + + Equivalent option-based code: + \code + // GC-related specialization + #include + + struct my_data { + // see above + }; + struct compare { + // see above + }; + + // Declare option-based list + typedef cds::intrusive::MichaelList< cds::gc::DHP + ,my_data + , typename cds::intrusive::michael_list::make_traits< + cds::intrusive::opt::hook< cds::intrusive::michael_list::base_hook< cds::opt::gc< cds::gc::DHP > > > + ,cds::intrusive::opt::compare< my_data_cmp > + >::type + > option_based_list; + + \endcode + */ + template < + class GC + ,typename T +#ifdef CDS_DOXYGEN_INVOKED + ,class Traits = michael_list::traits +#else + ,class Traits +#endif + > + class MichaelList + { + public: + typedef T value_type; ///< type of value stored in the list + typedef Traits traits; ///< Traits template parameter + + typedef typename traits::hook hook; ///< hook type + typedef typename hook::node_type 
node_type; ///< node type + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator ; ///< key comparison functor based on opt::compare and opt::less option setter. +# else + typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; +# endif + + typedef typename traits::disposer disposer; ///< disposer used + typedef typename traits::stat stat; ///< Internal statistics + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits + typedef typename michael_list::get_link_checker< node_type, traits::link_checker >::type link_checker; ///< link checker + + typedef GC gc ; ///< Garbage collector + typedef typename traits::back_off back_off; ///< back-off strategy + typedef typename traits::item_counter item_counter; ///< Item counting policy used + typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option + + typedef typename gc::template guarded_ptr< value_type > guarded_ptr; ///< Guarded pointer + + static constexpr const size_t c_nHazardPtrCount = 4; ///< Count of hazard pointer required for the algorithm + + //@cond + // Rebind traits (split-list support) + template + struct rebind_traits { + typedef MichaelList< + gc + , value_type + , typename cds::opt::make_options< traits, Options...>::type + > type; + }; + + // Stat selector + template + using select_stat_wrapper = michael_list::select_stat_wrapper< Stat >; + //@endcond + + protected: + typedef typename node_type::atomic_marked_ptr atomic_node_ptr; ///< Atomic node pointer + typedef typename node_type::marked_ptr marked_node_ptr; ///< Node marked pointer + + typedef atomic_node_ptr auxiliary_head; ///< Auxiliary head type (for split-list support) + + atomic_node_ptr m_pHead; ///< Head pointer + item_counter m_ItemCounter; ///< Item counter + stat m_Stat; ///< Internal statistics + + //@cond + /// Position pointer for item search + struct position { + atomic_node_ptr * 
pPrev ; ///< Previous node + node_type * pCur ; ///< Current node + node_type * pNext ; ///< Next node + + typename gc::template GuardArray<3> guards ; ///< Guards array + + enum { + guard_prev_item, + guard_current_item, + guard_next_item + }; + }; + + struct clean_disposer { + void operator()( value_type * p ) + { + michael_list::node_cleaner()( node_traits::to_node_ptr( p )); + disposer()( p ); + } + }; + //@endcond + + protected: + //@cond + static void retire_node( node_type * pNode ) + { + assert( pNode != nullptr ); + gc::template retire( node_traits::to_value_ptr( *pNode )); + } + + static bool link_node( node_type * pNode, position& pos ) + { + assert( pNode != nullptr ); + link_checker::is_empty( pNode ); + + marked_node_ptr cur(pos.pCur); + pNode->m_pNext.store( cur, memory_model::memory_order_release ); + if ( cds_likely( pos.pPrev->compare_exchange_strong( cur, marked_node_ptr(pNode), memory_model::memory_order_release, atomics::memory_order_relaxed ))) + return true; + + pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed ); + return false; + } + + static bool unlink_node( position& pos ) + { + assert( pos.pPrev != nullptr ); + assert( pos.pCur != nullptr ); + + // Mark the node (logical deleting) + marked_node_ptr next(pos.pNext, 0); + if ( cds_likely( pos.pCur->m_pNext.compare_exchange_strong( next, marked_node_ptr(pos.pNext, 1), memory_model::memory_order_release, atomics::memory_order_relaxed ))) { + // physical deletion may be performed by search function if it detects that a node is logically deleted (marked) + // CAS may be successful here or in other thread that searching something + marked_node_ptr cur(pos.pCur); + if ( cds_likely( pos.pPrev->compare_exchange_strong( cur, marked_node_ptr( pos.pNext ), memory_model::memory_order_acquire, atomics::memory_order_relaxed ))) + retire_node( pos.pCur ); + return true; + } + return false; + } + //@endcond + + protected: + //@cond + template + class iterator_type + { + friend 
class MichaelList; + + protected: + value_type * m_pNode; + typename gc::Guard m_Guard; + + void next() + { + if ( m_pNode ) { + typename gc::Guard g; + node_type * pCur = node_traits::to_node_ptr( *m_pNode ); + + marked_node_ptr pNext; + do { + pNext = pCur->m_pNext.load(memory_model::memory_order_relaxed); + g.assign( node_traits::to_value_ptr( pNext.ptr())); + } while ( cds_unlikely( pNext != pCur->m_pNext.load(memory_model::memory_order_acquire))); + + if ( pNext.ptr()) + m_pNode = m_Guard.assign( g.template get()); + else { + m_pNode = nullptr; + m_Guard.clear(); + } + } + } + + iterator_type( atomic_node_ptr const& pNode ) + { + for (;;) { + marked_node_ptr p = pNode.load(memory_model::memory_order_relaxed); + if ( p.ptr()) { + m_pNode = m_Guard.assign( node_traits::to_value_ptr( p.ptr())); + } + else { + m_pNode = nullptr; + m_Guard.clear(); + } + if ( cds_likely( p == pNode.load(memory_model::memory_order_acquire))) + break; + } + } + + public: + typedef typename cds::details::make_const_type::pointer value_ptr; + typedef typename cds::details::make_const_type::reference value_ref; + + iterator_type() + : m_pNode( nullptr ) + {} + + iterator_type( iterator_type const& src ) + { + if ( src.m_pNode ) { + m_pNode = m_Guard.assign( src.m_pNode ); + } + else + m_pNode = nullptr; + } + + value_ptr operator ->() const + { + return m_pNode; + } + + value_ref operator *() const + { + assert( m_pNode != nullptr ); + return *m_pNode; + } + + /// Pre-increment + iterator_type& operator ++() + { + next(); + return *this; + } + + iterator_type& operator = (iterator_type const& src) + { + m_pNode = src.m_pNode; + m_Guard.assign( m_pNode ); + return *this; + } + + /* + /// Post-increment + void operator ++(int) + { + next(); + } + */ + + template + bool operator ==(iterator_type const& i ) const + { + return m_pNode == i.m_pNode; + } + template + bool operator !=(iterator_type const& i ) const + { + return m_pNode != i.m_pNode; + } + }; + //@endcond + + public: + ///@name 
Forward iterators (only for debugging purpose) + //@{ + /// Forward iterator + /** + The forward iterator for Michael's list has some features: + - it has no post-increment operator + - to protect the value, the iterator contains a GC-specific guard + another guard is required locally for increment operator. + For some GC (like as \p gc::HP), a guard is a limited resource per thread, so an exception (or assertion) "no free guard" + may be thrown if the limit of guard count per thread is exceeded. + - The iterator cannot be moved across thread boundary since it contains thread-private GC's guard. + - Iterator ensures thread-safety even if you delete the item the iterator points to. However, in case of concurrent + deleting operations there is no guarantee that you iterate all item in the list. + Moreover, a crash is possible when you try to iterate the next element that has been deleted by concurrent thread. + + @warning Use this iterator on the concurrent container for debugging purpose only. 
+ + The iterator interface: + \code + class iterator { + public: + // Default constructor + iterator(); + + // Copy construtor + iterator( iterator const& src ); + + // Dereference operator + value_type * operator ->() const; + + // Dereference operator + value_type& operator *() const; + + // Preincrement operator + iterator& operator ++(); + + // Assignment operator + iterator& operator = (iterator const& src); + + // Equality operators + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + */ + typedef iterator_type iterator; + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( m_pHead ); + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + Internally, end returning value equals to \p nullptr. + + The returned value can be used only to control reaching the end of the list. 
+ For empty list begin() == end() + */ + iterator end() + { + return iterator(); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator cbegin() const + { + return const_iterator( m_pHead ); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator begin() const + { + return const_iterator( m_pHead ); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator end() const + { + return const_iterator(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator cend() const + { + return const_iterator(); + } + //@} + + public: + /// Default constructor initializes empty list + MichaelList() + : m_pHead( nullptr ) + { + static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" ); + } + + //@cond + template >::value >> + explicit MichaelList( Stat& st ) + : m_pHead( nullptr ) + , m_Stat( st ) + {} + //@endcond + + /// Destroys the list object + ~MichaelList() + { + clear(); + } + + /// Inserts new node + /** + The function inserts \p val into the list if the list does not contain + an item with key equal to \p val. + + Returns \p true if \p val has been linked to the list, \p false otherwise. + */ + bool insert( value_type& val ) + { + return insert_at( m_pHead, val ); + } + + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. + + The function allows to split new item creating into two part: + - create item with key only + - insert new item into the list + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. 
User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this list's item by concurrent threads. + The user-defined functor is called only if the inserting is success. + + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + bool insert( value_type& val, Func f ) + { + return insert_at( m_pHead, val, f ); + } + + /// Updates the node + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val is not found in the list, then \p val is inserted + iff \p bInsert is \p true. + Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + void func( bool bNew, value_type& item, value_type& val ); + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + - \p val - argument \p val passed into the \p update() function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refers to the same thing. + + The functor may change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if new item has been added or \p false if the item with that key + already in the list. 
+ + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + std::pair update( value_type& val, Func func, bool bInsert = true ) + { + return update_at( m_pHead, val, func, bInsert ); + } + + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( value_type& val, Func func ) + { + return update( val, func, true ); + } + //@endcond + + /// Unlinks the item \p val from the list + /** + The function searches the item \p val in the list and unlinks it from the list + if it is found and it is equal to \p val. + + Difference between \p erase() and \p %unlink(): \p %erase() finds a key + and deletes the item found. \p %unlink() finds an item by key and deletes it + only if \p val is an item of the list, i.e. the pointer to item found + is equal to &val . + + \p disposer specified in \p Traits is called for deleted item. + + The function returns \p true if success and \p false otherwise. + */ + bool unlink( value_type& val ) + { + return unlink_at( m_pHead, val ); + } + + /// Deletes the item from the list + /** \anchor cds_intrusive_MichaelList_hp_erase_val + The function searches an item with key equal to \p key in the list, + unlinks it from the list, and returns \p true. + If \p key is not found the function return \p false. + + \p disposer specified in \p Traits is called for deleted item. + */ + template + bool erase( Q const& key ) + { + return erase_at( m_pHead, key, key_comparator()); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelList_hp_erase_val "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + + \p disposer specified in \p Traits is called for deleted item. 
+ */ + template + bool erase_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return erase_at( m_pHead, key, cds::opt::details::make_comparator_from_less()); + } + + /// Deletes the item from the list + /** \anchor cds_intrusive_MichaelList_hp_erase_func + The function searches an item with key equal to \p key in the list, + call \p func functor with item found, unlinks it from the list, and returns \p true. + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + If \p key is not found the function return \p false, \p func is not called. + + \p disposer specified in \p Traits is called for deleted item. + */ + template + bool erase( Q const& key, Func func ) + { + return erase_at( m_pHead, key, key_comparator(), func ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelList_hp_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + + \p disposer specified in \p Traits is called for deleted item. + */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return erase_at( m_pHead, key, cds::opt::details::make_comparator_from_less(), f ); + } + + /// Extracts the item from the list with specified \p key + /** \anchor cds_intrusive_MichaelList_hp_extract + The function searches an item with key equal to \p key, + unlinks it from the list, and returns it as \p guarded_ptr. + If \p key is not found returns an empty guarded pointer. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. 
+ + The \ref disposer specified in \p Traits class template parameter is called automatically + by garbage collector \p GC when returned \ref guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::intrusive::MichaelList< cds::gc::HP, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp(theList.extract( 5 )); + if ( gp ) { + // Deal with gp + // ... + } + // Destructor of gp releases internal HP guard + } + \endcode + */ + template + guarded_ptr extract( Q const& key ) + { + return extract_at( m_pHead, key, key_comparator()); + } + + /// Extracts the item using compare functor \p pred + /** + The function is an analog of \ref cds_intrusive_MichaelList_hp_extract "extract(Q const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + guarded_ptr extract_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return extract_at( m_pHead, key, cds::opt::details::make_comparator_from_less()); + } + + /// Finds \p key in the list + /** \anchor cds_intrusive_MichaelList_hp_find_func + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. + + The functor may change non-key fields of \p item. Note that the function is only guarantee + that \p item cannot be disposed during functor is executing. + The function does not serialize simultaneous access to the \p item. 
If such access is + possible you must provide your own synchronization schema to keep out unsafe item modifications. + + The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor + may modify both arguments. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& key, Func f ) + { + return find_at( m_pHead, key, key_comparator(), f ); + } + //@cond + template + bool find( Q const& key, Func f ) + { + return find_at( m_pHead, key, key_comparator(), f ); + } + //@endcond + + /// Finds the \p key using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelList_hp_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return find_at( m_pHead, key, cds::opt::details::make_comparator_from_less(), f ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return find_at( m_pHead, key, cds::opt::details::make_comparator_from_less(), f ); + } + //@endcond + + /// Checks whether the list contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + */ + template + bool contains( Q const& key ) + { + return find_at( m_pHead, key, key_comparator()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( Q const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the list contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. 
+ \p Less must imply the same element order as the comparator used for building the list. + */ + template + bool contains( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return find_at( m_pHead, key, cds::opt::details::make_comparator_from_less()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Finds the \p key and return the item found + /** \anchor cds_intrusive_MichaelList_hp_get + The function searches the item with key equal to \p key + and returns it as \p guarded_ptr. + If \p key is not found the function returns an empty guarded pointer. + + The \ref disposer specified in \p Traits class template parameter is called + by garbage collector \p GC automatically when returned \ref guarded_ptr object + will be destroyed or released. + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::intrusive::MichaelList< cds::gc::HP, foo, my_traits > ord_list; + ord_list theList; + // ... + { + ord_list::guarded_ptr gp(theList.get( 5 )); + if ( gp ) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + + Note the compare functor specified for \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + guarded_ptr get( Q const& key ) + { + return get_at( m_pHead, key, key_comparator()); + } + + /// Finds the \p key and return the item found + /** + The function is an analog of \ref cds_intrusive_MichaelList_hp_get "get( Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. 
+ */ + template + guarded_ptr get_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return get_at( m_pHead, key, cds::opt::details::make_comparator_from_less()); + } + + /// Clears the list + /** + The function unlink all items from the list. + */ + void clear() + { + typename gc::Guard guard; + marked_node_ptr head; + while ( true ) { + head = m_pHead.load(memory_model::memory_order_relaxed); + if ( head.ptr()) + guard.assign( node_traits::to_value_ptr( *head.ptr())); + if ( cds_likely( m_pHead.load(memory_model::memory_order_acquire) == head )) { + if ( head.ptr() == nullptr ) + break; + value_type& val = *node_traits::to_value_ptr( *head.ptr()); + unlink( val ); + } + } + } + + /// Checks whether the list is empty + bool empty() const + { + return m_pHead.load( memory_model::memory_order_relaxed ).all() == nullptr; + } + + /// Returns list's item count + /** + The value returned depends on item counter provided by \p Traits. For \p atomicity::empty_item_counter, + this function always returns 0. + + @note Even if you use real item counter and it returns 0, this fact does not mean that the list + is empty. To check list emptiness use \p empty() method. + */ + size_t size() const + { + return m_ItemCounter.value(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + protected: + //@cond + // split-list support + bool insert_aux_node( node_type * pNode ) + { + return insert_aux_node( m_pHead, pNode ); + } + + // split-list support + bool insert_aux_node( atomic_node_ptr& refHead, node_type * pNode ) + { + assert( pNode != nullptr ); + + // Hack: convert node_type to value_type. + // In principle, auxiliary node can be non-reducible to value_type + // We assume that comparator can correctly distinguish aux and regular node. 
+ return insert_at( refHead, *node_traits::to_value_ptr( pNode )); + } + + bool insert_at( atomic_node_ptr& refHead, value_type& val ) + { + node_type * pNode = node_traits::to_node_ptr( val ); + position pos; + + while ( true ) { + if ( search( refHead, val, pos, key_comparator())) { + m_Stat.onInsertFailed(); + return false; + } + + if ( link_node( pNode, pos )) { + ++m_ItemCounter; + m_Stat.onInsertSuccess(); + return true; + } + + m_Stat.onInsertRetry(); + } + } + + template + bool insert_at( atomic_node_ptr& refHead, value_type& val, Func f ) + { + node_type * pNode = node_traits::to_node_ptr( val ); + position pos; + + while ( true ) { + if ( search( refHead, val, pos, key_comparator())) { + m_Stat.onInsertFailed(); + return false; + } + + typename gc::Guard guard; + guard.assign( &val ); + if ( link_node( pNode, pos )) { + f( val ); + ++m_ItemCounter; + m_Stat.onInsertSuccess(); + return true; + } + + m_Stat.onInsertRetry(); + } + } + + template + std::pair update_at( atomic_node_ptr& refHead, value_type& val, Func func, bool bInsert ) + { + position pos; + + node_type * pNode = node_traits::to_node_ptr( val ); + while ( true ) { + if ( search( refHead, val, pos, key_comparator())) { + if ( cds_unlikely( pos.pCur->m_pNext.load(memory_model::memory_order_acquire).bits())) { + back_off()(); + m_Stat.onUpdateMarked(); + continue; // the node found is marked as deleted + } + assert( key_comparator()( val, *node_traits::to_value_ptr( *pos.pCur )) == 0 ); + + func( false, *node_traits::to_value_ptr( *pos.pCur ) , val ); + m_Stat.onUpdateExisting(); + return std::make_pair( true, false ); + } + else { + if ( !bInsert ) { + m_Stat.onUpdateFailed(); + return std::make_pair( false, false ); + } + + typename gc::Guard guard; + guard.assign( &val ); + if ( link_node( pNode, pos )) { + ++m_ItemCounter; + func( true, val, val ); + m_Stat.onUpdateNew(); + return std::make_pair( true, true ); + } + } + + m_Stat.onUpdateRetry(); + } + } + + bool unlink_at( atomic_node_ptr& 
refHead, value_type& val ) + { + position pos; + + back_off bkoff; + while ( search( refHead, val, pos, key_comparator())) { + if ( node_traits::to_value_ptr( *pos.pCur ) == &val ) { + if ( unlink_node( pos )) { + --m_ItemCounter; + m_Stat.onEraseSuccess(); + return true; + } + else + bkoff(); + } + else { + m_Stat.onUpdateFailed(); + break; + } + + m_Stat.onEraseRetry(); + } + + m_Stat.onEraseFailed(); + return false; + } + + template + bool erase_at( atomic_node_ptr& refHead, const Q& val, Compare cmp, Func f, position& pos ) + { + back_off bkoff; + while ( search( refHead, val, pos, cmp )) { + if ( unlink_node( pos )) { + f( *node_traits::to_value_ptr( *pos.pCur )); + --m_ItemCounter; + m_Stat.onEraseSuccess(); + return true; + } + else + bkoff(); + + m_Stat.onEraseRetry(); + } + + m_Stat.onEraseFailed(); + return false; + } + + template + bool erase_at( atomic_node_ptr& refHead, const Q& val, Compare cmp, Func f ) + { + position pos; + return erase_at( refHead, val, cmp, f, pos ); + } + + template + bool erase_at( atomic_node_ptr& refHead, Q const& val, Compare cmp ) + { + position pos; + return erase_at( refHead, val, cmp, [](value_type const&){}, pos ); + } + + template + guarded_ptr extract_at( atomic_node_ptr& refHead, Q const& val, Compare cmp ) + { + position pos; + back_off bkoff; + while ( search( refHead, val, pos, cmp )) { + if ( unlink_node( pos )) { + --m_ItemCounter; + m_Stat.onEraseSuccess(); + return guarded_ptr( pos.guards.release( position::guard_current_item )); + } + else + bkoff(); + m_Stat.onEraseRetry(); + } + + m_Stat.onEraseFailed(); + return guarded_ptr(); + } + + template + bool find_at( atomic_node_ptr& refHead, Q const& val, Compare cmp ) + { + position pos; + if ( search( refHead, val, pos, cmp )) { + m_Stat.onFindSuccess(); + return true; + } + + m_Stat.onFindFailed(); + return false; + } + + template + bool find_at( atomic_node_ptr& refHead, Q& val, Compare cmp, Func f ) + { + position pos; + if ( search( refHead, val, pos, cmp )) 
{ + f( *node_traits::to_value_ptr( *pos.pCur ), val ); + m_Stat.onFindSuccess(); + return true; + } + + m_Stat.onFindFailed(); + return false; + } + + template + guarded_ptr get_at( atomic_node_ptr& refHead, Q const& val, Compare cmp ) + { + position pos; + if ( search( refHead, val, pos, cmp )) { + m_Stat.onFindSuccess(); + return guarded_ptr( pos.guards.release( position::guard_current_item )); + } + + m_Stat.onFindFailed(); + return guarded_ptr(); + } + + // split-list support + template + void destroy( Predicate /*pred*/ ) + { + clear(); + } + + //@endcond + + protected: + + //@cond + template + bool search( atomic_node_ptr& refHead, const Q& val, position& pos, Compare cmp ) + { + atomic_node_ptr * pPrev; + marked_node_ptr pNext; + marked_node_ptr pCur; + + back_off bkoff; + + try_again: + pPrev = &refHead; + pNext = nullptr; + + pCur = pos.guards.protect( position::guard_current_item, *pPrev, + [](marked_node_ptr p) -> value_type * + { + return node_traits::to_value_ptr( p.ptr()); + }); + + while ( true ) { + if ( pCur.ptr() == nullptr ) { + pos.pPrev = pPrev; + pos.pCur = nullptr; + pos.pNext = nullptr; + return false; + } + + pNext = pos.guards.protect( position::guard_next_item, pCur->m_pNext, + [](marked_node_ptr p ) -> value_type * + { + return node_traits::to_value_ptr( p.ptr()); + }); + if ( cds_unlikely( pPrev->load(memory_model::memory_order_acquire).all() != pCur.ptr())) { + bkoff(); + goto try_again; + } + + // pNext contains deletion mark for pCur + if ( pNext.bits() == 1 ) { + // pCur marked i.e. logically deleted. 
Help the erase/unlink function to unlink pCur node + marked_node_ptr cur( pCur.ptr()); + if ( cds_unlikely( pPrev->compare_exchange_strong( cur, marked_node_ptr( pNext.ptr()), memory_model::memory_order_acquire, atomics::memory_order_relaxed ))) { + retire_node( pCur.ptr()); + m_Stat.onHelpingSuccess(); + } + else { + bkoff(); + m_Stat.onHelpingFailed(); + goto try_again; + } + } + else { + assert( pCur.ptr() != nullptr ); + int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val ); + if ( nCmp >= 0 ) { + pos.pPrev = pPrev; + pos.pCur = pCur.ptr(); + pos.pNext = pNext.ptr(); + return nCmp == 0; + } + pPrev = &( pCur->m_pNext ); + pos.guards.copy( position::guard_prev_item, position::guard_current_item ); + } + pCur = pNext; + pos.guards.copy( position::guard_current_item, position::guard_next_item ); + } + } + //@endcond + }; +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_IMPL_MICHAEL_LIST_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/impl/skip_list.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/impl/skip_list.h new file mode 100644 index 0000000..2264d9e --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/impl/skip_list.h @@ -0,0 +1,1791 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_IMPL_SKIP_LIST_H +#define CDSLIB_INTRUSIVE_IMPL_SKIP_LIST_H + +#include +#include +#include // ref +#include +#include +#include + +namespace cds { namespace intrusive { + + //@cond + namespace skip_list { namespace details { + + template + class iterator { + public: + typedef GC gc; + typedef NodeTraits node_traits; + typedef BackOff back_off; + typedef typename node_traits::node_type node_type; + typedef typename node_traits::value_type value_type; + static constexpr bool const c_isConst = IsConst; + + typedef typename std::conditional< c_isConst, value_type const&, value_type&>::type value_ref; + + protected: + typedef typename node_type::marked_ptr marked_ptr; + typedef typename node_type::atomic_marked_ptr atomic_marked_ptr; + + typename gc::Guard m_guard; + node_type * m_pNode; + + protected: + static value_type * gc_protect( marked_ptr p ) + { + return node_traits::to_value_ptr( p.ptr()); + } + + void next() + { + typename gc::Guard g; + g.copy( m_guard 
); + back_off bkoff; + + for (;;) { + if ( m_pNode->next( m_pNode->height() - 1 ).load( atomics::memory_order_acquire ).bits()) { + // Current node is marked as deleted. So, its next pointer can point to anything + // In this case we interrupt our iteration and returns end() iterator. + *this = iterator(); + return; + } + + marked_ptr p = m_guard.protect( (*m_pNode)[0], gc_protect ); + node_type * pp = p.ptr(); + if ( p.bits()) { + // p is marked as deleted. Spin waiting for physical removal + bkoff(); + continue; + } + else if ( pp && pp->next( pp->height() - 1 ).load( atomics::memory_order_relaxed ).bits()) { + // p is marked as deleted. Spin waiting for physical removal + bkoff(); + continue; + } + + m_pNode = pp; + break; + } + } + + public: // for internal use only!!! + iterator( node_type& refHead ) + : m_pNode( nullptr ) + { + back_off bkoff; + + for (;;) { + marked_ptr p = m_guard.protect( refHead[0], gc_protect ); + if ( !p.ptr()) { + // empty skip-list + m_guard.clear(); + break; + } + + node_type * pp = p.ptr(); + // Logically deleted node is marked from highest level + if ( !pp->next( pp->height() - 1 ).load( atomics::memory_order_acquire ).bits()) { + m_pNode = pp; + break; + } + + bkoff(); + } + } + + public: + iterator() + : m_pNode( nullptr ) + {} + + iterator( iterator const& s) + : m_pNode( s.m_pNode ) + { + m_guard.assign( node_traits::to_value_ptr(m_pNode)); + } + + value_type * operator ->() const + { + assert( m_pNode != nullptr ); + assert( node_traits::to_value_ptr( m_pNode ) != nullptr ); + + return node_traits::to_value_ptr( m_pNode ); + } + + value_ref operator *() const + { + assert( m_pNode != nullptr ); + assert( node_traits::to_value_ptr( m_pNode ) != nullptr ); + + return *node_traits::to_value_ptr( m_pNode ); + } + + /// Pre-increment + iterator& operator ++() + { + next(); + return *this; + } + + iterator& operator =(const iterator& src) + { + m_pNode = src.m_pNode; + m_guard.copy( src.m_guard ); + return *this; + } + + template + 
bool operator ==(iterator const& i ) const + { + return m_pNode == i.m_pNode; + } + template + bool operator !=(iterator const& i ) const + { + return !( *this == i ); + } + }; + }} // namespace skip_list::details + //@endcond + + /// Lock-free skip-list set + /** @ingroup cds_intrusive_map + @anchor cds_intrusive_SkipListSet_hp + + The implementation of well-known probabilistic data structure called skip-list + invented by W.Pugh in his papers: + - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees + - [1990] W.Pugh A Skip List Cookbook + + A skip-list is a probabilistic data structure that provides expected logarithmic + time search without the need of rebalance. The skip-list is a collection of sorted + linked list. Nodes are ordered by key. Each node is linked into a subset of the lists. + Each list has a level, ranging from 0 to 32. The bottom-level list contains + all the nodes, and each higher-level list is a sublist of the lower-level lists. + Each node is created with a random top level (with a random height), and belongs + to all lists up to that level. The probability that a node has the height 1 is 1/2. + The probability that a node has the height N is 1/2 ** N (more precisely, + the distribution depends on an random generator provided, but our generators + have this property). + + The lock-free variant of skip-list is implemented according to book + - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming", + chapter 14.4 "A Lock-Free Concurrent Skiplist". + + Template arguments: + - \p GC - Garbage collector used. Note the \p GC must be the same as the GC used for item type \p T, see \p skip_list::node. + - \p T - type to be stored in the list. The type must be based on \p skip_list::node (for \p skip_list::base_hook) + or it must have a member of type \p skip_list::node (for \p skip_list::member_hook). + - \p Traits - skip-list traits, default is \p skip_list::traits. 
+ It is possible to declare option-based list with \p cds::intrusive::skip_list::make_traits metafunction istead of \p Traits + template argument. + + @warning The skip-list requires up to 67 hazard pointers that may be critical for some GCs for which + the guard count is limited (like as \p gc::HP). Those GCs should be explicitly initialized with + hazard pointer enough: \code cds::gc::HP myhp( 67 ) \endcode. Otherwise an run-time exception may be raised + when you try to create skip-list object. + + There are several specializations of \p %SkipListSet for each \p GC. You should include: + - for \p gc::HP garbage collector + - for \p gc::DHP garbage collector + - for \ref cds_intrusive_SkipListSet_nogc for append-only set + - for \ref cds_intrusive_SkipListSet_rcu "RCU type" + + Iterators + + The class supports a forward iterator (\ref iterator and \ref const_iterator). + The iteration is ordered. + The iterator object is thread-safe: the element pointed by the iterator object is guarded, + so, the element cannot be reclaimed while the iterator object is alive. + However, passing an iterator object between threads is dangerous. + + @warning Due to concurrent nature of skip-list set it is not guarantee that you can iterate + all elements in the set: any concurrent deletion can exclude the element + pointed by the iterator from the set, and your iteration can be terminated + before end of the set. Therefore, such iteration is more suitable for debugging purpose only + + Remember, each iterator object requires 2 additional hazard pointers, that may be + a limited resource for \p GC like as \p gc::HP (for \p gc::DHP the count of + guards is unlimited). 
+ + The iterator class supports the following minimalistic interface: + \code + struct iterator { + // Default ctor + iterator(); + + // Copy ctor + iterator( iterator const& s); + + value_type * operator ->() const; + value_type& operator *() const; + + // Pre-increment + iterator& operator ++(); + + // Copy assignment + iterator& operator = (const iterator& src); + + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + Note, the iterator object returned by \p end(), \p cend() member functions points to \p nullptr and should not be dereferenced. + + How to use + + You should incorporate \p skip_list::node into your struct \p T and provide + appropriate \p skip_list::traits::hook in your \p Traits template parameters. Usually, for \p Traits you + define a struct based on \p skip_list::traits. + + Example for \p gc::HP and base hook: + \code + // Include GC-related skip-list specialization + #include + + // Data stored in skip list + struct my_data: public cds::intrusive::skip_list::node< cds::gc::HP > + { + // key field + std::string strKey; + + // other data + // ... 
+ }; + + // my_data compare functor + struct my_data_cmp { + int operator()( const my_data& d1, const my_data& d2 ) + { + return d1.strKey.compare( d2.strKey ); + } + + int operator()( const my_data& d, const std::string& s ) + { + return d.strKey.compare(s); + } + + int operator()( const std::string& s, const my_data& d ) + { + return s.compare( d.strKey ); + } + }; + + + // Declare your traits + struct my_traits: public cds::intrusive::skip_list::traits + { + typedef cds::intrusive::skip_list::base_hook< cds::opt::gc< cds::gc::HP > > hook; + typedef my_data_cmp compare; + }; + + // Declare skip-list set type + typedef cds::intrusive::SkipListSet< cds::gc::HP, my_data, my_traits > traits_based_set; + \endcode + + Equivalent option-based code: + \code + // GC-related specialization + #include + + struct my_data { + // see above + }; + struct compare { + // see above + }; + + // Declare option-based skip-list set + typedef cds::intrusive::SkipListSet< cds::gc::HP + ,my_data + , typename cds::intrusive::skip_list::make_traits< + cds::intrusive::opt::hook< cds::intrusive::skip_list::base_hook< cds::opt::gc< cds::gc::HP > > > + ,cds::intrusive::opt::compare< my_data_cmp > + >::type + > option_based_set; + + \endcode + */ + template < + class GC + ,typename T +#ifdef CDS_DOXYGEN_INVOKED + ,typename Traits = skip_list::traits +#else + ,typename Traits +#endif + > + class SkipListSet + { + public: + typedef GC gc; ///< Garbage collector + typedef T value_type; ///< type of value stored in the skip-list + typedef Traits traits; ///< Traits template parameter + + typedef typename traits::hook hook; ///< hook type + typedef typename hook::node_type node_type; ///< node type + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator ; ///< key comparison functor based on opt::compare and opt::less option setter. 
+# else + typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; +# endif + + typedef typename traits::disposer disposer; ///< item disposer + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits + + typedef typename traits::item_counter item_counter; ///< Item counting policy + typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option + typedef typename traits::random_level_generator random_level_generator; ///< random level generator + typedef typename traits::allocator allocator_type; ///< allocator for maintaining array of next pointers of the node + typedef typename traits::back_off back_off; ///< Back-off strategy + typedef typename traits::stat stat; ///< internal statistics type + + public: + typedef typename gc::template guarded_ptr< value_type > guarded_ptr; ///< Guarded pointer + + /// Max node height. The actual node height should be in range [0 .. c_nMaxHeight) + /** + The max height is specified by \ref skip_list::random_level_generator "random level generator" constant \p m_nUpperBound + but it should be no more than 32 (\p skip_list::c_nHeightLimit). 
+ */ + static unsigned int const c_nMaxHeight = std::conditional< + (random_level_generator::c_nUpperBound <= skip_list::c_nHeightLimit), + std::integral_constant< unsigned int, random_level_generator::c_nUpperBound >, + std::integral_constant< unsigned int, skip_list::c_nHeightLimit > + >::type::value; + + //@cond + static unsigned int const c_nMinHeight = 5; + //@endcond + + // c_nMaxHeight * 2 - pPred/pSucc guards + // + 1 - for erase, unlink + // + 1 - for clear + // + 1 - for help_remove() + static size_t const c_nHazardPtrCount = c_nMaxHeight * 2 + 3; ///< Count of hazard pointer required for the skip-list + + protected: + typedef typename node_type::atomic_marked_ptr atomic_node_ptr; ///< Atomic marked node pointer + typedef typename node_type::marked_ptr marked_node_ptr; ///< Node marked pointer + + protected: + //@cond + typedef skip_list::details::intrusive_node_builder< node_type, atomic_node_ptr, allocator_type > intrusive_node_builder; + + typedef typename std::conditional< + std::is_same< typename traits::internal_node_builder, cds::opt::none >::value + ,intrusive_node_builder + ,typename traits::internal_node_builder + >::type node_builder; + + typedef std::unique_ptr< node_type, typename node_builder::node_disposer > scoped_node_ptr; + + struct position { + node_type * pPrev[ c_nMaxHeight ]; + node_type * pSucc[ c_nMaxHeight ]; + + typename gc::template GuardArray< c_nMaxHeight * 2 > guards; ///< Guards array for pPrev/pSucc + node_type * pCur; // guarded by one of guards + }; + //@endcond + + public: + /// Default constructor + /** + The constructor checks whether the count of guards is enough + for skip-list and may raise an exception if not. 
+ */ + SkipListSet() + : m_Head( c_nMaxHeight ) + , m_nHeight( c_nMinHeight ) + { + static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" ); + + gc::check_available_guards( c_nHazardPtrCount ); + + // Barrier for head node + atomics::atomic_thread_fence( memory_model::memory_order_release ); + } + + /// Clears and destructs the skip-list + ~SkipListSet() + { + destroy(); + } + + public: + ///@name Forward iterators (only for debugging purpose) + //@{ + /// Iterator type + /** + The forward iterator has some features: + - it has no post-increment operator + - to protect the value, the iterator contains a GC-specific guard + another guard is required locally for increment operator. + For some GC (like as \p gc::HP), a guard is a limited resource per thread, so an exception (or assertion) "no free guard" + may be thrown if the limit of guard count per thread is exceeded. + - The iterator cannot be moved across thread boundary because it contains thread-private GC's guard. + - Iterator ensures thread-safety even if you delete the item the iterator points to. However, in case of concurrent + deleting operations there is no guarantee that you iterate all item in the list. + Moreover, a crash is possible when you try to iterate the next element that has been deleted by concurrent thread. + + @warning Use this iterator on the concurrent container for debugging purpose only. 
+ + The iterator interface: + \code + class iterator { + public: + // Default constructor + iterator(); + + // Copy construtor + iterator( iterator const& src ); + + // Dereference operator + value_type * operator ->() const; + + // Dereference operator + value_type& operator *() const; + + // Preincrement operator + iterator& operator ++(); + + // Assignment operator + iterator& operator = (iterator const& src); + + // Equality operators + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + */ + typedef skip_list::details::iterator< gc, node_traits, back_off, false > iterator; + + /// Const iterator type + typedef skip_list::details::iterator< gc, node_traits, back_off, true > const_iterator; + + /// Returns a forward iterator addressing the first element in a set + iterator begin() + { + return iterator( *m_Head.head()); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator begin() const + { + return const_iterator( *m_Head.head()); + } + /// Returns a forward const iterator addressing the first element in a set + const_iterator cbegin() const + { + return const_iterator( *m_Head.head()); + } + + /// Returns a forward iterator that addresses the location succeeding the last element in a set. + iterator end() + { + return iterator(); + } + + /// Returns a forward const iterator that addresses the location succeeding the last element in a set. + const_iterator end() const + { + return const_iterator(); + } + /// Returns a forward const iterator that addresses the location succeeding the last element in a set. + const_iterator cend() const + { + return const_iterator(); + } + //@} + + public: + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. + + Returns \p true if \p val is placed into the set, \p false otherwise. 
+ */ + bool insert( value_type& val ) + { + return insert( val, []( value_type& ) {} ); + } + + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. + + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this set's item by concurrent threads. + The user-defined functor is called only if the inserting is success. + */ + template + bool insert( value_type& val, Func f ) + { + typename gc::Guard gNew; + gNew.assign( &val ); + + node_type * pNode = node_traits::to_node_ptr( val ); + scoped_node_ptr scp( pNode ); + unsigned int nHeight = pNode->height(); + bool bTowerOk = pNode->has_tower(); // nHeight > 1 && pNode->get_tower() != nullptr; + bool bTowerMade = false; + + position pos; + while ( true ) + { + if ( find_position( val, pos, key_comparator(), true )) { + // scoped_node_ptr deletes the node tower if we create it + if ( !bTowerMade ) + scp.release(); + + m_Stat.onInsertFailed(); + return false; + } + + if ( !bTowerOk ) { + build_node( pNode ); + nHeight = pNode->height(); + bTowerMade = pNode->has_tower(); + bTowerOk = true; + } + + if ( !insert_at_position( val, pNode, pos, f )) { + m_Stat.onInsertRetry(); + continue; + } + + increase_height( nHeight ); + ++m_ItemCounter; + m_Stat.onAddNode( nHeight ); + m_Stat.onInsertSuccess(); + scp.release(); + return true; + } + } + + /// Updates the node + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val is not found in the set, then \p val is inserted into the set + iff \p bInsert is \p true. 
+ Otherwise, the functor \p func is called with item found. + The functor \p func signature is: + \code + void func( bool bNew, value_type& item, value_type& val ); + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p %update() function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refer to the same thing. + + Returns std::pair where \p first is \p true if operation is successful, + i.e. the node has been inserted or updated, + \p second is \p true if new item has been added or \p false if the item with \p key + already exists. + + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + std::pair update( value_type& val, Func func, bool bInsert = true ) + { + typename gc::Guard gNew; + gNew.assign( &val ); + + node_type * pNode = node_traits::to_node_ptr( val ); + scoped_node_ptr scp( pNode ); + unsigned int nHeight = pNode->height(); + bool bTowerOk = pNode->has_tower(); + bool bTowerMade = false; + + position pos; + while ( true ) + { + bool bFound = find_position( val, pos, key_comparator(), true ); + if ( bFound ) { + // scoped_node_ptr deletes the node tower if we create it before + if ( !bTowerMade ) + scp.release(); + + func( false, *node_traits::to_value_ptr(pos.pCur), val ); + m_Stat.onUpdateExist(); + return std::make_pair( true, false ); + } + + if ( !bInsert ) { + scp.release(); + return std::make_pair( false, false ); + } + + if ( !bTowerOk ) { + build_node( pNode ); + nHeight = pNode->height(); + bTowerMade = pNode->has_tower(); + bTowerOk = true; + } + + if ( !insert_at_position( val, pNode, pos, [&func]( value_type& item ) { func( true, item, item ); })) { + m_Stat.onInsertRetry(); + continue; + } + + increase_height( nHeight ); + ++m_ItemCounter; + scp.release(); + m_Stat.onAddNode( nHeight ); + m_Stat.onUpdateNew(); + return 
std::make_pair( true, true ); + } + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( value_type& val, Func func ) + { + return update( val, func, true ); + } + //@endcond + + /// Unlinks the item \p val from the set + /** + The function searches the item \p val in the set and unlink it from the set + if it is found and is equal to \p val. + + Difference between \p erase() and \p %unlink() functions: \p %erase() finds a key + and deletes the item found. \p %unlink() finds an item by key and deletes it + only if \p val is an item of that set, i.e. the pointer to item found + is equal to &val . + + The \p disposer specified in \p Traits class template parameter is called + by garbage collector \p GC asynchronously. + + The function returns \p true if success and \p false otherwise. + */ + bool unlink( value_type& val ) + { + position pos; + + if ( !find_position( val, pos, key_comparator(), false )) { + m_Stat.onUnlinkFailed(); + return false; + } + + node_type * pDel = pos.pCur; + assert( key_comparator()( *node_traits::to_value_ptr( pDel ), val ) == 0 ); + + unsigned int nHeight = pDel->height(); + typename gc::Guard gDel; + gDel.assign( node_traits::to_value_ptr(pDel)); + + if ( node_traits::to_value_ptr( pDel ) == &val && try_remove_at( pDel, pos, [](value_type const&) {} )) { + --m_ItemCounter; + m_Stat.onRemoveNode( nHeight ); + m_Stat.onUnlinkSuccess(); + return true; + } + + m_Stat.onUnlinkFailed(); + return false; + } + + /// Extracts the item from the set with specified \p key + /** \anchor cds_intrusive_SkipListSet_hp_extract + The function searches an item with key equal to \p key in the set, + unlinks it from the set, and returns it as \p guarded_ptr object. + If \p key is not found the function returns an empty guarded pointer. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. 
+ + The \p disposer specified in \p Traits class template parameter is called automatically + by garbage collector \p GC specified in class' template parameters when returned \p guarded_ptr object + will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::intrusive::SkipListSet< cds::gc::HP, foo, my_traits > skip_list; + skip_list theList; + // ... + { + skip_list::guarded_ptr gp(theList.extract( 5 )); + if ( gp ) { + // Deal with gp + // ... + } + // Destructor of gp releases internal HP guard + } + \endcode + */ + template + guarded_ptr extract( Q const& key ) + { + return extract_( key, key_comparator()); + } + + /// Extracts the item from the set with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_SkipListSet_hp_extract "extract(Q const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + guarded_ptr extract_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return extract_( key, cds::opt::details::make_comparator_from_less()); + } + + /// Extracts an item with minimal key from the list + /** + The function searches an item with minimal key, unlinks it, and returns it as \p guarded_ptr object. + If the skip-list is empty the function returns an empty guarded pointer. + + @note Due the concurrent nature of the list, the function extracts nearly minimum key. + It means that the function gets leftmost item and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. + So, the function returns the item with minimum key at the moment of list traversing. 
+ + The \p disposer specified in \p Traits class template parameter is called + by garbage collector \p GC automatically when returned \p guarded_ptr object + will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::intrusive::SkipListSet< cds::gc::HP, foo, my_traits > skip_list; + skip_list theList; + // ... + { + skip_list::guarded_ptr gp(theList.extract_min()); + if ( gp ) { + // Deal with gp + //... + } + // Destructor of gp releases internal HP guard + } + \endcode + */ + guarded_ptr extract_min() + { + return extract_min_(); + } + + /// Extracts an item with maximal key from the list + /** + The function searches an item with maximal key, unlinks it, and returns the pointer to item + as \p guarded_ptr object. + If the skip-list is empty the function returns an empty \p guarded_ptr. + + @note Due the concurrent nature of the list, the function extracts nearly maximal key. + It means that the function gets rightmost item and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key greater than rightmost item's key. + So, the function returns the item with maximum key at the moment of list traversing. + + The \p disposer specified in \p Traits class template parameter is called + by garbage collector \p GC asynchronously when returned \ref guarded_ptr object + will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::intrusive::SkipListSet< cds::gc::HP, foo, my_traits > skip_list; + skip_list theList; + // ... + { + skip_list::guarded_ptr gp( theList.extract_max( gp )); + if ( gp ) { + // Deal with gp + //... 
+ } + // Destructor of gp releases internal HP guard + } + \endcode + */ + guarded_ptr extract_max() + { + return extract_max_(); + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_SkipListSet_hp_erase + The function searches an item with key equal to \p key in the set, + unlinks it from the set, and returns \p true. + If the item with key equal to \p key is not found the function return \p false. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool erase( Q const& key ) + { + return erase_( key, key_comparator(), [](value_type const&) {} ); + } + + /// Deletes the item from the set with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_SkipListSet_hp_erase "erase(Q const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return erase_( key, cds::opt::details::make_comparator_from_less(), [](value_type const&) {} ); + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_SkipListSet_hp_erase_func + The function searches an item with key equal to \p key in the set, + call \p f functor with item found, unlinks it from the set, and returns \p true. + The \ref disposer specified in \p Traits class template parameter is called + by garbage collector \p GC asynchronously. + + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + + If the item with key equal to \p key is not found the function return \p false. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. 
+ */ + template + bool erase( Q const& key, Func f ) + { + return erase_( key, key_comparator(), f ); + } + + /// Deletes the item from the set with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_SkipListSet_hp_erase_func "erase(Q const&, Func)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return erase_( key, cds::opt::details::make_comparator_from_less(), f ); + } + + /// Finds \p key + /** \anchor cds_intrusive_SkipListSet_hp_find_func + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. + + The functor can change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set \p item. If such access is + possible you must provide your own synchronization on item level to exclude unsafe item modifications. + + Note the compare functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function returns \p true if \p key is found, \p false otherwise. 
+ */ + template + bool find( Q& key, Func f ) + { + return find_with_( key, key_comparator(), f ); + } + //@cond + template + bool find( Q const& key, Func f ) + { + return find_with_( key, key_comparator(), f ); + } + //@endcond + + /// Finds the key \p key with \p pred predicate for comparing + /** + The function is an analog of \ref cds_intrusive_SkipListSet_hp_find_func "find(Q&, Func)" + but \p pred is used for key compare. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return find_with_( key, cds::opt::details::make_comparator_from_less(), f ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return find_with_( key, cds::opt::details::make_comparator_from_less(), f ); + } + //@endcond + + /// Checks whether the set contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + */ + template + bool contains( Q const& key ) + { + return find_with_( key, key_comparator(), [](value_type& , Q const& ) {} ); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( Q const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the set contains \p key using \p pred predicate for searching + /** + The function is similar to contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. 
+ */ + template + bool contains( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return find_with_( key, cds::opt::details::make_comparator_from_less(), [](value_type& , Q const& ) {} ); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Finds \p key and return the item found + /** \anchor cds_intrusive_SkipListSet_hp_get + The function searches the item with key equal to \p key + and returns the pointer to the item found as \p guarded_ptr. + If \p key is not found the function returns an empt guarded pointer. + + The \p disposer specified in \p Traits class template parameter is called + by garbage collector \p GC asynchronously when returned \ref guarded_ptr object + will be destroyed or released. + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::intrusive::SkipListSet< cds::gc::HP, foo, my_traits > skip_list; + skip_list theList; + // ... + { + skip_list::guarded_ptr gp(theList.get( 5 )); + if ( gp ) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + + Note the compare functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + guarded_ptr get( Q const& key ) + { + return get_with_( key, key_comparator()); + } + + /// Finds \p key and return the item found + /** + The function is an analog of \ref cds_intrusive_SkipListSet_hp_get "get( Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. 
+ */ + template + guarded_ptr get_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return get_with_( key, cds::opt::details::make_comparator_from_less()); + } + + /// Returns item count in the set + /** + The value returned depends on item counter type provided by \p Traits template parameter. + If it is \p atomicity::empty_item_counter this function always returns 0. + Therefore, the function is not suitable for checking the set emptiness, use \p empty() + for this purpose. + */ + size_t size() const + { + return m_ItemCounter; + } + + /// Checks if the set is empty + bool empty() const + { + return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr; + } + + /// Clears the set (not atomic) + /** + The function unlink all items from the set. + The function is not atomic, i.e., in multi-threaded environment with parallel insertions + this sequence + \code + set.clear(); + assert( set.empty()); + \endcode + the assertion could be raised. + + For each item the \ref disposer will be called after unlinking. + */ + void clear() + { + while ( extract_min_()); + } + + /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32. 
+ static constexpr unsigned int max_height() noexcept + { + return c_nMaxHeight; + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + protected: + //@cond + unsigned int random_level() + { + // Random generator produces a number from range [0..31] + // We need a number from range [1..32] + return m_RandomLevelGen() + 1; + } + + template + node_type * build_node( Q v ) + { + return node_builder::make_tower( v, m_RandomLevelGen ); + } + + static value_type * gc_protect( marked_node_ptr p ) + { + return node_traits::to_value_ptr( p.ptr()); + } + + static void dispose_node( void* p ) + { + assert( p != nullptr ); + value_type* pVal = reinterpret_cast( p ); + typename node_builder::node_disposer()( node_traits::to_node_ptr( pVal )); + disposer()( pVal ); + } + + void help_remove( int nLevel, node_type* pPred, marked_node_ptr pCur ) + { + if ( pCur->is_upper_level( nLevel )) { + marked_node_ptr p( pCur.ptr()); + typename gc::Guard hp; + marked_node_ptr pSucc = hp.protect( pCur->next( nLevel ), gc_protect ); + + if ( pSucc.bits() && + pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()), + memory_model::memory_order_acquire, atomics::memory_order_relaxed )) + { + if ( pCur->level_unlinked()) { + gc::retire( node_traits::to_value_ptr( pCur.ptr()), dispose_node ); + m_Stat.onEraseWhileFind(); + } + } + } + } + + template + bool find_position( Q const& val, position& pos, Compare cmp, bool bStopIfFound ) + { + node_type * pPred; + marked_node_ptr pSucc; + marked_node_ptr pCur; + + // Hazard pointer array: + // pPred: [nLevel * 2] + // pSucc: [nLevel * 2 + 1] + + retry: + pPred = m_Head.head(); + int nCmp = 1; + + for ( int nLevel = static_cast( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) { + pos.guards.assign( nLevel * 2, node_traits::to_value_ptr( pPred )); + while ( true ) { + pCur = pos.guards.protect( nLevel * 2 + 1, pPred->next( nLevel ), gc_protect ); + if ( pCur.bits()) { + 
// pCur.bits() means that pPred is logically deleted + goto retry; + } + + if ( pCur.ptr() == nullptr ) { + // end of list at level nLevel - goto next level + break; + } + + // pSucc contains deletion mark for pCur + pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); + + if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr()) + goto retry; + + if ( pSucc.bits()) { + // pCur is marked, i.e. logically deleted + // try to help deleting pCur + help_remove( nLevel, pPred, pCur ); + goto retry; + } + else { + nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val ); + if ( nCmp < 0 ) { + pPred = pCur.ptr(); + pos.guards.copy( nLevel * 2, nLevel * 2 + 1 ); // pPrev guard := cur guard + } + else if ( nCmp == 0 && bStopIfFound ) + goto found; + else + break; + } + } + + // Next level + pos.pPrev[nLevel] = pPred; + pos.pSucc[nLevel] = pCur.ptr(); + } + + if ( nCmp != 0 ) + return false; + + found: + pos.pCur = pCur.ptr(); + return pCur.ptr() && nCmp == 0; + } + + bool find_min_position( position& pos ) + { + node_type * pPred; + marked_node_ptr pSucc; + marked_node_ptr pCur; + + // Hazard pointer array: + // pPred: [nLevel * 2] + // pSucc: [nLevel * 2 + 1] + + retry: + pPred = m_Head.head(); + + for ( int nLevel = static_cast( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) { + pos.guards.assign( nLevel * 2, node_traits::to_value_ptr( pPred )); + pCur = pos.guards.protect( nLevel * 2 + 1, pPred->next( nLevel ), gc_protect ); + + // pCur.bits() means that pPred is logically deleted + // head cannot be deleted + assert( pCur.bits() == 0 ); + + if ( pCur.ptr()) { + + // pSucc contains deletion mark for pCur + pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); + + if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr()) + goto retry; + + if ( pSucc.bits()) { + // pCur is marked, i.e. logically deleted. 
+ // try to help deleting pCur + help_remove( nLevel, pPred, pCur ); + goto retry; + } + } + + // Next level + pos.pPrev[nLevel] = pPred; + pos.pSucc[nLevel] = pCur.ptr(); + } + + return ( pos.pCur = pCur.ptr()) != nullptr; + } + + bool find_max_position( position& pos ) + { + node_type * pPred; + marked_node_ptr pSucc; + marked_node_ptr pCur; + + // Hazard pointer array: + // pPred: [nLevel * 2] + // pSucc: [nLevel * 2 + 1] + + retry: + pPred = m_Head.head(); + + for ( int nLevel = static_cast( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) { + pos.guards.assign( nLevel * 2, node_traits::to_value_ptr( pPred )); + while ( true ) { + pCur = pos.guards.protect( nLevel * 2 + 1, pPred->next( nLevel ), gc_protect ); + if ( pCur.bits()) { + // pCur.bits() means that pPred is logically deleted + goto retry; + } + + if ( pCur.ptr() == nullptr ) { + // end of the list at level nLevel - goto next level + break; + } + + // pSucc contains deletion mark for pCur + pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); + + if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr()) + goto retry; + + if ( pSucc.bits()) { + // pCur is marked, i.e. logically deleted. 
+ // try to help deleting pCur + help_remove( nLevel, pPred, pCur ); + goto retry; + } + else { + if ( !pSucc.ptr()) + break; + + pPred = pCur.ptr(); + pos.guards.copy( nLevel * 2, nLevel * 2 + 1 ); + } + } + + // Next level + pos.pPrev[nLevel] = pPred; + pos.pSucc[nLevel] = pCur.ptr(); + } + + return ( pos.pCur = pCur.ptr()) != nullptr; + } + + bool renew_insert_position( value_type& val, node_type * pNode, position& pos ) + { + node_type * pPred; + marked_node_ptr pSucc; + marked_node_ptr pCur; + key_comparator cmp; + + // Hazard pointer array: + // pPred: [nLevel * 2] + // pSucc: [nLevel * 2 + 1] + + retry: + pPred = m_Head.head(); + int nCmp = 1; + + for ( int nLevel = static_cast( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) { + pos.guards.assign( nLevel * 2, node_traits::to_value_ptr( pPred )); + while ( true ) { + pCur = pos.guards.protect( nLevel * 2 + 1, pPred->next( nLevel ), gc_protect ); + if ( pCur.bits()) { + // pCur.bits() means that pPred is logically deleted + goto retry; + } + + if ( pCur.ptr() == nullptr ) { + // end of list at level nLevel - goto next level + break; + } + + // pSucc contains deletion mark for pCur + pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); + + if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr()) + goto retry; + + if ( pSucc.bits()) { + // pCur is marked, i.e. 
logically deleted + if ( pCur.ptr() == pNode ) { + // Node is removing while we are inserting it + return false; + } + // try to help deleting pCur + help_remove( nLevel, pPred, pCur ); + goto retry; + } + else { + nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val ); + if ( nCmp < 0 ) { + pPred = pCur.ptr(); + pos.guards.copy( nLevel * 2, nLevel * 2 + 1 ); // pPrev guard := cur guard + } + else + break; + } + } + + // Next level + pos.pPrev[nLevel] = pPred; + pos.pSucc[nLevel] = pCur.ptr(); + } + + return nCmp == 0; + } + + template + bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f ) + { + unsigned int const nHeight = pNode->height(); + + for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) + pNode->next( nLevel ).store( marked_node_ptr(), memory_model::memory_order_relaxed ); + + // Insert at level 0 + { + marked_node_ptr p( pos.pSucc[0] ); + pNode->next( 0 ).store( p, memory_model::memory_order_release ); + if ( !pos.pPrev[0]->next( 0 ).compare_exchange_strong( p, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed )) + return false; + + f( val ); + } + + // Insert at level 1..max + for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) { + marked_node_ptr p; + while ( true ) { + marked_node_ptr pSucc( pos.pSucc[nLevel] ); + + // Set pNode->next + // pNode->next can have "logical deleted" flag if another thread is removing pNode right now + if ( !pNode->next( nLevel ).compare_exchange_strong( p, pSucc, + memory_model::memory_order_release, atomics::memory_order_acquire )) + { + // pNode has been marked as removed while we are inserting it + // Stop inserting + assert( p.bits() != 0 ); + + // Here pNode is linked at least level 0 so level_unlinked() cannot returns true + CDS_VERIFY_FALSE( pNode->level_unlinked( nHeight - nLevel )); + + // pNode is linked up to nLevel - 1 + // Remove it via find_position() + find_position( val, pos, key_comparator(), false ); + + 
m_Stat.onLogicDeleteWhileInsert(); + return true; + } + p = pSucc; + + // Link pNode into the list at nLevel + if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( pSucc, marked_node_ptr( pNode ), + memory_model::memory_order_release, atomics::memory_order_relaxed )) + { + // go to next level + break; + } + + // Renew insert position + m_Stat.onRenewInsertPosition(); + + if ( !renew_insert_position( val, pNode, pos )) { + // The node has been deleted while we are inserting it + // Update current height for concurent removing + CDS_VERIFY_FALSE( pNode->level_unlinked( nHeight - nLevel )); + + m_Stat.onRemoveWhileInsert(); + + // help to removing val + find_position( val, pos, key_comparator(), false ); + return true; + } + } + } + return true; + } + + template + bool try_remove_at( node_type * pDel, position& pos, Func f ) + { + assert( pDel != nullptr ); + + marked_node_ptr pSucc; + back_off bkoff; + + // logical deletion (marking) + for ( unsigned int nLevel = pDel->height() - 1; nLevel > 0; --nLevel ) { + pSucc = pDel->next( nLevel ).load( memory_model::memory_order_relaxed ); + if ( pSucc.bits() == 0 ) { + bkoff.reset(); + while ( !( pDel->next( nLevel ).compare_exchange_weak( pSucc, pSucc | 1, + memory_model::memory_order_release, atomics::memory_order_acquire ) + || pSucc.bits() != 0 )) + { + bkoff(); + m_Stat.onMarkFailed(); + } + } + } + + marked_node_ptr p( pDel->next( 0 ).load( memory_model::memory_order_relaxed ).ptr()); + while ( true ) { + if ( pDel->next( 0 ).compare_exchange_strong( p, p | 1, memory_model::memory_order_release, atomics::memory_order_acquire )) + { + f( *node_traits::to_value_ptr( pDel )); + + // Physical deletion + // try fast erase + p = pDel; + + for ( int nLevel = static_cast( pDel->height() - 1 ); nLevel >= 0; --nLevel ) { + + pSucc = pDel->next( nLevel ).load( memory_model::memory_order_acquire ); + if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()), + 
memory_model::memory_order_acq_rel, atomics::memory_order_relaxed )) + { + pDel->level_unlinked(); + } + else { + // Make slow erase +# ifdef CDS_DEBUG + if ( find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false )) + assert( pDel != pos.pCur ); +# else + find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false ); +# endif + m_Stat.onSlowErase(); + return true; + } + } + + // Fast erasing success + gc::retire( node_traits::to_value_ptr( pDel ), dispose_node ); + m_Stat.onFastErase(); + return true; + } + else if ( p.bits()) { + // Another thread is deleting pDel right now + m_Stat.onEraseContention(); + return false; + } + m_Stat.onEraseRetry(); + bkoff(); + } + } + + enum finsd_fastpath_result { + find_fastpath_found, + find_fastpath_not_found, + find_fastpath_abort + }; + template + finsd_fastpath_result find_fastpath( Q& val, Compare cmp, Func f ) + { + node_type * pPred; + marked_node_ptr pCur; + marked_node_ptr pNull; + + // guard array: + // 0 - pPred on level N + // 1 - pCur on level N + typename gc::template GuardArray<2> guards; + back_off bkoff; + unsigned attempt = 0; + + try_again: + pPred = m_Head.head(); + for ( int nLevel = static_cast( m_nHeight.load( memory_model::memory_order_relaxed ) - 1 ); nLevel >= 0; --nLevel ) { + pCur = guards.protect( 1, pPred->next( nLevel ), gc_protect ); + + while ( pCur != pNull ) { + if ( pCur.bits()) { + // pPred is being removed + if ( ++attempt < 4 ) { + bkoff(); + goto try_again; + } + + return find_fastpath_abort; + } + + if ( pCur.ptr()) { + int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val ); + if ( nCmp < 0 ) { + guards.copy( 0, 1 ); + pPred = pCur.ptr(); + pCur = guards.protect( 1, pCur->next( nLevel ), gc_protect ); + } + else if ( nCmp == 0 ) { + // found + f( *node_traits::to_value_ptr( pCur.ptr()), val ); + return find_fastpath_found; + } + else { + // pCur > val - go down + break; + } + } + } + } + + return find_fastpath_not_found; + } + + 
template + bool find_slowpath( Q& val, Compare cmp, Func f ) + { + position pos; + if ( find_position( val, pos, cmp, true )) { + assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 ); + + f( *node_traits::to_value_ptr( pos.pCur ), val ); + return true; + } + else + return false; + } + + template + bool find_with_( Q& val, Compare cmp, Func f ) + { + switch ( find_fastpath( val, cmp, f )) { + case find_fastpath_found: + m_Stat.onFindFastSuccess(); + return true; + case find_fastpath_not_found: + m_Stat.onFindFastFailed(); + return false; + default: + break; + } + + if ( find_slowpath( val, cmp, f )) { + m_Stat.onFindSlowSuccess(); + return true; + } + + m_Stat.onFindSlowFailed(); + return false; + } + + template + guarded_ptr get_with_( Q const& val, Compare cmp ) + { + guarded_ptr gp; + if ( find_with_( val, cmp, [&gp]( value_type& found, Q const& ) { gp.reset( &found ); } )) + return gp; + return guarded_ptr(); + } + + template + bool erase_( Q const& val, Compare cmp, Func f ) + { + position pos; + + if ( !find_position( val, pos, cmp, false )) { + m_Stat.onEraseFailed(); + return false; + } + + node_type * pDel = pos.pCur; + typename gc::Guard gDel; + gDel.assign( node_traits::to_value_ptr( pDel )); + assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 ); + + unsigned int nHeight = pDel->height(); + if ( try_remove_at( pDel, pos, f )) { + --m_ItemCounter; + m_Stat.onRemoveNode( nHeight ); + m_Stat.onEraseSuccess(); + return true; + } + + m_Stat.onEraseFailed(); + return false; + } + + template + guarded_ptr extract_( Q const& val, Compare cmp ) + { + position pos; + + guarded_ptr gp; + for (;;) { + if ( !find_position( val, pos, cmp, false )) { + m_Stat.onExtractFailed(); + return guarded_ptr(); + } + + node_type * pDel = pos.pCur; + gp.reset( node_traits::to_value_ptr( pDel )); + assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 ); + + unsigned int nHeight = pDel->height(); + if ( try_remove_at( pDel, pos, []( value_type const& ) 
{} )) { + --m_ItemCounter; + m_Stat.onRemoveNode( nHeight ); + m_Stat.onExtractSuccess(); + return gp; + } + m_Stat.onExtractRetry(); + } + } + + guarded_ptr extract_min_() + { + position pos; + + guarded_ptr gp; + for ( ;;) { + if ( !find_min_position( pos )) { + // The list is empty + m_Stat.onExtractMinFailed(); + return guarded_ptr(); + } + + node_type * pDel = pos.pCur; + + unsigned int nHeight = pDel->height(); + gp.reset( node_traits::to_value_ptr( pDel )); + + if ( try_remove_at( pDel, pos, []( value_type const& ) {} )) { + --m_ItemCounter; + m_Stat.onRemoveNode( nHeight ); + m_Stat.onExtractMinSuccess(); + return gp; + } + + m_Stat.onExtractMinRetry(); + } + } + + guarded_ptr extract_max_() + { + position pos; + + guarded_ptr gp; + for ( ;;) { + if ( !find_max_position( pos )) { + // The list is empty + m_Stat.onExtractMaxFailed(); + return guarded_ptr(); + } + + node_type * pDel = pos.pCur; + + unsigned int nHeight = pDel->height(); + gp.reset( node_traits::to_value_ptr( pDel )); + + if ( try_remove_at( pDel, pos, []( value_type const& ) {} )) { + --m_ItemCounter; + m_Stat.onRemoveNode( nHeight ); + m_Stat.onExtractMaxSuccess(); + return gp; + } + + m_Stat.onExtractMaxRetry(); + } + } + + void increase_height( unsigned int nHeight ) + { + unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed ); + if ( nCur < nHeight ) + m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_relaxed, atomics::memory_order_relaxed ); + } + + void destroy() + { + node_type* p = m_Head.head()->next( 0 ).load( atomics::memory_order_relaxed ).ptr(); + while ( p ) { + node_type* pNext = p->next( 0 ).load( atomics::memory_order_relaxed ).ptr(); + dispose_node( node_traits::to_value_ptr( p )); + p = pNext; + } + } + + //@endcond + + private: + //@cond + skip_list::details::head_node< node_type > m_Head; ///< head tower (max height) + + random_level_generator m_RandomLevelGen; ///< random level generator instance + atomics::atomic m_nHeight; 
///< estimated high level + item_counter m_ItemCounter; ///< item counter + mutable stat m_Stat; ///< internal statistics + //@endcond + }; + +}} // namespace cds::intrusive + + +#endif // #ifndef CDSLIB_INTRUSIVE_IMPL_SKIP_LIST_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/iterable_list_dhp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/iterable_list_dhp.h new file mode 100644 index 0000000..0cdeb22 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/iterable_list_dhp.h @@ -0,0 +1,37 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_ITERABLE_LIST_DHP_H +#define CDSLIB_INTRUSIVE_ITERABLE_LIST_DHP_H + +#include +#include + +#endif // #ifndef CDSLIB_INTRUSIVE_ITERABLE_LIST_DHP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/iterable_list_hp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/iterable_list_hp.h new file mode 100644 index 0000000..38bf221 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/iterable_list_hp.h @@ -0,0 +1,37 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_ITERABLE_LIST_HP_H +#define CDSLIB_INTRUSIVE_ITERABLE_LIST_HP_H + +#include +#include + +#endif // #ifndef CDSLIB_INTRUSIVE_ITERABLE_LIST_HP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/lazy_list_dhp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/lazy_list_dhp.h new file mode 100644 index 0000000..73376f7 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/lazy_list_dhp.h @@ -0,0 +1,37 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_LAZY_LIST_DHP_H +#define CDSLIB_INTRUSIVE_LAZY_LIST_DHP_H + +#include +#include + +#endif // #ifndef CDSLIB_INTRUSIVE_LAZY_LIST_DHP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/lazy_list_hp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/lazy_list_hp.h new file mode 100644 index 0000000..a84c644 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/lazy_list_hp.h @@ -0,0 +1,37 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions 
and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_INTRUSIVE_LAZY_LIST_HP_H +#define CDSLIB_INTRUSIVE_LAZY_LIST_HP_H + +#include +#include + +#endif // #ifndef CDSLIB_INTRUSIVE_LAZY_LIST_HP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/lazy_list_nogc.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/lazy_list_nogc.h new file mode 100644 index 0000000..1c06e35 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/lazy_list_nogc.h @@ -0,0 +1,853 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_LAZY_LIST_NOGC_H +#define CDSLIB_INTRUSIVE_LAZY_LIST_NOGC_H + +#include // unique_lock +#include +#include + +namespace cds { namespace intrusive { + namespace lazy_list { + /// Lazy list node for \p gc::nogc + /** + Template parameters: + - Lock - lock type. Default is \p cds::sync::spin + - Tag - a \ref cds_intrusive_hook_tag "tag" + */ + template < +#ifdef CDS_DOXYGEN_INVOKED + typename Lock = cds::sync::spin, + typename Tag = opt::none +#else + typename Lock, + typename Tag +#endif + > + struct node + { + typedef gc::nogc gc; ///< Garbage collector + typedef Lock lock_type; ///< Lock type + typedef Tag tag; ///< tag + + atomics::atomic m_pNext; ///< pointer to the next node in the list + mutable lock_type m_Lock; ///< Node lock + + node() + : m_pNext( nullptr ) + {} + }; + } // namespace lazy_list + + + /// Lazy single-linked list (template specialization for \p gc::nogc) + /** @ingroup cds_intrusive_list + \anchor cds_intrusive_LazyList_nogc + + This specialization is append-only list when no item + reclamation may be performed. The class does not support deleting of list item. + + The list can be ordered if \p Traits::sort is \p true that is default + or unordered otherwise. Unordered list can be maintained by \p equal_to + relationship (\p Traits::equal_to), but for the ordered list \p less + or \p compare relations should be specified in \p Traits. 
+ + See \ref cds_intrusive_LazyList_hp "LazyList" for description of template parameters. + */ + template < + typename T +#ifdef CDS_DOXYGEN_INVOKED + ,class Traits = lazy_list::traits +#else + ,class Traits +#endif + > + class LazyList + { + public: + typedef gc::nogc gc; ///< Garbage collector + typedef T value_type; ///< type of value stored in the list + typedef Traits traits; ///< Traits template parameter + + typedef typename traits::hook hook; ///< hook type + typedef typename hook::node_type node_type; ///< node type + static constexpr bool const c_bSort = traits::sort; ///< List type: ordered (\p true) or unordered (\p false) + +# ifdef CDS_DOXYGEN_INVOKED + /// Key comparing functor + /** + - for ordered list, the functor is based on \p traits::compare or \p traits::less + - for unordered list, the functor is based on \p traits::equal_to, \p traits::compare or \p traits::less + */ + typedef implementation_defined key_comparator; +# else + typedef typename std::conditional< c_bSort, + typename opt::details::make_comparator< value_type, traits >::type, + typename opt::details::make_equal_to< value_type, traits >::type + >::type key_comparator; +# endif + typedef typename traits::back_off back_off; ///< Back-off strategy + typedef typename traits::disposer disposer; ///< disposer + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits + typedef typename lazy_list::get_link_checker< node_type, traits::link_checker >::type link_checker; ///< link checker + + typedef typename traits::item_counter item_counter; ///< Item counting policy used + typedef typename traits::memory_model memory_model; ///< C++ memory ordering (see \p lazy_list::traits::memory_model) + typedef typename traits::stat stat; ///< Internal statistics + + //@cond + static_assert((std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type"); + + // Rebind traits (split-list support) + template + struct 
rebind_traits { + typedef LazyList< + gc + , value_type + , typename cds::opt::make_options< traits, Options...>::type + > type; + }; + + // Stat selector + template + using select_stat_wrapper = lazy_list::select_stat_wrapper< Stat >; + //@endcond + + protected: + typedef node_type * auxiliary_head ; ///< Auxiliary head type (for split-list support) + + protected: + node_type m_Head; ///< List head (dummy node) + node_type m_Tail; ///< List tail (dummy node) + item_counter m_ItemCounter; ///< Item counter + mutable stat m_Stat; ///< Internal statistics + + //@cond + + /// Position pointer for item search + struct position { + node_type * pPred ; ///< Previous node + node_type * pCur ; ///< Current node + + /// Locks nodes \p pPred and \p pCur + void lock() + { + pPred->m_Lock.lock(); + pCur->m_Lock.lock(); + } + + /// Unlocks nodes \p pPred and \p pCur + void unlock() + { + pCur->m_Lock.unlock(); + pPred->m_Lock.unlock(); + } + }; + + class auto_lock_position { + position& m_pos; + public: + auto_lock_position( position& pos ) + : m_pos(pos) + { + pos.lock(); + } + ~auto_lock_position() + { + m_pos.unlock(); + } + }; + //@endcond + + protected: + //@cond + void clear_links( node_type * pNode ) + { + pNode->m_pNext.store( nullptr, memory_model::memory_order_relaxed ); + } + + template + void dispose_node( node_type * pNode, Disposer disp ) + { + clear_links( pNode ); + disp( node_traits::to_value_ptr( *pNode )); + } + + template + void dispose_value( value_type& val, Disposer disp ) + { + dispose_node( node_traits::to_node_ptr( val ), disp ); + } + + void link_node( node_type * pNode, node_type * pPred, node_type * pCur ) + { + link_checker::is_empty( pNode ); + assert( pPred->m_pNext.load(memory_model::memory_order_relaxed) == pCur ); + + pNode->m_pNext.store( pCur, memory_model::memory_order_release ); + pPred->m_pNext.store( pNode, memory_model::memory_order_release ); + } + //@endcond + + protected: + //@cond + template + class iterator_type + { + friend class 
LazyList; + + protected: + value_type * m_pNode; + + void next() + { + assert( m_pNode != nullptr ); + + node_type * pNode = node_traits::to_node_ptr( m_pNode ); + node_type * pNext = pNode->m_pNext.load(memory_model::memory_order_relaxed); + if ( pNext != nullptr ) + m_pNode = node_traits::to_value_ptr( pNext ); + } + + iterator_type( node_type * pNode ) + { + m_pNode = node_traits::to_value_ptr( pNode ); + } + + public: + typedef typename cds::details::make_const_type::pointer value_ptr; + typedef typename cds::details::make_const_type::reference value_ref; + + iterator_type() + : m_pNode( nullptr ) + {} + + iterator_type( const iterator_type& src ) + : m_pNode( src.m_pNode ) + {} + + value_ptr operator ->() const + { + return m_pNode; + } + + value_ref operator *() const + { + assert( m_pNode != nullptr ); + return *m_pNode; + } + + /// Pre-increment + iterator_type& operator ++() + { + next(); + return *this; + } + + /// Post-increment + iterator_type operator ++(int) + { + iterator_type i(*this); + next(); + return i; + } + + iterator_type& operator = (const iterator_type& src) + { + m_pNode = src.m_pNode; + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return m_pNode == i.m_pNode; + } + template + bool operator !=(iterator_type const& i ) const + { + return m_pNode != i.m_pNode; + } + }; + //@endcond + + public: + /// Forward iterator + typedef iterator_type iterator; + /// Const forward iterator + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + iterator it( &m_Head ); + ++it ; // skip dummy head + return it; + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + + The returned value can be used only to control reaching the end of the list. 
+ For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator( &m_Tail ); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator begin() const + { + return cbegin(); + } + /// Returns a forward const iterator addressing the first element in a list + const_iterator cbegin() const + { + const_iterator it( const_cast(&m_Head)); + ++it; // skip dummy head + return it; + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator end() const + { + return cend(); + } + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator cend() const + { + return const_iterator( const_cast(&m_Tail)); + } + + public: + /// Default constructor initializes empty list + LazyList() + { + m_Head.m_pNext.store( &m_Tail, memory_model::memory_order_relaxed ); + } + + //@cond + template >::value >> + explicit LazyList( Stat& st ) + : m_Stat( st ) + { + m_Head.m_pNext.store( &m_Tail, memory_model::memory_order_relaxed ); + } + //@endcond + + /// Destroys the list object + ~LazyList() + { + clear(); + assert( m_Head.m_pNext.load(memory_model::memory_order_relaxed) == &m_Tail ); + m_Head.m_pNext.store( nullptr, memory_model::memory_order_relaxed ); + } + + /// Inserts new node + /** + The function inserts \p val in the list if the list does not contain + an item with key equal to \p val. + + Returns \p true if \p val is linked into the list, \p false otherwise. + */ + bool insert( value_type& val ) + { + return insert_at( &m_Head, val ); + } + + /// Updates the item + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val not found in the list, then \p val is inserted into the list + iff \p bAllowInsert is \p true. + Otherwise, the functor \p func is called with item found. 
+ The functor signature is: + \code + struct functor { + void operator()( bool bNew, value_type& item, value_type& val ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + - \p val - argument \p val passed into the \p update() function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refer to the same thing. + + The functor may change non-key fields of the \p item. + While the functor \p f is calling the item \p item is locked. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if new item has been added or \p false if the item with \p key + already is in the list. + */ + template + std::pair update( value_type& val, Func func, bool bAllowInsert = true ) + { + return update_at( &m_Head, val, func, bAllowInsert ); + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( value_type& val, Func func ) + { + return update( val, func, true ); + } + //@endcond + + /// Finds the key \p key + /** \anchor cds_intrusive_LazyList_nogc_find_func + The function searches the item with key equal to \p key + and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. + + The functor may change non-key fields of \p item. + While the functor \p f is calling the item found \p item is locked. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( Q& key, Func f ) + { + return find_at( &m_Head, key, key_comparator(), f ); + } + //@cond + template + bool find( Q const& key, Func f ) + { + return find_at( &m_Head, key, key_comparator(), f ); + } + //@endcond + + /// Finds the key \p key using \p less predicate for searching. 
Disabled for unordered lists. + /** + The function is an analog of \ref cds_intrusive_LazyList_nogc_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + typename std::enable_if::type find_with( Q& key, Less less, Func f ) + { + CDS_UNUSED( less ); + return find_at( &m_Head, key, cds::opt::details::make_comparator_from_less(), f ); + } + + /// Finds the key \p key using \p equal predicate for searching. Disabled for ordered lists. + /** + The function is an analog of \ref cds_intrusive_LazyList_nogc_find_func "find(Q&, Func)" + but \p equal is used for key comparing. + \p Equal functor has the interface like \p std::equal_to. + */ + template + typename std::enable_if::type find_with( Q& key, Equal eq, Func f ) + { + //CDS_UNUSED( eq ); + return find_at( &m_Head, key, eq, f ); + } + //@cond + template + typename std::enable_if::type find_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return find_at( &m_Head, key, cds::opt::details::make_comparator_from_less(), f ); + } + + template + typename std::enable_if::type find_with( Q const& key, Equal eq, Func f ) + { + //CDS_UNUSED( eq ); + return find_at( &m_Head, key, eq, f ); + } + //@endcond + + /// Checks whether the list contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + */ + template + value_type * contains( Q const& key ) + { + return find_at( &m_Head, key, key_comparator()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + value_type * find( Q const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the map contains \p key using \p pred predicate for searching (ordered list version) + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. 
+ \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the list. + */ + template + typename std::enable_if::type contains( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return find_at( &m_Head, key, cds::opt::details::make_comparator_from_less()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + typename std::enable_if::type find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Checks whether the map contains \p key using \p equal predicate for searching (unordered list version) + /** + The function is an analog of contains( key ) but \p equal is used for key comparing. + \p Equal functor has the interface like \p std::equal_to. + */ + template + typename std::enable_if::type contains( Q const& key, Equal eq ) + { + return find_at( &m_Head, key, eq ); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + typename std::enable_if::type find_with( Q const& key, Equal eq ) + { + return contains( key, eq ); + } + //@endcond + + /// Clears the list + /** + The function unlink all items from the list. + For each unlinked item the item disposer \p disp is called after unlinking. + + This function is not thread-safe. + */ + template + void clear( Disposer disp ) + { + node_type * pHead = m_Head.m_pNext.exchange( &m_Tail, memory_model::memory_order_release ); + + while ( pHead != &m_Tail ) { + node_type * p = pHead->m_pNext.load(memory_model::memory_order_relaxed); + dispose_node( pHead, disp ); + --m_ItemCounter; + pHead = p; + } + } + + /// Clears the list using default disposer + /** + The function clears the list using default (provided in class template) disposer functor. 
+ */ + void clear() + { + clear( disposer()); + } + + /// Checks if the list is empty + bool empty() const + { + return m_Head.m_pNext.load(memory_model::memory_order_relaxed) == &m_Tail; + } + + /// Returns list's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. + + Warning: even if you use real item counter and it returns 0, this fact is not mean that the list + is empty. To check list emptyness use \ref empty() method. + */ + size_t size() const + { + return m_ItemCounter.value(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + protected: + //@cond + // split-list support + bool insert_aux_node( node_type * pNode ) + { + return insert_aux_node( &m_Head, pNode ); + } + + // split-list support + bool insert_aux_node( node_type * pHead, node_type * pNode ) + { + assert( pHead != nullptr ); + assert( pNode != nullptr ); + + // Hack: convert node_type to value_type. + // In principle, auxiliary node can be non-reducible to value_type + // We assume that comparator can correctly distinguish aux and regular node. 
+ return insert_at( pHead, *node_traits::to_value_ptr( pNode )); + } + + bool insert_at( node_type * pHead, value_type& val ) + { + position pos; + key_comparator pred; + + while ( true ) { + search( pHead, val, pos, pred ); + { + auto_lock_position alp( pos ); + if ( validate( pos.pPred, pos.pCur )) { + if ( pos.pCur != &m_Tail && equal( *node_traits::to_value_ptr( *pos.pCur ), val, pred )) { + // failed: key already in list + m_Stat.onInsertFailed(); + return false; + } + else { + link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur ); + break; + } + } + } + + m_Stat.onInsertRetry(); + } + + ++m_ItemCounter; + m_Stat.onInsertSuccess(); + return true; + } + + iterator insert_at_( node_type * pHead, value_type& val ) + { + if ( insert_at( pHead, val )) + return iterator( node_traits::to_node_ptr( val )); + return end(); + } + + + template + std::pair update_at_( node_type * pHead, value_type& val, Func func, bool bAllowInsert ) + { + position pos; + key_comparator pred; + + while ( true ) { + search( pHead, val, pos, pred ); + { + auto_lock_position alp( pos ); + if ( validate( pos.pPred, pos.pCur )) { + if ( pos.pCur != &m_Tail && equal( *node_traits::to_value_ptr( *pos.pCur ), val, pred )) { + // key already in the list + + func( false, *node_traits::to_value_ptr( *pos.pCur ) , val ); + m_Stat.onUpdateExisting(); + return std::make_pair( iterator( pos.pCur ), false ); + } + else { + // new key + if ( !bAllowInsert ) { + m_Stat.onUpdateFailed(); + return std::make_pair( end(), false ); + } + + link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur ); + func( true, val, val ); + break; + } + } + + m_Stat.onUpdateRetry(); + } + } + + ++m_ItemCounter; + m_Stat.onUpdateNew(); + return std::make_pair( iterator( node_traits::to_node_ptr( val )), true ); + } + + template + std::pair update_at( node_type * pHead, value_type& val, Func func, bool bAllowInsert ) + { + std::pair ret = update_at_( pHead, val, func, bAllowInsert ); + return std::make_pair( 
ret.first != end(), ret.second ); + } + + template + bool find_at( node_type * pHead, Q& val, Pred pred, Func f ) + { + position pos; + + search( pHead, val, pos, pred ); + if ( pos.pCur != &m_Tail ) { + std::unique_lock< typename node_type::lock_type> al( pos.pCur->m_Lock ); + if ( equal( *node_traits::to_value_ptr( *pos.pCur ), val, pred )) + { + f( *node_traits::to_value_ptr( *pos.pCur ), val ); + m_Stat.onFindSuccess(); + return true; + } + } + + m_Stat.onFindFailed(); + return false; + } + + template + value_type * find_at( node_type * pHead, Q& val, Pred pred) + { + iterator it = find_at_( pHead, val, pred ); + if ( it != end()) + return &*it; + return nullptr; + } + + template + iterator find_at_( node_type * pHead, Q& val, Pred pred) + { + position pos; + + search( pHead, val, pos, pred ); + if ( pos.pCur != &m_Tail ) { + if ( equal( *node_traits::to_value_ptr( *pos.pCur ), val, pred )) { + m_Stat.onFindSuccess(); + return iterator( pos.pCur ); + } + } + + m_Stat.onFindFailed(); + return end(); + } + + //@endcond + + protected: + //@cond + template + typename std::enable_if::type search( node_type * pHead, const Q& key, position& pos, Equal eq ) + { + const node_type * pTail = &m_Tail; + + node_type * pCur = pHead; + node_type * pPrev = pHead; + + while ( pCur != pTail && ( pCur == pHead || !equal( *node_traits::to_value_ptr( *pCur ), key, eq ))) { + pPrev = pCur; + pCur = pCur->m_pNext.load(memory_model::memory_order_acquire); + } + + pos.pCur = pCur; + pos.pPred = pPrev; + } + + template + typename std::enable_if::type search( node_type * pHead, const Q& key, position& pos, Compare cmp ) + { + const node_type * pTail = &m_Tail; + + node_type * pCur = pHead; + node_type * pPrev = pHead; + + while ( pCur != pTail && ( pCur == pHead || cmp( *node_traits::to_value_ptr( *pCur ), key ) < 0 )) { + pPrev = pCur; + pCur = pCur->m_pNext.load(memory_model::memory_order_acquire); + } + + pos.pCur = pCur; + pos.pPred = pPrev; + } + + template + static typename 
std::enable_if::type equal( L const& l, R const& r, Equal eq ) + { + return eq(l, r); + } + + template + static typename std::enable_if::type equal( L const& l, R const& r, Compare cmp ) + { + return cmp(l, r) == 0; + } + + bool validate( node_type * pPred, node_type * pCur ) + { + if ( pPred->m_pNext.load(memory_model::memory_order_acquire) == pCur ) { + m_Stat.onValidationSuccess(); + return true; + } + + m_Stat.onValidationFailed(); + return false; + } + + // for split-list + template + void erase_for( Predicate pred ) + { + node_type * pPred = nullptr; + node_type * pHead = m_Head.m_pNext.load( memory_model::memory_order_relaxed ); + + while ( pHead != &m_Tail ) { + node_type * p = pHead->m_pNext.load( memory_model::memory_order_relaxed ); + if ( pred( *node_traits::to_value_ptr( pHead ))) { + assert( pPred != nullptr ); + pPred->m_pNext.store( p, memory_model::memory_order_relaxed ); + dispose_node( pHead, disposer()); + } + else + pPred = pHead; + pHead = p; + } + } + //@endcond + }; + +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_LAZY_LIST_NOGC_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/lazy_list_rcu.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/lazy_list_rcu.h new file mode 100644 index 0000000..590c763 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/lazy_list_rcu.h @@ -0,0 +1,1303 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_LAZY_LIST_RCU_H +#define CDSLIB_INTRUSIVE_LAZY_LIST_RCU_H + +#include // unique_lock +#include +#include +#include +#include + +namespace cds { namespace intrusive { + namespace lazy_list { + /// Lazy list node for \ref cds_urcu_desc "RCU" + /** + Template parameters: + - Tag - a tag used to distinguish between different implementation + */ + template + struct node, Lock, Tag> + { + typedef cds::urcu::gc gc ; ///< RCU schema + typedef Lock lock_type ; ///< Lock type + typedef Tag tag ; ///< tag + + typedef cds::details::marked_ptr marked_ptr ; ///< marked pointer + typedef atomics::atomic atomic_marked_ptr ; ///< atomic marked pointer specific for GC + + atomic_marked_ptr m_pNext ; ///< pointer to the next node in the list + mutable lock_type m_Lock ; ///< Node lock + + /// Checks if node is marked + bool is_marked() const + { + return m_pNext.load(atomics::memory_order_relaxed).bits() != 0; + } + + /// Default ctor + node() + : m_pNext( nullptr ) + {} + + /// 
Clears internal fields + void clear() + { + m_pNext.store( marked_ptr(), atomics::memory_order_release ); + } + }; + } // namespace lazy_list + + + /// Lazy ordered single-linked list (template specialization for \ref cds_urcu_desc "RCU") + /** @ingroup cds_intrusive_list + \anchor cds_intrusive_LazyList_rcu + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N). + + Template arguments: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p T - type to be stored in the list + - \p Traits - type traits. See \p lazy_list::traits for explanation. + It is possible to declare option-based list with \p %cds::intrusive::lazy_list::make_traits metafunction instead of \p Traits template + argument. + + \par Usage + Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. + For example, for \ref cds_urcu_general_buffered_gc "general-purpose buffered RCU" you should include: + \code + #include + #include + + // Now, you can declare lazy list for type Foo and default traits: + typedef cds::intrusive::LazyList >, Foo > rcu_lazy_list; + \endcode + + */ + template < + typename RCU + ,typename T +#ifdef CDS_DOXYGEN_INVOKED + ,class Traits = lazy_list::traits +#else + ,class Traits +#endif + > + class LazyList, T, Traits> + { + public: + typedef cds::urcu::gc gc; ///< RCU schema + typedef T value_type; ///< type of value stored in the list + typedef Traits traits; ///< Traits template parameter + + typedef typename traits::hook hook; ///< hook type + typedef typename hook::node_type node_type; ///< node type + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator ; ///< key compare functor based on opt::compare and opt::less option setter. 
+# else + typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; +# endif + + typedef typename traits::disposer disposer; ///< disposer used + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits + typedef typename lazy_list::get_link_checker< node_type, traits::link_checker >::type link_checker; ///< link checker + + typedef typename traits::back_off back_off; ///< back-off strategy (not used) + typedef typename traits::item_counter item_counter; ///< Item counting policy used + typedef typename traits::memory_model memory_model; ///< C++ memory ordering (see \p lazy_list::traits::memory_model) + typedef typename traits::stat stat; ///< Internal statistics + typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy + + typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock + static constexpr const bool c_bExtractLockExternal = true; ///< Group of \p extract_xxx functions require external locking + + static_assert((std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type"); + + //@cond + // Rebind traits (split-list support) + template + struct rebind_traits { + typedef LazyList< + gc + , value_type + , typename cds::opt::make_options< traits, Options...>::type + > type; + }; + + // Stat selector + template + using select_stat_wrapper = lazy_list::select_stat_wrapper< Stat >; + //@endcond + + protected: + node_type m_Head; ///< List head (dummy node) + node_type m_Tail; ///< List tail (dummy node) + item_counter m_ItemCounter; ///< Item counter + mutable stat m_Stat; ///< Internal statistics + + //@cond + typedef typename node_type::marked_ptr marked_node_ptr; ///< Node marked pointer + typedef node_type * auxiliary_head; ///< Auxiliary head type (for split-list support) + + /// Position pointer for item search + struct position { + node_type * pPred; ///< Previous node + node_type * pCur; ///< 
Current node + + /// Locks nodes \p pPred and \p pCur + void lock() + { + pPred->m_Lock.lock(); + pCur->m_Lock.lock(); + } + + /// Unlocks nodes \p pPred and \p pCur + void unlock() + { + pCur->m_Lock.unlock(); + pPred->m_Lock.unlock(); + } + }; + + typedef std::unique_lock< position > scoped_position_lock; + + typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> deadlock_policy; + + struct clear_and_dispose { + void operator()( value_type * p ) + { + assert( p != nullptr ); + clear_links( node_traits::to_node_ptr(p)); + disposer()( p ); + } + }; + //@endcond + + public: + /// pointer to extracted node + using exempt_ptr = cds::urcu::exempt_ptr< gc, value_type, value_type, clear_and_dispose, void >; + /// Type of \p get() member function return value + typedef value_type * raw_ptr; + + protected: + //@cond + template + class iterator_type + { + friend class LazyList; + + protected: + value_type * m_pNode; + + void next() + { + assert( m_pNode != nullptr ); + + node_type * pNode = node_traits::to_node_ptr( m_pNode ); + node_type * pNext = pNode->m_pNext.load(memory_model::memory_order_acquire).ptr(); + if ( pNext != nullptr ) + m_pNode = node_traits::to_value_ptr( pNext ); + } + + void skip_deleted() + { + if ( m_pNode != nullptr ) { + node_type * pNode = node_traits::to_node_ptr( m_pNode ); + + // Dummy tail node could not be marked + while ( pNode->is_marked()) + pNode = pNode->m_pNext.load(memory_model::memory_order_acquire).ptr(); + + if ( pNode != node_traits::to_node_ptr( m_pNode )) + m_pNode = node_traits::to_value_ptr( pNode ); + } + } + + iterator_type( node_type * pNode ) + { + m_pNode = node_traits::to_value_ptr( pNode ); + skip_deleted(); + } + + public: + typedef typename cds::details::make_const_type::pointer value_ptr; + typedef typename cds::details::make_const_type::reference value_ref; + + iterator_type() + : m_pNode( nullptr ) + {} + + iterator_type( iterator_type const& src ) + : m_pNode( src.m_pNode ) + {} + + value_ptr 
operator ->() const + { + return m_pNode; + } + + value_ref operator *() const + { + assert( m_pNode != nullptr ); + return *m_pNode; + } + + /// Pre-increment + iterator_type& operator ++() + { + next(); + skip_deleted(); + return *this; + } + + /// Post-increment + iterator_type operator ++(int) + { + iterator_type i(*this); + next(); + skip_deleted(); + return i; + } + + iterator_type& operator = (iterator_type const& src) + { + m_pNode = src.m_pNode; + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return m_pNode == i.m_pNode; + } + template + bool operator !=(iterator_type const& i ) const + { + return m_pNode != i.m_pNode; + } + }; + //@endcond + + public: + ///@name Forward iterators (thread-safe only under RCU lock) + //@{ + /// Forward iterator + /** + You may safely use iterators in multi-threaded environment only under RCU lock. + Otherwise, a crash is possible if another thread deletes the item the iterator points to. + */ + typedef iterator_type iterator; + + /// Const forward iterator + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + iterator it( &m_Head ); + ++it ; // skip dummy head + return it; + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + + The returned value can be used only to control reaching the end of the list. 
+ For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator( &m_Tail ); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator begin() const + { + return get_const_begin(); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator cbegin() const + { + return get_const_begin(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator end() const + { + return get_const_end(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator cend() const + { + return get_const_end(); + } + //@} + + public: + /// Default constructor initializes empty list + LazyList() + { + m_Head.m_pNext.store( marked_node_ptr( &m_Tail ), memory_model::memory_order_relaxed ); + } + + //@cond + template >::value >> + explicit LazyList( Stat& st ) + : m_Stat( st ) + { + m_Head.m_pNext.store( marked_node_ptr( &m_Tail ), memory_model::memory_order_relaxed ); + } + //@endcond + + /// Destroys the list object + ~LazyList() + { + clear(); + + assert( m_Head.m_pNext.load(memory_model::memory_order_relaxed).ptr() == &m_Tail ); + m_Head.m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed ); + } + + /// Inserts new node + /** + The function inserts \p val in the list if the list does not contain + an item with key equal to \p val. + + Returns \p true if \p val is linked into the list, \p false otherwise. + */ + bool insert( value_type& val ) + { + return insert_at( &m_Head, val ); + } + + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. + + The function allows to split new item creating into two part: + - create item with key only + - insert new item into the list + - if inserting is success, calls \p f functor to initialize value-field of \p val. 
+ + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. + While the functor \p f is working the item \p val is locked. + The user-defined functor is called only if the inserting is success. + */ + template + bool insert( value_type& val, Func f ) + { + return insert_at( &m_Head, val, f ); + } + + /// Updates the item + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val not found in the list, then \p val is inserted into the list + iff \p bAllowInsert is \p true. + Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + struct functor { + void operator()( bool bNew, value_type& item, value_type& val ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + - \p val - argument \p val passed into the \p update() function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refer to the same thing. + + The functor may change non-key fields of the \p item. + While the functor \p f is calling the item \p item is locked. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if new item has been added or \p false if the item with \p key + already is in the list. + + The function makes RCU lock internally. + */ + template + std::pair update( value_type& val, Func func, bool bAllowInsert = true ) + { + return update_at( &m_Head, val, func, bAllowInsert ); + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( value_type& val, Func func ) + { + return update( val, func, true ); + } + //@endcond + + /// Unlinks the item \p val from the list + /** + The function searches the item \p val in the list and unlink it from the list + if it is found and it is equal to \p val. 
+ + Difference between \p erase() and \p %unlink() functions: \p %erase() finds a key + and deletes the item found. \p %unlink() finds an item by key and deletes it + only if \p val is an item of that list, i.e. the pointer to item found + is equal to &val . + + The function returns \p true if success and \p false otherwise. + + RCU \p synchronize method can be called. The RCU should not be locked. + Note that depending on RCU type used the \ref disposer call can be deferred. + + \p disposer specified in \p Traits is called for unlinked item. + + The function can throw \p cds::urcu::rcu_deadlock exception if deadlock is encountered and + deadlock checking policy is \p opt::v::rcu_throw_deadlock. + */ + bool unlink( value_type& val ) + { + return unlink_at( &m_Head, val ); + } + + /// Deletes the item from the list + /** + The function searches an item with key equal to \p key in the list, + unlinks it from the list, and returns \p true. + If the item with the key equal to \p key is not found the function return \p false. + + RCU \p synchronize method can be called. The RCU should not be locked. + Note that depending on RCU type used the \ref disposer call can be deferred. + + \p disposer specified in \p Traits is called for deleted item. + + The function can throw \ref cds_urcu_rcu_deadlock "cds::urcu::rcu_deadlock" exception if deadlock is encountered and + deadlock checking policy is \p opt::v::rcu_throw_deadlock. + */ + template + bool erase( Q const& key ) + { + return erase_at( &m_Head, key, key_comparator()); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \p erase(Q const&) + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + + \p disposer specified in \p Traits is called for deleted item. 
+ */ + template + bool erase_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return erase_at( &m_Head, key, cds::opt::details::make_comparator_from_less()); + } + + /// Deletes the item from the list + /** + The function searches an item with key equal to \p key in the list, + call \p func functor with item found, unlinks it from the list, and returns \p true. + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + + If the item with the key equal to \p key is not found the function return \p false. + + RCU \p synchronize method can be called. The RCU should not be locked. + Note that depending on RCU type used the \ref disposer call can be deferred. + + \p disposer specified in \p Traits is called for deleted item. + + The function can throw \ref cds_urcu_rcu_deadlock "cds::urcu::rcu_deadlock" exception if deadlock is encountered and + deadlock checking policy is \p opt::v::rcu_throw_deadlock. + */ + template + bool erase( Q const& key, Func func ) + { + return erase_at( &m_Head, key, key_comparator(), func ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \p erase(Q const&, Func) + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + + \p disposer specified in \p Traits is called for deleted item. + */ + template + bool erase_with( Q const& key, Less pred, Func func ) + { + CDS_UNUSED( pred ); + return erase_at( &m_Head, key, cds::opt::details::make_comparator_from_less(), func ); + } + + /// Extracts an item from the list + /** + The function searches an item with key equal to \p key in the list, + unlinks it from the list, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to an item found. + If the item is not found the function returns empty \p exempt_ptr. 
+ + @note The function does NOT call RCU read-side lock or synchronization, + and does NOT dispose the item found. It just unlinks the item from the list + and returns a pointer to it. + You should manually lock RCU before calling this function, and you should manually release + the returned exempt pointer outside the RCU lock region before reusing returned pointer. + + \code + #include + #include + + typedef cds::urcu::gc< general_buffered<> > rcu; + typedef cds::intrusive::LazyList< rcu, Foo > rcu_lazy_list; + + rcu_lazy_list theList; + // ... + + rcu_lazy_list::exempt_ptr p1; + { + // first, we should lock RCU + rcu::scoped_lock sl; + + // Now, you can apply extract function + // Note that you must not delete the item found inside the RCU lock + p1 = theList.extract( 10 ) + if ( p1 ) { + // do something with p1 + ... + } + } + + // We may safely release p1 here + // release() passes the pointer to RCU reclamation cycle: + // it invokes RCU retire_ptr function with the disposer you provided for the list. + p1.release(); + \endcode + */ + template + exempt_ptr extract( Q const& key ) + { + return exempt_ptr( extract_at( &m_Head, key, key_comparator())); + } + + /// Extracts an item from the list using \p pred predicate for searching + /** + This function is the analog for \p extract(Q const&). + + The \p pred is a predicate used for key comparing. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as \ref key_comparator. + */ + template + exempt_ptr extract_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return exempt_ptr( extract_at( &m_Head, key, cds::opt::details::make_comparator_from_less())); + } + + /// Finds the key \p key + /** + The function searches the item with key equal to \p key + and calls the functor \p f for item found. 
+ The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. + + The functor may change non-key fields of \p item. + While the functor \p f is calling the item found \p item is locked. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( Q& key, Func f ) const + { + return find_at( const_cast( &m_Head ), key, key_comparator(), f ); + } + //@cond + template + bool find( Q const& key, Func f ) const + { + return find_at( const_cast(&m_Head), key, key_comparator(), f ); + } + //@endcond + + /// Finds the key \p key using \p pred predicate for searching + /** + The function is an analog of \p find( Q&, Func ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q& key, Less pred, Func f ) const + { + CDS_UNUSED( pred ); + return find_at( const_cast( &m_Head ), key, cds::opt::details::make_comparator_from_less(), f ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) const + { + CDS_UNUSED( pred ); + return find_at( const_cast(&m_Head), key, cds::opt::details::make_comparator_from_less(), f ); + } + //@endcond + + /// Checks whether the list contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. 
+ */ + template + bool contains( Q const& key ) const + { + return find_at( const_cast( &m_Head ), key, key_comparator()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( Q const& key ) const + { + return contains( key ); + } + //@endcond + + /// Checks whether the map contains \p key using \p pred predicate for searching + /** + The function is an analog of \p contains( Q const& ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the list. + */ + template + bool contains( Q const& key, Less pred ) const + { + CDS_UNUSED( pred ); + return find_at( const_cast( &m_Head ), key, cds::opt::details::make_comparator_from_less()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( Q const& key, Less pred ) const + { + return contains( key, pred ); + } + //@endcond + + /// Finds the key \p key and return the item found + /** \anchor cds_intrusive_LazyList_rcu_get + The function searches the item with key equal to \p key and returns the pointer to item found. + If \p key is not found it returns \p nullptr. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU should be locked before call of this function. + Returned item is valid only while RCU is locked: + \code + typedef cds::intrusive::LazyList< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > ord_list; + ord_list theList; + // ... + { + // Lock RCU + typename ord_list::rcu_lock lock; + + foo * pVal = theList.get( 5 ); + if ( pVal ) { + // Deal with pVal + //... 
+ } + // Unlock RCU by rcu_lock destructor + // pVal can be retired by disposer at any time after RCU has been unlocked + } + \endcode + */ + template + value_type * get( Q const& key ) const + { + return get_at( const_cast( &m_Head ), key, key_comparator()); + } + + /// Finds the key \p key and return the item found + /** + The function is an analog of \ref cds_intrusive_LazyList_rcu_get "get(Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + value_type * get_with( Q const& key, Less pred ) const + { + CDS_UNUSED( pred ); + return get_at( const_cast( &m_Head ), key, cds::opt::details::make_comparator_from_less()); + } + + /// Clears the list using default disposer + /** + The function clears the list using default (provided in class template) disposer functor. + + RCU \p synchronize method can be called. + Note that depending on RCU type used the \ref disposer call can be deferred. + + The function can throw \p cds::urcu::rcu_deadlock exception if deadlock is encountered and + deadlock checking policy is \p opt::v::rcu_throw_deadlock. 
+ */ + void clear() + { + if( !empty()) { + deadlock_policy::check(); + + node_type * pHead; + for (;;) { + { + rcu_lock l; + pHead = m_Head.m_pNext.load(memory_model::memory_order_acquire).ptr(); + if ( pHead == &m_Tail ) + break; + + m_Head.m_Lock.lock(); + pHead->m_Lock.lock(); + + if ( m_Head.m_pNext.load(memory_model::memory_order_relaxed).all() == pHead ) + unlink_node( &m_Head, pHead, &m_Head ); + + pHead->m_Lock.unlock(); + m_Head.m_Lock.unlock(); + } + + --m_ItemCounter; + dispose_node( pHead ); + } + } + } + + /// Checks if the list is empty + bool empty() const + { + return m_Head.m_pNext.load(memory_model::memory_order_relaxed).ptr() == &m_Tail; + } + + /// Returns list's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. + + Warning: even if you use real item counter and it returns 0, this fact is not mean that the list + is empty. To check list emptiness use \ref empty() method. + */ + size_t size() const + { + return m_ItemCounter.value(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + protected: + //@cond + static void clear_links( node_type * pNode ) + { + pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed ); + } + + static void dispose_node( node_type * pNode ) + { + assert( pNode ); + assert( !gc::is_locked()); + + gc::template retire_ptr( node_traits::to_value_ptr( *pNode )); + } + + static void link_node( node_type * pNode, node_type * pPred, node_type * pCur ) + { + assert( pPred->m_pNext.load( memory_model::memory_order_relaxed ).ptr() == pCur ); + link_checker::is_empty( pNode ); + + pNode->m_pNext.store( marked_node_ptr( pCur ), memory_model::memory_order_relaxed ); + pPred->m_pNext.store( marked_node_ptr( pNode ), memory_model::memory_order_release ); + } + + void unlink_node( node_type * pPred, node_type * pCur, node_type * pHead ) + { + assert( 
pPred->m_pNext.load( memory_model::memory_order_relaxed ).ptr() == pCur ); + assert( pCur != &m_Tail ); + + node_type * pNext = pCur->m_pNext.load( memory_model::memory_order_relaxed ).ptr(); + pCur->m_pNext.store( marked_node_ptr( pHead, 1 ), memory_model::memory_order_relaxed ); // logical deletion + back-link for search + pPred->m_pNext.store( marked_node_ptr( pNext ), memory_model::memory_order_release ); // physically deleting + } + + // split-list support + bool insert_aux_node( node_type * pNode ) + { + return insert_aux_node( &m_Head, pNode ); + } + + // split-list support + bool insert_aux_node( node_type * pHead, node_type * pNode ) + { + assert( pHead != nullptr ); + assert( pNode != nullptr ); + + // Hack: convert node_type to value_type. + // Actually, an auxiliary node should not be converted to value_type + // We assume that comparator can correctly distinguish aux and regular node. + return insert_at( pHead, *node_traits::to_value_ptr( pNode )); + } + + bool insert_at( node_type * pHead, value_type& val ) + { + rcu_lock l; + return insert_at_locked( pHead, val ); + } + + template + bool insert_at( node_type * pHead, value_type& val, Func f ) + { + position pos; + key_comparator cmp; + + rcu_lock l; + while ( true ) { + search( pHead, val, pos ); + { + scoped_position_lock sl( pos ); + if ( validate( pos.pPred, pos.pCur )) { + if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { + // failed: key already in list + m_Stat.onInsertFailed(); + return false; + } + + f( val ); + link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur ); + break; + } + } + + m_Stat.onInsertRetry(); + } + + ++m_ItemCounter; + m_Stat.onInsertSuccess(); + return true; + } + + iterator insert_at_( node_type * pHead, value_type& val ) + { + rcu_lock l; + if ( insert_at_locked( pHead, val )) + return iterator( node_traits::to_node_ptr( val )); + return end(); + } + + + template + std::pair update_at_( node_type * pHead, value_type& val, 
Func func, bool bAllowInsert ) + { + rcu_lock l; + return update_at_locked( pHead, val, func, bAllowInsert ); + } + + template + std::pair update_at( node_type * pHead, value_type& val, Func func, bool bAllowInsert ) + { + rcu_lock l; + std::pair ret = update_at_locked( pHead, val, func, bAllowInsert ); + return std::make_pair( ret.first != end(), ret.second ); + } + + bool unlink_at( node_type * pHead, value_type& val ) + { + position pos; + key_comparator cmp; + deadlock_policy::check(); + + while ( true ) { + int nResult = 0; + { + rcu_lock l; + search( pHead, val, pos ); + { + scoped_position_lock alp( pos ); + if ( validate( pos.pPred, pos.pCur )) { + if ( pos.pCur != &m_Tail + && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 + && node_traits::to_value_ptr( pos.pCur ) == &val ) + { + // item found + unlink_node( pos.pPred, pos.pCur, pHead ); + nResult = 1; + } + else + nResult = -1; + } + } + } + + if ( nResult ) { + if ( nResult > 0 ) { + --m_ItemCounter; + dispose_node( pos.pCur ); + m_Stat.onEraseSuccess(); + return true; + } + + m_Stat.onEraseFailed(); + return false; + } + + m_Stat.onEraseRetry(); + } + } + + template + bool erase_at( node_type * const pHead, Q const& val, Compare cmp, Func f, position& pos ) + { + deadlock_policy::check(); + + while ( true ) { + int nResult = 0; + { + rcu_lock l; + search( pHead, val, pos, cmp ); + { + scoped_position_lock alp( pos ); + if ( validate( pos.pPred, pos.pCur )) { + if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { + // key found + unlink_node( pos.pPred, pos.pCur, pHead ); + f( *node_traits::to_value_ptr( *pos.pCur )); + nResult = 1; + } + else + nResult = -1; + } + } + } + + if ( nResult ) { + if ( nResult > 0 ) { + --m_ItemCounter; + dispose_node( pos.pCur ); + m_Stat.onEraseSuccess(); + return true; + } + + m_Stat.onEraseFailed(); + return false; + } + + m_Stat.onEraseRetry(); + } + } + + template + bool erase_at( node_type * pHead, Q const& val, Compare 
cmp, Func f ) + { + position pos; + return erase_at( pHead, val, cmp, f, pos ); + } + + template + bool erase_at( node_type * pHead, Q const& val, Compare cmp ) + { + position pos; + return erase_at( pHead, val, cmp, [](value_type const&){}, pos ); + } + + template + value_type * extract_at( node_type * const pHead, Q const& val, Compare cmp ) + { + position pos; + assert( gc::is_locked()) ; // RCU must be locked + + while ( true ) { + search( pHead, val, pos, cmp ); + int nResult = 0; + { + scoped_position_lock alp( pos ); + if ( validate( pos.pPred, pos.pCur )) { + if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { + // key found + unlink_node( pos.pPred, pos.pCur, pHead ); + nResult = 1; + } + else { + nResult = -1; + } + } + } + + if ( nResult ) { + if ( nResult > 0 ) { + --m_ItemCounter; + m_Stat.onEraseSuccess(); + return node_traits::to_value_ptr( pos.pCur ); + } + + m_Stat.onEraseFailed(); + return nullptr; + } + + m_Stat.onEraseRetry(); + } + } + + template + bool find_at( node_type * pHead, Q& val, Compare cmp, Func f ) const + { + position pos; + + rcu_lock l; + search( pHead, val, pos, cmp ); + if ( pos.pCur != &m_Tail ) { + std::unique_lock< typename node_type::lock_type> al( pos.pCur->m_Lock ); + if ( cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { + f( *node_traits::to_value_ptr( *pos.pCur ), val ); + m_Stat.onFindSuccess(); + return true; + } + } + + m_Stat.onFindFailed(); + return false; + } + + template + bool find_at( node_type * pHead, Q& val, Compare cmp ) const + { + rcu_lock l; + return find_at_( pHead, val, cmp ) != end(); + } + + template + const_iterator find_at_( node_type * pHead, Q& val, Compare cmp ) const + { + assert( gc::is_locked()); + + position pos; + + search( pHead, val, pos, cmp ); + if ( pos.pCur != &m_Tail ) { + if ( cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { + m_Stat.onFindSuccess(); + return const_iterator( pos.pCur ); + } + } + + m_Stat.onFindFailed(); + 
return end(); + } + + template + value_type * get_at( node_type * pHead, Q const& val, Compare cmp ) const + { + value_type * pFound = nullptr; + return find_at( pHead, val, cmp, [&pFound](value_type& found, Q const& ) { pFound = &found; } ) + ? pFound : nullptr; + } + + //@endcond + + protected: + //@cond + template + void search( node_type * const pHead, Q const& key, position& pos ) const + { + search( pHead, key, pos, key_comparator()); + } + + template + void search( node_type * const pHead, Q const& key, position& pos, Compare cmp ) const + { + // RCU should be locked + assert( gc::is_locked()); + + node_type const* pTail = &m_Tail; + + marked_node_ptr pCur(pHead); + marked_node_ptr pPrev(pHead); + + while ( pCur != pTail && ( pCur == pHead || cmp( *node_traits::to_value_ptr( *pCur.ptr()), key ) < 0 )) { + pPrev = pCur; + pCur = pCur->m_pNext.load(memory_model::memory_order_acquire); + if ( pCur.bits()) + pPrev = pCur = pHead; + } + + pos.pCur = pCur.ptr(); + pos.pPred = pPrev.ptr(); + } + + bool validate( node_type * pPred, node_type * pCur ) noexcept + { + if ( validate_link( pPred, pCur )) { + m_Stat.onValidationSuccess(); + return true; + } + + m_Stat.onValidationFailed(); + return false; + } + + static bool validate_link( node_type * pPred, node_type * pCur ) noexcept + { + // RCU lock should be locked + assert( gc::is_locked()); + + return !pPred->is_marked() + && !pCur->is_marked() + && pPred->m_pNext.load(memory_model::memory_order_relaxed) == pCur; + } + + //@endcond + + private: + //@cond + bool insert_at_locked( node_type * pHead, value_type& val ) + { + // RCU lock should be locked + assert( gc::is_locked()); + + position pos; + key_comparator cmp; + + while ( true ) { + search( pHead, val, pos ); + { + scoped_position_lock alp( pos ); + if ( validate( pos.pPred, pos.pCur )) { + if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { + // failed: key already in list + m_Stat.onInsertFailed(); + return false; + } + 
+ link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur ); + break; + } + } + + m_Stat.onInsertRetry(); + } + + ++m_ItemCounter; + m_Stat.onInsertSuccess(); + return true; + + } + + template + std::pair update_at_locked( node_type * pHead, value_type& val, Func func, bool bAllowInsert ) + { + // RCU lock should be locked + assert( gc::is_locked()); + + position pos; + key_comparator cmp; + + while ( true ) { + search( pHead, val, pos ); + { + scoped_position_lock alp( pos ); + if ( validate( pos.pPred, pos.pCur )) { + if ( pos.pCur != &m_Tail && cmp( *node_traits::to_value_ptr( *pos.pCur ), val ) == 0 ) { + // key already in the list + + func( false, *node_traits::to_value_ptr( *pos.pCur ), val ); + m_Stat.onUpdateExisting(); + return std::make_pair( iterator( pos.pCur ), false ); + } + else { + // new key + if ( !bAllowInsert ) { + m_Stat.onUpdateFailed(); + return std::make_pair( end(), false ); + } + + func( true, val, val ); + link_node( node_traits::to_node_ptr( val ), pos.pPred, pos.pCur ); + break; + } + } + } + + m_Stat.onUpdateRetry(); + } + + ++m_ItemCounter; + m_Stat.onUpdateNew(); + return std::make_pair( iterator( node_traits::to_node_ptr( val )), true ); + } + //@endcond + + private: + //@cond + const_iterator get_const_begin() const + { + const_iterator it( const_cast(&m_Head)); + ++it; // skip dummy head + return it; + } + const_iterator get_const_end() const + { + return const_iterator( const_cast(&m_Tail)); + } + //@endcond + }; + +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_LAZY_LIST_RCU_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/michael_list_dhp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/michael_list_dhp.h new file mode 100644 index 0000000..4ae6e59 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/michael_list_dhp.h @@ -0,0 +1,37 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) 
Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_INTRUSIVE_MICHAEL_LIST_DHP_H +#define CDSLIB_INTRUSIVE_MICHAEL_LIST_DHP_H + +#include +#include + +#endif // #ifndef CDSLIB_INTRUSIVE_MICHAEL_LIST_DHP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/michael_list_hp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/michael_list_hp.h new file mode 100644 index 0000000..d95ab49 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/michael_list_hp.h @@ -0,0 +1,37 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_MICHAEL_LIST_HP_H +#define CDSLIB_INTRUSIVE_MICHAEL_LIST_HP_H + +#include +#include + +#endif // #ifndef CDSLIB_INTRUSIVE_MICHAEL_LIST_HP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/michael_list_nogc.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/michael_list_nogc.h new file mode 100644 index 0000000..6b4fd53 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/michael_list_nogc.h @@ -0,0 +1,737 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_MICHAEL_LIST_NOGC_H +#define CDSLIB_INTRUSIVE_MICHAEL_LIST_NOGC_H + +#include +#include +#include + +namespace cds { namespace intrusive { + + namespace michael_list { + /// Michael list node + /** + Template parameters: + - Tag - a tag used to distinguish between different implementation + */ + template + struct node + { + typedef gc::nogc gc ; ///< Garbage collector + typedef Tag tag ; ///< tag + + typedef atomics::atomic< node * > atomic_ptr ; ///< atomic marked pointer + + atomic_ptr m_pNext ; ///< pointer to the next node in the container + + node() + : m_pNext( nullptr ) + {} + }; + } // namespace michael_list + + /// Michael's lock-free ordered single-linked list (template specialization for gc::nogc) + /** @ingroup cds_intrusive_list + \anchor cds_intrusive_MichaelList_nogc + + This specialization is intended for so-called append-only usage when no item + reclamation may be performed. The class does not support item removal. + + See \ref cds_intrusive_MichaelList_hp "MichaelList" for description of template parameters. 
+ */ + template < typename T, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = michael_list::traits +#else + class Traits +#endif + > + class MichaelList + { + public: + typedef gc::nogc gc; ///< Garbage collector + typedef T value_type; ///< type of value to be stored in the queue + typedef Traits traits; ///< List traits + + typedef typename traits::hook hook; ///< hook type + typedef typename hook::node_type node_type; ///< node type + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator ; ///< key comparison functor based on opt::compare and opt::less option setter. +# else + typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; +# endif + + typedef typename traits::disposer disposer; ///< disposer used + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits + typedef typename michael_list::get_link_checker< node_type, traits::link_checker >::type link_checker; ///< link checker + + typedef typename traits::back_off back_off; ///< back-off strategy + typedef typename traits::item_counter item_counter; ///< Item counting policy used + typedef typename traits::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename traits::stat stat; ///< Internal statistics + + //@cond + static_assert((std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type"); + + // Rebind traits (split-list support) + template + struct rebind_traits { + typedef MichaelList< + gc + , value_type + , typename cds::opt::make_options< traits, Options...>::type + > type; + }; + + // Stat selector + template + using select_stat_wrapper = michael_list::select_stat_wrapper< Stat >; + //@endcond + + protected: + typedef typename node_type::atomic_ptr atomic_node_ptr ; ///< Atomic node pointer + typedef atomic_node_ptr auxiliary_head ; ///< Auxiliary head type (for split-list support) + + atomic_node_ptr m_pHead; ///< Head pointer + item_counter m_ItemCounter; ///< Item counter + stat m_Stat; ///< Internal statistics + + //@cond + /// Position pointer for item search + struct position { + atomic_node_ptr * pPrev ; ///< Previous node + node_type * pCur ; ///< Current node + node_type * pNext ; ///< Next node + }; + //@endcond + + protected: + //@cond + static void clear_links( node_type * pNode ) + { + pNode->m_pNext.store( nullptr, memory_model::memory_order_release ); + } + + template + static void dispose_node( node_type * pNode, Disposer disp ) + { + clear_links( pNode ); + disp( node_traits::to_value_ptr( *pNode )); + } + + template + static void dispose_value( value_type& val, Disposer disp ) + { + dispose_node( node_traits::to_node_ptr( val ), disp ); + } + + static bool link_node( node_type * pNode, position& pos ) + { + assert( pNode != nullptr ); + link_checker::is_empty( pNode ); + + pNode->m_pNext.store( pos.pCur, memory_model::memory_order_relaxed ); + if ( cds_likely( pos.pPrev->compare_exchange_strong( pos.pCur, pNode, memory_model::memory_order_release, atomics::memory_order_relaxed ))) + return true; + + pNode->m_pNext.store( nullptr, memory_model::memory_order_relaxed ); + return false; + } + //@endcond + + 
protected: + //@cond + template + class iterator_type + { + friend class MichaelList; + value_type * m_pNode; + + void next() + { + if ( m_pNode ) { + node_type * pNode = node_traits::to_node_ptr( *m_pNode )->m_pNext.load(memory_model::memory_order_acquire); + if ( pNode ) + m_pNode = node_traits::to_value_ptr( *pNode ); + else + m_pNode = nullptr; + } + } + + protected: + explicit iterator_type( node_type * pNode) + { + if ( pNode ) + m_pNode = node_traits::to_value_ptr( *pNode ); + else + m_pNode = nullptr; + } + explicit iterator_type( atomic_node_ptr const& refNode) + { + node_type * pNode = refNode.load(memory_model::memory_order_relaxed); + if ( pNode ) + m_pNode = node_traits::to_value_ptr( *pNode ); + else + m_pNode = nullptr; + } + + public: + typedef typename cds::details::make_const_type::pointer value_ptr; + typedef typename cds::details::make_const_type::reference value_ref; + + iterator_type() + : m_pNode( nullptr ) + {} + + iterator_type( const iterator_type& src ) + : m_pNode( src.m_pNode ) + {} + + value_ptr operator ->() const + { + return m_pNode; + } + + value_ref operator *() const + { + assert( m_pNode != nullptr ); + return *m_pNode; + } + + /// Pre-increment + iterator_type& operator ++() + { + next(); + return *this; + } + + /// Post-increment + iterator_type operator ++(int) + { + iterator_type i(*this); + next(); + return i; + } + + iterator_type& operator = (const iterator_type& src) + { + m_pNode = src.m_pNode; + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return m_pNode == i.m_pNode; + } + template + bool operator !=(iterator_type const& i ) const + { + return m_pNode != i.m_pNode; + } + }; + //@endcond + + public: + /// Forward iterator + typedef iterator_type iterator; + /// Const forward iterator + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { 
+ return iterator(m_pHead.load(memory_model::memory_order_relaxed)); + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + Internally, end returning value equals to \p nullptr. + + The returned value can be used only to control reaching the end of the list. + For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator(); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator begin() const + { + return const_iterator(m_pHead.load(memory_model::memory_order_relaxed)); + } + /// Returns a forward const iterator addressing the first element in a list + const_iterator cbegin() const + { + return const_iterator(m_pHead.load(memory_model::memory_order_relaxed)); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator end() const + { + return const_iterator(); + } + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator cend() const + { + return const_iterator(); + } + + public: + /// Default constructor initializes empty list + MichaelList() + : m_pHead( nullptr ) + {} + + //@cond + template >::value >> + explicit MichaelList( Stat& st ) + : m_pHead( nullptr ) + , m_Stat( st ) + {} + //@endcond + + /// Destroys the list objects + ~MichaelList() + { + clear(); + } + + /// Inserts new node + /** + The function inserts \p val in the list if the list does not contain + an item with key equal to \p val. + + Returns \p true if \p val is linked into the list, \p false otherwise. + */ + bool insert( value_type& val ) + { + return insert_at( m_pHead, val ); + } + + /// Updates the item + /** + The operation performs inserting or changing data with lock-free manner. 
+ + If the item \p val not found in the list, then \p val is inserted into the list + iff \p bAllowInsert is \p true. + Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + struct functor { + void operator()( bool bNew, value_type& item, value_type& val ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + - \p val - argument \p val passed into the \p update() function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refer to the same thing. + + The functor may change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if new item has been added or \p false if the item with \p key + already is in the list. + */ + template + std::pair update( value_type& val, Func func, bool bAllowInsert = true ) + { + return update_at( m_pHead, val, func, bAllowInsert ); + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( value_type& val, Func func ) + { + return update( val, func ); + } + //@endcond + + /// Finds the key \p val + /** \anchor cds_intrusive_MichaelList_nogc_find_func + The function searches the item with key equal to \p key + and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. + + The functor can change non-key fields of \p item. + The function \p find does not serialize simultaneous access to the list \p item. If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. 
+ + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& key, Func f ) + { + return find_at( m_pHead, key, key_comparator(), f ); + } + //@cond + template + bool find( Q const& key, Func f ) + { + return find_at( m_pHead, key, key_comparator(), f ); + } + //@endcond + + /// Finds the key \p key using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelList_nogc_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return find_at( m_pHead, key, cds::opt::details::make_comparator_from_less(), f ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return find_at( m_pHead, key, cds::opt::details::make_comparator_from_less(), f ); + } + //@endcond + + /// Checks whether the list contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + */ + template + value_type * contains( Q const& key ) + { + return find_at( m_pHead, key, key_comparator()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + value_type * find( Q const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the map contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the list. 
+ */ + template + value_type * contains( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return find_at( m_pHead, key, cds::opt::details::make_comparator_from_less()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + value_type * find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Clears the list + /** + The function unlink all items from the list. + + For each unlinked item the item disposer \p disp is called after unlinking. + */ + template + void clear( Disposer disp ) + { + node_type * pHead = m_pHead.load(memory_model::memory_order_relaxed); + do {} while ( cds_unlikely( !m_pHead.compare_exchange_weak( pHead, nullptr, memory_model::memory_order_relaxed ))); + + while ( pHead ) { + node_type * p = pHead->m_pNext.load(memory_model::memory_order_relaxed); + dispose_node( pHead, disp ); + pHead = p; + --m_ItemCounter; + } + } + + /// Clears the list using default disposer + /** + The function clears the list using default (provided in class template) disposer functor. + */ + void clear() + { + clear( disposer()); + } + + /// Checks if the list is empty + bool empty() const + { + return m_pHead.load( memory_model::memory_order_relaxed ) == nullptr; + } + + /// Returns list's item count + /** + The value returned depends on item counter provided by \p Traits. For \p atomicity::empty_item_counter, + this function always returns 0. + + @note Even if you use real item counter and it returns 0, this fact does not mean that the list + is empty. To check list emptyness use \p empty() method. 
+ */ + size_t size() const + { + return m_ItemCounter.value(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + protected: + //@cond + // split-list support + bool insert_aux_node( node_type * pNode ) + { + return insert_aux_node( m_pHead, pNode ); + } + + // split-list support + bool insert_aux_node( atomic_node_ptr& refHead, node_type * pNode ) + { + assert( pNode != nullptr ); + + // Hack: convert node_type to value_type. + // In principle, auxiliary node can be non-reducible to value_type + // We assume that comparator can correctly distinguish aux and regular node. + return insert_at( refHead, *node_traits::to_value_ptr( pNode )); + } + + bool insert_at( atomic_node_ptr& refHead, value_type& val ) + { + position pos; + + while ( true ) { + if ( search( refHead, val, key_comparator(), pos )) { + m_Stat.onInsertFailed(); + return false; + } + + if ( link_node( node_traits::to_node_ptr( val ), pos )) { + ++m_ItemCounter; + m_Stat.onInsertSuccess(); + return true; + } + + m_Stat.onInsertRetry(); + } + } + + iterator insert_at_( atomic_node_ptr& refHead, value_type& val ) + { + if ( insert_at( refHead, val )) + return iterator( node_traits::to_node_ptr( val )); + return end(); + } + + template + std::pair update_at_( atomic_node_ptr& refHead, value_type& val, Func func, bool bAllowInsert ) + { + position pos; + + while ( true ) { + if ( search( refHead, val, key_comparator(), pos )) { + assert( key_comparator()( val, *node_traits::to_value_ptr( *pos.pCur )) == 0 ); + + func( false, *node_traits::to_value_ptr( *pos.pCur ) , val ); + m_Stat.onUpdateExisting(); + return std::make_pair( iterator( pos.pCur ), false ); + } + else { + if ( !bAllowInsert ) { + m_Stat.onUpdateFailed(); + return std::make_pair( end(), false ); + } + + if ( link_node( node_traits::to_node_ptr( val ), pos )) { + ++m_ItemCounter; + func( true, val , val ); + m_Stat.onUpdateNew(); + return std::make_pair( iterator( 
node_traits::to_node_ptr( val )), true ); + } + } + + m_Stat.onUpdateRetry(); + } + } + + template + std::pair update_at( atomic_node_ptr& refHead, value_type& val, Func func, bool bAllowInsert ) + { + std::pair ret = update_at_( refHead, val, func, bAllowInsert ); + return std::make_pair( ret.first != end(), ret.second ); + } + + template + bool find_at( atomic_node_ptr& refHead, Q& val, Compare cmp, Func f ) + { + position pos; + + if ( search( refHead, val, cmp, pos )) { + assert( pos.pCur != nullptr ); + f( *node_traits::to_value_ptr( *pos.pCur ), val ); + m_Stat.onFindSuccess(); + return true; + } + + m_Stat.onFindFailed(); + return false; + } + + template + value_type * find_at( atomic_node_ptr& refHead, Q const& val, Compare cmp ) + { + iterator it = find_at_( refHead, val, cmp ); + if ( it != end()) { + m_Stat.onFindSuccess(); + return &*it; + } + + m_Stat.onFindFailed(); + return nullptr; + } + + template + iterator find_at_( atomic_node_ptr& refHead, Q const& val, Compare cmp ) + { + position pos; + + if ( search( refHead, val, cmp, pos )) { + assert( pos.pCur != nullptr ); + m_Stat.onFindSuccess(); + return iterator( pos.pCur ); + } + + m_Stat.onFindFailed(); + return end(); + } + + //@endcond + + protected: + + //@cond + template + bool search( atomic_node_ptr& refHead, const Q& val, Compare cmp, position& pos ) + { + atomic_node_ptr * pPrev; + node_type * pNext; + node_type * pCur; + + back_off bkoff; + + try_again: + pPrev = &refHead; + pCur = pPrev->load(memory_model::memory_order_acquire); + pNext = nullptr; + + while ( true ) { + if ( !pCur ) { + pos.pPrev = pPrev; + pos.pCur = pCur; + pos.pNext = pNext; + return false; + } + + pNext = pCur->m_pNext.load(memory_model::memory_order_relaxed); + if ( cds_unlikely( pCur->m_pNext.load(memory_model::memory_order_acquire) != pNext )) { + bkoff(); + goto try_again; + } + + if ( cds_unlikely( pPrev->load(memory_model::memory_order_acquire) != pCur )) { + bkoff(); + goto try_again; + } + + assert( pCur != 
nullptr ); + int nCmp = cmp( *node_traits::to_value_ptr( *pCur ), val ); + if ( nCmp >= 0 ) { + pos.pPrev = pPrev; + pos.pCur = pCur; + pos.pNext = pNext; + return nCmp == 0; + } + pPrev = &( pCur->m_pNext ); + pCur = pNext; + } + } + + // for split-list + template + void erase_for( Predicate pred ) + { + node_type * pPred = nullptr; + node_type * pHead = m_pHead.load( memory_model::memory_order_relaxed ); + while ( pHead ) { + node_type * p = pHead->m_pNext.load( memory_model::memory_order_relaxed ); + if ( pred( *node_traits::to_value_ptr( pHead ))) { + assert( pPred != nullptr ); + pPred->m_pNext.store( p, memory_model::memory_order_relaxed ); + dispose_node( pHead, disposer()); + } + else + pPred = pHead; + pHead = p; + } + } + //@endcond + }; + +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_MICHAEL_LIST_NOGC_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/michael_list_rcu.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/michael_list_rcu.h new file mode 100644 index 0000000..6f3f04a --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/michael_list_rcu.h @@ -0,0 +1,1292 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_MICHAEL_LIST_RCU_H +#define CDSLIB_INTRUSIVE_MICHAEL_LIST_RCU_H + +#include +#include +#include +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + //@cond + namespace michael_list { + + /// Node specialization for uRCU + template + struct node< cds::urcu::gc< RCU >, Tag > + { + typedef cds::urcu::gc< RCU > gc; ///< Garbage collector + typedef Tag tag; ///< tag + + typedef cds::details::marked_ptr marked_ptr; ///< marked pointer + typedef typename gc::template atomic_marked_ptr atomic_marked_ptr; ///< atomic marked pointer specific for GC + + atomic_marked_ptr m_pNext; ///< pointer to the next node in the container + node * m_pDelChain; ///< Deleted node chain (local for a thread) + + constexpr node() noexcept + : m_pNext( nullptr ) + , m_pDelChain( nullptr ) + {} + }; + } // namespace michael_list + //@endcond + + /// Michael's lock-free ordered single-linked list (template specialization for \ref cds_urcu_desc "RCU") + /** @ingroup cds_intrusive_list + \anchor cds_intrusive_MichaelList_rcu + + Usually, ordered single-linked list is used as a building block for the hash table implementation. + The complexity of searching is O(N). 
+ + Template arguments: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p T - type to be stored in the list; the type \p T should be based on (or has a member of type) + cds::intrusive::micheal_list::node + - \p Traits - type traits. See \p michael_list::traits for explanation. It is possible to declare option-based + list with \p cds::intrusive::michael_list::make_traits metafunction, + see \ref cds_intrusive_MichaelList_hp "here" for explanations. + + \par Usage + Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. + For example, for \ref cds_urcu_general_buffered_gc "general-purpose buffered RCU" you should include: + \code + #include + #include + + // Now, you can declare Michael's list for type Foo and default traits: + typedef cds::intrusive::MichaelList >, Foo > rcu_michael_list; + \endcode + */ + template < typename RCU, typename T, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = michael_list::traits +#else + class Traits +#endif + > + class MichaelList, T, Traits> + { + public: + typedef T value_type; ///< type of value stored in the list + typedef Traits traits; ///< Traits template parameter + + typedef typename traits::hook hook; ///< hook type + typedef typename hook::node_type node_type; ///< node type + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator ; ///< key comparison functor based on opt::compare and opt::less option setter. 
+# else + typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; +# endif + + typedef typename traits::disposer disposer; ///< disposer used + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits + typedef typename michael_list::get_link_checker< node_type, traits::link_checker >::type link_checker; ///< link checker + + typedef cds::urcu::gc gc; ///< RCU schema + typedef typename traits::back_off back_off; ///< back-off strategy + typedef typename traits::item_counter item_counter; ///< Item counting policy used + typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option + typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy + typedef typename traits::stat stat; ///< Internal statistics + + typedef typename gc::scoped_lock rcu_lock ; ///< RCU scoped lock + static constexpr const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions do not require external locking + + //@cond + // Rebind traits (split-list support) + template + struct rebind_traits { + typedef MichaelList< + gc + , value_type + , typename cds::opt::make_options< traits, Options...>::type + > type; + }; + + // Stat selector + template + using select_stat_wrapper = michael_list::select_stat_wrapper< Stat >; + //@endcond + + protected: + typedef typename node_type::marked_ptr marked_node_ptr; ///< Marked node pointer + typedef typename node_type::atomic_marked_ptr atomic_node_ptr; ///< Atomic node pointer + typedef atomic_node_ptr auxiliary_head; ///< Auxiliary head type (for split-list support) + + atomic_node_ptr m_pHead; ///< Head pointer + item_counter m_ItemCounter; ///< Item counter + stat m_Stat; ///< Internal statistics + + protected: + //@cond + enum erase_node_mask + { + erase_mask = 1, + extract_mask = 3 + }; + + typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> 
check_deadlock_policy; + + struct clear_and_dispose { + void operator()( value_type * p ) + { + assert( p != nullptr ); + clear_links( node_traits::to_node_ptr(p)); + disposer()( p ); + } + }; + + /// Position pointer for item search + struct position { + atomic_node_ptr * pPrev ; ///< Previous node + node_type * pCur ; ///< Current node + node_type * pNext ; ///< Next node + + atomic_node_ptr& refHead; + node_type * pDelChain; ///< Head of deleted node chain + + position( atomic_node_ptr& head ) + : refHead( head ) + , pDelChain( nullptr ) + {} + + ~position() + { + dispose_chain( pDelChain ); + } + }; + //@endcond + + public: + using exempt_ptr = cds::urcu::exempt_ptr< gc, value_type, value_type, clear_and_dispose, void >; ///< pointer to extracted node + + private: + //@cond + struct chain_disposer { + void operator()( node_type * pChain ) const + { + dispose_chain( pChain ); + } + }; + typedef cds::intrusive::details::raw_ptr_disposer< gc, node_type, chain_disposer> raw_ptr_disposer; + //@endcond + + public: + /// Result of \p get(), \p get_with() functions - pointer to the node found + typedef cds::urcu::raw_ptr< gc, value_type, raw_ptr_disposer > raw_ptr; + + protected: + //@cond + template + class iterator_type + { + friend class MichaelList; + value_type * m_pNode; + + void next() + { + if ( m_pNode ) { + node_type * p = node_traits::to_node_ptr( *m_pNode )->m_pNext.load(memory_model::memory_order_relaxed).ptr(); + m_pNode = p ? node_traits::to_value_ptr( p ) : nullptr; + } + } + + protected: + explicit iterator_type( node_type * pNode) + { + if ( pNode ) + m_pNode = node_traits::to_value_ptr( *pNode ); + else + m_pNode = nullptr; + } + explicit iterator_type( atomic_node_ptr const& refNode) + { + node_type * pNode = refNode.load(memory_model::memory_order_relaxed).ptr(); + m_pNode = pNode ? 
node_traits::to_value_ptr( *pNode ) : nullptr; + } + + public: + typedef typename cds::details::make_const_type::pointer value_ptr; + typedef typename cds::details::make_const_type::reference value_ref; + + iterator_type() + : m_pNode( nullptr ) + {} + + iterator_type( const iterator_type& src ) + : m_pNode( src.m_pNode ) + {} + + value_ptr operator ->() const + { + return m_pNode; + } + + value_ref operator *() const + { + assert( m_pNode != nullptr ); + return *m_pNode; + } + + /// Pre-increment + iterator_type& operator ++() + { + next(); + return *this; + } + + /// Post-increment + iterator_type operator ++(int) + { + iterator_type i(*this); + next(); + return i; + } + + iterator_type& operator = (const iterator_type& src) + { + m_pNode = src.m_pNode; + return *this; + } + + template + bool operator ==(iterator_type const& i ) const + { + return m_pNode == i.m_pNode; + } + template + bool operator !=(iterator_type const& i ) const + { + return m_pNode != i.m_pNode; + } + }; + //@endcond + + public: + ///@name Forward iterators (thread-safe only under RCU lock) + //@{ + /// Forward iterator + /** + You may safely use iterators in multi-threaded environment only under RCU lock. + Otherwise, a crash is possible if another thread deletes the item the iterator points to. + */ + typedef iterator_type iterator; + + /// Const forward iterator + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( m_pHead ); + } + + /// Returns an iterator that addresses the location succeeding the last element in a list + /** + Do not use the value returned by end function to access any item. + Internally, end returning value equals to \p nullptr. + + The returned value can be used only to control reaching the end of the list. 
+ For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator(); + } + + /// Returns a forward const iterator addressing the first element in a list + const_iterator begin() const + { + return const_iterator(m_pHead ); + } + /// Returns a forward const iterator addressing the first element in a list + const_iterator cbegin() const + { + return const_iterator(m_pHead ); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator end() const + { + return const_iterator(); + } + /// Returns an const iterator that addresses the location succeeding the last element in a list + const_iterator cend() const + { + return const_iterator(); + } + //@} + + public: + /// Default constructor initializes empty list + MichaelList() + : m_pHead( nullptr ) + { + static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" ); + } + + //@cond + template >::value >> + explicit MichaelList( Stat& st ) + : m_pHead( nullptr ) + , m_Stat( st ) + {} + //@endcond + + /// Destroy list + ~MichaelList() + { + clear(); + } + + /// Inserts new node + /** + The function inserts \p val in the list if the list does not contain + an item with key equal to \p val. + + The function makes RCU lock internally. + + Returns \p true if \p val is linked into the list, \p false otherwise. + */ + bool insert( value_type& val ) + { + return insert_at( m_pHead, val ); + } + + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. + + The function allows to split new item creating into two part: + - create item with key only + - insert new item into the list + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. 
User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this list's item by concurrent threads. + The user-defined functor is called only if the inserting is success. + + The function makes RCU lock internally. + + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + bool insert( value_type& val, Func f ) + { + return insert_at( m_pHead, val, f ); + } + + /// Updates the item + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val not found in the list, then \p val is inserted into the list + iff \p bAllowInsert is \p true. + Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + struct functor { + void operator()( bool bNew, value_type& item, value_type& val ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the list + - \p val - argument \p val passed into the \p update() function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refer to the same thing. + + The functor may change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if new item has been added or \p false if the item with \p key + already is in the list. + + The function makes RCU lock internally. 
+ + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + std::pair update( value_type& val, Func func, bool bAllowInsert = true ) + { + return update_at( m_pHead, val, func, bAllowInsert ); + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( value_type& val, Func func ) + { + return update( val, func, true ); + } + //@endcond + + /// Unlinks the item \p val from the list + /** + The function searches the item \p val in the list and unlink it from the list + if it is found and it is equal to \p val. + + Difference between \p erase() and \p %unlink() functions: \p %erase() finds a key + and deletes the item found. \p %unlink() finds an item by key and deletes it + only if \p val is an item of that list, i.e. the pointer to the item found + is equal to &val . + + The function returns \p true if success and \p false otherwise. + + RCU \p synchronize method can be called. + Note that depending on RCU type used the \ref disposer call can be deferred. + + \p disposer specified in \p Traits is called for unlinked item. + + The function can throw cds::urcu::rcu_deadlock exception if deadlock is encountered and + deadlock checking policy is opt::v::rcu_throw_deadlock. + */ + bool unlink( value_type& val ) + { + return unlink_at( m_pHead, val ); + } + + /// Deletes the item from the list + /** + The function searches an item with key equal to \p key in the list, + unlinks it from the list, and returns \p true. + If the item with the key equal to \p key is not found the function return \p false. + + RCU \p synchronize method can be called. + Note that depending on RCU type used the \ref disposer call can be deferred. + + \p disposer specified in \p Traits is called for deleted item. + + The function can throw \ref cds_urcu_rcu_deadlock "cds::urcu::rcu_deadlock" exception if a deadlock is detected and + the deadlock checking policy is \p opt::v::rcu_throw_deadlock. 
+ */ + template + bool erase( Q const& key ) + { + return erase_at( m_pHead, key, key_comparator()); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \p erase(Q const&) + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + + \p disposer specified in \p Traits is called for deleted item. + */ + template + bool erase_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return erase_at( m_pHead, key, cds::opt::details::make_comparator_from_less()); + } + + /// Deletes the item from the list + /** + The function searches an item with key equal to \p key in the list, + call \p func functor with item found, unlinks it from the list, and returns \p true. + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + + If the item with the key equal to \p key is not found the function return \p false. + + RCU \p synchronize method can be called. + Note that depending on RCU type used the \ref disposer call can be deferred. + + \p disposer specified in \p Traits is called for deleted item. + + The function can throw \ref cds_urcu_rcu_deadlock "cds::urcu::rcu_deadlock" exception if a deadlock is detected and + the deadlock checking policy is \p opt::v::rcu_throw_deadlock. + */ + template + bool erase( Q const& key, Func func ) + { + return erase_at( m_pHead, key, key_comparator(), func ); + } + + /// Deletes the item from the list using \p pred predicate for searching + /** + The function is an analog of \p erase(Q const&, Func) + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + + \p disposer specified in \p Traits is called for deleted item. 
+ */ + template + bool erase_with( Q const& key, Less pred, Func func ) + { + CDS_UNUSED( pred ); + return erase_at( m_pHead, key, cds::opt::details::make_comparator_from_less(), func ); + } + + /// Extracts an item from the list + /** + The function searches an item with key equal to \p key in the list, + unlinks it from the list, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. + If \p key is not found the function returns an empty \p exempt_ptr. + + @note The function does NOT dispose the item found. It just unlinks the item from the list + and returns a pointer to item found. + You shouldn't lock RCU for current thread before calling this function, and you should manually release + the returned exempt pointer before reusing it. + + \code + #include + #include + + typedef cds::urcu::gc< general_buffered<> > rcu; + typedef cds::intrusive::MichaelList< rcu, Foo > rcu_michael_list; + + rcu_michael_list theList; + // ... + + rcu_michael_list::exempt_ptr p1; + + // The RCU should NOT be locked when extract() is called! + assert( !rcu::is_locked()); + + // You can call extract() function + p1 = theList.extract( 10 ); + if ( p1 ) { + // do something with p1 + ... + } + + // We may safely release p1 here + // release() passes the pointer to RCU reclamation cycle: + // it invokes RCU retire_ptr function with the disposer you provided for the list. + p1.release(); + \endcode + */ + template + exempt_ptr extract( Q const& key ) + { + return exempt_ptr( extract_at( m_pHead, key, key_comparator())); + } + + /// Extracts an item from the list using \p pred predicate for searching + /** + This function is the analog for \p extract(Q const&) + + The \p pred is a predicate used for key comparing. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as \ref key_comparator. 
+ */ + template + exempt_ptr extract_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return exempt_ptr( extract_at( m_pHead, key, cds::opt::details::make_comparator_from_less())); + } + + /// Find the key \p val + /** + The function searches the item with key equal to \p key + and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. + + The functor can change non-key fields of \p item. + The function \p find does not serialize simultaneous access to the list \p item. If such access is + possible you must provide your own synchronization schema to exclude unsafe item modifications. + + The function makes RCU lock internally. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& key, Func f ) + { + return find_at( m_pHead, key, key_comparator(), f ); + } + //@cond + template + bool find( Q const& key, Func f ) + { + return find_at( m_pHead, key, key_comparator(), f ); + } + //@endcond + + /// Finds \p key using \p pred predicate for searching + /** + The function is an analog of \p find(Q&, Func) + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + bool find_with( Q& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return find_at( m_pHead, key, cds::opt::details::make_comparator_from_less(), f ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return find_at( m_pHead, key, cds::opt::details::make_comparator_from_less(), f ); + } + //@endcond + + /// Checks whether the list contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. 
+ */ + template + bool contains( Q const& key ) + { + return find_at( m_pHead, key, key_comparator()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( Q const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the map contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the list. + */ + template + bool contains( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return find_at( m_pHead, key, cds::opt::details::make_comparator_from_less()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Finds \p key and return the item found + /** \anchor cds_intrusive_MichaelList_rcu_get + The function searches the item with key equal to \p key and returns the pointer to item found. + If \p key is not found it returns empty \p raw_ptr object. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU should be locked before call of this function. + Returned item is valid only while RCU is locked: + \code + typedef cds::intrusive::MichaelList< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > ord_list; + ord_list theList; + // ... + typename ord_list::raw_ptr rp; + { + // Lock RCU + ord_list::rcu_lock lock; + + rp = theList.get( 5 ); + if ( rp ) { + // Deal with rp + //... 
+ } + // Unlock RCU by rcu_lock destructor + // Node owned by rp can be retired by disposer at any time after RCU has been unlocked + } + // You can manually release rp after RCU-locked section + rp.release(); + \endcode + */ + template + raw_ptr get( Q const& key ) + { + return get_at( m_pHead, key, key_comparator()); + } + + /// Finds \p key and return the item found + /** + The function is an analog of \ref cds_intrusive_MichaelList_rcu_get "get(Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \p value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + raw_ptr get_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return get_at( m_pHead, key, cds::opt::details::make_comparator_from_less()); + } + + /// Clears the list using default disposer + /** + The function clears the list using default (provided by \p Traits class template argument) disposer functor. + + RCU \p synchronize method can be called. + Note that depending on RCU type used the \ref disposer invocation can be deferred. + + The function can throw \p cds::urcu::rcu_deadlock exception if a deadlock is encountered and + deadlock checking policy is \p opt::v::rcu_throw_deadlock. 
+ */ + void clear() + { + if( !empty()) { + check_deadlock_policy::check(); + + marked_node_ptr pHead; + for (;;) { + { + rcu_lock l; + pHead = m_pHead.load(memory_model::memory_order_acquire); + if ( !pHead.ptr()) + break; + marked_node_ptr pNext( pHead->m_pNext.load(memory_model::memory_order_relaxed)); + if ( cds_unlikely( !pHead->m_pNext.compare_exchange_weak( pNext, pNext | 1, memory_model::memory_order_acquire, memory_model::memory_order_relaxed ))) + continue; + if ( cds_unlikely( !m_pHead.compare_exchange_weak( pHead, marked_node_ptr(pNext.ptr()), memory_model::memory_order_release, memory_model::memory_order_relaxed ))) + continue; + } + + --m_ItemCounter; + dispose_node( pHead.ptr()); + } + } + } + + /// Check if the list is empty + bool empty() const + { + return m_pHead.load( memory_model::memory_order_relaxed ).all() == nullptr; + } + + /// Returns list's item count + /** + The value returned depends on item counter provided by \p Traits. For \p atomicity::empty_item_counter, + this function always returns 0. + + @note Even if you use real item counter and it returns 0, this fact does not mean that the list + is empty. To check list emptyness use \p empty() method. 
+ */ + size_t size() const + { + return m_ItemCounter.value(); + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + protected: + //@cond + static void clear_links( node_type * pNode ) + { + pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_release ); + pNode->m_pDelChain = nullptr; + } + + static void dispose_node( node_type * pNode ) + { + assert( pNode ); + assert( !gc::is_locked()); + + gc::template retire_ptr( node_traits::to_value_ptr( *pNode )); + } + + static void dispose_chain( node_type * pChain ) + { + if ( pChain ) { + assert( !gc::is_locked()); + + auto f = [&pChain]() -> cds::urcu::retired_ptr { + node_type * p = pChain; + if ( p ) { + pChain = p->m_pDelChain; + return cds::urcu::make_retired_ptr( node_traits::to_value_ptr( p )); + } + return cds::urcu::make_retired_ptr( static_cast(nullptr)); + }; + gc::batch_retire( std::ref( f )); + } + } + + bool link_node( node_type * pNode, position& pos ) + { + assert( pNode != nullptr ); + link_checker::is_empty( pNode ); + + marked_node_ptr p( pos.pCur ); + pNode->m_pNext.store( p, memory_model::memory_order_release ); + if ( cds_likely( pos.pPrev->compare_exchange_strong( p, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed ))) + return true; + + pNode->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed ); + return false; + } + + static void link_to_remove_chain( position& pos, node_type * pDel ) + { + assert( pDel->m_pDelChain == nullptr ); + + pDel->m_pDelChain = pos.pDelChain; + pos.pDelChain = pDel; + } + + bool unlink_node( position& pos, erase_node_mask nMask ) + { + assert( gc::is_locked()); + + // Mark the node (logical deletion) + marked_node_ptr next( pos.pNext, 0 ); + + if ( cds_likely( pos.pCur->m_pNext.compare_exchange_strong( next, next | nMask, memory_model::memory_order_release, atomics::memory_order_relaxed ))) { + + // Try physical removal - fast 
path + marked_node_ptr cur( pos.pCur ); + if ( cds_likely( pos.pPrev->compare_exchange_strong( cur, marked_node_ptr( pos.pNext ), memory_model::memory_order_acquire, atomics::memory_order_relaxed ))) { + if ( nMask == erase_mask ) + link_to_remove_chain( pos, pos.pCur ); + } + else { + // Slow path + search( pos.refHead, *node_traits::to_value_ptr( pos.pCur ), pos, key_comparator()); + } + return true; + } + return false; + } + + // split-list support + bool insert_aux_node( node_type * pNode ) + { + return insert_aux_node( m_pHead, pNode ); + } + + // split-list support + bool insert_aux_node( atomic_node_ptr& refHead, node_type * pNode ) + { + assert( pNode != nullptr ); + + // Hack: convert node_type to value_type. + // In principle, auxiliary node can be non-reducible to value_type + // We assume that comparator can correctly distinguish between aux and regular node. + return insert_at( refHead, *node_traits::to_value_ptr( pNode )); + } + + bool insert_at( atomic_node_ptr& refHead, value_type& val ) + { + position pos( refHead ); + { + rcu_lock l; + return insert_at_locked( pos, val ); + } + } + + template + bool insert_at( atomic_node_ptr& refHead, value_type& val, Func f ) + { + position pos( refHead ); + + { + rcu_lock l; + while ( true ) { + if ( search( refHead, val, pos, key_comparator())) { + m_Stat.onInsertFailed(); + return false; + } + + if ( link_node( node_traits::to_node_ptr( val ), pos )) { + f( val ); + ++m_ItemCounter; + m_Stat.onInsertSuccess(); + return true; + } + + // clear next field + node_traits::to_node_ptr( val )->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed ); + m_Stat.onInsertRetry(); + } + } + + } + + iterator insert_at_( atomic_node_ptr& refHead, value_type& val ) + { + rcu_lock l; + if ( insert_at_locked( refHead, val )) + return iterator( node_traits::to_node_ptr( val )); + return end(); + } + + template + std::pair update_at_( atomic_node_ptr& refHead, value_type& val, Func func, bool bInsert ) + { + 
position pos( refHead ); + { + rcu_lock l; + return update_at_locked( pos, val, func, bInsert ); + } + } + + template + std::pair update_at( atomic_node_ptr& refHead, value_type& val, Func func, bool bInsert ) + { + position pos( refHead ); + { + rcu_lock l; + std::pair ret = update_at_locked( pos, val, func, bInsert ); + return std::make_pair( ret.first != end(), ret.second ); + } + } + + bool unlink_at( atomic_node_ptr& refHead, value_type& val ) + { + position pos( refHead ); + back_off bkoff; + check_deadlock_policy::check(); + + for (;;) { + { + rcu_lock l; + if ( !search( refHead, val, pos, key_comparator()) || node_traits::to_value_ptr( *pos.pCur ) != &val ) { + m_Stat.onEraseFailed(); + return false; + } + if ( !unlink_node( pos, erase_mask )) { + bkoff(); + m_Stat.onEraseRetry(); + continue; + } + } + + --m_ItemCounter; + m_Stat.onEraseSuccess(); + return true; + } + } + + template + bool erase_at( position& pos, Q const& val, Compare cmp, Func f ) + { + back_off bkoff; + check_deadlock_policy::check(); + + node_type * pDel; + for (;;) { + { + rcu_lock l; + if ( !search( pos.refHead, val, pos, cmp )) { + m_Stat.onEraseFailed(); + return false; + } + + // store pCur since it may be changed by unlink_node() slow path + pDel = pos.pCur; + if ( !unlink_node( pos, erase_mask )) { + bkoff(); + m_Stat.onEraseRetry(); + continue; + } + } + assert( pDel ); + f( *node_traits::to_value_ptr( pDel )); + --m_ItemCounter; + m_Stat.onEraseSuccess(); + return true; + } + } + + template + bool erase_at( atomic_node_ptr& refHead, Q const& val, Compare cmp, Func f ) + { + position pos( refHead ); + return erase_at( pos, val, cmp, f ); + } + + template + bool erase_at( atomic_node_ptr& refHead, const Q& val, Compare cmp ) + { + position pos( refHead ); + return erase_at( pos, val, cmp, [](value_type const&){} ); + } + + template + value_type * extract_at( atomic_node_ptr& refHead, Q const& val, Compare cmp ) + { + position pos( refHead ); + back_off bkoff; + assert( 
!gc::is_locked()) ; // RCU must not be locked!!! + + node_type * pExtracted; + { + rcu_lock l; + for (;;) { + if ( !search( refHead, val, pos, cmp )) { + m_Stat.onEraseFailed(); + return nullptr; + } + + // store pCur since it may be changed by unlink_node() slow path + pExtracted = pos.pCur; + if ( !unlink_node( pos, extract_mask )) { + bkoff(); + m_Stat.onEraseRetry(); + continue; + } + + --m_ItemCounter; + value_type * pRet = node_traits::to_value_ptr( pExtracted ); + assert( pExtracted->m_pDelChain == nullptr ); + m_Stat.onEraseSuccess(); + return pRet; + } + } + } + + template + bool find_at( atomic_node_ptr& refHead, Q& val, Compare cmp, Func f ) + { + position pos( refHead ); + + { + rcu_lock l; + if ( search( refHead, val, pos, cmp )) { + assert( pos.pCur != nullptr ); + f( *node_traits::to_value_ptr( *pos.pCur ), val ); + m_Stat.onFindSuccess(); + return true; + } + } + + m_Stat.onFindFailed(); + return false; + } + + template + bool find_at( atomic_node_ptr& refHead, Q const& val, Compare cmp ) + { + position pos( refHead ); + { + rcu_lock l; + return find_at_locked( pos, val, cmp ) != cend(); + } + } + + template + raw_ptr get_at( atomic_node_ptr& refHead, Q const& val, Compare cmp ) + { + // RCU should be locked! + assert(gc::is_locked()); + + position pos( refHead ); + + if ( search( refHead, val, pos, cmp )) { + m_Stat.onFindSuccess(); + return raw_ptr( node_traits::to_value_ptr( pos.pCur ), raw_ptr_disposer( pos )); + } + + m_Stat.onFindFailed(); + return raw_ptr( raw_ptr_disposer( pos )); + } + //@endcond + + protected: + + //@cond + template + bool search( atomic_node_ptr& refHead, const Q& val, position& pos, Compare cmp ) + { + // RCU lock should be locked!!! 
+ assert( gc::is_locked()); + + atomic_node_ptr * pPrev; + marked_node_ptr pNext; + marked_node_ptr pCur; + + back_off bkoff; + + try_again: + pPrev = &refHead; + pCur = pPrev->load(memory_model::memory_order_acquire); + pNext = nullptr; + + while ( true ) { + if ( !pCur.ptr()) { + pos.pPrev = pPrev; + pos.pCur = nullptr; + pos.pNext = nullptr; + return false; + } + + pNext = pCur->m_pNext.load(memory_model::memory_order_acquire); + + if ( cds_unlikely( pPrev->load(memory_model::memory_order_acquire) != pCur + || pNext != pCur->m_pNext.load(memory_model::memory_order_acquire ))) + { + bkoff(); + goto try_again; + } + + if ( pNext.bits()) { + // pCur is marked as deleted. Try to unlink it from the list + if ( cds_likely( pPrev->compare_exchange_weak( pCur, marked_node_ptr( pNext.ptr()), memory_model::memory_order_acquire, atomics::memory_order_relaxed ))) { + if ( pNext.bits() == erase_mask ) + link_to_remove_chain( pos, pCur.ptr()); + m_Stat.onHelpingSuccess(); + } + + m_Stat.onHelpingFailed(); + goto try_again; + } + + assert( pCur.ptr() != nullptr ); + int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val ); + if ( nCmp >= 0 ) { + pos.pPrev = pPrev; + pos.pCur = pCur.ptr(); + pos.pNext = pNext.ptr(); + return nCmp == 0; + } + pPrev = &( pCur->m_pNext ); + pCur = pNext; + } + } + //@endcond + + private: + //@cond + bool insert_at_locked( position& pos, value_type& val ) + { + // RCU lock should be locked!!! 
+ assert( gc::is_locked()); + + while ( true ) { + if ( search( pos.refHead, val, pos, key_comparator())) { + m_Stat.onInsertFailed(); + return false; + } + + if ( link_node( node_traits::to_node_ptr( val ), pos )) { + ++m_ItemCounter; + m_Stat.onInsertSuccess(); + return true; + } + + // clear next field + node_traits::to_node_ptr( val )->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed ); + m_Stat.onInsertRetry(); + } + } + + template + std::pair update_at_locked( position& pos, value_type& val, Func func, bool bInsert ) + { + // RCU should be locked!!! + assert( gc::is_locked()); + + while ( true ) { + if ( search( pos.refHead, val, pos, key_comparator())) { + assert( key_comparator()( val, *node_traits::to_value_ptr( *pos.pCur )) == 0 ); + + func( false, *node_traits::to_value_ptr( *pos.pCur ), val ); + m_Stat.onUpdateExisting(); + return std::make_pair( iterator( pos.pCur ), false ); + } + else { + if ( !bInsert ) { + m_Stat.onUpdateFailed(); + return std::make_pair( end(), false ); + } + + if ( link_node( node_traits::to_node_ptr( val ), pos )) { + ++m_ItemCounter; + func( true, val , val ); + m_Stat.onUpdateNew(); + return std::make_pair( iterator( node_traits::to_node_ptr( val )), true ); + } + + // clear the next field + node_traits::to_node_ptr( val )->m_pNext.store( marked_node_ptr(), memory_model::memory_order_relaxed ); + m_Stat.onUpdateRetry(); + } + } + } + + template + const_iterator find_at_locked( position& pos, Q const& val, Compare cmp ) + { + assert( gc::is_locked()); + + if ( search( pos.refHead, val, pos, cmp )) { + assert( pos.pCur != nullptr ); + m_Stat.onFindSuccess(); + return const_iterator( pos.pCur ); + } + + m_Stat.onFindFailed(); + return cend(); + } + //@endcond + }; + +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_MICHAEL_LIST_NOGC_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/michael_set.h 
b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/michael_set.h new file mode 100644 index 0000000..9852e5a --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/michael_set.h @@ -0,0 +1,1014 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_INTRUSIVE_MICHAEL_SET_H +#define CDSLIB_INTRUSIVE_MICHAEL_SET_H + +#include +#include + +namespace cds { namespace intrusive { + + /// Michael's hash set + /** @ingroup cds_intrusive_map + \anchor cds_intrusive_MichaelHashSet_hp + + Source: + - [2002] Maged Michael "High performance dynamic lock-free hash tables and list-based sets" + + Michael's hash table algorithm is based on lock-free ordered list and it is very simple. + The main structure is an array \p T of size \p M. Each element in \p T is basically a pointer + to a hash bucket, implemented as a singly linked list. The array of buckets cannot be dynamically expanded. + However, each bucket may contain unbounded number of items. + + Template parameters are: + - \p GC - Garbage collector used. Note the \p GC must be the same as the GC used for \p OrderedList + - \p OrderedList - ordered list implementation used as bucket for hash set, possible implementations: + \p MichaelList, \p LazyList, \p IterableList. + The intrusive ordered list implementation specifies the type \p T stored in the hash-set, the reclamation + schema \p GC used by hash-set, the comparison functor for the type \p T and other features specific for + the ordered list. + - \p Traits - type traits. See \p michael_set::traits for explanation. + Instead of defining \p Traits struct you can use option-based syntax with \p michael_set::make_traits metafunction. + + There are several specializations of \p %MichaelHashSet for each GC. You should include: + - for \ref cds_intrusive_MichaelHashSet_rcu "RCU type" + - for \ref cds_intrusive_MichaelHashSet_nogc for append-only set + - for \p gc::HP, \p gc::DHP + + Hash functor + + Some member functions of Michael's hash set accept the key parameter of type \p Q which differs from \p value_type. + It is expected that type \p Q contains full key of \p value_type, and for equal keys of type \p Q and \p value_type + the hash values of these keys must be equal. 
+ The hash functor \p Traits::hash should accept parameters of both type: + \code + // Our node type + struct Foo { + std::string key_; // key field + // ... other fields + }; + + // Hash functor + struct fooHash { + size_t operator()( const std::string& s ) const + { + return std::hash( s ); + } + + size_t operator()( const Foo& f ) const + { + return (*this)( f.key_ ); + } + }; + \endcode + + How to use + + First, you should define ordered list type to use in your hash set: + \code + // For gc::HP-based MichaelList implementation + #include + + // cds::intrusive::MichaelHashSet declaration + #include + + // Type of hash-set items + struct Foo: public cds::intrusive::michael_list::node< cds::gc::HP > + { + std::string key_ ; // key field + unsigned val_ ; // value field + // ... other value fields + }; + + // Declare comparator for the item + struct FooCmp + { + int operator()( const Foo& f1, const Foo& f2 ) const + { + return f1.key_.compare( f2.key_ ); + } + }; + + // Declare bucket type for Michael's hash set + // The bucket type is any ordered list type like MichaelList, LazyList + typedef cds::intrusive::MichaelList< cds::gc::HP, Foo, + typename cds::intrusive::michael_list::make_traits< + // hook option + cds::intrusive::opt::hook< cds::intrusive::michael_list::base_hook< cds::opt::gc< cds::gc::HP > > > + // item comparator option + ,cds::opt::compare< FooCmp > + >::type + > Foo_bucket; + \endcode + + Second, you should declare Michael's hash set container: + \code + + // Declare hash functor + // Note, the hash functor accepts parameter type Foo and std::string + struct FooHash { + size_t operator()( const Foo& f ) const + { + return cds::opt::v::hash()( f.key_ ); + } + size_t operator()( const std::string& f ) const + { + return cds::opt::v::hash()( f ); + } + }; + + // Michael's set typedef + typedef cds::intrusive::MichaelHashSet< + cds::gc::HP + ,Foo_bucket + ,typename cds::intrusive::michael_set::make_traits< + cds::opt::hash< FooHash > + >::type + > 
Foo_set; + \endcode + + Now, you can use \p Foo_set in your application. + + Like other intrusive containers, you may build several containers on single item structure: + \code + #include + #include + #include + + struct tag_key1_idx; + struct tag_key2_idx; + + // Your two-key data + // The first key is maintained by gc::HP, second key is maintained by gc::DHP garbage collectors + // (I don't know what is needed for, but it is correct) + struct Foo + : public cds::intrusive::michael_list::node< cds::gc::HP, tag_key1_idx > + , public cds::intrusive::michael_list::node< cds::gc::DHP, tag_key2_idx > + { + std::string key1_ ; // first key field + unsigned int key2_ ; // second key field + + // ... value fields and fields for controlling item's lifetime + }; + + // Declare comparators for the item + struct Key1Cmp + { + int operator()( const Foo& f1, const Foo& f2 ) const { return f1.key1_.compare( f2.key1_ ) ; } + }; + struct Key2Less + { + bool operator()( const Foo& f1, const Foo& f2 ) const { return f1.key2_ < f2.key1_ ; } + }; + + // Declare bucket type for Michael's hash set indexed by key1_ field and maintained by gc::HP + typedef cds::intrusive::MichaelList< cds::gc::HP, Foo, + typename cds::intrusive::michael_list::make_traits< + // hook option + cds::intrusive::opt::hook< cds::intrusive::michael_list::base_hook< cds::opt::gc< cds::gc::HP >, tag_key1_idx > > + // item comparator option + ,cds::opt::compare< Key1Cmp > + >::type + > Key1_bucket; + + // Declare bucket type for Michael's hash set indexed by key2_ field and maintained by gc::DHP + typedef cds::intrusive::MichaelList< cds::gc::DHP, Foo, + typename cds::intrusive::michael_list::make_traits< + // hook option + cds::intrusive::opt::hook< cds::intrusive::michael_list::base_hook< cds::opt::gc< cds::gc::DHP >, tag_key2_idx > > + // item comparator option + ,cds::opt::less< Key2Less > + >::type + > Key2_bucket; + + // Declare hash functor + struct Key1Hash { + size_t operator()( const Foo& f ) const { 
return cds::opt::v::hash()( f.key1_ ) ; } + size_t operator()( const std::string& s ) const { return cds::opt::v::hash()( s ) ; } + }; + inline size_t Key2Hash( const Foo& f ) { return (size_t) f.key2_ ; } + + // Michael's set indexed by key1_ field + typedef cds::intrusive::MichaelHashSet< + cds::gc::HP + ,Key1_bucket + ,typename cds::intrusive::michael_set::make_traits< + cds::opt::hash< Key1Hash > + >::type + > key1_set; + + // Michael's set indexed by key2_ field + typedef cds::intrusive::MichaelHashSet< + cds::gc::DHP + ,Key2_bucket + ,typename cds::intrusive::michael_set::make_traits< + cds::opt::hash< Key2Hash > + >::type + > key2_set; + \endcode + */ + template < + class GC, + class OrderedList, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = michael_set::traits +#else + class Traits +#endif + > + class MichaelHashSet + { + public: + typedef GC gc; ///< Garbage collector + typedef OrderedList ordered_list; ///< type of ordered list used as a bucket implementation + typedef Traits traits; ///< Set traits + + typedef typename ordered_list::value_type value_type ; ///< type of value to be stored in the set + typedef typename ordered_list::key_comparator key_comparator ; ///< key comparing functor + typedef typename ordered_list::disposer disposer ; ///< Node disposer functor +#ifdef CDS_DOXYGEN_INVOKED + typedef typename ordered_list::stat stat ; ///< Internal statistics +#endif + + /// Hash functor for \p value_type and all its derivatives that you use + typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash; + typedef typename traits::item_counter item_counter; ///< Item counter type + typedef typename traits::allocator allocator; ///< Bucket table allocator + + typedef typename ordered_list::guarded_ptr guarded_ptr; ///< Guarded pointer + + /// Count of hazard pointer required for the algorithm + static constexpr const size_t c_nHazardPtrCount = ordered_list::c_nHazardPtrCount; + + // GC and OrderedList::gc must be the same + 
static_assert(std::is_same::value, "GC and OrderedList::gc must be the same"); + + protected: + //@cond + typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat; + + typedef typename ordered_list::template rebind_traits< + cds::opt::item_counter< cds::atomicity::empty_item_counter > + , cds::opt::stat< typename bucket_stat::wrapped_stat > + >::type internal_bucket_type; + + typedef typename allocator::template rebind< internal_bucket_type >::other bucket_table_allocator; + //@endcond + + public: + //@cond + typedef typename bucket_stat::stat stat; + //@endcond + + protected: + //@cond + hash m_HashFunctor; ///< Hash functor + size_t const m_nHashBitmask; + internal_bucket_type* m_Buckets; ///< bucket table + item_counter m_ItemCounter; ///< Item counter + stat m_Stat; ///< Internal statistics + //@endcond + + public: + ///@name Forward iterators + //@{ + /// Forward iterator + /** + The forward iterator for Michael's set is based on \p OrderedList forward iterator and has some features: + - it has no post-increment operator + - it iterates items in unordered fashion + - The iterator cannot be moved across thread boundary because it may contain GC's guard that is thread-private GC data. + + Iterator thread safety depends on type of \p OrderedList: + - for \p MichaelList and \p LazyList: iterator guarantees safety even if you delete the item that iterator points to + because that item is guarded by hazard pointer. + However, in case of concurrent deleting operations it is no guarantee that you iterate all item in the set. + Moreover, a crash is possible when you try to iterate the next element that has been deleted by concurrent thread. + Use this iterator on the concurrent container for debugging purpose only. + - for \p IterableList: iterator is thread-safe. You may use it freely in concurrent environment. 
+ */ + typedef michael_set::details::iterator< internal_bucket_type, false > iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef michael_set::details::iterator< internal_bucket_type, true > const_iterator; + + /// Returns a forward iterator addressing the first element in a set + /** + For empty set \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( m_Buckets[0].begin(), bucket_begin(), bucket_end()); + } + + /// Returns an iterator that addresses the location succeeding the last element in a set + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the set. + For empty set \code begin() == end() \endcode + */ + iterator end() + { + return iterator( bucket_end()[-1].end(), bucket_end() - 1, bucket_end()); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator begin() const + { + return get_const_begin(); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator cbegin() const + { + return get_const_begin(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a set + const_iterator end() const + { + return get_const_end(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a set + const_iterator cend() const + { + return get_const_end(); + } + //@} + + public: + /// Initializes hash set + /** + The Michael's hash set is an unbounded container, but its hash table is non-expandable. + At construction time you should pass estimated maximum item count and a load factor. + The load factor is average size of one bucket - a small number between 1 and 10. + The bucket is an ordered single-linked list, searching in the bucket has linear complexity O(nLoadFactor). 
+ The constructor defines hash table size as rounding nMaxItemCount / nLoadFactor up to nearest power of two. + */ + MichaelHashSet( + size_t nMaxItemCount, ///< estimation of max item count in the hash set + size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket. Small integer up to 10. + ) : m_nHashBitmask( michael_set::details::init_hash_bitmask( nMaxItemCount, nLoadFactor )) + , m_Buckets( bucket_table_allocator().allocate( bucket_count())) + { + for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) + construct_bucket( it ); + } + + /// Clears hash set object and destroys it + ~MichaelHashSet() + { + clear(); + + for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) + it->~internal_bucket_type(); + bucket_table_allocator().deallocate( m_Buckets, bucket_count()); + } + + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. + + Returns \p true if \p val is placed into the set, \p false otherwise. + */ + bool insert( value_type& val ) + { + bool bRet = bucket( val ).insert( val ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. + + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. + + The user-defined functor is called only if the inserting is success. + + @warning For \ref cds_intrusive_MichaelList_hp "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". 
+ \ref cds_intrusive_LazyList_hp "LazyList" provides exclusive access to inserted item and does not require any node-level + synchronization. + */ + template + bool insert( value_type& val, Func f ) + { + bool bRet = bucket( val ).insert( val, f ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Updates the element + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val not found in the set, then \p val is inserted iff \p bAllowInsert is \p true. + Otherwise, the functor \p func is called with item found. + + The functor signature depends of the type of \p OrderedList: + + for \p MichaelList, \p LazyList + \code + struct functor { + void operator()( bool bNew, value_type& item, value_type& val ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p %update() function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refers to the same thing. + + The functor may change non-key fields of the \p item. + @warning For \ref cds_intrusive_MichaelList_hp "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". + \ref cds_intrusive_LazyList_hp "LazyList" provides exclusive access to inserted item and does not require any node-level + synchronization. + + for \p IterableList + \code + void func( value_type& val, value_type * old ); + \endcode + where + - \p val - argument \p val passed into the \p %update() function + - \p old - old value that will be retired. If new item has been inserted then \p old is \p nullptr. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if new item has been added or \p false if the item with \p key + already is in the set. 
+ */ + template + std::pair update( value_type& val, Func func, bool bAllowInsert = true ) + { + std::pair bRet = bucket( val ).update( val, func, bAllowInsert ); + if ( bRet.second ) + ++m_ItemCounter; + return bRet; + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( value_type& val, Func func ) + { + return update( val, func, true ); + } + //@endcond + + /// Inserts or updates the node (only for \p IterableList) + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val is not found in the set, then \p val is inserted iff \p bAllowInsert is \p true. + Otherwise, the current element is changed to \p val, the old element will be retired later + by call \p Traits::disposer. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if \p val has been added or \p false if the item with that key + already in the set. + */ +#ifdef CDS_DOXYGEN_INVOKED + std::pair upsert( value_type& val, bool bAllowInsert = true ) +#else + template + typename std::enable_if< + std::is_same< Q, value_type>::value && is_iterable_list< ordered_list >::value, + std::pair + >::type + upsert( Q& val, bool bAllowInsert = true ) +#endif + { + std::pair bRet = bucket( val ).upsert( val, bAllowInsert ); + if ( bRet.second ) + ++m_ItemCounter; + return bRet; + } + + /// Unlinks the item \p val from the set + /** + The function searches the item \p val in the set and unlink it + if it is found and is equal to \p val. + + The function returns \p true if success and \p false otherwise. + */ + bool unlink( value_type& val ) + { + bool bRet = bucket( val ).unlink( val ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_MichaelHashSet_hp_erase + The function searches an item with key equal to \p key in the set, + unlinks it, and returns \p true. 
+ If the item with key equal to \p key is not found the function return \p false. + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool erase( Q const& key ) + { + if ( bucket( key ).erase( key )) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelHashSet_hp_erase "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred ) + { + if ( bucket( key ).erase_with( key, pred )) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_MichaelHashSet_hp_erase_func + The function searches an item with key equal to \p key in the set, + call \p f functor with item found, and unlinks it from the set. + The \ref disposer specified in \p OrderedList class template parameter is called + by garbage collector \p GC asynchronously. + + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + + If the item with key equal to \p key is not found the function return \p false. + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool erase( Q const& key, Func f ) + { + if ( bucket( key ).erase( key, f )) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelHashSet_hp_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. 
+ \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + if ( bucket( key ).erase_with( key, pred, f )) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Deletes the item pointed by iterator \p iter (only for \p IterableList based set) + /** + Returns \p true if the operation is successful, \p false otherwise. + The function can return \p false if the node the iterator points to has already been deleted + by other thread. + + The function does not invalidate the iterator, it remains valid and can be used for further traversing. + + @note \p %erase_at() is supported only for \p %MichaelHashSet based on \p IterableList. + */ +#ifdef CDS_DOXYGEN_INVOKED + bool erase_at( iterator const& iter ) +#else + template + typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, bool >::type + erase_at( Iterator const& iter ) +#endif + { + assert( iter != end()); + assert( iter.bucket() != nullptr ); + + if ( iter.bucket()->erase_at( iter.underlying_iterator())) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Extracts the item with specified \p key + /** \anchor cds_intrusive_MichaelHashSet_hp_extract + The function searches an item with key equal to \p key, + unlinks it from the set, and returns an guarded pointer to the item extracted. + If \p key is not found the function returns an empty guarded pointer. + + Note the compare functor should accept a parameter of type \p Q that may be not the same as \p value_type. + + The \p disposer specified in \p OrderedList class' template parameter is called automatically + by garbage collector \p GC when returned \ref guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. 
+ + Usage: + \code + typedef cds::intrusive::MichaelHashSet< your_template_args > michael_set; + michael_set theSet; + // ... + { + michael_set::guarded_ptr gp( theSet.extract( 5 )); + if ( gp ) { + // Deal with gp + // ... + } + // Destructor of gp releases internal HP guard + } + \endcode + */ + template + guarded_ptr extract( Q const& key ) + { + guarded_ptr gp = bucket( key ).extract( key ); + if ( gp ) + --m_ItemCounter; + return gp; + } + + /// Extracts the item using compare functor \p pred + /** + The function is an analog of \ref cds_intrusive_MichaelHashSet_hp_extract "extract(Q const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the list. + */ + template + guarded_ptr extract_with( Q const& key, Less pred ) + { + guarded_ptr gp = bucket( key ).extract_with( key, pred ); + if ( gp ) + --m_ItemCounter; + return gp; + } + + /// Finds the key \p key + /** \anchor cds_intrusive_MichaelHashSet_hp_find_func + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. + + The functor may change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor + may modify both arguments. 
+ + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( Q& key, Func f ) + { + return bucket( key ).find( key, f ); + } + //@cond + template + bool find( Q const& key, Func f ) + { + return bucket( key ).find( key, f ); + } + //@endcond + + /// Finds \p key and returns iterator pointed to the item found (only for \p IterableList) + /** + If \p key is not found the function returns \p end(). + + @note This function is supported only for the set based on \p IterableList + */ + template +#ifdef CDS_DOXYGEN_INVOKED + iterator +#else + typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type +#endif + find( Q& key ) + { + internal_bucket_type& b = bucket( key ); + typename internal_bucket_type::iterator it = b.find( key ); + if ( it == b.end()) + return end(); + return iterator( it, &b, bucket_end()); + } + //@cond + template + typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type + find( Q const& key ) + { + internal_bucket_type& b = bucket( key ); + typename internal_bucket_type::iterator it = b.find( key ); + if ( it == b.end()) + return end(); + return iterator( it, &b, bucket_end()); + } + //@endcond + + + /// Finds the key \p key using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelHashSet_hp_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. 
+ */ + template + bool find_with( Q& key, Less pred, Func f ) + { + return bucket( key ).find_with( key, pred, f ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) + { + return bucket( key ).find_with( key, pred, f ); + } + //@endcond + + /// Finds \p key using \p pred predicate and returns iterator pointed to the item found (only for \p IterableList) + /** + The function is an analog of \p find(Q&) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + + If \p key is not found the function returns \p end(). + + @note This function is supported only for the set based on \p IterableList + */ + template +#ifdef CDS_DOXYGEN_INVOKED + iterator +#else + typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type +#endif + find_with( Q& key, Less pred ) + { + internal_bucket_type& b = bucket( key ); + typename internal_bucket_type::iterator it = b.find_with( key, pred ); + if ( it == b.end()) + return end(); + return iterator( it, &b, bucket_end()); + } + //@cond + template + typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type + find_with( Q const& key, Less pred ) + { + internal_bucket_type& b = bucket( key ); + typename internal_bucket_type::iterator it = b.find_with( key, pred ); + if ( it == b.end()) + return end(); + return iterator( it, &b, bucket_end()); + } + //@endcond + + /// Checks whether the set contains \p key + /** + + The function searches the item with key equal to \p key + and returns \p true if the key is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. 
+ */ + template + bool contains( Q const& key ) + { + return bucket( key ).contains( key ); + } + + /// Checks whether the set contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool contains( Q const& key, Less pred ) + { + return bucket( key ).contains( key, pred ); + } + + /// Finds the key \p key and return the item found + /** \anchor cds_intrusive_MichaelHashSet_hp_get + The function searches the item with key equal to \p key + and returns the guarded pointer to the item found. + If \p key is not found the function returns an empty \p guarded_ptr. + + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::intrusive::MichaelHashSet< your_template_params > michael_set; + michael_set theSet; + // ... + { + michael_set::guarded_ptr gp( theSet.get( 5 )); + if ( theSet.get( 5 )) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + + Note the compare functor specified for \p OrderedList template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + guarded_ptr get( Q const& key ) + { + return bucket( key ).get( key ); + } + + /// Finds the key \p key and return the item found + /** + The function is an analog of \ref cds_intrusive_MichaelHashSet_hp_get "get( Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. 
+ */ + template + guarded_ptr get_with( Q const& key, Less pred ) + { + return bucket( key ).get_with( key, pred ); + } + + /// Clears the set (non-atomic) + /** + The function unlink all items from the set. + The function is not atomic. It cleans up each bucket and then resets the item counter to zero. + If there are a thread that performs insertion while \p %clear() is working the result is undefined in general case: + \p empty() may return \p true but the set may contain item(s). + Therefore, \p %clear() may be used only for debugging purposes. + + For each item the \p disposer is called after unlinking. + */ + void clear() + { + for ( size_t i = 0; i < bucket_count(); ++i ) + m_Buckets[i].clear(); + m_ItemCounter.reset(); + } + + /// Checks if the set is empty + /** + @warning If you use \p atomicity::empty_item_counter in \p traits::item_counter, + the function always returns \p true. + */ + bool empty() const + { + return size() == 0; + } + + /// Returns item count in the set + /** + If you use \p atomicity::empty_item_counter in \p traits::item_counter, + the function always returns 0. + */ + size_t size() const + { + return m_ItemCounter; + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + /// Returns the size of hash table + /** + Since \p %MichaelHashSet cannot dynamically extend the hash table size, + the value returned is an constant depending on object initialization parameters, + see \p MichaelHashSet::MichaelHashSet. 
+ */ + size_t bucket_count() const + { + return m_nHashBitmask + 1; + } + + private: + //@cond + internal_bucket_type * bucket_begin() const + { + return m_Buckets; + } + + internal_bucket_type * bucket_end() const + { + return m_Buckets + bucket_count(); + } + + const_iterator get_const_begin() const + { + return const_iterator( m_Buckets[0].cbegin(), bucket_begin(), bucket_end()); + } + const_iterator get_const_end() const + { + return const_iterator( bucket_end()[-1].cend(), bucket_end() - 1, bucket_end()); + } + + template + typename std::enable_if< Stat::empty >::type construct_bucket( internal_bucket_type * b ) + { + new (b) internal_bucket_type; + } + + template + typename std::enable_if< !Stat::empty >::type construct_bucket( internal_bucket_type * b ) + { + new (b) internal_bucket_type( m_Stat ); + } + + /// Calculates hash value of \p key + template + size_t hash_value( const Q& key ) const + { + return m_HashFunctor( key ) & m_nHashBitmask; + } + + /// Returns the bucket (ordered list) for \p key + template + internal_bucket_type& bucket( const Q& key ) + { + return m_Buckets[hash_value( key )]; + } + //@endcond + }; + +}} // namespace cds::intrusive + +#endif // ifndef CDSLIB_INTRUSIVE_MICHAEL_SET_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/michael_set_nogc.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/michael_set_nogc.h new file mode 100644 index 0000000..dfe01d1 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/michael_set_nogc.h @@ -0,0 +1,474 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * 
Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_MICHAEL_SET_NOGC_H +#define CDSLIB_INTRUSIVE_MICHAEL_SET_NOGC_H + +#include +#include + +namespace cds { namespace intrusive { + + /// Michael's hash set (template specialization for gc::nogc) + /** @ingroup cds_intrusive_map + \anchor cds_intrusive_MichaelHashSet_nogc + + This specialization is so-called append-only when no item + reclamation may be performed. The set does not support deleting of list item. + + See \ref cds_intrusive_MichaelHashSet_hp "MichaelHashSet" for description of template parameters. + The template parameter \p OrderedList should be any \p cds::gc::nogc -derived ordered list, for example, + \ref cds_intrusive_MichaelList_nogc "append-only MichaelList". 
+ */ + template < + class OrderedList, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = michael_set::traits +#else + class Traits +#endif + > + class MichaelHashSet< cds::gc::nogc, OrderedList, Traits > + { + public: + typedef cds::gc::nogc gc; ///< Garbage collector + typedef OrderedList ordered_list; ///< type of ordered list used as a bucket implementation + typedef Traits traits; ///< Set traits + + typedef typename ordered_list::value_type value_type; ///< type of value to be stored in the set + typedef typename ordered_list::key_comparator key_comparator; ///< key comparing functor + typedef typename ordered_list::disposer disposer; ///< Node disposer functor +#ifdef CDS_DOXYGEN_INVOKED + typedef typename ordered_list::stat stat; ///< Internal statistics +#endif + + /// Hash functor for \p value_type and all its derivatives that you use + typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash; + typedef typename traits::item_counter item_counter; ///< Item counter type + typedef typename traits::allocator allocator; ///< Bucket table allocator + + // GC and OrderedList::gc must be the same + static_assert(std::is_same::value, "GC and OrderedList::gc must be the same"); + + protected: + //@cond + typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat; + + typedef typename ordered_list::template rebind_traits< + cds::opt::item_counter< cds::atomicity::empty_item_counter > + , cds::opt::stat< typename bucket_stat::wrapped_stat > + >::type internal_bucket_type; + + typedef typename allocator::template rebind< internal_bucket_type >::other bucket_table_allocator; + //@endcond + + public: + //@cond + typedef typename bucket_stat::stat stat; + //@endcond + + protected: + //@cond + hash m_HashFunctor; ///< Hash functor + const size_t m_nHashBitmask; + internal_bucket_type * m_Buckets; ///< bucket table + item_counter m_ItemCounter; ///< Item counter + stat m_Stat; ///< Internal statistics + 
//@endcond + + protected: + //@cond + /// Calculates hash value of \p key + template + size_t hash_value( Q const & key ) const + { + return m_HashFunctor( key ) & m_nHashBitmask; + } + + /// Returns the bucket (ordered list) for \p key + template + internal_bucket_type& bucket( Q const & key ) + { + return m_Buckets[ hash_value( key ) ]; + } + //@endcond + + public: + ///@name Forward iterators + //@{ + /// Forward iterator + /** + The forward iterator for Michael's set is based on \p OrderedList forward iterator and has some features: + - it has no post-increment operator + - it iterates items in unordered fashion + + The iterator interface: + \code + class iterator { + public: + // Default constructor + iterator(); + + // Copy construtor + iterator( iterator const& src ); + + // Dereference operator + value_type * operator ->() const; + + // Dereference operator + value_type& operator *() const; + + // Preincrement operator + iterator& operator ++(); + + // Assignment operator + iterator& operator = (iterator const& src); + + // Equality operators + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + */ + typedef michael_set::details::iterator< internal_bucket_type, false > iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef michael_set::details::iterator< internal_bucket_type, true > const_iterator; + + /// Returns a forward iterator addressing the first element in a set + /** + For empty set \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( m_Buckets[0].begin(), m_Buckets, m_Buckets + bucket_count()); + } + + /// Returns an iterator that addresses the location succeeding the last element in a set + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the set. 
+ For empty set \code begin() == end() \endcode + */ + iterator end() + { + return iterator( m_Buckets[bucket_count() - 1].end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count()); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator begin() const + { + return cbegin(); + } + /// Returns a forward const iterator addressing the first element in a set + const_iterator cbegin() const + { + return const_iterator( m_Buckets[0].cbegin(), m_Buckets, m_Buckets + bucket_count()); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a set + const_iterator end() const + { + return cend(); + } + /// Returns an const iterator that addresses the location succeeding the last element in a set + const_iterator cend() const + { + return const_iterator( m_Buckets[bucket_count() - 1].cend(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count()); + } + //@} + + public: + /// Initializes hash set + /** + The Michael's hash set is an unbounded container, but its hash table is non-expandable. + At construction time you should pass estimated maximum item count and a load factor. + The load factor is average size of one bucket - a small number between 1 and 10. + The bucket is an ordered single-linked list, searching in the bucket has linear complexity O(nLoadFactor). + The constructor defines hash table size as rounding nMaxItemCount / nLoadFactor up to nearest power of two. 
+ */ + MichaelHashSet( + size_t nMaxItemCount, ///< estimation of max item count in the hash set + size_t nLoadFactor ///< load factor: estimation of max number of items in the bucket + ) : m_nHashBitmask( michael_set::details::init_hash_bitmask( nMaxItemCount, nLoadFactor )) + , m_Buckets( bucket_table_allocator().allocate( bucket_count())) + { + for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) + construct_bucket( it ); + } + + /// Clears hash set object and destroys it + ~MichaelHashSet() + { + clear(); + + for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) + it->~internal_bucket_type(); + bucket_table_allocator().deallocate( m_Buckets, bucket_count()); + } + + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. + + Returns \p true if \p val is placed into the set, \p false otherwise. + */ + bool insert( value_type& val ) + { + bool bRet = bucket( val ).insert( val ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Updates the element + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val not found in the set, then \p val is inserted iff \p bAllowInsert is \p true. + Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + struct functor { + void operator()( bool bNew, value_type& item, value_type& val ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p %update() function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refers to the same thing. + + The functor may change non-key fields of the \p item. 
+ + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if new item has been added or \p false if the item with \p key + already is in the set. + + @warning For \ref cds_intrusive_MichaelList_hp "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". + \ref cds_intrusive_LazyList_hp "LazyList" provides exclusive access to inserted item and does not require any node-level + synchronization. + */ + template + std::pair update( value_type& val, Func func, bool bAllowInsert = true ) + { + std::pair bRet = bucket( val ).update( val, func, bAllowInsert ); + if ( bRet.second ) + ++m_ItemCounter; + return bRet; + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( value_type& val, Func func ) + { + return update( val, func, true ); + } + //@endcond + + /// Checks whether the set contains \p key + /** + + The function searches the item with key equal to \p key + and returns the pointer to an element found or \p nullptr. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + value_type * contains( Q const& key ) + { + return bucket( key ).contains( key ); + } + //@cond + template + CDS_DEPRECATED("use contains()") + value_type * find( Q const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the set contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. 
+ */ + template + value_type * contains( Q const& key, Less pred ) + { + return bucket( key ).contains( key, pred ); + } + //@cond + template + CDS_DEPRECATED("use contains()") + value_type * find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Finds the key \p key + /** \anchor cds_intrusive_MichaelHashSet_nogc_find_func + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. + + The functor can change non-key fields of \p item. + The functor does not serialize simultaneous access to the set \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( Q& key, Func f ) + { + return bucket( key ).find( key, f ); + } + //@cond + template + bool find( Q const& key, Func f ) + { + return bucket( key ).find( key, f ); + } + //@endcond + + /// Finds the key \p key using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelHashSet_nogc_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. 
+ */ + template + bool find_with( Q& key, Less pred, Func f ) + { + return bucket( key ).find_with( key, pred, f ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) + { + return bucket( key ).find_with( key, pred, f ); + } + //@endcond + + /// Clears the set (non-atomic) + /** + The function unlink all items from the set. + The function is not atomic. It cleans up each bucket and then resets the item counter to zero. + If there are a thread that performs insertion while \p %clear() is working the result is undefined in general case: + empty() may return \p true but the set may contain item(s). + Therefore, \p %clear() may be used only for debugging purposes. + + For each item the \p disposer is called after unlinking. + */ + void clear() + { + for ( size_t i = 0; i < bucket_count(); ++i ) + m_Buckets[i].clear(); + m_ItemCounter.reset(); + } + + + /// Checks if the set is empty + /** + @warning If you use \p atomicity::empty_item_counter in \p traits::item_counter, + the function always returns \p true. + */ + bool empty() const + { + return size() == 0; + } + + /// Returns item count in the set + /** + If you use \p atomicity::empty_item_counter in \p traits::item_counter, + the function always returns 0. + */ + size_t size() const + { + return m_ItemCounter; + } + + /// Returns the size of hash table + /** + Since \p %MichaelHashSet cannot dynamically extend the hash table size, + the value returned is an constant depending on object initialization parameters; + see MichaelHashSet::MichaelHashSet for explanation. 
+ */ + size_t bucket_count() const + { + return m_nHashBitmask + 1; + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + private: + //@cond + template + typename std::enable_if< Stat::empty >::type construct_bucket( internal_bucket_type * b ) + { + new (b) internal_bucket_type; + } + + template + typename std::enable_if< !Stat::empty >::type construct_bucket( internal_bucket_type * b ) + { + new (b) internal_bucket_type( m_Stat ); + } + //@endcond + }; + +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_MICHAEL_SET_NOGC_H + diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/michael_set_rcu.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/michael_set_rcu.h new file mode 100644 index 0000000..d5efd0c --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/michael_set_rcu.h @@ -0,0 +1,772 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_MICHAEL_SET_RCU_H +#define CDSLIB_INTRUSIVE_MICHAEL_SET_RCU_H + +#include + +namespace cds { namespace intrusive { + + /// Michael's hash set, \ref cds_urcu_desc "RCU" specialization + /** @ingroup cds_intrusive_map + \anchor cds_intrusive_MichaelHashSet_rcu + + Source: + - [2002] Maged Michael "High performance dynamic lock-free hash tables and list-based sets" + + Michael's hash table algorithm is based on lock-free ordered list and it is very simple. + The main structure is an array \p T of size \p M. Each element in \p T is basically a pointer + to a hash bucket, implemented as a singly linked list. The array of buckets cannot be dynamically expanded. + However, each bucket may contain unbounded number of items. + + Template parameters are: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p OrderedList - ordered list implementation used as bucket for hash set, for example, MichaelList, LazyList. + The intrusive ordered list implementation specifies the type \p T stored in the hash-set, the reclamation + schema \p GC used by hash-set, the comparison functor for the type \p T and other features specific for + the ordered list. + - \p Traits - type traits, default is \p michael_set::traits. + Instead of defining \p Traits struct you can use option-based syntax with \p michael_set::make_traits metafunction. 
+ + \par Usage + Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. + For example, for \ref cds_urcu_general_buffered_gc "general-purpose buffered RCU" you should include: + \code + #include + #include + #include + + struct Foo { ... }; + // Hash functor for struct Foo + struct foo_hash { + size_t operator()( Foo const& foo ) const { return ... } + }; + + // Now, you can declare Michael's list for type Foo and default traits: + typedef cds::intrusive::MichaelList >, Foo > rcu_michael_list; + + // Declare Michael's set with MichaelList as bucket type + typedef cds::intrusive::MichaelSet< + cds::urcu::gc< general_buffered<> >, + rcu_michael_list, + cds::intrusive::michael_set::make_traits< + cds::opt::::hash< foo_hash > + >::type + > rcu_michael_set; + + // Declares hash set for 1000000 items with load factor 2 + rcu_michael_set theSet( 1000000, 2 ); + + // Now you can use theSet object in many threads without any synchronization. 
+ \endcode + */ + template < + class RCU, + class OrderedList, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = michael_set::traits +#else + class Traits +#endif + > + class MichaelHashSet< cds::urcu::gc< RCU >, OrderedList, Traits > + { + public: + typedef cds::urcu::gc< RCU > gc; ///< RCU schema + typedef OrderedList ordered_list; ///< type of ordered list used as a bucket implementation + typedef Traits traits; ///< Set traits + + typedef typename ordered_list::value_type value_type; ///< type of value stored in the list + typedef typename ordered_list::key_comparator key_comparator; ///< key comparing functor + typedef typename ordered_list::disposer disposer; ///< Node disposer functor +#ifdef CDS_DOXYGEN_INVOKED + typedef typename ordered_list::stat stat; ///< Internal statistics +#endif + + /// Hash functor for \ref value_type and all its derivatives that you use + typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash; + typedef typename traits::item_counter item_counter; ///< Item counter type + typedef typename traits::allocator allocator; ///< Bucket table allocator + + typedef typename ordered_list::rcu_lock rcu_lock; ///< RCU scoped lock + /// Group of \p extract_xxx functions require external locking if underlying ordered list requires that + static constexpr const bool c_bExtractLockExternal = ordered_list::c_bExtractLockExternal; + + // GC and OrderedList::gc must be the same + static_assert(std::is_same::value, "GC and OrderedList::gc must be the same"); + + protected: + //@cond + typedef typename ordered_list::template select_stat_wrapper< typename ordered_list::stat > bucket_stat; + + typedef typename ordered_list::template rebind_traits< + cds::opt::item_counter< cds::atomicity::empty_item_counter > + , cds::opt::stat< typename bucket_stat::wrapped_stat > + >::type internal_bucket_type; + + typedef typename allocator::template rebind< internal_bucket_type >::other bucket_table_allocator; + //@endcond + + public: + typedef 
typename internal_bucket_type::exempt_ptr exempt_ptr; ///< pointer to extracted node + typedef typename internal_bucket_type::raw_ptr raw_ptr; ///< Return type of \p get() member function and its derivatives + + //@cond + typedef typename bucket_stat::stat stat; + //@endcond + + private: + //@cond + hash m_HashFunctor; ///< Hash functor + size_t const m_nHashBitmask; + internal_bucket_type* m_Buckets; ///< bucket table + item_counter m_ItemCounter; ///< Item counter + stat m_Stat; ///< Internal statistics + //@endcond + + public: + ///@name Forward iterators (thread-safe under RCU lock) + //@{ + /// Forward iterator + /** + The forward iterator for Michael's set is based on \p OrderedList forward iterator and has some features: + - it has no post-increment operator + - it iterates items in unordered fashion + + You may safely use iterators in multi-threaded environment only under RCU lock. + Otherwise, a crash is possible if another thread deletes the element the iterator points to. + + The iterator interface: + \code + class iterator { + public: + // Default constructor + iterator(); + + // Copy construtor + iterator( iterator const& src ); + + // Dereference operator + value_type * operator ->() const; + + // Dereference operator + value_type& operator *() const; + + // Preincrement operator + iterator& operator ++(); + + // Assignment operator + iterator& operator = (iterator const& src); + + // Equality operators + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + */ + typedef michael_set::details::iterator< internal_bucket_type, false > iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef michael_set::details::iterator< internal_bucket_type, true > const_iterator; + + /// Returns a forward iterator addressing the first element in a set + /** + For empty set \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( 
m_Buckets[0].begin(), m_Buckets, m_Buckets + bucket_count()); + } + + /// Returns an iterator that addresses the location succeeding the last element in a set + /** + Do not use the value returned by end function to access any item. + The returned value can be used only to control reaching the end of the set. + For empty set \code begin() == end() \endcode + */ + iterator end() + { + return iterator( m_Buckets[bucket_count() - 1].end(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count()); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator begin() const + { + return cbegin(); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator cbegin() const + { + return const_iterator( m_Buckets[0].cbegin(), m_Buckets, m_Buckets + bucket_count()); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a set + const_iterator end() const + { + return cend(); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a set + const_iterator cend() const + { + return const_iterator( m_Buckets[bucket_count() - 1].cend(), m_Buckets + bucket_count() - 1, m_Buckets + bucket_count()); + } + //@} + + public: + /// Initialize hash set + /** + The Michael's hash set is an unbounded container, but its hash table is non-expandable. + At construction time you should pass estimated maximum item count and a load factor. + The load factor is average size of one bucket - a small number between 1 and 10. + The bucket is an ordered single-linked list, the complexity of searching in the bucket is linear O(nLoadFactor). + The constructor defines hash table size as rounding nMaxItemCount / nLoadFactor up to nearest power of two. 
+ */ + MichaelHashSet( + size_t nMaxItemCount, ///< estimation of max item count in the hash set + size_t nLoadFactor ///< load factor: average size of the bucket + ) : m_nHashBitmask( michael_set::details::init_hash_bitmask( nMaxItemCount, nLoadFactor )) + , m_Buckets( bucket_table_allocator().allocate( bucket_count())) + { + for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) + construct_bucket( it ); + } + + /// Clear hash set and destroy it + ~MichaelHashSet() + { + clear(); + + for ( auto it = m_Buckets, itEnd = m_Buckets + bucket_count(); it != itEnd; ++it ) + it->~internal_bucket_type(); + bucket_table_allocator().deallocate( m_Buckets, bucket_count()); + } + + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. + + Returns \p true if \p val is placed into the set, \p false otherwise. + */ + bool insert( value_type& val ) + { + bool bRet = bucket( val ).insert( val ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. + + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. + The user-defined functor is called only if the inserting is success. + + @warning For \ref cds_intrusive_MichaelList_rcu "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". + \ref cds_intrusive_LazyList_rcu "LazyList" provides exclusive access to inserted item and does not require any node-level + synchronization. 
+ */ + template + bool insert( value_type& val, Func f ) + { + bool bRet = bucket( val ).insert( val, f ); + if ( bRet ) + ++m_ItemCounter; + return bRet; + } + + /// Updates the element + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val not found in the set, then \p val is inserted iff \p bAllowInsert is \p true. + Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + struct functor { + void operator()( bool bNew, value_type& item, value_type& val ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p %update() function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refers to the same thing. + + The functor may change non-key fields of the \p item. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if new item has been added or \p false if the item with \p key + already is in the set. + + @warning For \ref cds_intrusive_MichaelList_hp "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". + \ref cds_intrusive_LazyList_hp "LazyList" provides exclusive access to inserted item and does not require any node-level + synchronization. + */ + template + std::pair update( value_type& val, Func func, bool bAllowInsert = true ) + { + std::pair bRet = bucket( val ).update( val, func, bAllowInsert ); + if ( bRet.second ) + ++m_ItemCounter; + return bRet; + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( value_type& val, Func func ) + { + return update( val, func, true ); + } + //@endcond + + /// Unlinks the item \p val from the set + /** + The function searches the item \p val in the set and unlink it from the set + if it is found and is equal to \p val. 
+ + The function returns \p true if success and \p false otherwise. + */ + bool unlink( value_type& val ) + { + bool bRet = bucket( val ).unlink( val ); + if ( bRet ) + --m_ItemCounter; + return bRet; + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_MichaelHashSet_rcu_erase + The function searches an item with key equal to \p key in the set, + unlinks it from the set, and returns \p true. + If the item with key equal to \p key is not found the function return \p false. + + Note the hash functor should accept a parameter of type \p Q that may be not the same as \p value_type. + */ + template + bool erase( Q const& key ) + { + if ( bucket( key ).erase( key )) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelHashSet_rcu_erase "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred ) + { + if ( bucket( key ).erase_with( key, pred )) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_MichaelHashSet_rcu_erase_func + The function searches an item with key equal to \p key in the set, + call \p f functor with item found, and unlinks it from the set. + The \ref disposer specified in \p OrderedList class template parameter is called + by garbage collector \p GC asynchronously. + + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + + If the item with key equal to \p key is not found the function return \p false. + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. 
+ */ + template + bool erase( const Q& key, Func f ) + { + if ( bucket( key ).erase( key, f )) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelHashSet_rcu_erase_func "erase(Q const&)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( const Q& key, Less pred, Func f ) + { + if ( bucket( key ).erase_with( key, pred, f )) { + --m_ItemCounter; + return true; + } + return false; + } + + /// Extracts an item from the set + /** \anchor cds_intrusive_MichaelHashSet_rcu_extract + The function searches an item with key equal to \p key in the set, + unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. + If the item with the key equal to \p key is not found the function returns an empty \p exempt_ptr. + + Depends on \p ordered_list you should or should not lock RCU before calling of this function: + - for the set based on \ref cds_intrusive_MichaelList_rcu "MichaelList" RCU should not be locked + - for the set based on \ref cds_intrusive_LazyList_rcu "LazyList" RCU should be locked + + See ordered list implementation for details. + + \code + #include + #include + #include + + typedef cds::urcu::gc< general_buffered<> > rcu; + typedef cds::intrusive::MichaelList< rcu, Foo > rcu_michael_list; + typedef cds::intrusive::MichaelHashSet< rcu, rcu_michael_list, foo_traits > rcu_michael_set; + + rcu_michael_set theSet; + // ... + + typename rcu_michael_set::exempt_ptr p; + + // For MichaelList we should not lock RCU + + // Now, you can apply extract function + // Note that you must not delete the item found inside the RCU lock + p = theSet.extract( 10 ) + if ( p ) { + // do something with p + ... 
+ } + + // We may safely release p here + // release() passes the pointer to RCU reclamation cycle: + // it invokes RCU retire_ptr function with the disposer you provided for rcu_michael_list. + p.release(); + \endcode + */ + template + exempt_ptr extract( Q const& key ) + { + exempt_ptr p( bucket( key ).extract( key )); + if ( p ) + --m_ItemCounter; + return p; + } + + /// Extracts an item from the set using \p pred predicate for searching + /** + The function is an analog of \p extract(Q const&) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + exempt_ptr extract_with( Q const& key, Less pred ) + { + exempt_ptr p( bucket( key ).extract_with( key, pred )); + if ( p ) + --m_ItemCounter; + return p; + } + + /// Checks whether the set contains \p key + /** + + The function searches the item with key equal to \p key + and returns \p true if the key is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool contains( Q const& key ) + { + return bucket( key ).contains( key ); + } + //@cond + template + CDS_DEPRECATED("use contains()") + bool find( Q const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the set contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. 
+ */ + template + bool contains( Q const& key, Less pred ) + { + return bucket( key ).contains( key, pred ); + } + //@cond + template + CDS_DEPRECATED("use contains()") + bool find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Find the key \p key + /** \anchor cds_intrusive_MichaelHashSet_rcu_find_func + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. + + The functor can change non-key fields of \p item. + The functor does not serialize simultaneous access to the set \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( Q& key, Func f ) + { + return bucket( key ).find( key, f ); + } + //@cond + template + bool find( Q const& key, Func f ) + { + return bucket( key ).find( key, f ); + } + //@endcond + + /// Finds the key \p key using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_MichaelHashSet_rcu_find_func "find(Q&, Func)" + but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. 
+ */ + template + bool find_with( Q& key, Less pred, Func f ) + { + return bucket( key ).find_with( key, pred, f ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) + { + return bucket( key ).find_with( key, pred, f ); + } + //@endcond + + /// Finds the key \p key and return the item found + /** \anchor cds_intrusive_MichaelHashSet_rcu_get + The function searches the item with key equal to \p key and returns the pointer to item found. + If \p key is not found it returns \p nullptr. + Note the type of returned value depends on underlying \p ordered_list. + For details, see documentation of ordered list you use. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU should be locked before call of this function. + Returned item is valid only while RCU is locked: + \code + typedef cds::intrusive::MichaelHashSet< your_template_parameters > hash_set; + hash_set theSet; + // ... + // Result of get() call + typename hash_set::raw_ptr ptr; + { + // Lock RCU + hash_set::rcu_lock lock; + + ptr = theSet.get( 5 ); + if ( ptr ) { + // Deal with ptr + //... + } + // Unlock RCU by rcu_lock destructor + // ptr can be reclaimed by disposer at any time after RCU has been unlocked + } + \endcode + */ + template + raw_ptr get( Q const& key ) + { + return bucket( key ).get( key ); + } + + /// Finds the key \p key and return the item found + /** + The function is an analog of \ref cds_intrusive_MichaelHashSet_rcu_get "get(Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + raw_ptr get_with( Q const& key, Less pred ) + { + return bucket( key ).get_with( key, pred ); + } + + /// Clears the set (non-atomic) + /** + The function unlink all items from the set. 
+ The function is not atomic. It cleans up each bucket and then resets the item counter to zero. + If there are a thread that performs insertion while \p clear is working the result is undefined in general case: + empty() may return \p true but the set may contain item(s). + Therefore, \p clear may be used only for debugging purposes. + + For each item the \p disposer is called after unlinking. + */ + void clear() + { + for ( size_t i = 0; i < bucket_count(); ++i ) + m_Buckets[i].clear(); + m_ItemCounter.reset(); + } + + + /// Checks if the set is empty + /** + @warning If you use \p atomicity::empty_item_counter in \p traits::item_counter, + the function always returns \p true. + */ + bool empty() const + { + return size() == 0; + } + + /// Returns item count in the set + /** + If you use \p atomicity::empty_item_counter in \p traits::item_counter, + the function always returns 0. + */ + size_t size() const + { + return m_ItemCounter; + } + + /// Returns the size of hash table + /** + Since %MichaelHashSet cannot dynamically extend the hash table size, + the value returned is an constant depending on object initialization parameters; + see \ref cds_intrusive_MichaelHashSet_hp "MichaelHashSet" for explanation. 
+ */ + size_t bucket_count() const + { + return m_nHashBitmask + 1; + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + private: + //@cond + template + typename std::enable_if< Stat::empty >::type construct_bucket( internal_bucket_type * bkt ) + { + new (bkt) internal_bucket_type; + } + + template + typename std::enable_if< !Stat::empty >::type construct_bucket( internal_bucket_type * bkt ) + { + new (bkt) internal_bucket_type( m_Stat ); + } + + /// Calculates hash value of \p key + template + size_t hash_value( Q const& key ) const + { + return m_HashFunctor( key ) & m_nHashBitmask; + } + + /// Returns the bucket (ordered list) for \p key + template + internal_bucket_type& bucket( Q const& key ) + { + return m_Buckets[hash_value( key )]; + } + template + internal_bucket_type const& bucket( Q const& key ) const + { + return m_Buckets[hash_value( key )]; + } + //@endcond + }; + +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_MICHAEL_SET_NOGC_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/moir_queue.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/moir_queue.h new file mode 100644 index 0000000..70afe03 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/moir_queue.h @@ -0,0 +1,196 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_MOIR_QUEUE_H +#define CDSLIB_INTRUSIVE_MOIR_QUEUE_H + +#include + +namespace cds { namespace intrusive { + + /// A variation of Michael & Scott's lock-free queue (intrusive variant) + /** @ingroup cds_intrusive_queue + This is slightly optimized Michael & Scott's queue algorithm that overloads \ref dequeue function. + + Source: + - [2000] Simon Doherty, Lindsay Groves, Victor Luchangco, Mark Moir + "Formal Verification of a practical lock-free queue algorithm" + + Cite from this work about difference from Michael & Scott algo: + "Our algorithm differs from Michael and Scott's [MS98] in that we test whether \p Tail points to the header + node only after \p Head has been updated, so a dequeuing process reads \p Tail only once. The dequeue in + [MS98] performs this test before checking whether the next pointer in the dummy node is null, which + means that it reads \p Tail every time a dequeuing process loops. 
Under high load, when operations retry + frequently, our modification will reduce the number of accesses to global memory. This modification, however, + introduces the possibility of \p Head and \p Tail 'crossing'." + + Explanation of template arguments see \p intrusive::MSQueue. + + \par Examples + \code + #include + #include + + namespace ci = cds::inrtusive; + typedef cds::gc::HP hp_gc; + + // MoirQueue with Hazard Pointer garbage collector, base hook + item disposer: + struct Foo: public ci::msqueue::node< hp_gc > + { + // Your data + ... + }; + + // Disposer for Foo struct just deletes the object passed in + struct fooDisposer { + void operator()( Foo * p ) + { + delete p; + } + }; + + typedef ci::MoirQueue< + hp_gc + ,Foo + typename ci::msqueue::make_traits< + ,ci::opt::hook< + ci::msqueue::base_hook< ci::opt::gc > + > + ,ci::opt::disposer< fooDisposer > + >::type + > fooQueue; + + // MoirQueue with Hazard Pointer garbage collector, + // member hook + item disposer + item counter, + // without padding of internal queue data: + struct Bar + { + // Your data + ... 
+ ci::msqueue::node< hp_gc > hMember; + }; + + struct barQueueTraits: public ci::msqueue::traits + { + typedef ci::msqueue::member_hook< offsetof(Bar, hMember), ,ci::opt::gc > hook; + typedef fooDisposer disposer; + typedef cds::atomicity::item_counter item_counter; + enum { padding = cds::opt::no_special_padding }; + }; + typedef ci::MoirQueue< hp_gc, Bar, barQueueTraits > barQueue; + \endcode + */ + template + class MoirQueue: public MSQueue< GC, T, Traits > + { + //@cond + typedef MSQueue< GC, T, Traits > base_class; + typedef typename base_class::node_type node_type; + //@endcond + + public: + //@cond + typedef typename base_class::value_type value_type; + typedef typename base_class::back_off back_off; + typedef typename base_class::gc gc; + typedef typename base_class::node_traits node_traits; + typedef typename base_class::memory_model memory_model; + //@endcond + + /// Rebind template arguments + template < typename GC2, typename T2, typename Traits2 > + struct rebind { + typedef MoirQueue< GC2, T2, Traits2> other ; ///< Rebinding result + }; + + protected: + //@cond + typedef typename base_class::dequeue_result dequeue_result; + + bool do_dequeue( dequeue_result& res ) + { + back_off bkoff; + + node_type * pNext; + node_type * h; + while ( true ) { + h = res.guards.protect( 0, base_class::m_pHead, []( node_type * p ) -> value_type * { return node_traits::to_value_ptr( p );}); + pNext = res.guards.protect( 1, h->m_pNext, []( node_type * p ) -> value_type * { return node_traits::to_value_ptr( p );}); + + if ( pNext == nullptr ) { + base_class::m_Stat.onEmptyDequeue(); + return false; // queue is empty + } + + if ( base_class::m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed )) { + node_type * t = base_class::m_pTail.load(memory_model::memory_order_acquire); + if ( h == t ) + base_class::m_pTail.compare_exchange_strong( t, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed ); + 
break; + } + + base_class::m_Stat.onDequeueRace(); + bkoff(); + } + + --base_class::m_ItemCounter; + base_class::m_Stat.onDequeue(); + + res.pHead = h; + res.pNext = pNext; + return true; + } + //@endcond + + public: + /// Dequeues a value from the queue + /** @anchor cds_intrusive_MoirQueue_dequeue + See warning about item disposing in \p MSQueue::dequeue. + */ + value_type * dequeue() + { + dequeue_result res; + if ( do_dequeue( res )) { + base_class::dispose_result( res ); + return node_traits::to_value_ptr( *res.pNext ); + } + return nullptr; + } + + /// Synonym for \ref cds_intrusive_MoirQueue_dequeue "dequeue" function + value_type * pop() + { + return dequeue(); + } + }; + +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_MOIR_QUEUE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/mspriority_queue.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/mspriority_queue.h new file mode 100644 index 0000000..a41cd98 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/mspriority_queue.h @@ -0,0 +1,543 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_MSPRIORITY_QUEUE_H +#define CDSLIB_INTRUSIVE_MSPRIORITY_QUEUE_H + +#include // std::unique_lock +#include +#include +#include +#include +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + /// MSPriorityQueue related definitions + /** @ingroup cds_intrusive_helper + */ + namespace mspriority_queue { + + /// MSPriorityQueue statistics + template + struct stat { + typedef Counter event_counter ; ///< Event counter type + + event_counter m_nPushCount; ///< Count of success push operation + event_counter m_nPopCount; ///< Count of success pop operation + event_counter m_nPushFailCount; ///< Count of failed ("the queue is full") push operation + event_counter m_nPopFailCount; ///< Count of failed ("the queue is empty") pop operation + event_counter m_nPushHeapifySwapCount; ///< Count of item swapping when heapifying in push + event_counter m_nPopHeapifySwapCount; ///< Count of item swapping when heapifying in pop + event_counter m_nItemMovedTop; ///< Count of events when \p push() encountered that inserted item was moved to top by a concurrent \p pop() + event_counter m_nItemMovedUp; ///< Count of events when \p push() encountered that inserted 
item was moved upwards by a concurrent \p pop() + event_counter m_nPushEmptyPass; ///< Count of empty pass during heapify via concurrent operations + + //@cond + void onPushSuccess() { ++m_nPushCount ;} + void onPopSuccess() { ++m_nPopCount ;} + void onPushFailed() { ++m_nPushFailCount ;} + void onPopFailed() { ++m_nPopFailCount ;} + void onPushHeapifySwap() { ++m_nPushHeapifySwapCount ;} + void onPopHeapifySwap() { ++m_nPopHeapifySwapCount ;} + + void onItemMovedTop() { ++m_nItemMovedTop ;} + void onItemMovedUp() { ++m_nItemMovedUp ;} + void onPushEmptyPass() { ++m_nPushEmptyPass ;} + //@endcond + }; + + /// MSPriorityQueue empty statistics + struct empty_stat { + //@cond + void onPushSuccess() const {} + void onPopSuccess() const {} + void onPushFailed() const {} + void onPopFailed() const {} + void onPushHeapifySwap() const {} + void onPopHeapifySwap() const {} + + void onItemMovedTop() const {} + void onItemMovedUp() const {} + void onPushEmptyPass() const {} + //@endcond + }; + + /// MSPriorityQueue traits + struct traits { + /// Storage type + /** + The storage type for the heap array. Default is \p cds::opt::v::initialized_dynamic_buffer. + + You may specify any type of buffer's value since at instantiation time + the \p buffer::rebind member metafunction is called to change type + of values stored in the buffer. + */ + typedef opt::v::initialized_dynamic_buffer buffer; + + /// Priority compare functor + /** + No default functor is provided. If the option is not specified, the \p less is used. + */ + typedef opt::none compare; + + /// Specifies binary predicate used for priority comparing. + /** + Default is \p std::less. + */ + typedef opt::none less; + + /// Type of mutual-exclusion lock. The lock is not need to be recursive. 
+ typedef cds::sync::spin lock_type; + + /// Back-off strategy + typedef backoff::Default back_off; + + /// Internal statistics + /** + Possible types: \p mspriority_queue::empty_stat (the default, no overhead), \p mspriority_queue::stat + or any other with interface like \p %mspriority_queue::stat + */ + typedef empty_stat stat; + }; + + /// Metafunction converting option list to traits + /** + \p Options: + - \p opt::buffer - the buffer type for heap array. Possible type are: \p opt::v::initialized_static_buffer, \p opt::v::initialized_dynamic_buffer. + Default is \p %opt::v::initialized_dynamic_buffer. + You may specify any type of value for the buffer since at instantiation time + the \p buffer::rebind member metafunction is called to change the type of values stored in the buffer. + - \p opt::compare - priority compare functor. No default functor is provided. + If the option is not specified, the \p opt::less is used. + - \p opt::less - specifies binary predicate used for priority compare. Default is \p std::less. + - \p opt::lock_type - lock type. Default is \p cds::sync::spin + - \p opt::back_off - back-off strategy. Default is \p cds::backoff::yield + - \p opt::stat - internal statistics. Available types: \p mspriority_queue::stat, \p mspriority_queue::empty_stat (the default, no overhead) + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + ,Options... + >::type type; +# endif + }; + + } // namespace mspriority_queue + + /// Michael & Scott array-based lock-based concurrent priority queue heap + /** @ingroup cds_intrusive_priority_queue + Source: + - [1996] G.Hunt, M.Michael, S. 
Parthasarathy, M.Scott + "An efficient algorithm for concurrent priority queue heaps" + + \p %MSPriorityQueue augments the standard array-based heap data structure with + a mutual-exclusion lock on the heap's size and locks on each node in the heap. + Each node also has a tag that indicates whether + it is empty, valid, or in a transient state due to an update to the heap + by an inserting thread. + The algorithm allows concurrent insertions and deletions in opposite directions, + without risking deadlock and without the need for special server threads. + It also uses a "bit-reversal" technique to scatter accesses across the fringe + of the tree to reduce contention. + On large heaps the algorithm achieves significant performance improvements + over serialized single-lock algorithm, for various insertion/deletion + workloads. For small heaps it still performs well, but not as well as + single-lock algorithm. + + Template parameters: + - \p T - type to be stored in the queue. The priority is a part of \p T type. + - \p Traits - type traits. See \p mspriority_queue::traits for explanation. + It is possible to declare option-based queue with \p cds::container::mspriority_queue::make_traits + metafunction instead of \p Traits template argument. + */ + template + class MSPriorityQueue: public cds::bounded_container + { + public: + typedef T value_type ; ///< Value type stored in the queue + typedef Traits traits ; ///< Traits template parameter + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator ; ///< priority comparing functor based on opt::compare and opt::less option setter. 
+# else + typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; +# endif + + typedef typename traits::lock_type lock_type; ///< heap's size lock type + typedef typename traits::back_off back_off; ///< Back-off strategy + typedef typename traits::stat stat; ///< internal statistics type, see \p mspriority_queue::traits::stat + typedef typename cds::bitop::bit_reverse_counter<> item_counter;///< Item counter type + + protected: + //@cond + typedef cds::OS::ThreadId tag_type; + + enum tag_value { + Available = -1, + Empty = 0 + }; + //@endcond + + //@cond + /// Heap item type + struct node { + value_type * m_pVal ; ///< A value pointer + tag_type volatile m_nTag ; ///< A tag + mutable lock_type m_Lock ; ///< Node-level lock + + /// Creates empty node + node() + : m_pVal( nullptr ) + , m_nTag( tag_type(Empty)) + {} + + /// Lock the node + void lock() + { + m_Lock.lock(); + } + + /// Unlock the node + void unlock() + { + m_Lock.unlock(); + } + }; + //@endcond + + public: + typedef typename traits::buffer::template rebind::other buffer_type ; ///< Heap array buffer type + + //@cond + typedef typename item_counter::counter_type counter_type; + //@endcond + + protected: + item_counter m_ItemCounter ; ///< Item counter + mutable lock_type m_Lock ; ///< Heap's size lock + buffer_type m_Heap ; ///< Heap array + stat m_Stat ; ///< internal statistics accumulator + + public: + /// Constructs empty priority queue + /** + For \p cds::opt::v::initialized_static_buffer the \p nCapacity parameter is ignored. + */ + MSPriorityQueue( size_t nCapacity ) + : m_Heap( nCapacity ) + {} + + /// Clears priority queue and destructs the object + ~MSPriorityQueue() + { + clear(); + } + + /// Inserts a item into priority queue + /** + If the priority queue is full, the function returns \p false, + no item has been added. + Otherwise, the function inserts the pointer to \p val into the heap + and returns \p true. 
+ + The function does not make a copy of \p val. + */ + bool push( value_type& val ) + { + tag_type const curId = cds::OS::get_current_thread_id(); + + // Insert new item at bottom of the heap + m_Lock.lock(); + if ( m_ItemCounter.value() >= capacity()) { + // the heap is full + m_Lock.unlock(); + m_Stat.onPushFailed(); + return false; + } + + counter_type i = m_ItemCounter.inc(); + assert( i < m_Heap.capacity()); + + node& refNode = m_Heap[i]; + refNode.lock(); + m_Lock.unlock(); + assert( refNode.m_nTag == tag_type( Empty )); + assert( refNode.m_pVal == nullptr ); + refNode.m_pVal = &val; + refNode.m_nTag = curId; + refNode.unlock(); + + // Move item towards top of heap while it has a higher priority than its parent + heapify_after_push( i, curId ); + + m_Stat.onPushSuccess(); + return true; + } + + /// Extracts item with high priority + /** + If the priority queue is empty, the function returns \p nullptr. + Otherwise, it returns the item extracted. + */ + value_type * pop() + { + node& refTop = m_Heap[1]; + + m_Lock.lock(); + if ( m_ItemCounter.value() == 0 ) { + // the heap is empty + m_Lock.unlock(); + m_Stat.onPopFailed(); + return nullptr; + } + counter_type nBottom = m_ItemCounter.dec(); + assert( nBottom < m_Heap.capacity()); + assert( nBottom > 0 ); + + refTop.lock(); + if ( nBottom == 1 ) { + refTop.m_nTag = tag_type( Empty ); + value_type * pVal = refTop.m_pVal; + refTop.m_pVal = nullptr; + refTop.unlock(); + m_Lock.unlock(); + m_Stat.onPopSuccess(); + return pVal; + } + + node& refBottom = m_Heap[nBottom]; + refBottom.lock(); + m_Lock.unlock(); + refBottom.m_nTag = tag_type(Empty); + value_type * pVal = refBottom.m_pVal; + refBottom.m_pVal = nullptr; + refBottom.unlock(); + + if ( refTop.m_nTag == tag_type(Empty)) { + // nBottom == nTop + refTop.unlock(); + m_Stat.onPopSuccess(); + return pVal; + } + + std::swap( refTop.m_pVal, pVal ); + refTop.m_nTag = tag_type( Available ); + + // refTop will be unlocked inside heapify_after_pop + heapify_after_pop( 
&refTop ); + + m_Stat.onPopSuccess(); + return pVal; + } + + /// Clears the queue (not atomic) + /** + This function is no atomic, but thread-safe + */ + void clear() + { + clear_with( []( value_type const& /*src*/ ) {} ); + } + + /// Clears the queue (not atomic) + /** + This function is no atomic, but thread-safe. + + For each item removed the functor \p f is called. + \p Func interface is: + \code + struct clear_functor + { + void operator()( value_type& item ); + }; + \endcode + A lambda function or a function pointer can be used as \p f. + */ + template + void clear_with( Func f ) + { + value_type * pVal; + while (( pVal = pop()) != nullptr ) + f( *pVal ); + } + + /// Checks is the priority queue is empty + bool empty() const + { + return size() == 0; + } + + /// Checks if the priority queue is full + bool full() const + { + return size() == capacity(); + } + + /// Returns current size of priority queue + size_t size() const + { + std::unique_lock l( m_Lock ); + return static_cast( m_ItemCounter.value()); + } + + /// Return capacity of the priority queue + size_t capacity() const + { + // m_Heap[0] is not used + return m_Heap.capacity() - 1; + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + protected: + //@cond + + void heapify_after_push( counter_type i, tag_type curId ) + { + key_comparator cmp; + back_off bkoff; + + // Move item towards top of the heap while it has higher priority than parent + while ( i > 1 ) { + bool bProgress = true; + counter_type nParent = i / 2; + node& refParent = m_Heap[nParent]; + refParent.lock(); + node& refItem = m_Heap[i]; + refItem.lock(); + + if ( refParent.m_nTag == tag_type(Available) && refItem.m_nTag == curId ) { + if ( cmp( *refItem.m_pVal, *refParent.m_pVal ) > 0 ) { + std::swap( refItem.m_nTag, refParent.m_nTag ); + std::swap( refItem.m_pVal, refParent.m_pVal ); + m_Stat.onPushHeapifySwap(); + i = nParent; + } + else { + refItem.m_nTag = 
tag_type(Available); + i = 0; + } + } + else if ( refParent.m_nTag == tag_type( Empty )) { + m_Stat.onItemMovedTop(); + i = 0; + } + else if ( refItem.m_nTag != curId ) { + m_Stat.onItemMovedUp(); + i = nParent; + } + else { + m_Stat.onPushEmptyPass(); + bProgress = false; + } + + refItem.unlock(); + refParent.unlock(); + + if ( !bProgress ) + bkoff(); + else + bkoff.reset(); + } + + if ( i == 1 ) { + node& refItem = m_Heap[i]; + refItem.lock(); + if ( refItem.m_nTag == curId ) + refItem.m_nTag = tag_type(Available); + refItem.unlock(); + } + } + + void heapify_after_pop( node * pParent ) + { + key_comparator cmp; + counter_type const nCapacity = m_Heap.capacity(); + + counter_type nParent = 1; + for ( counter_type nChild = nParent * 2; nChild < nCapacity; nChild *= 2 ) { + node* pChild = &m_Heap[ nChild ]; + pChild->lock(); + + if ( pChild->m_nTag == tag_type( Empty )) { + pChild->unlock(); + break; + } + + counter_type const nRight = nChild + 1; + if ( nRight < nCapacity ) { + node& refRight = m_Heap[nRight]; + refRight.lock(); + + if ( refRight.m_nTag != tag_type( Empty ) && cmp( *refRight.m_pVal, *pChild->m_pVal ) > 0 ) { + // get right child + pChild->unlock(); + nChild = nRight; + pChild = &refRight; + } + else + refRight.unlock(); + } + + // If child has higher priority than parent then swap + // Otherwise stop + if ( cmp( *pChild->m_pVal, *pParent->m_pVal ) > 0 ) { + std::swap( pParent->m_nTag, pChild->m_nTag ); + std::swap( pParent->m_pVal, pChild->m_pVal ); + pParent->unlock(); + m_Stat.onPopHeapifySwap(); + nParent = nChild; + pParent = pChild; + } + else { + pChild->unlock(); + break; + } + } + pParent->unlock(); + } + //@endcond + }; + +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_MSPRIORITY_QUEUE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/msqueue.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/msqueue.h new file mode 100644 index 0000000..1f4a9ac --- /dev/null +++ 
b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/msqueue.h @@ -0,0 +1,624 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_INTRUSIVE_MSQUEUE_H +#define CDSLIB_INTRUSIVE_MSQUEUE_H + +#include +#include +#include + +namespace cds { namespace intrusive { + + /// MSQueue related definitions + /** @ingroup cds_intrusive_helper + */ + namespace msqueue { + + /// Queue node + /** + Template parameters: + - GC - garbage collector used + - Tag - a \ref cds_intrusive_hook_tag "tag" + */ + template + using node = cds::intrusive::single_link::node< GC, Tag >; + + /// Base hook + /** + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - a \ref cds_intrusive_hook_tag "tag" + */ + template < typename... Options > + using base_hook = cds::intrusive::single_link::base_hook< Options...>; + + /// Member hook + /** + \p MemberOffset specifies offset in bytes of \ref node member into your structure. + Use \p offsetof macro to define \p MemberOffset + + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - a \ref cds_intrusive_hook_tag "tag" + */ + template < size_t MemberOffset, typename... Options > + using member_hook = cds::intrusive::single_link::member_hook< MemberOffset, Options... >; + + /// Traits hook + /** + \p NodeTraits defines type traits for node. + See \ref node_traits for \p NodeTraits interface description + + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - a \ref cds_intrusive_hook_tag "tag" + */ + template + using traits_hook = cds::intrusive::single_link::traits_hook< NodeTraits, Options... >; + + /// Queue internal statistics. May be used for debugging or profiling + /** + Template argument \p Counter defines type of counter. + Default is \p cds::atomicity::event_counter, that is weak, i.e. it is not guaranteed + strict event counting. + You may use stronger type of counter like as \p cds::atomicity::item_counter, + or even integral type, for example, \p int. 
+ */ + template + struct stat + { + typedef Counter counter_type; ///< Counter type + + counter_type m_EnqueueCount ; ///< Enqueue call count + counter_type m_DequeueCount ; ///< Dequeue call count + counter_type m_EnqueueRace ; ///< Count of enqueue race conditions encountered + counter_type m_DequeueRace ; ///< Count of dequeue race conditions encountered + counter_type m_AdvanceTailError ; ///< Count of "advance tail failed" events + counter_type m_BadTail ; ///< Count of events "Tail is not pointed to the last item in the queue" + counter_type m_EmptyDequeue ; ///< Count of dequeue from empty queue + + /// Register enqueue call + void onEnqueue() { ++m_EnqueueCount; } + /// Register dequeue call + void onDequeue() { ++m_DequeueCount; } + /// Register enqueue race event + void onEnqueueRace() { ++m_EnqueueRace; } + /// Register dequeue race event + void onDequeueRace() { ++m_DequeueRace; } + /// Register "advance tail failed" event + void onAdvanceTailFailed() { ++m_AdvanceTailError; } + /// Register event "Tail is not pointed to last item in the queue" + void onBadTail() { ++m_BadTail; } + /// Register dequeuing from empty queue + void onEmptyDequeue() { ++m_EmptyDequeue; } + + //@cond + void reset() + { + m_EnqueueCount.reset(); + m_DequeueCount.reset(); + m_EnqueueRace.reset(); + m_DequeueRace.reset(); + m_AdvanceTailError.reset(); + m_BadTail.reset(); + m_EmptyDequeue.reset(); + } + + stat& operator +=( stat const& s ) + { + m_EnqueueCount += s.m_EnqueueCount.get(); + m_DequeueCount += s.m_DequeueCount.get(); + m_EnqueueRace += s.m_EnqueueRace.get(); + m_DequeueRace += s.m_DequeueRace.get(); + m_AdvanceTailError += s.m_AdvanceTailError.get(); + m_BadTail += s.m_BadTail.get(); + m_EmptyDequeue += s.m_EmptyDequeue.get(); + + return *this; + } + //@endcond + }; + + /// Dummy queue statistics - no counting is performed, no overhead. 
Support interface like \p msqueue::stat + struct empty_stat + { + //@cond + void onEnqueue() const {} + void onDequeue() const {} + void onEnqueueRace() const {} + void onDequeueRace() const {} + void onAdvanceTailFailed() const {} + void onBadTail() const {} + void onEmptyDequeue() const {} + + void reset() {} + empty_stat& operator +=( empty_stat const& ) + { + return *this; + } + //@endcond + }; + + /// MSQueue default traits + struct traits + { + /// Back-off strategy + typedef cds::backoff::empty back_off; + + /// Hook, possible types are \p msqueue::base_hook, \p msqueue::member_hook, \p msqueue::traits_hook + typedef msqueue::base_hook<> hook; + + /// The functor used for dispose removed items. Default is \p opt::v::empty_disposer. This option is used for dequeuing + typedef opt::v::empty_disposer disposer; + + /// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter to enable item counting + typedef atomicity::empty_item_counter item_counter; + + /// Internal statistics (by default, disabled) + /** + Possible option value are: \p msqueue::stat, \p msqueue::empty_stat (the default), + user-provided class that supports \p %msqueue::stat interface. + */ + typedef msqueue::empty_stat stat; + + /// C++ memory ordering model + /** + Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + */ + typedef opt::v::relaxed_ordering memory_model; + + /// Link checking, see \p cds::opt::link_checker + static constexpr const opt::link_check_type link_checker = opt::debug_check_link; + + /// Padding for internal critical atomic data. Default is \p opt::cache_line_padding + enum { padding = opt::cache_line_padding }; + }; + + /// Metafunction converting option list to \p msqueue::traits + /** + Supported \p Options are: + + - \p opt::hook - hook used. Possible hooks are: \p msqueue::base_hook, \p msqueue::member_hook, \p msqueue::traits_hook. 
+ If the option is not specified, \p %msqueue::base_hook<> is used. + - \p opt::back_off - back-off strategy used, default is \p cds::backoff::empty. + - \p opt::disposer - the functor used for dispose removed items. Default is \p opt::v::empty_disposer. This option is used + when dequeuing. + - \p opt::link_checker - the type of node's link fields checking. Default is \p opt::debug_check_link + - \p opt::item_counter - the type of item counting feature. Default is \p cds::atomicity::empty_item_counter (item counting disabled) + To enable item counting use \p cds::atomicity::item_counter + - \p opt::stat - the type to gather internal statistics. + Possible statistics types are: \p msqueue::stat, \p msqueue::empty_stat, user-provided class that supports \p %msqueue::stat interface. + Default is \p %msqueue::empty_stat (internal statistics disabled). + - \p opt::padding - padding for internal critical atomic data. Default is \p opt::cache_line_padding + - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + + Example: declare \p %MSQueue with item counting and internal statistics + \code + typedef cds::intrusive::MSQueue< cds::gc::HP, Foo, + typename cds::intrusive::msqueue::make_traits< + cds::intrusive::opt:hook< cds::intrusive::msqueue::base_hook< cds::opt::gc >>, + cds::opt::item_counte< cds::atomicity::item_counter >, + cds::opt::stat< cds::intrusive::msqueue::stat<> > + >::type + > myQueue; + \endcode + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + , Options... 
+ >::type type; +# endif + }; + } // namespace msqueue + + /// Michael & Scott's intrusive lock-free queue + /** @ingroup cds_intrusive_queue + Implementation of well-known Michael & Scott's queue algorithm: + - [1998] Maged Michael, Michael Scott "Simple, fast, and practical non-blocking and blocking concurrent queue algorithms" + + Template arguments: + - \p GC - garbage collector type: \p gc::HP, \p gc::DHP + - \p T - type of value to be stored in the queue. A value of type \p T must be derived from \p msqueue::node for \p msqueue::base_hook, + or it should have a member of type \p %msqueue::node for \p msqueue::member_hook, + or it should be convertible to \p %msqueue::node for \p msqueue::traits_hook. + - \p Traits - queue traits, default is \p msqueue::traits. You can use \p msqueue::make_traits + metafunction to make your traits or just derive your traits from \p %msqueue::traits: + \code + struct myTraits: public cds::intrusive::msqueue::traits { + typedef cds::intrusive::msqueue::stat<> stat; + typedef cds::atomicity::item_counter item_counter; + }; + typedef cds::intrusive::MSQueue< cds::gc::HP, Foo, myTraits > myQueue; + + // Equivalent make_traits example: + typedef cds::intrusive::MSQueue< cds::gc::HP, Foo, + typename cds::intrusive::msqueue::make_traits< + cds::opt::stat< cds::intrusive::msqueue::stat<> >, + cds::opt::item_counter< cds::atomicity::item_counter > + >::type + > myQueue; + \endcode + + \par About item disposing + The Michael & Scott's queue algo has a key feature: even if the queue is empty it contains one item that is "dummy" one from + the standpoint of the algo. See \p dequeue() function for explanation. + + \par Examples + \code + #include + #include + + namespace ci = cds::inrtusive; + typedef cds::gc::HP hp_gc; + + // MSQueue with Hazard Pointer garbage collector, base hook + item disposer: + struct Foo: public ci::msqueue::node< hp_gc > + { + // Your data + ... 
+ }; + + // Disposer for Foo struct just deletes the object passed in + struct fooDisposer { + void operator()( Foo * p ) + { + delete p; + } + }; + + // Declare traits for the queue + struct myTraits: public ci::msqueue::traits { + ,ci::opt::hook< + ci::msqueue::base_hook< ci::opt::gc > + > + ,ci::opt::disposer< fooDisposer > + }; + + // At least, declare the queue type + typedef ci::MSQueue< hp_gc, Foo, myTraits > fooQueue; + + // Example 2: + // MSQueue with Hazard Pointer garbage collector, + // member hook + item disposer + item counter, + // without padding of internal queue data + // Use msqueue::make_traits + struct Bar + { + // Your data + ... + ci::msqueue::node< hp_gc > hMember; + }; + + typedef ci::MSQueue< hp_gc, + Foo, + typename ci::msqueue::make_traits< + ci::opt::hook< + ci::msqueue::member_hook< + offsetof(Bar, hMember) + ,ci::opt::gc + > + > + ,ci::opt::disposer< fooDisposer > + ,cds::opt::item_counter< cds::atomicity::item_counter > + ,cds::opt::padding< cds::opt::no_special_padding > + >::type + > barQueue; + \endcode + */ + template + class MSQueue + { + public: + typedef GC gc; ///< Garbage collector + typedef T value_type; ///< type of value to be stored in the queue + typedef Traits traits; ///< Queue traits + + typedef typename traits::hook hook; ///< hook type + typedef typename hook::node_type node_type; ///< node type + typedef typename traits::disposer disposer; ///< disposer used + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits + typedef typename single_link::get_link_checker< node_type, traits::link_checker >::type link_checker; ///< link checker + + typedef typename traits::back_off back_off; ///< back-off strategy + typedef typename traits::item_counter item_counter; ///< Item counter class + typedef typename traits::stat stat; ///< Internal statistics + typedef typename traits::memory_model memory_model; ///< Memory ordering. 
See \p cds::opt::memory_model option + + /// Rebind template arguments + template + struct rebind { + typedef MSQueue< GC2, T2, Traits2 > other; ///< Rebinding result + }; + + static constexpr const size_t c_nHazardPtrCount = 2; ///< Count of hazard pointer required for the algorithm + + protected: + //@cond + + // GC and node_type::gc must be the same + static_assert((std::is_same::value), "GC and node_type::gc must be the same"); + + typedef typename node_type::atomic_node_ptr atomic_node_ptr; + + atomic_node_ptr m_pHead; ///< Queue's head pointer + typename opt::details::apply_padding< atomic_node_ptr, traits::padding >::padding_type pad1_; + atomic_node_ptr m_pTail; ///< Queue's tail pointer + typename opt::details::apply_padding< atomic_node_ptr, traits::padding >::padding_type pad2_; + node_type m_Dummy; ///< dummy node + typename opt::details::apply_padding< node_type, traits::padding >::padding_type pad3_; + item_counter m_ItemCounter; ///< Item counter + stat m_Stat; ///< Internal statistics + //@endcond + + //@cond + struct dequeue_result { + typename gc::template GuardArray<2> guards; + + node_type * pHead; + node_type * pNext; + }; + + bool do_dequeue( dequeue_result& res ) + { + node_type * pNext; + back_off bkoff; + + node_type * h; + while ( true ) { + h = res.guards.protect( 0, m_pHead, []( node_type * p ) -> value_type * { return node_traits::to_value_ptr( p );}); + pNext = res.guards.protect( 1, h->m_pNext, []( node_type * p ) -> value_type * { return node_traits::to_value_ptr( p );}); + if ( m_pHead.load(memory_model::memory_order_acquire) != h ) + continue; + + if ( pNext == nullptr ) { + m_Stat.onEmptyDequeue(); + return false; // empty queue + } + + node_type * t = m_pTail.load(memory_model::memory_order_acquire); + if ( h == t ) { + // It is needed to help enqueue + m_pTail.compare_exchange_strong( t, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed ); + m_Stat.onBadTail(); + continue; + } + + if ( 
m_pHead.compare_exchange_strong( h, pNext, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) + break; + + m_Stat.onDequeueRace(); + bkoff(); + } + + --m_ItemCounter; + m_Stat.onDequeue(); + + res.pHead = h; + res.pNext = pNext; + return true; + } + + static void clear_links( node_type * pNode ) + { + pNode->m_pNext.store( nullptr, memory_model::memory_order_release ); + } + + void dispose_result( dequeue_result& res ) + { + dispose_node( res.pHead ); + } + + void dispose_node( node_type * p ) + { + // Note about the dummy node: + // We cannot clear m_Dummy here since it leads to ABA. + // On the other hand, we cannot use deferred clear_links( &m_Dummy ) call via + // HP retiring cycle since m_Dummy is member of MSQueue and may be destroyed + // before HP retiring cycle invocation. + // So, we will never clear m_Dummy + + struct disposer_thunk { + void operator()( value_type * p ) const + { + assert( p != nullptr ); + MSQueue::clear_links( node_traits::to_node_ptr( p )); + disposer()(p); + } + }; + + if ( p != &m_Dummy ) + gc::template retire( node_traits::to_value_ptr( p )); + } + //@endcond + + public: + /// Initializes empty queue + MSQueue() + : m_pHead( &m_Dummy ) + , m_pTail( &m_Dummy ) + {} + + /// Destructor clears the queue + /** + Since the Michael & Scott queue contains at least one item even + if the queue is empty, the destructor may call item disposer. + */ + ~MSQueue() + { + clear(); + + node_type * pHead = m_pHead.load(memory_model::memory_order_relaxed); + + assert( pHead != nullptr ); + assert( pHead == m_pTail.load(memory_model::memory_order_relaxed)); + + m_pHead.store( nullptr, memory_model::memory_order_relaxed ); + m_pTail.store( nullptr, memory_model::memory_order_relaxed ); + + dispose_node( pHead ); + } + + /// Enqueues \p val value into the queue. + /** @anchor cds_intrusive_MSQueue_enqueue + The function always returns \p true. 
+ */ + bool enqueue( value_type& val ) + { + node_type * pNew = node_traits::to_node_ptr( val ); + link_checker::is_empty( pNew ); + + typename gc::Guard guard; + back_off bkoff; + + node_type * t; + while ( true ) { + t = guard.protect( m_pTail, []( node_type * p ) -> value_type * { return node_traits::to_value_ptr( p );}); + + node_type * pNext = t->m_pNext.load(memory_model::memory_order_acquire); + if ( pNext != nullptr ) { + // Tail is misplaced, advance it + m_pTail.compare_exchange_weak( t, pNext, memory_model::memory_order_release, atomics::memory_order_relaxed ); + m_Stat.onBadTail(); + continue; + } + + node_type * tmp = nullptr; + if ( t->m_pNext.compare_exchange_strong( tmp, pNew, memory_model::memory_order_release, atomics::memory_order_relaxed )) + break; + + m_Stat.onEnqueueRace(); + bkoff(); + } + ++m_ItemCounter; + m_Stat.onEnqueue(); + + if ( !m_pTail.compare_exchange_strong( t, pNew, memory_model::memory_order_release, atomics::memory_order_relaxed )) + m_Stat.onAdvanceTailFailed(); + return true; + } + + /// Dequeues a value from the queue + /** @anchor cds_intrusive_MSQueue_dequeue + If the queue is empty the function returns \p nullptr. + + \par Warning + The queue algorithm has following feature: when \p %dequeue() is called, + the item returning is still queue's top, and previous top is disposed: + + \code + before dequeuing Dequeue after dequeuing + +------------------+ +------------------+ + Top ->| Item 1 | -> Dispose Item 1 | Item 2 | <- Top + +------------------+ +------------------+ + | Item 2 | -> Return Item 2 | ... | + +------------------+ + | ... | + \endcode + + \p %dequeue() function returns Item 2, that becomes new top of queue, and calls + the disposer for Item 1, that was queue's top on function entry. + Thus, you cannot manually delete item returned because it is still included in + item sequence and it has valuable link field that must not be zeroed. 
+ The item should be deleted only in garbage collector retire cycle using the disposer. + */ + value_type * dequeue() + { + dequeue_result res; + + if ( do_dequeue( res )) { + dispose_result( res ); + + return node_traits::to_value_ptr( *res.pNext ); + } + return nullptr; + } + + /// Synonym for \ref cds_intrusive_MSQueue_enqueue "enqueue()" function + bool push( value_type& val ) + { + return enqueue( val ); + } + + /// Synonym for \ref cds_intrusive_MSQueue_dequeue "dequeue()" function + value_type * pop() + { + return dequeue(); + } + + /// Checks if the queue is empty + bool empty() const + { + typename gc::Guard guard; + node_type * p = guard.protect( m_pHead, []( node_type * pNode ) -> value_type * { return node_traits::to_value_ptr( pNode );}); + return p->m_pNext.load( memory_model::memory_order_relaxed ) == nullptr; + } + + /// Clear the queue + /** + The function repeatedly calls \p dequeue() until it returns \p nullptr. + The disposer defined in template \p Traits is called for each item + that can be safely disposed. + */ + void clear() + { + while ( dequeue()); + } + + /// Returns queue's item count + /** + The value returned depends on \p msqueue::traits::item_counter. For \p atomicity::empty_item_counter, + this function always returns 0. + + @note Even if you use real item counter and it returns 0, this fact is not mean that the queue + is empty. To check queue emptyness use \p empty() method. 
+ */ + size_t size() const + { + return m_ItemCounter.value(); + } + + /// Returns reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + }; + +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_MSQUEUE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/optimistic_queue.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/optimistic_queue.h new file mode 100644 index 0000000..dee5d9e --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/optimistic_queue.h @@ -0,0 +1,717 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_OPTIMISTIC_QUEUE_H +#define CDSLIB_INTRUSIVE_OPTIMISTIC_QUEUE_H + +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + /// \p OptimisticQueue related definitions + /** @ingroup cds_intrusive_helper + */ + namespace optimistic_queue { + + /// Optimistic queue node + /** + Template parameters: + - \p GC - garbage collector + - \p Tag - a \ref cds_intrusive_hook_tag "tag" + */ + template + struct node + { + typedef GC gc ; ///< Garbage collector + typedef Tag tag ; ///< tag + + typedef typename gc::template atomic_ref atomic_node_ptr ; ///< atomic pointer + + atomic_node_ptr m_pNext ; ///< Pointer to next node + atomic_node_ptr m_pPrev ; ///< Pointer to previous node + + node() noexcept + { + m_pNext.store( nullptr, atomics::memory_order_relaxed ); + m_pPrev.store( nullptr, atomics::memory_order_release ); + } + }; + + //@cond + struct default_hook { + typedef cds::gc::default_gc gc; + typedef opt::none tag; + }; + //@endcond + + //@cond + template < typename HookType, typename... Options> + struct hook + { + typedef typename opt::make_options< default_hook, Options...>::type options; + typedef typename options::gc gc; + typedef typename options::tag tag; + typedef node node_type; + typedef HookType hook_type; + }; + //@endcond + + /// Base hook + /** + \p Options are: + - \p opt::gc - garbage collector used. 
+ - \p opt::tag - a \ref cds_intrusive_hook_tag "tag" + */ + template < typename... Options > + struct base_hook: public hook< opt::base_hook_tag, Options... > + {}; + + /// Member hook + /** + \p MemberOffset specifies offset in bytes of \ref node member into your structure. + Use \p offsetof macro to define \p MemberOffset + + \p Options are: + - \p opt::gc - garbage collector used. + - \p opt::tag - a \ref cds_intrusive_hook_tag "tag" + */ + template < size_t MemberOffset, typename... Options > + struct member_hook: public hook< opt::member_hook_tag, Options... > + { + //@cond + static const size_t c_nMemberOffset = MemberOffset; + //@endcond + }; + + /// Traits hook + /** + \p NodeTraits defines type traits for node. + See \ref node_traits for \p NodeTraits interface description + + \p Options are: + - \p opt::gc - garbage collector used. + - \p opt::tag - a \ref cds_intrusive_hook_tag "tag" + */ + template + struct traits_hook: public hook< opt::traits_hook_tag, Options... > + { + //@cond + typedef NodeTraits node_traits; + //@endcond + }; + + /// Check link + template + struct link_checker { + //@cond + typedef Node node_type; + //@endcond + + /// Checks if the link fields of node \p pNode is \p nullptr + /** + An asserting is generated if \p pNode link fields is not \p nullptr + */ + static void is_empty( const node_type * pNode ) + { + assert( pNode->m_pNext.load( atomics::memory_order_relaxed ) == nullptr ); + assert( pNode->m_pPrev.load( atomics::memory_order_relaxed ) == nullptr ); + CDS_UNUSED( pNode ); + } + }; + + /// Metafunction for selecting appropriate link checking policy + template < typename Node, opt::link_check_type LinkType > + struct get_link_checker + { + //@cond + typedef intrusive::opt::v::empty_link_checker type; + //@endcond + }; + + //@cond + template < typename Node > + struct get_link_checker< Node, opt::always_check_link > + { + typedef link_checker type; + }; + template < typename Node > + struct get_link_checker< Node, 
opt::debug_check_link > + { +# ifdef _DEBUG + typedef link_checker type; +# else + typedef intrusive::opt::v::empty_link_checker type; +# endif + }; + //@endcond + + /// \p OptimisticQueue internal statistics. May be used for debugging or profiling + /** + Template argument \p Counter defines type of counter. + Default is \p cds::atomicity::event_counter. + You may use stronger type of counter like as \p cds::atomicity::item_counter, + or even integral type, for example, \p int. + */ + template + struct stat + { + typedef Counter counter_type; ///< Counter type + + counter_type m_EnqueueCount; ///< Enqueue call count + counter_type m_DequeueCount; ///< Dequeue call count + counter_type m_EnqueueRace; ///< Count of enqueue race conditions encountered + counter_type m_DequeueRace; ///< Count of dequeue race conditions encountered + counter_type m_AdvanceTailError; ///< Count of "advance tail failed" events + counter_type m_BadTail; ///< Count of events "Tail is not pointed to the last item in the queue" + counter_type m_FixListCount; ///< Count of fix list event + counter_type m_EmptyDequeue; ///< Count of dequeue from empty queue + + /// Register enqueue call + void onEnqueue() { ++m_EnqueueCount; } + /// Register dequeue call + void onDequeue() { ++m_DequeueCount; } + /// Register enqueue race event + void onEnqueueRace() { ++m_EnqueueRace; } + /// Register dequeue race event + void onDequeueRace() { ++m_DequeueRace; } + /// Register "advance tail failed" event + void onAdvanceTailFailed() { ++m_AdvanceTailError; } + /// Register event "Tail is not pointed to last item in the queue" + void onBadTail() { ++m_BadTail; } + /// Register fix list event + void onFixList() { ++m_FixListCount; } + /// Register dequeuing from empty queue + void onEmptyDequeue() { ++m_EmptyDequeue; } + + //@cond + void reset() + { + m_EnqueueCount.reset(); + m_DequeueCount.reset(); + m_EnqueueRace.reset(); + m_DequeueRace.reset(); + m_AdvanceTailError.reset(); + m_BadTail.reset(); + 
m_FixListCount.reset(); + m_EmptyDequeue.reset(); + } + + stat& operator +=( stat const& s ) + { + m_EnqueueCount += s.m_EnqueueCount.get(); + m_DequeueCount += s.m_DequeueCount.get(); + m_EnqueueRace += s.m_EnqueueRace.get(); + m_DequeueRace += s.m_DequeueRace.get(); + m_AdvanceTailError += s.m_AdvanceTailError.get(); + m_BadTail += s.m_BadTail.get(); + m_FixListCount += s.m_FixListCount.get(); + m_EmptyDequeue += s.m_EmptyDequeue.get(); + + return *this; + } + //@endcond + }; + + /// Dummy \p OptimisticQueue statistics - no counting is performed. Support interface like \p optimistic_queue::stat + struct empty_stat + { + //@cond + void onEnqueue() const {} + void onDequeue() const {} + void onEnqueueRace() const {} + void onDequeueRace() const {} + void onAdvanceTailFailed() const {} + void onBadTail() const {} + void onFixList() const {} + void onEmptyDequeue() const {} + + void reset() {} + empty_stat& operator +=( empty_stat const& ) + { + return *this; + } + //@endcond + }; + + /// \p OptimisticQueue default type traits + struct traits + { + /// Back-off strategy + typedef cds::backoff::empty back_off; + + /// Hook, possible types are \p optimistic_queue::base_hook, \p optimistic_queue::member_hook, \p optimistic_queue::traits_hook + typedef optimistic_queue::base_hook<> hook; + + /// The functor used for dispose removed items. Default is \p opt::v::empty_disposer. This option is used for dequeuing + typedef opt::v::empty_disposer disposer; + + /// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter to enable item counting + typedef cds::atomicity::empty_item_counter item_counter; + + /// C++ memory ordering model + /** + Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). 
+ */ + typedef opt::v::relaxed_ordering memory_model; + + /// Internal statistics (by default, disabled) + /** + Possible option value are: \p optimistic_queue::stat, \p optimistic_queue::empty_stat (the default), + user-provided class that supports \p %optimistic_queue::stat interface. + */ + typedef optimistic_queue::empty_stat stat; + + /// Link checking, see \p cds::opt::link_checker + static constexpr const opt::link_check_type link_checker = opt::debug_check_link; + + /// Padding for internal critical atomic data. Default is \p opt::cache_line_padding + enum { padding = opt::cache_line_padding }; + }; + + /// Metafunction converting option list to \p optimistic_queue::traits + /** + Supported \p Options are: + + - \p opt::hook - hook used. Possible hooks are: \p optimistic_queue::base_hook, \p optimistic_queue::member_hook, \p optimistic_queue::traits_hook. + If the option is not specified, \p %optimistic_queue::base_hook<> is used. + - \p opt::back_off - back-off strategy used, default is \p cds::backoff::empty. + - \p opt::disposer - the functor used for dispose removed items. Default is \p opt::v::empty_disposer. This option is used + when dequeuing. + - \p opt::link_checker - the type of node's link fields checking. Default is \p opt::debug_check_link + - \p opt::item_counter - the type of item counting feature. Default is \p cds::atomicity::empty_item_counter (item counting disabled) + To enable item counting use \p cds::atomicity::item_counter + - \p opt::stat - the type to gather internal statistics. + Possible statistics types are: \p optimistic_queue::stat, \p optimistic_queue::empty_stat, + user-provided class that supports \p %optimistic_queue::stat interface. + Default is \p %optimistic_queue::empty_stat (internal statistics disabled). + - \p opt::padding - padding for internal critical atomic data. Default is \p opt::cache_line_padding + - \p opt::memory_model - C++ memory ordering model. 
Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + + Example: declare \p %OptimisticQueue with item counting and internal statistics + \code + typedef cds::intrusive::OptimisticQueue< cds::gc::HP, Foo, + typename cds::intrusive::optimistic_queue::make_traits< + cds::intrusive::opt:hook< cds::intrusive::optimistic_queue::base_hook< cds::opt::gc >>, + cds::opt::item_counte< cds::atomicity::item_counter >, + cds::opt::stat< cds::intrusive::optimistic_queue::stat<> > + >::type + > myQueue; + \endcode + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + , Options... + >::type type; +# endif + }; + } // namespace optimistic_queue + + /// Optimistic intruive lock-free queue + /** @ingroup cds_intrusive_queue + Implementation of Ladan-Mozes & Shavit optimistic queue algorithm. + [2008] Edya Ladan-Mozes, Nir Shavit "An Optimistic Approach to Lock-Free FIFO Queues" + + Template arguments: + - \p GC - garbage collector type: \p gc::HP, \p gc::DHP + - \p T - type of value to be stored in the queue. A value of type \p T must be derived from \p optimistic_queue::node for \p optimistic_queue::base_hook, + or it should have a member of type \p %optimistic_queue::node for \p optimistic_queue::member_hook, + or it should be convertible to \p %optimistic_queue::node for \p optimistic_queue::traits_hook. + - \p Traits - queue traits, default is \p optimistic_queue::traits. 
You can use \p optimistic_queue::make_traits + metafunction to make your traits or just derive your traits from \p %optimistic_queue::traits: + \code + struct myTraits: public cds::intrusive::optimistic_queue::traits { + typedef cds::intrusive::optimistic_queue::stat<> stat; + typedef cds::atomicity::item_counter item_counter; + }; + typedef cds::intrusive::OptimisticQueue< cds::gc::HP, Foo, myTraits > myQueue; + + // Equivalent make_traits example: + typedef cds::intrusive::OptimisticQueue< cds::gc::HP, Foo, + typename cds::intrusive::optimistic_queue::make_traits< + cds::opt::stat< cds::intrusive::optimistic_queue::stat<> >, + cds::opt::item_counter< cds::atomicity::item_counter > + >::type + > myQueue; + \endcode + + Garbage collecting schema \p GC must be consistent with the optimistic_queue::node GC. + + \par About item disposing + The optimistic queue algo has a key feature: even if the queue is empty it contains one item that is "dummy" one from + the standpoint of the algo. See \p dequeue() function for explanation. + + \par Examples + \code + #include + #include + + namespace ci = cds::inrtusive; + typedef cds::gc::HP hp_gc; + + // Optimistic queue with Hazard Pointer garbage collector, base hook + item counter: + struct Foo: public ci::optimistic_queue::node< hp_gc > + { + // Your data + ... + }; + + typedef ci::OptimisticQueue< hp_gc, + Foo, + typename ci::optimistic_queue::make_traits< + ci::opt::hook< + ci::optimistic_queue::base_hook< ci::opt::gc< hp_gc > > + > + ,cds::opt::item_counter< cds::atomicity::item_counter > + >::type + > FooQueue; + + // Optimistic queue with Hazard Pointer garbage collector, member hook, no item counter: + struct Bar + { + // Your data + ... 
+ ci::optimistic_queue::node< hp_gc > hMember; + }; + + typedef ci::OptimisticQueue< hp_gc, + Bar, + typename ci::optimistic_queue::make_traits< + ci::opt::hook< + ci::optimistic_queue::member_hook< + offsetof(Bar, hMember) + ,ci::opt::gc< hp_gc > + > + > + >::type + > BarQueue; + \endcode + */ + template + class OptimisticQueue + { + public: + typedef GC gc; ///< Garbage collector + typedef T value_type; ///< type of value to be stored in the queue + typedef Traits traits; ///< Queue traits + + typedef typename traits::hook hook; ///< hook type + typedef typename hook::node_type node_type; ///< node type + typedef typename traits::disposer disposer; ///< disposer used + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits + typedef typename optimistic_queue::get_link_checker< node_type, traits::link_checker >::type link_checker; ///< link checker + typedef typename traits::back_off back_off; ///< back-off strategy + typedef typename traits::item_counter item_counter; ///< Item counting policy used + typedef typename traits::memory_model memory_model;///< Memory ordering. 
See cds::opt::memory_model option + typedef typename traits::stat stat; ///< Internal statistics policy used + + /// Rebind template arguments + template + struct rebind { + typedef OptimisticQueue< GC2, T2, Traits2 > other ; ///< Rebinding result + }; + + static constexpr const size_t c_nHazardPtrCount = 5; ///< Count of hazard pointer required for the algorithm + + protected: + //@cond + typedef typename node_type::atomic_node_ptr atomic_node_ptr; + + // GC and node_type::gc must be the same + static_assert((std::is_same::value), "GC and node_type::gc must be the same"); + //@endcond + + atomic_node_ptr m_pTail; ///< Pointer to tail node + //@cond + typename opt::details::apply_padding< atomic_node_ptr, traits::padding >::padding_type pad1_; + //@endcond + atomic_node_ptr m_pHead; ///< Pointer to head node + //@cond + typename opt::details::apply_padding< atomic_node_ptr, traits::padding >::padding_type pad2_; + //@endcond + node_type m_Dummy ; ///< dummy node + //@cond + typename opt::details::apply_padding< atomic_node_ptr, traits::padding >::padding_type pad3_; + //@endcond + item_counter m_ItemCounter ; ///< Item counter + stat m_Stat ; ///< Internal statistics + + protected: + //@cond + static void clear_links( node_type * pNode ) + { + pNode->m_pNext.store( nullptr, memory_model::memory_order_release ); + pNode->m_pPrev.store( nullptr, memory_model::memory_order_release ); + } + + struct dequeue_result { + typename gc::template GuardArray<3> guards; + + node_type * pHead; + node_type * pNext; + }; + + bool do_dequeue( dequeue_result& res ) + { + node_type * pTail; + node_type * pHead; + node_type * pFirstNodePrev; + back_off bkoff; + + while ( true ) { // Try till success or empty + pHead = res.guards.protect( 0, m_pHead, [](node_type * p) -> value_type * {return node_traits::to_value_ptr(p);}); + pTail = res.guards.protect( 1, m_pTail, [](node_type * p) -> value_type * {return node_traits::to_value_ptr(p);}); + assert( pHead != nullptr ); + pFirstNodePrev 
= res.guards.protect( 2, pHead->m_pPrev, [](node_type * p) -> value_type * {return node_traits::to_value_ptr(p);}); + + if ( pHead == m_pHead.load(memory_model::memory_order_acquire)) { + if ( pTail != pHead ) { + if ( pFirstNodePrev == nullptr + || pFirstNodePrev->m_pNext.load(memory_model::memory_order_acquire) != pHead ) + { + fix_list( pTail, pHead ); + continue; + } + if ( m_pHead.compare_exchange_weak( pHead, pFirstNodePrev, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { + // dequeue success + break; + } + } + else { + // the queue is empty + m_Stat.onEmptyDequeue(); + return false; + } + } + + m_Stat.onDequeueRace(); + bkoff(); + } + + --m_ItemCounter; + m_Stat.onDequeue(); + + res.pHead = pHead; + res.pNext = pFirstNodePrev; + return true; + } + + + /// Helper function for optimistic queue. Corrects \p prev pointer of queue's nodes if it is needed + void fix_list( node_type * pTail, node_type * pHead ) + { + // pTail and pHead are already guarded + + node_type * pCurNode; + node_type * pCurNodeNext; + + typename gc::template GuardArray<2> guards; + + pCurNode = pTail; + while ( pCurNode != pHead ) { // While not at head + pCurNodeNext = guards.protect(0, pCurNode->m_pNext, [](node_type * p) -> value_type * { return node_traits::to_value_ptr(p);}); + if ( pHead != m_pHead.load(memory_model::memory_order_acquire)) + break; + pCurNodeNext->m_pPrev.store( pCurNode, memory_model::memory_order_release ); + guards.assign( 1, node_traits::to_value_ptr( pCurNode = pCurNodeNext )); + } + + m_Stat.onFixList(); + } + + void dispose_result( dequeue_result& res ) + { + dispose_node( res.pHead ); + } + + void dispose_node( node_type * p ) + { + assert( p != nullptr ); + + if ( p != &m_Dummy ) { + struct internal_disposer + { + void operator ()( value_type * p ) + { + assert( p != nullptr ); + + OptimisticQueue::clear_links( node_traits::to_node_ptr( *p )); + disposer()(p); + } + }; + gc::template retire( node_traits::to_value_ptr(p)); + } + } + + 
//@endcond + + public: + /// Constructor creates empty queue + OptimisticQueue() + : m_pTail( &m_Dummy ) + , m_pHead( &m_Dummy ) + {} + + ~OptimisticQueue() + { + clear(); + node_type * pHead = m_pHead.load(memory_model::memory_order_relaxed); + CDS_DEBUG_ONLY( node_type * pTail = m_pTail.load(memory_model::memory_order_relaxed); ) + CDS_DEBUG_ONLY( assert( pHead == pTail ); ) + assert( pHead != nullptr ); + + m_pHead.store( nullptr, memory_model::memory_order_relaxed ); + m_pTail.store( nullptr, memory_model::memory_order_relaxed ); + + dispose_node( pHead ); + } + + /// @anchor cds_intrusive_OptimisticQueue_enqueue Enqueues \p data in lock-free manner. Always return \a true + bool enqueue( value_type& val ) + { + node_type * pNew = node_traits::to_node_ptr( val ); + link_checker::is_empty( pNew ); + + typename gc::template GuardArray<2> guards; + back_off bkoff; + + guards.assign( 1, &val ); + while( true ) { + node_type * pTail = guards.protect( 0, m_pTail, []( node_type * p ) -> value_type * { return node_traits::to_value_ptr( p ); } ); // Read the tail + pNew->m_pNext.store( pTail, memory_model::memory_order_relaxed ); + if ( m_pTail.compare_exchange_strong( pTail, pNew, memory_model::memory_order_release, atomics::memory_order_acquire )) { // Try to CAS the tail + pTail->m_pPrev.store( pNew, memory_model::memory_order_release ); // Success, write prev + ++m_ItemCounter; + m_Stat.onEnqueue(); + break; // Enqueue done! 
+ } + m_Stat.onEnqueueRace(); + bkoff(); + } + return true; + } + + /// Dequeues a value from the queue + /** @anchor cds_intrusive_OptimisticQueue_dequeue + If the queue is empty the function returns \p nullptr + + \par Warning + The queue algorithm has following feature: when \p dequeue is called, + the item returning is still queue's top, and previous top is disposed: + + \code + before dequeuing Dequeue after dequeuing + +------------------+ +------------------+ + Top ->| Item 1 | -> Dispose Item 1 | Item 2 | <- Top + +------------------+ +------------------+ + | Item 2 | -> Return Item 2 | ... | + +------------------+ + | ... | + \endcode + + \p %dequeue() function returns Item 2, that becomes new top of queue, and calls + the disposer for Item 1, that was queue's top on function entry. + Thus, you cannot manually delete item returned because it is still included in + the queue and it has valuable link field that must not be zeroed. + The item may be deleted only in disposer call. + */ + value_type * dequeue() + { + dequeue_result res; + if ( do_dequeue( res )) { + dispose_result( res ); + + return node_traits::to_value_ptr( *res.pNext ); + } + return nullptr; + } + + /// Synonym for \p enqueue() + bool push( value_type& val ) + { + return enqueue( val ); + } + + /// Synonym for \p dequeue() + value_type * pop() + { + return dequeue(); + } + + /// Checks if the queue is empty + bool empty() const + { + return m_pTail.load(memory_model::memory_order_relaxed) == m_pHead.load(memory_model::memory_order_relaxed); + } + + /// Clear the stack + /** + The function repeatedly calls \ref dequeue until it returns \p nullptr. + The disposer defined in template \p Traits is called for each item + that can be safely disposed. + */ + void clear() + { + value_type * pv; + while ( (pv = dequeue()) != nullptr ); + } + + /// Returns queue's item count + /** + The value returned depends on \p optimistic_queue::traits::item_counter. 
+ For \p atomicity::empty_item_counter, this function always returns 0. + + @note Even if you use real item counter and it returns 0, this fact is not mean that the queue + is empty. To check queue emptyness use \p empty() method. + */ + size_t size() const + { + return m_ItemCounter.value(); + } + + /// Returns refernce to internal statistics + const stat& statistics() const + { + return m_Stat; + } + }; + +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_OPTIMISTIC_QUEUE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/options.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/options.h new file mode 100644 index 0000000..2e7486c --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/options.h @@ -0,0 +1,189 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_OPTIONS_H +#define CDSLIB_INTRUSIVE_OPTIONS_H + +#include +#include + +namespace cds { namespace intrusive { + + /// Common options for intrusive containers + /** @ingroup cds_intrusive_helper + This namespace contains options for intrusive containers. + It imports all definitions from cds::opt namespace and introduces a lot + of options specific for intrusive approach. + */ + namespace opt { + using namespace cds::opt; + + //@cond + struct base_hook_tag; + struct member_hook_tag; + struct traits_hook_tag; + //@endcond + + /// Hook option + /** + Hook is a class that a user must add as a base class or as a member to make the user class compatible with intrusive containers. + \p Hook template parameter strongly depends on the type of intrusive container you use. + */ + template + struct hook { + //@cond + template struct pack: public Base + { + typedef Hook hook; + }; + //@endcond + }; + + /// Item disposer option setter + /** + The option specifies a functor that is used for dispose removed items. + The interface of \p Type functor is: + \code + struct myDisposer { + void operator ()( T * val ); + }; + \endcode + + Predefined types for \p Type: + - \p opt::v::empty_disposer - the disposer that does nothing + - \p opt::v::delete_disposer - the disposer that calls operator \p delete + + Usually, the disposer should be stateless default-constructible functor. + It is called by garbage collector in deferred mode. 
+ */ + template + struct disposer { + //@cond + template struct pack: public Base + { + typedef Type disposer; + }; + //@endcond + }; + + /// Values of \ref cds::intrusive::opt::link_checker option + enum link_check_type { + never_check_link, ///< no link checking performed + debug_check_link, ///< check only in debug build + always_check_link ///< check in debug and release build + }; + + /// Link checking + /** + The option specifies a type of link checking. + Possible values for \p Value are is one of \ref link_check_type enum: + - \ref never_check_link - no link checking performed + - \ref debug_check_link - check only in debug build + - \ref always_check_link - check in debug and release build (not yet implemented for release mode). + + When link checking is on, the container tests that the node's link fields + must be \p nullptr before inserting the item. If the link is not \p nullptr an assertion is generated + */ + template + struct link_checker { + //@cond + template struct pack: public Base + { + static const link_check_type link_checker = Value; + }; + //@endcond + }; + + /// Predefined option values + namespace v { + using namespace cds::opt::v; + + //@cond + /// No link checking + template + struct empty_link_checker + { + //@cond + typedef Node node_type; + + static void is_empty( const node_type * /*pNode*/ ) + {} + //@endcond + }; + //@endcond + + /// Empty item disposer + /** + The disposer does nothing. + This is one of possible values of opt::disposer option. + */ + struct empty_disposer + { + /// Empty dispose functor + template + void operator ()( T * ) + {} + }; + + /// Deletion item disposer + /** + Analogue of operator \p delete call. + The disposer that calls \p T destructor and deallocates the item via \p Alloc allocator. 
+ */ + template + struct delete_disposer + { + /// Dispose functor + template + void operator ()( T * p ) + { + cds::details::Allocator alloc; + alloc.Delete( p ); + } + }; + } // namespace v + + //@cond + // Lazy-list specific option (for split-list support) + template + struct boundary_node_type { + //@cond + template struct pack: public Base + { + typedef Type boundary_node_type; + }; + //@endcond + }; + //@endcond + } // namespace opt + +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_OPTIONS_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/segmented_queue.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/segmented_queue.h new file mode 100644 index 0000000..95d8e2e --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/segmented_queue.h @@ -0,0 +1,725 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_SEGMENTED_QUEUE_H +#define CDSLIB_INTRUSIVE_SEGMENTED_QUEUE_H + +#include +#include +#include +#include +#include +#include + +#include + +#if CDS_COMPILER == CDS_COMPILER_MSVC +# pragma warning( push ) +# pragma warning( disable: 4355 ) // warning C4355: 'this' : used in base member initializer list +#endif + +namespace cds { namespace intrusive { + + /// SegmentedQueue -related declarations + namespace segmented_queue { + + /// SegmentedQueue internal statistics. 
May be used for debugging or profiling + template + struct stat + { + typedef Counter counter_type; ///< Counter type + + counter_type m_nPush; ///< Push count + counter_type m_nPushPopulated; ///< Number of attempts to push to populated (non-empty) cell + counter_type m_nPushContended; ///< Number of failed CAS when pushing + counter_type m_nPop; ///< Pop count + counter_type m_nPopEmpty; ///< Number of dequeuing from empty queue + counter_type m_nPopContended; ///< Number of failed CAS when popping + + counter_type m_nCreateSegmentReq; ///< Number of request to create new segment + counter_type m_nDeleteSegmentReq; ///< Number to request to delete segment + counter_type m_nSegmentCreated; ///< Number of created segments + counter_type m_nSegmentDeleted; ///< Number of deleted segments + + //@cond + void onPush() { ++m_nPush; } + void onPushPopulated() { ++m_nPushPopulated; } + void onPushContended() { ++m_nPushContended; } + void onPop() { ++m_nPop; } + void onPopEmpty() { ++m_nPopEmpty; } + void onPopContended() { ++m_nPopContended; } + void onCreateSegmentReq() { ++m_nCreateSegmentReq; } + void onDeleteSegmentReq() { ++m_nDeleteSegmentReq; } + void onSegmentCreated() { ++m_nSegmentCreated; } + void onSegmentDeleted() { ++m_nSegmentDeleted; } + //@endcond + }; + + /// Dummy SegmentedQueue statistics, no overhead + struct empty_stat { + //@cond + void onPush() const {} + void onPushPopulated() const {} + void onPushContended() const {} + void onPop() const {} + void onPopEmpty() const {} + void onPopContended() const {} + void onCreateSegmentReq() const {} + void onDeleteSegmentReq() const {} + void onSegmentCreated() const {} + void onSegmentDeleted() const {} + //@endcond + }; + + /// SegmentedQueue default traits + struct traits { + /// Element disposer that is called when the item to be dequeued. 
Default is opt::v::empty_disposer (no disposer) + typedef opt::v::empty_disposer disposer; + + /// Item counter, default is atomicity::item_counter + /** + The item counting is an essential part of segmented queue algorithm. + The \p empty() member function is based on checking size() == 0. + Therefore, dummy item counter like atomicity::empty_item_counter is not the proper counter. + */ + typedef atomicity::item_counter item_counter; + + /// Internal statistics, possible predefined types are \ref stat, \ref empty_stat (the default) + typedef segmented_queue::empty_stat stat; + + /// Memory model, default is opt::v::relaxed_ordering. See cds::opt::memory_model for the full list of possible types + typedef opt::v::relaxed_ordering memory_model; + + /// Alignment of critical data, default is cache line alignment. See cds::opt::alignment option specification + enum { alignment = opt::cache_line_alignment }; + + /// Padding of segment data, default is no special padding + /** + The segment is just an array of atomic data pointers, + so, the high load leads to false sharing and performance degradation. + A padding of segment data can eliminate false sharing issue. + On the other hand, the padding leads to increase segment size. + */ + enum { padding = opt::no_special_padding }; + + /// Segment allocator. Default is \ref CDS_DEFAULT_ALLOCATOR + typedef CDS_DEFAULT_ALLOCATOR allocator; + + /// Lock type used to maintain an internal list of allocated segments + typedef cds::sync::spin lock_type; + + /// Random \ref cds::opt::permutation_generator "permutation generator" for sequence [0, quasi_factor) + typedef cds::opt::v::random2_permutation permutation_generator; + }; + + /// Metafunction converting option list to traits for SegmentedQueue + /** + The metafunction can be useful if a few fields in \p segmented_queue::traits should be changed. 
+ For example: + \code + typedef cds::intrusive::segmented_queue::make_traits< + cds::opt::item_counter< cds::atomicity::item_counter > + >::type my_segmented_queue_traits; + \endcode + This code creates \p %SegmentedQueue type traits with item counting feature, + all other \p %segmented_queue::traits members left unchanged. + + \p Options are: + - \p opt::disposer - the functor used to dispose removed items. + - \p opt::stat - internal statistics, possible type: \p segmented_queue::stat, \p segmented_queue::empty_stat (the default) + - \p opt::item_counter - item counting feature. Note that \p atomicity::empty_item_counetr is not suitable + for segmented queue. + - \p opt::memory_model - memory model, default is \p opt::v::relaxed_ordering. + See option description for the full list of possible models + - \p opt::alignment - the alignment for critical data, see option description for explanation + - \p opt::padding - the padding of segment data, default no special padding. + See \p traits::padding for explanation. + - \p opt::allocator - the allocator to be used for maintaining segments. + - \p opt::lock_type - a mutual exclusion lock type used to maintain internal list of allocated + segments. Default is \p cds::opt::Spin, \p std::mutex is also suitable. + - \p opt::permutation_generator - a random permutation generator for sequence [0, quasi_factor), + default is \p cds::opt::v::random2_permutation + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + ,Options... 
+ >::type type; +# endif + }; + } // namespace segmented_queue + + /// Segmented queue + /** @ingroup cds_intrusive_queue + + The queue is based on work + - [2010] Afek, Korland, Yanovsky "Quasi-Linearizability: relaxed consistency for improved concurrency" + + In this paper the authors offer a relaxed version of linearizability, so-called quasi-linearizability, + that preserves some of the intuition, provides a flexible way to control the level of relaxation + and supports th implementation of more concurrent and scalable data structure. + Intuitively, the linearizability requires each run to be equivalent in some sense to a serial run + of the algorithm. This equivalence to some serial run imposes strong synchronization requirements + that in many cases results in limited scalability and synchronization bottleneck. + + The general idea is that the queue maintains a linked list of segments, each segment is an array of + nodes in the size of the quasi factor, and each node has a deleted boolean marker, which states + if it has been dequeued. Each producer iterates over last segment in the linked list in some random + permutation order. Whet it finds an empty cell it performs a CAS operation attempting to enqueue its + new element. In case the entire segment has been scanned and no available cell is found (implying + that the segment is full), then it attempts to add a new segment to the list. + + The dequeue operation is similar: the consumer iterates over the first segment in the linked list + in some random permutation order. When it finds an item which has not yet been dequeued, it performs + CAS on its deleted marker in order to "delete" it, if succeeded this item is considered dequeued. + In case the entire segment was scanned and all the nodes have already been dequeued (implying that + the segment is empty), then it attempts to remove this segment from the linked list and starts + the same process on the next segment. 
If there is no next segment, the queue is considered empty. + + Based on the fact that most of the time threads do not add or remove segments, most of the work + is done in parallel on different cells in the segments. This ensures a controlled contention + depending on the segment size, which is quasi factor. + + The segmented queue is an unfair queue since it violates the strong FIFO order but no more than + quasi factor. This means that the consumer dequeues any item from the current first segment. + + Template parameters: + - \p GC - a garbage collector, possible types are cds::gc::HP, cds::gc::DHP + - \p T - the type of values stored in the queue + - \p Traits - queue type traits, default is \p segmented_queue::traits. + \p segmented_queue::make_traits metafunction can be used to construct the + type traits. + + The queue stores the pointers to enqueued items so no special node hooks are needed. + */ + template + class SegmentedQueue + { + public: + typedef GC gc; ///< Garbage collector + typedef T value_type; ///< type of the value stored in the queue + typedef Traits traits; ///< Queue traits + + typedef typename traits::disposer disposer ; ///< value disposer, called only in \p clear() when the element to be dequeued + typedef typename traits::allocator allocator; ///< Allocator maintaining the segments + typedef typename traits::memory_model memory_model; ///< Memory ordering. See cds::opt::memory_model option + typedef typename traits::item_counter item_counter; ///< Item counting policy, see cds::opt::item_counter option setter + typedef typename traits::stat stat; ///< Internal statistics policy + typedef typename traits::lock_type lock_type; ///< Type of mutex for maintaining an internal list of allocated segments. 
+ typedef typename traits::permutation_generator permutation_generator; ///< Random permutation generator for sequence [0, quasi-factor) + + static const size_t c_nHazardPtrCount = 2 ; ///< Count of hazard pointer required for the algorithm + + protected: + //@cond + // Segment cell. LSB is used as deleted mark + typedef cds::details::marked_ptr< value_type, 1 > regular_cell; + typedef atomics::atomic< regular_cell > atomic_cell; + typedef typename cds::opt::details::apply_padding< atomic_cell, traits::padding >::type cell; + + // Segment + struct segment: public boost::intrusive::slist_base_hook<> + { + cell * cells; // Cell array of size \ref m_nQuasiFactor + size_t version; // version tag (ABA prevention tag) + // cell array is placed here in one continuous memory block + + // Initializes the segment + explicit segment( size_t nCellCount ) + // MSVC warning C4355: 'this': used in base member initializer list + : cells( reinterpret_cast< cell *>( this + 1 )) + , version( 0 ) + { + init( nCellCount ); + } + + segment() = delete; + + void init( size_t nCellCount ) + { + cell * pLastCell = cells + nCellCount; + for ( cell* pCell = cells; pCell < pLastCell; ++pCell ) + pCell->data.store( regular_cell(), atomics::memory_order_relaxed ); + atomics::atomic_thread_fence( memory_model::memory_order_release ); + } + }; + + typedef typename opt::details::alignment_setter< atomics::atomic, traits::alignment >::type aligned_segment_ptr; + //@endcond + + protected: + //@cond + class segment_list + { + typedef boost::intrusive::slist< segment, boost::intrusive::cache_last< true > > list_impl; + typedef std::unique_lock< lock_type > scoped_lock; + + aligned_segment_ptr m_pHead; + aligned_segment_ptr m_pTail; + + list_impl m_List; + mutable lock_type m_Lock; + size_t const m_nQuasiFactor; + stat& m_Stat; + + private: + struct segment_disposer + { + void operator()( segment * pSegment ) + { + assert( pSegment != nullptr ); + free_segment( pSegment ); + } + }; + + struct 
gc_segment_disposer + { + void operator()( segment * pSegment ) + { + assert( pSegment != nullptr ); + retire_segment( pSegment ); + } + }; + + public: + segment_list( size_t nQuasiFactor, stat& st ) + : m_pHead( nullptr ) + , m_pTail( nullptr ) + , m_nQuasiFactor( nQuasiFactor ) + , m_Stat( st ) + { + assert( cds::beans::is_power2( nQuasiFactor )); + } + + ~segment_list() + { + m_List.clear_and_dispose( gc_segment_disposer()); + } + + segment * head( typename gc::Guard& guard ) + { + return guard.protect( m_pHead ); + } + + segment * tail( typename gc::Guard& guard ) + { + return guard.protect( m_pTail ); + } + +# ifdef _DEBUG + bool populated( segment const& s ) const + { + // The lock should be held + cell const * pLastCell = s.cells + quasi_factor(); + for ( cell const * pCell = s.cells; pCell < pLastCell; ++pCell ) { + if ( !pCell->data.load( memory_model::memory_order_relaxed ).all()) + return false; + } + return true; + } + bool exhausted( segment const& s ) const + { + // The lock should be held + cell const * pLastCell = s.cells + quasi_factor(); + for ( cell const * pCell = s.cells; pCell < pLastCell; ++pCell ) { + if ( !pCell->data.load( memory_model::memory_order_relaxed ).bits()) + return false; + } + return true; + } +# endif + + segment * create_tail( segment * pTail, typename gc::Guard& guard ) + { + // pTail is guarded by GC + + m_Stat.onCreateSegmentReq(); + + scoped_lock l( m_Lock ); + + if ( !m_List.empty() && ( pTail != &m_List.back() || get_version(pTail) != m_List.back().version )) { + m_pTail.store( &m_List.back(), memory_model::memory_order_relaxed ); + + return guard.assign( &m_List.back()); + } + +# ifdef _DEBUG + assert( m_List.empty() || populated( m_List.back())); +# endif + + segment * pNew = allocate_segment(); + m_Stat.onSegmentCreated(); + + if ( m_List.empty()) + m_pHead.store( pNew, memory_model::memory_order_release ); + m_List.push_back( *pNew ); + m_pTail.store( pNew, memory_model::memory_order_release ); + return 
guard.assign( pNew ); + } + + segment * remove_head( segment * pHead, typename gc::Guard& guard ) + { + // pHead is guarded by GC + m_Stat.onDeleteSegmentReq(); + + segment * pRet; + { + scoped_lock l( m_Lock ); + + if ( m_List.empty()) { + m_pTail.store( nullptr, memory_model::memory_order_relaxed ); + m_pHead.store( nullptr, memory_model::memory_order_relaxed ); + return guard.assign( nullptr ); + } + + if ( pHead != &m_List.front() || get_version(pHead) != m_List.front().version ) { + m_pHead.store( &m_List.front(), memory_model::memory_order_relaxed ); + return guard.assign( &m_List.front()); + } + +# ifdef _DEBUG + assert( exhausted( m_List.front())); +# endif + + m_List.pop_front(); + if ( m_List.empty()) { + pRet = guard.assign( nullptr ); + m_pTail.store( nullptr, memory_model::memory_order_relaxed ); + } + else + pRet = guard.assign( &m_List.front()); + m_pHead.store( pRet, memory_model::memory_order_release ); + } + + retire_segment( pHead ); + m_Stat.onSegmentDeleted(); + + return pRet; + } + + size_t quasi_factor() const + { + return m_nQuasiFactor; + } + + private: + typedef cds::details::Allocator< segment, allocator > segment_allocator; + + static size_t get_version( segment * pSegment ) + { + return pSegment ? pSegment->version : 0; + } + + segment * allocate_segment() + { + return segment_allocator().NewBlock( sizeof(segment) + sizeof(cell) * m_nQuasiFactor, quasi_factor()); + } + + static void free_segment( segment * pSegment ) + { + segment_allocator().Delete( pSegment ); + } + + static void retire_segment( segment * pSegment ) + { + gc::template retire( pSegment ); + } + }; + //@endcond + + protected: + segment_list m_SegmentList; ///< List of segments + + item_counter m_ItemCounter; ///< Item counter + stat m_Stat; ///< Internal statistics + + public: + /// Initializes the empty queue + SegmentedQueue( + size_t nQuasiFactor ///< Quasi factor. If it is not a power of 2 it is rounded up to nearest power of 2. Minimum is 2. 
+ ) + : m_SegmentList( cds::beans::ceil2(nQuasiFactor), m_Stat ) + { + static_assert( (!std::is_same< item_counter, cds::atomicity::empty_item_counter >::value), + "cds::atomicity::empty_item_counter is not supported for SegmentedQueue" + ); + assert( m_SegmentList.quasi_factor() > 1 ); + } + + /// Clears the queue and deletes all internal data + ~SegmentedQueue() + { + clear(); + } + + /// Inserts a new element at last segment of the queue + bool enqueue( value_type& val ) + { + // LSB is used as a flag in marked pointer + assert( (reinterpret_cast( &val ) & 1) == 0 ); + + typename gc::Guard segmentGuard; + segment * pTailSegment = m_SegmentList.tail( segmentGuard ); + if ( !pTailSegment ) { + // no segments, create the new one + pTailSegment = m_SegmentList.create_tail( pTailSegment, segmentGuard ); + assert( pTailSegment ); + } + + permutation_generator gen( quasi_factor()); + + // First, increment item counter. + // We sure that the item will be enqueued + // but if we increment the counter after inserting we can get a negative counter value + // if dequeuing occurs before incrementing (enqueue/dequeue race) + ++m_ItemCounter; + + while ( true ) { + CDS_DEBUG_ONLY( size_t nLoopCount = 0); + do { + typename permutation_generator::integer_type i = gen; + CDS_DEBUG_ONLY( ++nLoopCount ); + if ( pTailSegment->cells[i].data.load(memory_model::memory_order_relaxed).all()) { + // Cell is not empty, go next + m_Stat.onPushPopulated(); + } + else { + // Empty cell found, try to enqueue here + regular_cell nullCell; + if ( pTailSegment->cells[i].data.compare_exchange_strong( nullCell, regular_cell( &val ), + memory_model::memory_order_release, atomics::memory_order_relaxed )) + { + // Ok to push item + m_Stat.onPush(); + return true; + } + assert( nullCell.ptr()); + m_Stat.onPushContended(); + } + } while ( gen.next()); + + assert( nLoopCount == quasi_factor()); + + // No available position, create a new segment + pTailSegment = m_SegmentList.create_tail( pTailSegment, 
segmentGuard ); + + // Get new permutation + gen.reset(); + } + } + + /// Removes an element from first segment of the queue and returns it + /** + If the queue is empty the function returns \p nullptr. + + The disposer specified in \p Traits template argument is not called for returned item. + You should manually dispose the item: + \code + struct my_disposer { + void operator()( foo * p ) + { + delete p; + } + }; + cds::intrusive::SegmentedQueue< cds::gc::HP, foo > theQueue; + // ... + + // Dequeue an item + foo * pItem = theQueue.dequeue(); + // deal with pItem + //... + + // pItem is not longer needed and can be deleted + // Do it via gc::HP::retire + cds::gc::HP::template retire< my_disposer >( pItem ); + \endcode + */ + value_type * dequeue() + { + typename gc::Guard itemGuard; + if ( do_dequeue( itemGuard )) { + value_type * pVal = itemGuard.template get(); + assert( pVal ); + return pVal; + } + return nullptr; + + } + + /// Synonym for \p enqueue(value_type&) member function + bool push( value_type& val ) + { + return enqueue( val ); + } + + /// Synonym for \p dequeue() member function + value_type * pop() + { + return dequeue(); + } + + /// Checks if the queue is empty + /** + The original segmented queue algorithm does not allow to check emptiness accurately + because \p empty() is unlinearizable. + This function tests queue's emptiness checking size() == 0, + so, the item counting feature is an essential part of queue's algorithm. + */ + bool empty() const + { + return size() == 0; + } + + /// Clear the queue + /** + The function repeatedly calls \p dequeue() until it returns \p nullptr. + The disposer specified in \p Traits template argument is called for each removed item. + */ + void clear() + { + clear_with( disposer()); + } + + /// Clear the queue + /** + The function repeatedly calls \p dequeue() until it returns \p nullptr. + \p Disposer is called for each removed item. 
+ */ + template + void clear_with( Disposer ) + { + typename gc::Guard itemGuard; + while ( do_dequeue( itemGuard )) { + assert( itemGuard.template get()); + gc::template retire( itemGuard.template get()); + itemGuard.clear(); + } + } + + /// Returns queue's item count + size_t size() const + { + return m_ItemCounter.value(); + } + + /// Returns reference to internal statistics + /** + The type of internal statistics is specified by \p Traits template argument. + */ + const stat& statistics() const + { + return m_Stat; + } + + /// Returns quasi factor, a power-of-two number + size_t quasi_factor() const + { + return m_SegmentList.quasi_factor(); + } + + protected: + //@cond + bool do_dequeue( typename gc::Guard& itemGuard ) + { + typename gc::Guard segmentGuard; + segment * pHeadSegment = m_SegmentList.head( segmentGuard ); + + permutation_generator gen( quasi_factor()); + while ( true ) { + if ( !pHeadSegment ) { + // Queue is empty + m_Stat.onPopEmpty(); + return false; + } + + bool bHadNullValue = false; + regular_cell item; + CDS_DEBUG_ONLY( size_t nLoopCount = 0 ); + do { + typename permutation_generator::integer_type i = gen; + CDS_DEBUG_ONLY( ++nLoopCount ); + + // Guard the item + // In segmented queue the cell cannot be reused + // So no loop is needed here to protect the cell + item = pHeadSegment->cells[i].data.load( memory_model::memory_order_relaxed ); + itemGuard.assign( item.ptr()); + + // Check if this cell is empty, which means an element + // can be enqueued to this cell in the future + if ( !item.ptr()) + bHadNullValue = true; + else { + // If the item is not deleted yet + if ( !item.bits()) { + // Try to mark the cell as deleted + if ( pHeadSegment->cells[i].data.compare_exchange_strong( item, item | 1, + memory_model::memory_order_acquire, atomics::memory_order_relaxed )) + { + --m_ItemCounter; + m_Stat.onPop(); + + return true; + } + assert( item.bits()); + m_Stat.onPopContended(); + } + } + } while ( gen.next()); + + assert( nLoopCount == 
quasi_factor()); + + // scanning the entire segment without finding a candidate to dequeue + // If there was an empty cell, the queue is considered empty + if ( bHadNullValue ) { + m_Stat.onPopEmpty(); + return false; + } + + // All nodes have been dequeued, we can safely remove the first segment + pHeadSegment = m_SegmentList.remove_head( pHeadSegment, segmentGuard ); + + // Get new permutation + gen.reset(); + } + } + //@endcond + }; +}} // namespace cds::intrusive + +#if CDS_COMPILER == CDS_COMPILER_MSVC +# pragma warning( pop ) +#endif + +#endif // #ifndef CDSLIB_INTRUSIVE_SEGMENTED_QUEUE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/skip_list_dhp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/skip_list_dhp.h new file mode 100644 index 0000000..b90eeea --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/skip_list_dhp.h @@ -0,0 +1,37 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_SKIP_LIST_DHP_H +#define CDSLIB_INTRUSIVE_SKIP_LIST_DHP_H + +#include +#include + +#endif // CDSLIB_INTRUSIVE_SKIP_LIST_DHP_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/skip_list_hp.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/skip_list_hp.h new file mode 100644 index 0000000..42b60ca --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/skip_list_hp.h @@ -0,0 +1,37 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_SKIP_LIST_HP_H +#define CDSLIB_INTRUSIVE_SKIP_LIST_HP_H + +#include +#include + +#endif diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/skip_list_nogc.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/skip_list_nogc.h new file mode 100644 index 0000000..6143f08 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/skip_list_nogc.h @@ -0,0 +1,994 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_SKIP_LIST_NOGC_H +#define CDSLIB_INTRUSIVE_SKIP_LIST_NOGC_H + +#include +#include +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + //@cond + namespace skip_list { + template + class node< cds::gc::nogc, Tag > + { + public: + typedef cds::gc::nogc gc; ///< Garbage collector + typedef Tag tag; ///< tag + + typedef atomics::atomic atomic_ptr; + typedef atomic_ptr tower_item_type; + + protected: + atomic_ptr m_pNext; ///< Next item in bottom-list (list at level 0) + unsigned int m_nHeight; ///< Node height (size of m_arrNext array). For node at level 0 the height is 1. + atomic_ptr * m_arrNext; ///< Array of next items for levels 1 .. m_nHeight - 1. 
For node at level 0 \p m_arrNext is \p nullptr + + public: + /// Constructs a node of height 1 (a bottom-list node) + node() + : m_pNext( nullptr ) + , m_nHeight(1) + , m_arrNext( nullptr ) + {} + + /// Constructs a node of height \p nHeight + void make_tower( unsigned int nHeight, atomic_ptr * nextTower ) + { + assert( nHeight > 0 ); + assert( (nHeight == 1 && nextTower == nullptr) // bottom-list node + || (nHeight > 1 && nextTower != nullptr) // node at level of more than 0 + ); + + m_arrNext = nextTower; + m_nHeight = nHeight; + } + + atomic_ptr * release_tower() + { + atomic_ptr * pTower = m_arrNext; + m_arrNext = nullptr; + m_nHeight = 1; + return pTower; + } + + atomic_ptr * get_tower() const + { + return m_arrNext; + } + + /// Access to element of next pointer array + atomic_ptr& next( unsigned int nLevel ) + { + assert( nLevel < height()); + assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr)); + + return nLevel ? m_arrNext[ nLevel - 1] : m_pNext; + } + + /// Access to element of next pointer array (const version) + atomic_ptr const& next( unsigned int nLevel ) const + { + assert( nLevel < height()); + assert( nLevel == 0 || nLevel > 0 && m_arrNext != nullptr ); + + return nLevel ? 
m_arrNext[ nLevel - 1] : m_pNext; + } + + /// Access to element of next pointer array (same as \ref next function) + atomic_ptr& operator[]( unsigned int nLevel ) + { + return next( nLevel ); + } + + /// Access to element of next pointer array (same as \ref next function) + atomic_ptr const& operator[]( unsigned int nLevel ) const + { + return next( nLevel ); + } + + /// Height of the node + unsigned int height() const + { + return m_nHeight; + } + + /// Clears internal links + void clear() + { + assert( m_arrNext == nullptr ); + m_pNext.store( nullptr, atomics::memory_order_release ); + } + + bool is_cleared() const + { + return m_pNext.load( atomics::memory_order_relaxed ) == nullptr + && m_arrNext == nullptr + && m_nHeight <= 1 +; + } + }; + } // namespace skip_list + + namespace skip_list { namespace details { + + template + class iterator< cds::gc::nogc, NodeTraits, BackOff, IsConst> + { + public: + typedef cds::gc::nogc gc; + typedef NodeTraits node_traits; + typedef BackOff back_off; + typedef typename node_traits::node_type node_type; + typedef typename node_traits::value_type value_type; + static constexpr bool const c_isConst = IsConst; + + typedef typename std::conditional< c_isConst, value_type const &, value_type &>::type value_ref; + friend class iterator< gc, node_traits, back_off, !c_isConst >; + + protected: + typedef typename node_type::atomic_ptr atomic_ptr; + node_type * m_pNode; + + public: // for internal use only!!! 
+ iterator( node_type& refHead ) + : m_pNode( refHead[0].load( atomics::memory_order_relaxed )) + {} + + static iterator from_node( node_type * pNode ) + { + iterator it; + it.m_pNode = pNode; + return it; + } + + public: + iterator() + : m_pNode( nullptr ) + {} + + iterator( iterator const& s) + : m_pNode( s.m_pNode ) + {} + + value_type * operator ->() const + { + assert( m_pNode != nullptr ); + assert( node_traits::to_value_ptr( m_pNode ) != nullptr ); + + return node_traits::to_value_ptr( m_pNode ); + } + + value_ref operator *() const + { + assert( m_pNode != nullptr ); + assert( node_traits::to_value_ptr( m_pNode ) != nullptr ); + + return *node_traits::to_value_ptr( m_pNode ); + } + + /// Pre-increment + iterator& operator ++() + { + if ( m_pNode ) + m_pNode = m_pNode->next(0).load( atomics::memory_order_relaxed ); + return *this; + } + + iterator& operator =(const iterator& src) + { + m_pNode = src.m_pNode; + return *this; + } + + template + bool operator ==(iterator const& i ) const + { + return m_pNode == i.m_pNode; + } + template + bool operator !=(iterator const& i ) const + { + return !( *this == i ); + } + }; + }} // namespace skip_list::details + //@endcond + + /// Lock-free skip-list set (template specialization for gc::nogc) + /** @ingroup cds_intrusive_map + @anchor cds_intrusive_SkipListSet_nogc + + This specialization is so-called append-only when no item + reclamation may be performed. The class does not support deleting of list item. + + See \ref cds_intrusive_SkipListSet_hp "SkipListSet" for description of skip-list. + + Template arguments : + - \p T - type to be stored in the set. The type must be based on \p skip_list::node (for \p skip_list::base_hook) + or it must have a member of type \p skip_list::node (for \p skip_list::member_hook). + - \p Traits - type traits, default is \p skip_list::traits. 
+ It is possible to declare option-based list with \p cds::intrusive::skip_list::make_traits metafunction + istead of \p Traits template argument. + + Iterators + + The class supports a forward iterator (\ref iterator and \ref const_iterator). + The iteration is ordered. + + The iterator class supports the following minimalistic interface: + \code + struct iterator { + // Default ctor + iterator(); + + // Copy ctor + iterator( iterator const& s); + + value_type * operator ->() const; + value_type& operator *() const; + + // Pre-increment + iterator& operator ++(); + + // Copy assignment + iterator& operator = (const iterator& src); + + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced. + + How to use + + You should incorporate \p skip_list::node into your struct \p T and provide + appropriate \p skip_list::traits::hook in your \p Traits template parameters. Usually, for \p Traits you + define a struct based on \p skip_list::traits. + + Example for base hook: + \code + #include + + // Data stored in skip list + struct my_data: public cds::intrusive::skip_list::node< cds::gc::nogc > + { + // key field + std::string strKey; + + // other data + // ... 
+ }; + + // my_data compare functor + struct my_data_cmp { + int operator()( const my_data& d1, const my_data& d2 ) + { + return d1.strKey.compare( d2.strKey ); + } + + int operator()( const my_data& d, const std::string& s ) + { + return d.strKey.compare(s); + } + + int operator()( const std::string& s, const my_data& d ) + { + return s.compare( d.strKey ); + } + }; + + + // Declare traits + struct my_traits: public cds::intrusive::skip_list::traits + { + typedef cds::intrusive::skip_list::base_hook< cds::opt::gc< cds::gc::nogc > > hook; + typedef my_data_cmp compare; + }; + + // Declare skip-list set type + typedef cds::intrusive::SkipListSet< cds::gc::nogc, my_data, my_traits > traits_based_set; + \endcode + + Equivalent option-based code: + \code + // GC-related specialization + #include + + struct my_data { + // see above + }; + struct compare { + // see above + }; + + // Declare option-based skip-list set + typedef cds::intrusive::SkipListSet< cds::gc::nogc + ,my_data + , typename cds::intrusive::skip_list::make_traits< + cds::intrusive::opt::hook< cds::intrusive::skip_list::base_hook< cds::opt::gc< cds::gc::nogc > > > + ,cds::intrusive::opt::compare< my_data_cmp > + >::type + > option_based_set; + \endcode + */ + template < + typename T +#ifdef CDS_DOXYGEN_INVOKED + ,typename Traits = skip_list::traits +#else + ,typename Traits +#endif + > + class SkipListSet< cds::gc::nogc, T, Traits > + { + public: + typedef cds::gc::nogc gc; ///< No garbage collector is used + typedef T value_type; ///< type of value stored in the skip-list + typedef Traits traits; ///< Traits template parameter + + typedef typename traits::hook hook; ///< hook type + typedef typename hook::node_type node_type; ///< node type + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator; ///< key comparison functor based on \p Traits::compare and \p Traits::less +# else + typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; +# endif + 
typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits + + typedef typename traits::item_counter item_counter; ///< Item counting policy + typedef typename traits::memory_model memory_model; ///< Memory ordering. See \p cds::opt::memory_model option + typedef typename traits::random_level_generator random_level_generator ; ///< random level generator + typedef typename traits::allocator allocator_type; ///< allocator for maintaining array of next pointers of the node + typedef typename traits::back_off back_off; ///< Back-off strategy + typedef typename traits::stat stat; ///< internal statistics type + typedef typename traits::disposer disposer; ///< disposer + + /// Max node height. The actual node height should be in range [0 .. c_nMaxHeight) + /** + The max height is specified by \ref skip_list::random_level_generator "random level generator" constant \p m_nUpperBound + but it should be no more than 32 (\p skip_list::c_nHeightLimit). + */ + static unsigned int const c_nMaxHeight = std::conditional< + (random_level_generator::c_nUpperBound <= skip_list::c_nHeightLimit), + std::integral_constant< unsigned int, random_level_generator::c_nUpperBound >, + std::integral_constant< unsigned int, skip_list::c_nHeightLimit > + >::type::value; + + //@cond + static unsigned int const c_nMinHeight = 3; + //@endcond + + protected: + typedef typename node_type::atomic_ptr atomic_node_ptr; ///< Atomic node pointer + + protected: + //@cond + typedef skip_list::details::intrusive_node_builder< node_type, atomic_node_ptr, allocator_type > intrusive_node_builder; + + typedef typename std::conditional< + std::is_same< typename traits::internal_node_builder, cds::opt::none >::value + ,intrusive_node_builder + ,typename traits::internal_node_builder + >::type node_builder; + + typedef std::unique_ptr< node_type, typename node_builder::node_disposer > scoped_node_ptr; + + struct position { + node_type * pPrev[ c_nMaxHeight ]; + node_type * 
pSucc[ c_nMaxHeight ]; + + node_type * pCur; + }; + + class head_node: public node_type + { + typename node_type::atomic_ptr m_Tower[c_nMaxHeight]; + + public: + head_node( unsigned int nHeight ) + { + for ( size_t i = 0; i < sizeof(m_Tower) / sizeof(m_Tower[0]); ++i ) + m_Tower[i].store( nullptr, atomics::memory_order_relaxed ); + + node_type::make_tower( nHeight, m_Tower ); + } + + node_type * head() const + { + return const_cast( static_cast(this)); + } + + void clear() + { + for (unsigned int i = 0; i < sizeof(m_Tower) / sizeof(m_Tower[0]); ++i ) + m_Tower[i].store( nullptr, atomics::memory_order_relaxed ); + node_type::m_pNext.store( nullptr, atomics::memory_order_relaxed ); + } + }; + //@endcond + + protected: + head_node m_Head; ///< head tower (max height) + + random_level_generator m_RandomLevelGen; ///< random level generator instance + atomics::atomic m_nHeight; ///< estimated high level + item_counter m_ItemCounter; ///< item counter + mutable stat m_Stat; ///< internal statistics + + protected: + //@cond + unsigned int random_level() + { + // Random generator produces a number from range [0..31] + // We need a number from range [1..32] + return m_RandomLevelGen() + 1; + } + + template + node_type * build_node( Q v ) + { + return node_builder::make_tower( v, m_RandomLevelGen ); + } + + static void dispose_node( node_type * pNode ) + { + assert( pNode != nullptr ); + typename node_builder::node_disposer()( pNode ); + disposer()( node_traits::to_value_ptr( pNode )); + } + + template + bool find_position( Q const& val, position& pos, Compare cmp, bool bStopIfFound, bool bStrictSearch ) const + { + node_type * pPred; + node_type * pSucc; + node_type * pCur = nullptr; + + int nCmp = 1; + + unsigned int nHeight = c_nMaxHeight; + retry: + if ( !bStrictSearch ) + nHeight = m_nHeight.load( memory_model::memory_order_relaxed ); + pPred = m_Head.head(); + + for ( int nLevel = (int) nHeight - 1; nLevel >= 0; --nLevel ) { + while ( true ) { + pCur = pPred->next( 
nLevel ).load( memory_model::memory_order_relaxed ); + + if ( !pCur ) { + // end of the list at level nLevel - goto next level + break; + } + + pSucc = pCur->next( nLevel ).load( memory_model::memory_order_relaxed ); + + if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ) != pCur + || pCur->next( nLevel ).load( memory_model::memory_order_acquire ) != pSucc ) + { + goto retry; + } + + nCmp = cmp( *node_traits::to_value_ptr( pCur ), val ); + if ( nCmp < 0 ) + pPred = pCur; + else if ( nCmp == 0 && bStopIfFound ) + goto found; + else + break; + } + + pos.pPrev[ nLevel ] = pPred; + pos.pSucc[ nLevel ] = pCur; + } + + if ( nCmp != 0 ) + return false; + + found: + pos.pCur = pCur; + return pCur && nCmp == 0; + } + + template + bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f ) + { + unsigned int nHeight = pNode->height(); + + for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) + pNode->next( nLevel ).store( nullptr, memory_model::memory_order_relaxed ); + + { + node_type * p = pos.pSucc[0]; + pNode->next( 0 ).store( pos.pSucc[ 0 ], memory_model::memory_order_release ); + if ( !pos.pPrev[0]->next(0).compare_exchange_strong( p, pNode, memory_model::memory_order_release, memory_model::memory_order_relaxed )) { + return false; + } + f( val ); + } + + for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) { + node_type * p = nullptr; + while ( true ) { + node_type * q = pos.pSucc[ nLevel ]; + + if ( pNode->next( nLevel ).compare_exchange_strong( p, q, memory_model::memory_order_release, memory_model::memory_order_relaxed )) { + p = q; + if ( pos.pPrev[nLevel]->next(nLevel).compare_exchange_strong( q, pNode, memory_model::memory_order_release, memory_model::memory_order_relaxed )) + break; + } + + // Renew insert position + find_position( val, pos, key_comparator(), false, true ); + } + } + return true; + } + + template + node_type * find_with_( Q& val, Compare cmp, Func f ) const + { + position pos; + if ( 
find_position( val, pos, cmp, true, false )) { + assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 ); + + f( *node_traits::to_value_ptr( pos.pCur ), val ); + + m_Stat.onFindFastSuccess(); + return pos.pCur; + } + else { + m_Stat.onFindFastFailed(); + return nullptr; + } + } + + void increase_height( unsigned int nHeight ) + { + unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed ); + while ( nCur < nHeight && !m_nHeight.compare_exchange_weak( nCur, nHeight, memory_model::memory_order_acquire, atomics::memory_order_relaxed )); + } + //@endcond + + public: + /// Default constructor + /** + The constructor checks whether the count of guards is enough + for skip-list and may raise an exception if not. + */ + SkipListSet() + : m_Head( c_nMaxHeight ) + , m_nHeight( c_nMinHeight ) + { + static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc must be the same type" ); + + // Barrier for head node + atomics::atomic_thread_fence( memory_model::memory_order_release ); + } + + /// Clears and destructs the skip-list + ~SkipListSet() + { + clear(); + } + + public: + ///@name Forward iterators + //@{ + /// Forward iterator + /** + The forward iterator for a split-list has some features: + - it has no post-increment operator + - it depends on iterator of underlying \p OrderedList + */ + typedef skip_list::details::iterator< gc, node_traits, back_off, false > iterator; + + /// Const iterator type + typedef skip_list::details::iterator< gc, node_traits, back_off, true > const_iterator; + + /// Returns a forward iterator addressing the first element in a set + iterator begin() + { + return iterator( *m_Head.head()); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator begin() const + { + return const_iterator( *m_Head.head()); + } + /// Returns a forward const iterator addressing the first element in a set + const_iterator cbegin() const + { + return const_iterator( 
*m_Head.head()); + } + + /// Returns a forward iterator that addresses the location succeeding the last element in a set. + iterator end() + { + return iterator(); + } + + /// Returns a forward const iterator that addresses the location succeeding the last element in a set. + const_iterator end() const + { + return const_iterator(); + } + /// Returns a forward const iterator that addresses the location succeeding the last element in a set. + const_iterator cend() const + { + return const_iterator(); + } + //@} + + protected: + //@cond + iterator nonconst_end() const + { + return iterator(); + } + //@endcond + + public: + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. + + Returns \p true if \p val is placed into the set, \p false otherwise. + */ + bool insert( value_type& val ) + { + node_type * pNode = node_traits::to_node_ptr( val ); + scoped_node_ptr scp( pNode ); + unsigned int nHeight = pNode->height(); + bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr; + bool bTowerMade = false; + + position pos; + while ( true ) + { + bool bFound = find_position( val, pos, key_comparator(), true, true ); + if ( bFound ) { + // scoped_node_ptr deletes the node tower if we create it + if ( !bTowerMade ) + scp.release(); + + m_Stat.onInsertFailed(); + return false; + } + + if ( !bTowerOk ) { + build_node( pNode ); + nHeight = pNode->height(); + bTowerMade = + bTowerOk = true; + } + + if ( !insert_at_position( val, pNode, pos, []( value_type& ) {} )) { + m_Stat.onInsertRetry(); + continue; + } + + increase_height( nHeight ); + ++m_ItemCounter; + m_Stat.onAddNode( nHeight ); + m_Stat.onInsertSuccess(); + scp.release(); + return true; + } + } + + /// Updates the node + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val is not found in the set, then \p val is inserted into the set + iff \p bInsert is \p true. 
+ Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + void func( bool bNew, value_type& item, value_type& val ); + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p %update() function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refer to the same thing. + + The functor can change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if new item has been added or \p false if the item with \p key + already is in the set. + + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + std::pair update( value_type& val, Func func, bool bInsert = true ) + { + node_type * pNode = node_traits::to_node_ptr( val ); + scoped_node_ptr scp( pNode ); + unsigned int nHeight = pNode->height(); + bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr; + bool bTowerMade = false; + + position pos; + while ( true ) + { + bool bFound = find_position( val, pos, key_comparator(), true, true ); + if ( bFound ) { + // scoped_node_ptr deletes the node tower if we create it before + if ( !bTowerMade ) + scp.release(); + + func( false, *node_traits::to_value_ptr(pos.pCur), val ); + m_Stat.onUpdateExist(); + return std::make_pair( true, false ); + } + + if ( !bInsert ) { + scp.release(); + return std::make_pair( false, false ); + } + + if ( !bTowerOk ) { + build_node( pNode ); + nHeight = pNode->height(); + bTowerMade = + bTowerOk = true; + } + + if ( !insert_at_position( val, pNode, pos, [&func]( value_type& item ) { func( true, item, item ); })) { + m_Stat.onInsertRetry(); + continue; + } + + increase_height( nHeight ); + 
++m_ItemCounter; + scp.release(); + m_Stat.onAddNode( nHeight ); + m_Stat.onUpdateNew(); + return std::make_pair( true, true ); + } + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( value_type& val, Func func ) + { + return update( val, func, true ); + } + //@endcond + + /// Finds \p key + /** \anchor cds_intrusive_SkipListSet_nogc_find_func + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. + + The functor can change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( Q& key, Func f ) const + { + return find_with_( key, key_comparator(), f ) != nullptr; + } + //@cond + template + bool find( Q const& key, Func f ) const + { + return find_with_( key, key_comparator(), f ) != nullptr; + } + //@endcond + + /// Finds the key \p key using \p pred predicate for comparing + /** + The function is an analog of \ref cds_intrusive_SkipListSet_nogc_find_func "find(Q&, Func)" + but \p pred predicate is used for key compare. + \p Less has the interface like \p std::less. 
+ \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q& key, Less pred, Func f ) const + { + CDS_UNUSED( pred ); + return find_with_( key, cds::opt::details::make_comparator_from_less(), f ) != nullptr; + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) const + { + CDS_UNUSED( pred ); + return find_with_( key, cds::opt::details::make_comparator_from_less(), f ) != nullptr; + } + //@endcond + + /// Checks whether the set contains \p key + /** + The function searches the item with key equal to \p key + and returns pointer to item found or \p nullptr. + */ + template + value_type * contains( Q const& key ) const + { + node_type * pNode = find_with_( key, key_comparator(), [](value_type& , Q const& ) {} ); + if ( pNode ) + return node_traits::to_value_ptr( pNode ); + return nullptr; + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + value_type * find( Q const& key ) const + { + return contains( key ); + } + //@endcond + + /// Checks whether the set contains \p key using \p pred predicate for searching + /** + The function is similar to contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. 
+ */ + template + value_type * contains( Q const& key, Less pred ) const + { + CDS_UNUSED( pred ); + node_type * pNode = find_with_( key, cds::opt::details::make_comparator_from_less(), [](value_type& , Q const& ) {} ); + if ( pNode ) + return node_traits::to_value_ptr( pNode ); + return nullptr; + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + value_type * find_with( Q const& key, Less pred ) const + { + return contains( key, pred ); + } + //@endcond + + /// Gets minimum key from the set + /** + If the set is empty the function returns \p nullptr + */ + value_type * get_min() const + { + return node_traits::to_value_ptr( m_Head.head()->next( 0 )); + } + + /// Gets maximum key from the set + /** + The function returns \p nullptr if the set is empty + */ + value_type * get_max() const + { + node_type * pPred; + + unsigned int nHeight = m_nHeight.load( memory_model::memory_order_relaxed ); + pPred = m_Head.head(); + + for ( int nLevel = (int) nHeight - 1; nLevel >= 0; --nLevel ) { + while ( true ) { + node_type * pCur = pPred->next( nLevel ).load( memory_model::memory_order_relaxed ); + + if ( !pCur ) { + // end of the list at level nLevel - goto next level + break; + } + pPred = pCur; + } + } + return pPred && pPred != m_Head.head() ? node_traits::to_value_ptr( pPred ) : nullptr; + } + + /// Clears the set (non-atomic) + /** + The function is not atomic. + Finding and/or inserting is prohibited while clearing. + Otherwise an unpredictable result may be encountered. + Thus, \p clear() may be used only for debugging purposes. 
+ */ + void clear() + { + node_type * pNode = m_Head.head()->next(0).load( memory_model::memory_order_relaxed ); + m_Head.clear(); + m_ItemCounter.reset(); + m_nHeight.store( c_nMinHeight, memory_model::memory_order_release ); + + while ( pNode ) { + node_type * pNext = pNode->next(0).load( memory_model::memory_order_relaxed ); + dispose_node( pNode ); + pNode = pNext; + } + } + + /// Returns item count in the set + /** + The value returned depends on item counter type provided by \p Traits template parameter. + For \p atomicity::empty_item_counter the function always returns 0. + The function is not suitable for checking the set emptiness, use \p empty(). + */ + size_t size() const + { + return m_ItemCounter; + } + + /// Checks if the set is empty + bool empty() const + { + return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr; + } + + /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32. + static constexpr unsigned int max_height() noexcept + { + return c_nMaxHeight; + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + }; + +}} // namespace cds::intrusive + + +#endif // #ifndef CDSLIB_INTRUSIVE_SKIP_LIST_IMPL_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/skip_list_rcu.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/skip_list_rcu.h new file mode 100644 index 0000000..b71c46f --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/skip_list_rcu.h @@ -0,0 +1,2076 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the 
following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H +#define CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + //@cond + namespace skip_list { + + template + class node< cds::urcu::gc< RCU >, Tag > + { + public: + typedef cds::urcu::gc< RCU > gc; ///< Garbage collector + typedef Tag tag ; ///< tag + + // Mark bits: + // bit 0 - the item is logically deleted + // bit 1 - the item is extracted (only for level 0) + typedef cds::details::marked_ptr marked_ptr; ///< marked pointer + typedef atomics::atomic< marked_ptr > atomic_marked_ptr; ///< atomic marked pointer + typedef atomic_marked_ptr tower_item_type; + + protected: + atomic_marked_ptr m_pNext; ///< Next item in bottom-list (list at level 0) + public: + node * m_pDelChain; ///< Deleted node chain (local for a thread) + protected: + unsigned int m_nHeight; ///< Node height (size of m_arrNext array). For node at level 0 the height is 1. + atomic_marked_ptr * m_arrNext; ///< Array of next items for levels 1 .. m_nHeight - 1. 
For node at level 0 \p m_arrNext is \p nullptr + atomics::atomic m_nUnlink; ///< Unlink helper + + public: + /// Constructs a node of height 1 (a bottom-list node) + node() + : m_pNext( nullptr ) + , m_pDelChain( nullptr ) + , m_nHeight(1) + , m_arrNext( nullptr ) + { + m_nUnlink.store( 1, atomics::memory_order_release ); + } + + /// Constructs a node of height \p nHeight + void make_tower( unsigned int nHeight, atomic_marked_ptr * nextTower ) + { + assert( nHeight > 0 ); + assert( (nHeight == 1 && nextTower == nullptr) // bottom-list node + || (nHeight > 1 && nextTower != nullptr) // node at level of more than 0 + ); + + m_arrNext = nextTower; + m_nHeight = nHeight; + m_nUnlink.store( nHeight, atomics::memory_order_release ); + } + + atomic_marked_ptr * release_tower() + { + atomic_marked_ptr * pTower = m_arrNext; + m_arrNext = nullptr; + m_nHeight = 1; + return pTower; + } + + atomic_marked_ptr * get_tower() const + { + return m_arrNext; + } + + void clear_tower() + { + for ( unsigned int nLevel = 1; nLevel < m_nHeight; ++nLevel ) + next(nLevel).store( marked_ptr(), atomics::memory_order_relaxed ); + } + + /// Access to element of next pointer array + atomic_marked_ptr& next( unsigned int nLevel ) + { + assert( nLevel < height()); + assert( nLevel == 0 || (nLevel > 0 && m_arrNext != nullptr)); + + return nLevel ? m_arrNext[ nLevel - 1] : m_pNext; + } + + /// Access to element of next pointer array (const version) + atomic_marked_ptr const& next( unsigned int nLevel ) const + { + assert( nLevel < height()); + assert( nLevel == 0 || nLevel > 0 && m_arrNext != nullptr ); + + return nLevel ? 
m_arrNext[ nLevel - 1] : m_pNext; + } + + /// Access to element of next pointer array (same as \ref next function) + atomic_marked_ptr& operator[]( unsigned int nLevel ) + { + return next( nLevel ); + } + + /// Access to element of next pointer array (same as \ref next function) + atomic_marked_ptr const& operator[]( unsigned int nLevel ) const + { + return next( nLevel ); + } + + /// Height of the node + unsigned int height() const + { + return m_nHeight; + } + + /// Clears internal links + void clear() + { + assert( m_arrNext == nullptr ); + m_pNext.store( marked_ptr(), atomics::memory_order_release ); + m_pDelChain = nullptr; + } + + bool is_cleared() const + { + return m_pNext == atomic_marked_ptr() + && m_arrNext == nullptr + && m_nHeight <= 1; + } + + bool level_unlinked( unsigned nCount = 1 ) + { + return m_nUnlink.fetch_sub( nCount, std::memory_order_relaxed ) == 1; + } + + bool is_upper_level( unsigned nLevel ) const + { + return m_nUnlink.load( atomics::memory_order_relaxed ) == nLevel + 1; + } + }; + } // namespace skip_list + //@endcond + + //@cond + namespace skip_list { namespace details { + + template + class iterator< cds::urcu::gc< RCU >, NodeTraits, BackOff, IsConst > + { + public: + typedef cds::urcu::gc< RCU > gc; + typedef NodeTraits node_traits; + typedef BackOff back_off; + typedef typename node_traits::node_type node_type; + typedef typename node_traits::value_type value_type; + static bool const c_isConst = IsConst; + + typedef typename std::conditional< c_isConst, value_type const &, value_type &>::type value_ref; + + protected: + typedef typename node_type::marked_ptr marked_ptr; + typedef typename node_type::atomic_marked_ptr atomic_marked_ptr; + + node_type * m_pNode; + + protected: + void next() + { + back_off bkoff; + + for (;;) { + if ( m_pNode->next( m_pNode->height() - 1 ).load( atomics::memory_order_acquire ).bits()) { + // Current node is marked as deleted. 
So, its next pointer can point to anything + // In this case we interrupt our iteration and returns end() iterator. + *this = iterator(); + return; + } + + marked_ptr p = m_pNode->next(0).load( atomics::memory_order_relaxed ); + node_type * pp = p.ptr(); + if ( p.bits()) { + // p is marked as deleted. Spin waiting for physical removal + bkoff(); + continue; + } + else if ( pp && pp->next( pp->height() - 1 ).load( atomics::memory_order_relaxed ).bits()) { + // p is marked as deleted. Spin waiting for physical removal + bkoff(); + continue; + } + + m_pNode = pp; + break; + } + } + + public: // for internal use only!!! + iterator( node_type& refHead ) + : m_pNode( nullptr ) + { + back_off bkoff; + + for (;;) { + marked_ptr p = refHead.next(0).load( atomics::memory_order_relaxed ); + if ( !p.ptr()) { + // empty skip-list + break; + } + + node_type * pp = p.ptr(); + // Logically deleted node is marked from highest level + if ( !pp->next( pp->height() - 1 ).load( atomics::memory_order_acquire ).bits()) { + m_pNode = pp; + break; + } + + bkoff(); + } + } + + public: + iterator() + : m_pNode( nullptr ) + {} + + iterator( iterator const& s) + : m_pNode( s.m_pNode ) + {} + + value_type * operator ->() const + { + assert( m_pNode != nullptr ); + assert( node_traits::to_value_ptr( m_pNode ) != nullptr ); + + return node_traits::to_value_ptr( m_pNode ); + } + + value_ref operator *() const + { + assert( m_pNode != nullptr ); + assert( node_traits::to_value_ptr( m_pNode ) != nullptr ); + + return *node_traits::to_value_ptr( m_pNode ); + } + + /// Pre-increment + iterator& operator ++() + { + next(); + return *this; + } + + iterator& operator = (const iterator& src) + { + m_pNode = src.m_pNode; + return *this; + } + + template + bool operator ==(iterator const& i ) const + { + return m_pNode == i.m_pNode; + } + template + bool operator !=(iterator const& i ) const + { + return !( *this == i ); + } + }; + }} // namespace skip_list::details + //@endcond + + /// Lock-free skip-list 
set (template specialization for \ref cds_urcu_desc "RCU") + /** @ingroup cds_intrusive_map + @anchor cds_intrusive_SkipListSet_rcu + + The implementation of well-known probabilistic data structure called skip-list + invented by W.Pugh in his papers: + - [1989] W.Pugh Skip Lists: A Probabilistic Alternative to Balanced Trees + - [1990] W.Pugh A Skip List Cookbook + + A skip-list is a probabilistic data structure that provides expected logarithmic + time search without the need of rebalance. The skip-list is a collection of sorted + linked list. Nodes are ordered by key. Each node is linked into a subset of the lists. + Each list has a level, ranging from 0 to 32. The bottom-level list contains + all the nodes, and each higher-level list is a sublist of the lower-level lists. + Each node is created with a random top level (with a random height), and belongs + to all lists up to that level. The probability that a node has the height 1 is 1/2. + The probability that a node has the height N is 1/2 ** N (more precisely, + the distribution depends on an random generator provided, but our generators + have this property). + + The lock-free variant of skip-list is implemented according to book + - [2008] M.Herlihy, N.Shavit "The Art of Multiprocessor Programming", + chapter 14.4 "A Lock-Free Concurrent Skiplist". + + Template arguments: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p T - type to be stored in the list. The type must be based on \p skip_list::node (for \p skip_list::base_hook) + or it must have a member of type \p skip_list::node (for \p skip_list::member_hook). + - \p Traits - set traits, default is \p skip_list::traits + It is possible to declare option-based list with \p cds::intrusive::skip_list::make_traits metafunction + instead of \p Traits template argument. + + @note Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. 
+ + Iterators + + The class supports a forward iterator (\ref iterator and \ref const_iterator). + The iteration is ordered. + + You may iterate over skip-list set items only under RCU lock. + Only in this case the iterator is thread-safe since + while RCU is locked any set's item cannot be reclaimed. + + @note The requirement of RCU lock during iterating means that any type of modification of the skip list + (i.e. inserting, erasing and so on) is not possible. + + @warning The iterator object cannot be passed between threads. + + Example how to use skip-list set iterators: + \code + // First, you should include the header for RCU type you have chosen + #include + #include + + typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type; + + struct Foo { + // ... + }; + + // Traits for your skip-list. + // At least, you should define cds::opt::less or cds::opt::compare for Foo struct + struct my_traits: public cds::intrusive::skip_list::traits + { + // ... + }; + typedef cds::intrusive::SkipListSet< rcu_type, Foo, my_traits > my_skiplist_set; + + my_skiplist_set theSet; + + // ... + + // Begin iteration + { + // Apply RCU locking manually + typename rcu_type::scoped_lock sl; + + for ( auto it = theList.begin(); it != theList.end(); ++it ) { + // ... + } + + // rcu_type::scoped_lock destructor releases RCU lock implicitly + } + \endcode + + The iterator class supports the following minimalistic interface: + \code + struct iterator { + // Default ctor + iterator(); + + // Copy ctor + iterator( iterator const& s); + + value_type * operator ->() const; + value_type& operator *() const; + + // Pre-increment + iterator& operator ++(); + + // Copy assignment + iterator& operator = (const iterator& src); + + bool operator ==(iterator const& i ) const; + bool operator !=(iterator const& i ) const; + }; + \endcode + Note, the iterator object returned by \ref end, \p cend member functions points to \p nullptr and should not be dereferenced. 
+ + How to use + + You should incorporate skip_list::node into your struct \p T and provide + appropriate skip_list::traits::hook in your \p Traits template parameters. Usually, for \p Traits you + define a struct based on \p skip_list::traits. + + Example for cds::urcu::general_buffered<> RCU and base hook: + \code + // First, you should include the header for RCU type you have chosen + #include + + // Include RCU skip-list specialization + #include + + // RCU type typedef + typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type; + + // Data stored in skip list + struct my_data: public cds::intrusive::skip_list::node< rcu_type > + { + // key field + std::string strKey; + + // other data + // ... + }; + + // my_data compare functor + struct my_data_cmp { + int operator()( const my_data& d1, const my_data& d2 ) + { + return d1.strKey.compare( d2.strKey ); + } + + int operator()( const my_data& d, const std::string& s ) + { + return d.strKey.compare(s); + } + + int operator()( const std::string& s, const my_data& d ) + { + return s.compare( d.strKey ); + } + }; + + // Declare traits + struct my_traits: public cds::intrusive::skip_list::traits + { + typedef cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > hook; + typedef my_data_cmp compare; + }; + + // Declare skip-list set type + typedef cds::intrusive::SkipListSet< rcu_type, my_data, my_traits > traits_based_set; + \endcode + + Equivalent option-based code: + \code + #include + #include + + typedef cds::urcu::gc< cds::urcu::general_buffered<> > rcu_type; + + struct my_data { + // see above + }; + struct compare { + // see above + }; + + // Declare option-based skip-list set + typedef cds::intrusive::SkipListSet< rcu_type + ,my_data + , typename cds::intrusive::skip_list::make_traits< + cds::intrusive::opt::hook< cds::intrusive::skip_list::base_hook< cds::opt::gc< rcu_type > > > + ,cds::intrusive::opt::compare< my_data_cmp > + >::type + > option_based_set; + + \endcode + */ + template < + 
class RCU + ,typename T +#ifdef CDS_DOXYGEN_INVOKED + ,typename Traits = skip_list::traits +#else + ,typename Traits +#endif + > + class SkipListSet< cds::urcu::gc< RCU >, T, Traits > + { + public: + typedef cds::urcu::gc< RCU > gc; ///< Garbage collector + typedef T value_type; ///< type of value stored in the skip-list + typedef Traits traits; ///< Traits template parameter + + typedef typename traits::hook hook; ///< hook type + typedef typename hook::node_type node_type; ///< node type + +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined key_comparator ; ///< key comparison functor based on \p Traits::compare and \p Traits::less +# else + typedef typename opt::details::make_comparator< value_type, traits >::type key_comparator; +# endif + + typedef typename traits::disposer disposer; ///< disposer + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits; ///< node traits + + typedef typename traits::item_counter item_counter; ///< Item counting policy used + typedef typename traits::memory_model memory_model; ///< Memory ordering, see \p cds::opt::memory_model option + typedef typename traits::random_level_generator random_level_generator; ///< random level generator + typedef typename traits::allocator allocator_type; ///< allocator for maintaining array of next pointers of the node + typedef typename traits::back_off back_off; ///< Back-off strategy + typedef typename traits::stat stat; ///< internal statistics type + typedef typename traits::rcu_check_deadlock rcu_check_deadlock; ///< Deadlock checking policy + typedef typename gc::scoped_lock rcu_lock; ///< RCU scoped lock + static constexpr const bool c_bExtractLockExternal = false; ///< Group of \p extract_xxx functions does not require external locking + + + /// Max node height. The actual node height should be in range [0 .. 
c_nMaxHeight) + /** + The max height is specified by \ref skip_list::random_level_generator "random level generator" constant \p m_nUpperBound + but it should be no more than 32 (\ref skip_list::c_nHeightLimit). + */ + static unsigned int const c_nMaxHeight = std::conditional< + (random_level_generator::c_nUpperBound <= skip_list::c_nHeightLimit), + std::integral_constant< unsigned int, random_level_generator::c_nUpperBound >, + std::integral_constant< unsigned int, skip_list::c_nHeightLimit > + >::type::value; + + //@cond + static unsigned int const c_nMinHeight = 5; + //@endcond + + protected: + typedef typename node_type::atomic_marked_ptr atomic_node_ptr ; ///< Atomic marked node pointer + typedef typename node_type::marked_ptr marked_node_ptr ; ///< Node marked pointer + + protected: + //@cond + typedef skip_list::details::intrusive_node_builder< node_type, atomic_node_ptr, allocator_type > intrusive_node_builder; + + typedef typename std::conditional< + std::is_same< typename traits::internal_node_builder, cds::opt::none >::value + ,intrusive_node_builder + ,typename traits::internal_node_builder + >::type node_builder; + + typedef std::unique_ptr< node_type, typename node_builder::node_disposer > scoped_node_ptr; + + static void dispose_node( value_type * pVal ) + { + assert( pVal ); + + typename node_builder::node_disposer()( node_traits::to_node_ptr(pVal)); + disposer()( pVal ); + } + + struct node_disposer + { + void operator()( value_type * pVal ) + { + dispose_node( pVal ); + } + }; + + static void dispose_chain( node_type * pChain ) + { + if ( pChain ) { + assert( !gc::is_locked()); + + auto f = [&pChain]() -> cds::urcu::retired_ptr { + node_type * p = pChain; + if ( p ) { + pChain = p->m_pDelChain; + return cds::urcu::make_retired_ptr( node_traits::to_value_ptr( p )); + } + return cds::urcu::make_retired_ptr( static_cast(nullptr)); + }; + gc::batch_retire(std::ref(f)); + } + } + + struct position { + node_type * pPrev[ c_nMaxHeight ]; + node_type * 
pSucc[ c_nMaxHeight ]; + node_type * pNext[ c_nMaxHeight ]; + + node_type * pCur; + node_type * pDelChain; + + position() + : pDelChain( nullptr ) + {} + + ~position() + { + dispose_chain( pDelChain ); + } + + void dispose( node_type * p ) + { + assert( p != nullptr ); + assert( p->m_pDelChain == nullptr ); + + p->m_pDelChain = pDelChain; + pDelChain = p; + } + }; + + typedef cds::urcu::details::check_deadlock_policy< gc, rcu_check_deadlock> check_deadlock_policy; + //@endcond + + protected: + skip_list::details::head_node< node_type > m_Head; ///< head tower (max height) + + random_level_generator m_RandomLevelGen; ///< random level generator instance + atomics::atomic m_nHeight; ///< estimated high level + atomics::atomic m_pDeferredDelChain ; ///< Deferred deleted node chain + item_counter m_ItemCounter; ///< item counter + mutable stat m_Stat; ///< internal statistics + + protected: + //@cond + unsigned int random_level() + { + // Random generator produces a number from range [0..31] + // We need a number from range [1..32] + return m_RandomLevelGen() + 1; + } + + template + node_type * build_node( Q v ) + { + return node_builder::make_tower( v, m_RandomLevelGen ); + } + //@endcond + + public: + using exempt_ptr = cds::urcu::exempt_ptr< gc, value_type, value_type, node_disposer, void >; ///< pointer to extracted node + + private: + //@cond + struct chain_disposer { + void operator()( node_type * pChain ) const + { + dispose_chain( pChain ); + } + }; + typedef cds::intrusive::details::raw_ptr_disposer< gc, node_type, chain_disposer> raw_ptr_disposer; + //@endcond + + public: + /// Result of \p get(), \p get_with() functions - pointer to the node found + typedef cds::urcu::raw_ptr< gc, value_type, raw_ptr_disposer > raw_ptr; + + public: + /// Default constructor + SkipListSet() + : m_Head( c_nMaxHeight ) + , m_nHeight( c_nMinHeight ) + , m_pDeferredDelChain( nullptr ) + { + static_assert( (std::is_same< gc, typename node_type::gc >::value), "GC and node_type::gc 
must be the same type" ); + + // Barrier for head node + atomics::atomic_thread_fence( memory_model::memory_order_release ); + } + + /// Clears and destructs the skip-list + ~SkipListSet() + { + destroy(); + } + + public: + ///@name Forward iterators (thread-safe under RCU lock) + //@{ + /// Forward iterator + /** + The forward iterator has some features: + - it has no post-increment operator + - it depends on iterator of underlying \p OrderedList + + You may safely use iterators in multi-threaded environment only under RCU lock. + Otherwise, a crash is possible if another thread deletes the element the iterator points to. + */ + typedef skip_list::details::iterator< gc, node_traits, back_off, false > iterator; + + /// Const iterator type + typedef skip_list::details::iterator< gc, node_traits, back_off, true > const_iterator; + + /// Returns a forward iterator addressing the first element in a set + iterator begin() + { + return iterator( *m_Head.head()); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator begin() const + { + return const_iterator( *m_Head.head()); + } + + /// Returns a forward const iterator addressing the first element in a set + const_iterator cbegin() const + { + return const_iterator( *m_Head.head()); + } + + /// Returns a forward iterator that addresses the location succeeding the last element in a set. + iterator end() + { + return iterator(); + } + + /// Returns a forward const iterator that addresses the location succeeding the last element in a set. + const_iterator end() const + { + return const_iterator(); + } + + /// Returns a forward const iterator that addresses the location succeeding the last element in a set. + const_iterator cend() const + { + return const_iterator(); + } + //@} + + public: + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. + + The function applies RCU lock internally. 
+ + Returns \p true if \p val is placed into the set, \p false otherwise. + */ + bool insert( value_type& val ) + { + return insert( val, []( value_type& ) {} ); + } + + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. + + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. User-defined functor \p f should guarantee that during changing + \p val no any other changes could be made on this set's item by concurrent threads. + The user-defined functor is called only if the inserting is success. + + RCU \p synchronize method can be called. RCU should not be locked. + */ + template + bool insert( value_type& val, Func f ) + { + check_deadlock_policy::check(); + + position pos; + bool bRet; + + { + node_type * pNode = node_traits::to_node_ptr( val ); + scoped_node_ptr scp( pNode ); + unsigned int nHeight = pNode->height(); + bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr; + bool bTowerMade = false; + + rcu_lock rcuLock; + + while ( true ) + { + bool bFound = find_position( val, pos, key_comparator(), true ); + if ( bFound ) { + // scoped_node_ptr deletes the node tower if we create it + if ( !bTowerMade ) + scp.release(); + + m_Stat.onInsertFailed(); + bRet = false; + break; + } + + if ( !bTowerOk ) { + build_node( pNode ); + nHeight = pNode->height(); + bTowerMade = + bTowerOk = true; + } + + if ( !insert_at_position( val, pNode, pos, f )) { + m_Stat.onInsertRetry(); + continue; + } + + increase_height( nHeight ); + ++m_ItemCounter; + m_Stat.onAddNode( nHeight ); + m_Stat.onInsertSuccess(); + scp.release(); + bRet = true; + break; + } + } + + return bRet; + } + + /// Updates the node + /** + The operation performs inserting or changing data 
with lock-free manner. + + If the item \p val is not found in the set, then \p val is inserted into the set + iff \p bInsert is \p true. + Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + void func( bool bNew, value_type& item, value_type& val ); + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p %update() function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refer to the same thing. + + The functor can change non-key fields of the \p item; however, \p func must guarantee + that during changing no any other modifications could be made on this item by concurrent threads. + + RCU \p synchronize method can be called. RCU should not be locked. + + Returns std::pair where \p first is \p true if operation is successful, + i.e. the node has been inserted or updated, + \p second is \p true if new item has been added or \p false if the item with \p key + already exists. 
+ + @warning See \ref cds_intrusive_item_creating "insert item troubleshooting" + */ + template + std::pair update( value_type& val, Func func, bool bInsert = true ) + { + check_deadlock_policy::check(); + + position pos; + std::pair bRet( true, false ); + + { + node_type * pNode = node_traits::to_node_ptr( val ); + scoped_node_ptr scp( pNode ); + unsigned int nHeight = pNode->height(); + bool bTowerOk = nHeight > 1 && pNode->get_tower() != nullptr; + bool bTowerMade = false; + + rcu_lock rcuLock; + while ( true ) + { + bool bFound = find_position( val, pos, key_comparator(), true ); + if ( bFound ) { + // scoped_node_ptr deletes the node tower if we create it before + if ( !bTowerMade ) + scp.release(); + + func( false, *node_traits::to_value_ptr(pos.pCur), val ); + m_Stat.onUpdateExist(); + break; + } + + if ( !bInsert ) { + scp.release(); + bRet.first = false; + break; + } + + if ( !bTowerOk ) { + build_node( pNode ); + nHeight = pNode->height(); + bTowerMade = + bTowerOk = true; + } + + if ( !insert_at_position( val, pNode, pos, [&func]( value_type& item ) { func( true, item, item ); })) { + m_Stat.onInsertRetry(); + continue; + } + + increase_height( nHeight ); + ++m_ItemCounter; + scp.release(); + m_Stat.onAddNode( nHeight ); + m_Stat.onUpdateNew(); + bRet.second = true; + break; + } + } + + return bRet; + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( value_type& val, Func func ) + { + return update( val, func, true ); + } + //@endcond + + /// Unlinks the item \p val from the set + /** + The function searches the item \p val in the set and unlink it from the set + if it is found and is equal to \p val. + + Difference between \p erase() and \p %unlink() functions: \p erase() finds a key + and deletes the item found. \p %unlink() searches an item by key and deletes it + only if \p val is an item of that set, i.e. the pointer to item found + is equal to &val . + + RCU \p synchronize method can be called. 
RCU should not be locked. + + The \ref disposer specified in \p Traits class template parameter is called + by garbage collector \p GC asynchronously. + + The function returns \p true if success and \p false otherwise. + */ + bool unlink( value_type& val ) + { + check_deadlock_policy::check(); + + position pos; + bool bRet; + + { + rcu_lock l; + + if ( !find_position( val, pos, key_comparator(), false )) { + m_Stat.onUnlinkFailed(); + bRet = false; + } + else { + node_type * pDel = pos.pCur; + assert( key_comparator()( *node_traits::to_value_ptr( pDel ), val ) == 0 ); + + unsigned int nHeight = pDel->height(); + + if ( node_traits::to_value_ptr( pDel ) == &val && try_remove_at( pDel, pos, [](value_type const&) {}, false )) { + --m_ItemCounter; + m_Stat.onRemoveNode( nHeight ); + m_Stat.onUnlinkSuccess(); + bRet = true; + } + else { + m_Stat.onUnlinkFailed(); + bRet = false; + } + } + } + + return bRet; + } + + /// Extracts the item from the set with specified \p key + /** \anchor cds_intrusive_SkipListSet_rcu_extract + The function searches an item with key equal to \p key in the set, + unlinks it from the set, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. + If the item with key equal to \p key is not found the function returns an empty \p exempt_ptr. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not call the disposer for the item found. + The disposer will be implicitly invoked when the returned object is destroyed or when + its \p release() member function is called. + Example: + \code + typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; + skip_list theList; + // ... + + typename skip_list::exempt_ptr ep( theList.extract( 5 )); + if ( ep ) { + // Deal with ep + //... + + // Dispose returned item. 
+ ep.release(); + } + \endcode + */ + template + exempt_ptr extract( Q const& key ) + { + return exempt_ptr( do_extract( key )); + } + + /// Extracts the item from the set with comparing functor \p pred + /** + The function is an analog of \p extract(Q const&) but \p pred predicate is used for key comparing. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + exempt_ptr extract_with( Q const& key, Less pred ) + { + return exempt_ptr( do_extract_with( key, pred )); + } + + /// Extracts an item with minimal key from the list + /** + The function searches an item with minimal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item. + If the skip-list is empty the function returns an empty \p exempt_ptr. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not call the disposer for the item found. + The disposer will be implicitly invoked when the returned object is destroyed or when + its \p release() member function is manually called. + Example: + \code + typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; + skip_list theList; + // ... + + typename skip_list::exempt_ptr ep(theList.extract_min()); + if ( ep ) { + // Deal with ep + //... + + // Dispose returned item. + ep.release(); + } + \endcode + + @note Due the concurrent nature of the list, the function extracts nearly minimum key. + It means that the function gets leftmost item and tries to unlink it. + During unlinking, a concurrent thread may insert an item with key less than leftmost item's key. + So, the function returns the item with minimum key at the moment of list traversing. 
+ */ + exempt_ptr extract_min() + { + return exempt_ptr( do_extract_min()); + } + + /// Extracts an item with maximal key from the list + /** + The function searches an item with maximal key, unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item. + If the skip-list is empty the function returns an empty \p exempt_ptr. + + RCU \p synchronize method can be called. RCU should NOT be locked. + The function does not call the disposer for the item found. + The disposer will be implicitly invoked when the returned object is destroyed or when + its \p release() member function is manually called. + Example: + \code + typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; + skip_list theList; + // ... + + typename skip_list::exempt_ptr ep( theList.extract_max()); + if ( ep ) { + // Deal with ep + //... + // Dispose returned item. + ep.release(); + } + \endcode + + @note Due the concurrent nature of the list, the function extracts nearly maximal key. + It means that the function gets rightmost item and tries to unlink it. + During unlinking, a concurrent thread can insert an item with key greater than rightmost item's key. + So, the function returns the item with maximum key at the moment of list traversing. + */ + exempt_ptr extract_max() + { + return exempt_ptr( do_extract_max()); + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_SkipListSet_rcu_erase + The function searches an item with key equal to \p key in the set, + unlinks it from the set, and returns \p true. + If the item with key equal to \p key is not found the function return \p false. + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU \p synchronize method can be called. RCU should not be locked. 
+ */ + template + bool erase( const Q& key ) + { + return do_erase( key, key_comparator(), [](value_type const&) {} ); + } + + /// Delete the item from the set with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase "erase(Q const&)" + but \p pred predicate is used for key comparing. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( const Q& key, Less pred ) + { + CDS_UNUSED( pred ); + return do_erase( key, cds::opt::details::make_comparator_from_less(), [](value_type const&) {} ); + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_SkipListSet_rcu_erase_func + The function searches an item with key equal to \p key in the set, + call \p f functor with item found, unlinks it from the set, and returns \p true. + The \ref disposer specified in \p Traits class template parameter is called + by garbage collector \p GC asynchronously. + + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + If the item with key equal to \p key is not found the function return \p false. + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU \p synchronize method can be called. RCU should not be locked. + */ + template + bool erase( Q const& key, Func f ) + { + return do_erase( key, key_comparator(), f ); + } + + /// Delete the item from the set with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_SkipListSet_rcu_erase_func "erase(Q const&, Func)" + but \p pred predicate is used for key comparing. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. 
+ */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return do_erase( key, cds::opt::details::make_comparator_from_less(), f ); + } + + /// Finds \p key + /** @anchor cds_intrusive_SkipListSet_rcu_find_func + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. + + The functor can change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + The function applies RCU lock internally. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( Q& key, Func f ) + { + return do_find_with( key, key_comparator(), f ); + } + //@cond + template + bool find( Q const& key, Func f ) + { + return do_find_with( key, key_comparator(), f ); + } + //@endcond + + /// Finds the key \p key with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_SkipListSet_rcu_find_func "find(Q&, Func)" + but \p cmp is used for key comparison. + \p Less functor has the interface like \p std::less. + \p cmp must imply the same element order as the comparator used for building the set. 
+ */ + template + bool find_with( Q& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return do_find_with( key, cds::opt::details::make_comparator_from_less(), f ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return do_find_with( key, cds::opt::details::make_comparator_from_less(), f ); + } + //@endcond + + /// Checks whether the set contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + The function applies RCU lock internally. + */ + template + bool contains( Q const& key ) + { + return do_find_with( key, key_comparator(), [](value_type& , Q const& ) {} ); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( Q const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the set contains \p key using \p pred predicate for searching + /** + The function is similar to contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool contains( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return do_find_with( key, cds::opt::details::make_comparator_from_less(), [](value_type& , Q const& ) {} ); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Finds \p key and return the item found + /** \anchor cds_intrusive_SkipListSet_rcu_get + The function searches the item with key equal to \p key and returns a \p raw_ptr object pointed to item found. + If \p key is not found it returns empty \p raw_ptr. + + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU should be locked before call of this function. 
+ Returned item is valid only while RCU is locked: + \code + typedef cds::intrusive::SkipListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, foo, my_traits > skip_list; + skip_list theList; + // ... + typename skip_list::raw_ptr pVal; + { + // Lock RCU + skip_list::rcu_lock lock; + + pVal = theList.get( 5 ); + if ( pVal ) { + // Deal with pVal + //... + } + } + // You can manually release pVal after RCU-locked section + pVal.release(); + \endcode + */ + template + raw_ptr get( Q const& key ) + { + assert( gc::is_locked()); + + position pos; + value_type * pFound; + if ( do_find_with( key, key_comparator(), [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos )) + return raw_ptr( pFound, raw_ptr_disposer( pos )); + return raw_ptr( raw_ptr_disposer( pos )); + } + + /// Finds \p key and return the item found + /** + The function is an analog of \ref cds_intrusive_SkipListSet_rcu_get "get(Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + raw_ptr get_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + assert( gc::is_locked()); + + value_type * pFound = nullptr; + position pos; + if ( do_find_with( key, cds::opt::details::make_comparator_from_less(), + [&pFound](value_type& found, Q const& ) { pFound = &found; }, pos )) + { + return raw_ptr( pFound, raw_ptr_disposer( pos )); + } + return raw_ptr( raw_ptr_disposer( pos )); + } + + /// Returns item count in the set + /** + The value returned depends on item counter type provided by \p Traits template parameter. + For \p atomicity::empty_item_counter the function always returns 0. + Therefore, the function is not suitable for checking the set emptiness, use \p empty() + member function for this purpose. 
+ */ + size_t size() const + { + return m_ItemCounter; + } + + /// Checks if the set is empty + bool empty() const + { + return m_Head.head()->next( 0 ).load( memory_model::memory_order_relaxed ) == nullptr; + } + + /// Clears the set (not atomic) + /** + The function unlink all items from the set. + The function is not atomic, thus, in multi-threaded environment with parallel insertions + this sequence + \code + set.clear(); + assert( set.empty()); + \endcode + the assertion could be raised. + + For each item the \p disposer will be called automatically after unlinking. + */ + void clear() + { + exempt_ptr ep; + while ( (ep = extract_min())); + } + + /// Returns maximum height of skip-list. The max height is a constant for each object and does not exceed 32. + static constexpr unsigned int max_height() noexcept + { + return c_nMaxHeight; + } + + /// Returns const reference to internal statistics + stat const& statistics() const + { + return m_Stat; + } + + protected: + //@cond + + bool is_extracted( marked_node_ptr const p ) const + { + return ( p.bits() & 2 ) != 0; + } + + void help_remove( int nLevel, node_type* pPred, marked_node_ptr pCur, marked_node_ptr pSucc, position& pos ) + { + marked_node_ptr p( pCur.ptr()); + + if ( pCur->is_upper_level( nLevel ) + && pPred->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()), + memory_model::memory_order_release, atomics::memory_order_relaxed )) + { + if ( pCur->level_unlinked()) { + if ( !is_extracted( pSucc )) { + // We cannot free the node at this moment because RCU is locked + // Link deleted nodes to a chain to free later + pos.dispose( pCur.ptr()); + m_Stat.onEraseWhileFind(); + } + else + m_Stat.onExtractWhileFind(); + } + } + } + + template + bool find_position( Q const& val, position& pos, Compare cmp, bool bStopIfFound ) + { + assert( gc::is_locked()); + + node_type * pPred; + marked_node_ptr pSucc; + marked_node_ptr pCur; + int nCmp = 1; + + retry: + pPred = m_Head.head(); + + for ( int 
nLevel = static_cast( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) { + + while ( true ) { + pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire ); + if ( pCur.bits()) { + // pCur.bits() means that pPred is logically deleted + goto retry; + } + + if ( pCur.ptr() == nullptr ) { + // end of the list at level nLevel - goto next level + break; + } + + // pSucc contains deletion mark for pCur + pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); + + if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr()) + goto retry; + + if ( pSucc.bits()) { + // pCur is marked, i.e. logically deleted. + help_remove( nLevel, pPred, pCur, pSucc, pos ); + goto retry; + } + else { + nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val ); + if ( nCmp < 0 ) + pPred = pCur.ptr(); + else if ( nCmp == 0 && bStopIfFound ) + goto found; + else + break; + } + } + + // Next level + pos.pPrev[nLevel] = pPred; + pos.pSucc[nLevel] = pCur.ptr(); + } + + if ( nCmp != 0 ) + return false; + + found: + pos.pCur = pCur.ptr(); + return pCur.ptr() && nCmp == 0; + } + + bool find_min_position( position& pos ) + { + assert( gc::is_locked()); + + node_type * pPred; + marked_node_ptr pSucc; + marked_node_ptr pCur; + + retry: + pPred = m_Head.head(); + + for ( int nLevel = static_cast( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) { + + pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire ); + // pCur.bits() means that pPred is logically deleted + // head cannot be deleted + assert( pCur.bits() == 0 ); + + if ( pCur.ptr()) { + + // pSucc contains deletion mark for pCur + pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); + + if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr()) + goto retry; + + if ( pSucc.bits()) { + // pCur is marked, i.e. logically deleted. 
+ help_remove( nLevel, pPred, pCur, pSucc, pos ); + goto retry; + } + } + + // Next level + pos.pPrev[nLevel] = pPred; + pos.pSucc[nLevel] = pCur.ptr(); + } + return ( pos.pCur = pCur.ptr()) != nullptr; + } + + bool find_max_position( position& pos ) + { + assert( gc::is_locked()); + + node_type * pPred; + marked_node_ptr pSucc; + marked_node_ptr pCur; + + retry: + pPred = m_Head.head(); + + for ( int nLevel = static_cast( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) { + + while ( true ) { + pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire ); + if ( pCur.bits()) { + // pCur.bits() means that pPred is logically deleted + goto retry; + } + + if ( pCur.ptr() == nullptr ) { + // end of the list at level nLevel - goto next level + break; + } + + // pSucc contains deletion mark for pCur + pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); + + if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr()) + goto retry; + + if ( pSucc.bits()) { + // pCur is marked, i.e. logically deleted. 
+ help_remove( nLevel, pPred, pCur, pSucc, pos ); + goto retry; + } + else { + if ( !pSucc.ptr()) + break; + + pPred = pCur.ptr(); + } + } + + // Next level + pos.pPrev[nLevel] = pPred; + pos.pSucc[nLevel] = pCur.ptr(); + } + + return ( pos.pCur = pCur.ptr()) != nullptr; + } + + bool renew_insert_position( value_type& val, node_type * pNode, position& pos ) + { + assert( gc::is_locked()); + + node_type * pPred; + marked_node_ptr pSucc; + marked_node_ptr pCur; + key_comparator cmp; + int nCmp = 1; + + retry: + pPred = m_Head.head(); + + for ( int nLevel = static_cast( c_nMaxHeight - 1 ); nLevel >= 0; --nLevel ) { + + while ( true ) { + pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire ); + if ( pCur.bits()) { + // pCur.bits() means that pPred is logically deleted + goto retry; + } + + if ( pCur.ptr() == nullptr ) { + // end of the list at level nLevel - goto next level + break; + } + + // pSucc contains deletion mark for pCur + pSucc = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); + + if ( pPred->next( nLevel ).load( memory_model::memory_order_acquire ).all() != pCur.ptr()) + goto retry; + + if ( pSucc.bits()) { + // pCur is marked, i.e. logically deleted. 
+ if ( pCur.ptr() == pNode ) { + // Node is removing while we are inserting it + return false; + } + + // try to help deleting pCur + help_remove( nLevel, pPred, pCur, pSucc, pos ); + goto retry; + } + else { + nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val ); + if ( nCmp < 0 ) + pPred = pCur.ptr(); + else + break; + } + } + + // Next level + pos.pPrev[nLevel] = pPred; + pos.pSucc[nLevel] = pCur.ptr(); + } + + return nCmp == 0; + } + + template + bool insert_at_position( value_type& val, node_type * pNode, position& pos, Func f ) + { + assert( gc::is_locked()); + + unsigned int const nHeight = pNode->height(); + pNode->clear_tower(); + + // Insert at level 0 + { + marked_node_ptr p( pos.pSucc[0] ); + pNode->next( 0 ).store( p, memory_model::memory_order_relaxed ); + if ( !pos.pPrev[0]->next( 0 ).compare_exchange_strong( p, marked_node_ptr( pNode ), memory_model::memory_order_release, atomics::memory_order_relaxed )) + return false; + + f( val ); + } + + // Insert at level 1..max + for ( unsigned int nLevel = 1; nLevel < nHeight; ++nLevel ) { + marked_node_ptr p; + while ( true ) { + marked_node_ptr pSucc( pos.pSucc[nLevel] ); + + // Set pNode->next + // pNode->next must be null but can have a "logical deleted" flag if another thread is removing pNode right now + if ( !pNode->next( nLevel ).compare_exchange_strong( p, pSucc, + memory_model::memory_order_acq_rel, atomics::memory_order_acquire )) + { + // pNode has been marked as removed while we are inserting it + // Stop inserting + assert( p.bits() != 0 ); + + // Here pNode is linked at least level 0 so level_unlinked() cannot returns true + CDS_VERIFY_FALSE( pNode->level_unlinked( nHeight - nLevel )); + + // pNode is linked up to nLevel - 1 + // Remove it via find_position() + find_position( val, pos, key_comparator(), false ); + + m_Stat.onLogicDeleteWhileInsert(); + return true; + } + p = pSucc; + + // Link pNode into the list at nLevel + if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( 
pSucc, marked_node_ptr( pNode ), + memory_model::memory_order_release, atomics::memory_order_relaxed )) + { + // go to next level + break; + } + + // Renew insert position + m_Stat.onRenewInsertPosition(); + + if ( !renew_insert_position( val, pNode, pos )) { + // The node has been deleted while we are inserting it + // Update current height for concurent removing + CDS_VERIFY_FALSE( pNode->level_unlinked( nHeight - nLevel )); + + m_Stat.onRemoveWhileInsert(); + + // help to removing val + find_position( val, pos, key_comparator(), false ); + return true; + } + } + } + return true; + } + + template + bool try_remove_at( node_type * pDel, position& pos, Func f, bool bExtract ) + { + assert( pDel != nullptr ); + assert( gc::is_locked()); + + marked_node_ptr pSucc; + back_off bkoff; + + unsigned const nMask = bExtract ? 3u : 1u; + + // logical deletion (marking) + for ( unsigned int nLevel = pDel->height() - 1; nLevel > 0; --nLevel ) { + pSucc = pDel->next( nLevel ).load( memory_model::memory_order_relaxed ); + if ( pSucc.bits() == 0 ) { + bkoff.reset(); + while ( !pDel->next( nLevel ).compare_exchange_weak( pSucc, pSucc | nMask, + memory_model::memory_order_release, atomics::memory_order_acquire )) + { + if ( pSucc.bits() == 0 ) { + bkoff(); + m_Stat.onMarkFailed(); + } + else if ( pSucc.bits() != nMask ) + return false; + } + } + } + + marked_node_ptr p( pDel->next( 0 ).load( memory_model::memory_order_relaxed ).ptr()); + while ( true ) { + if ( pDel->next( 0 ).compare_exchange_strong( p, p | nMask, memory_model::memory_order_release, atomics::memory_order_acquire )) + { + f( *node_traits::to_value_ptr( pDel )); + + // physical deletion + // try fast erase + p = pDel; + for ( int nLevel = static_cast( pDel->height() - 1 ); nLevel >= 0; --nLevel ) { + + pSucc = pDel->next( nLevel ).load( memory_model::memory_order_acquire ); + if ( pos.pPrev[nLevel]->next( nLevel ).compare_exchange_strong( p, marked_node_ptr( pSucc.ptr()), + memory_model::memory_order_acq_rel, 
atomics::memory_order_relaxed )) + { + pDel->level_unlinked(); + } + else { + // Make slow erase +# ifdef CDS_DEBUG + if ( find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false )) + assert( pDel != pos.pCur ); +# else + find_position( *node_traits::to_value_ptr( pDel ), pos, key_comparator(), false ); +# endif + if ( bExtract ) + m_Stat.onSlowExtract(); + else + m_Stat.onSlowErase(); + + return true; + } + } + + // Fast erasing success + if ( !bExtract ) { + // We cannot free the node at this moment since RCU is locked + // Link deleted nodes to a chain to free later + pos.dispose( pDel ); + m_Stat.onFastErase(); + } + else + m_Stat.onFastExtract(); + return true; + } + else if ( p.bits()) { + // Another thread is deleting pDel right now + m_Stat.onEraseContention(); + return false; + } + + m_Stat.onEraseRetry(); + bkoff(); + } + } + + enum finsd_fastpath_result { + find_fastpath_found, + find_fastpath_not_found, + find_fastpath_abort + }; + template + finsd_fastpath_result find_fastpath( Q& val, Compare cmp, Func f ) const + { + node_type * pPred; + marked_node_ptr pCur; + marked_node_ptr pSucc; + marked_node_ptr pNull; + + back_off bkoff; + unsigned attempt = 0; + + try_again: + pPred = m_Head.head(); + for ( int nLevel = static_cast( m_nHeight.load( memory_model::memory_order_relaxed ) - 1 ); nLevel >= 0; --nLevel ) { + pCur = pPred->next( nLevel ).load( memory_model::memory_order_acquire ); + + while ( pCur != pNull ) { + if ( pCur.bits()) { + // pPred is being removed + if ( ++attempt < 4 ) { + bkoff(); + goto try_again; + } + + return find_fastpath_abort; + } + + if ( pCur.ptr()) { + int nCmp = cmp( *node_traits::to_value_ptr( pCur.ptr()), val ); + if ( nCmp < 0 ) { + pPred = pCur.ptr(); + pCur = pCur->next( nLevel ).load( memory_model::memory_order_acquire ); + } + else if ( nCmp == 0 ) { + // found + f( *node_traits::to_value_ptr( pCur.ptr()), val ); + return find_fastpath_found; + } + else // pCur > val - go down + break; + } + } 
+ } + + return find_fastpath_not_found; + } + + template + bool find_slowpath( Q& val, Compare cmp, Func f, position& pos ) + { + if ( find_position( val, pos, cmp, true )) { + assert( cmp( *node_traits::to_value_ptr( pos.pCur ), val ) == 0 ); + + f( *node_traits::to_value_ptr( pos.pCur ), val ); + return true; + } + else + return false; + } + + template + bool do_find_with( Q& val, Compare cmp, Func f ) + { + position pos; + return do_find_with( val, cmp, f, pos ); + } + + template + bool do_find_with( Q& val, Compare cmp, Func f, position& pos ) + { + bool bRet; + + { + rcu_lock l; + + switch ( find_fastpath( val, cmp, f )) { + case find_fastpath_found: + m_Stat.onFindFastSuccess(); + return true; + case find_fastpath_not_found: + m_Stat.onFindFastFailed(); + return false; + default: + break; + } + + if ( find_slowpath( val, cmp, f, pos )) { + m_Stat.onFindSlowSuccess(); + bRet = true; + } + else { + m_Stat.onFindSlowFailed(); + bRet = false; + } + } + return bRet; + } + + template + bool do_erase( Q const& val, Compare cmp, Func f ) + { + check_deadlock_policy::check(); + + position pos; + bool bRet; + + { + rcu_lock rcuLock; + + if ( !find_position( val, pos, cmp, false )) { + m_Stat.onEraseFailed(); + bRet = false; + } + else { + node_type * pDel = pos.pCur; + assert( cmp( *node_traits::to_value_ptr( pDel ), val ) == 0 ); + + unsigned int nHeight = pDel->height(); + if ( try_remove_at( pDel, pos, f, false )) { + --m_ItemCounter; + m_Stat.onRemoveNode( nHeight ); + m_Stat.onEraseSuccess(); + bRet = true; + } + else { + m_Stat.onEraseFailed(); + bRet = false; + } + } + } + + return bRet; + } + + template + value_type * do_extract_key( Q const& key, Compare cmp, position& pos ) + { + // RCU should be locked!!! 
+ assert( gc::is_locked()); + + node_type * pDel; + + if ( !find_position( key, pos, cmp, false )) { + m_Stat.onExtractFailed(); + pDel = nullptr; + } + else { + pDel = pos.pCur; + assert( cmp( *node_traits::to_value_ptr( pDel ), key ) == 0 ); + + unsigned int const nHeight = pDel->height(); + + if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true )) { + --m_ItemCounter; + m_Stat.onRemoveNode( nHeight ); + m_Stat.onExtractSuccess(); + } + else { + m_Stat.onExtractFailed(); + pDel = nullptr; + } + } + + return pDel ? node_traits::to_value_ptr( pDel ) : nullptr; + } + + template + value_type * do_extract( Q const& key ) + { + check_deadlock_policy::check(); + value_type * pDel = nullptr; + position pos; + { + rcu_lock l; + pDel = do_extract_key( key, key_comparator(), pos ); + } + + return pDel; + } + + template + value_type * do_extract_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + check_deadlock_policy::check(); + value_type * pDel = nullptr; + position pos; + { + rcu_lock l; + pDel = do_extract_key( key, cds::opt::details::make_comparator_from_less(), pos ); + } + + return pDel; + } + + value_type * do_extract_min() + { + assert( !gc::is_locked()); + + position pos; + node_type * pDel; + + { + rcu_lock l; + + if ( !find_min_position( pos )) { + m_Stat.onExtractMinFailed(); + pDel = nullptr; + } + else { + pDel = pos.pCur; + unsigned int const nHeight = pDel->height(); + + if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true )) { + --m_ItemCounter; + m_Stat.onRemoveNode( nHeight ); + m_Stat.onExtractMinSuccess(); + } + else { + m_Stat.onExtractMinFailed(); + pDel = nullptr; + } + } + } + + return pDel ? 
node_traits::to_value_ptr( pDel ) : nullptr; + } + + value_type * do_extract_max() + { + assert( !gc::is_locked()); + + position pos; + node_type * pDel; + + { + rcu_lock l; + + if ( !find_max_position( pos )) { + m_Stat.onExtractMaxFailed(); + pDel = nullptr; + } + else { + pDel = pos.pCur; + unsigned int const nHeight = pDel->height(); + + if ( try_remove_at( pDel, pos, []( value_type const& ) {}, true )) { + --m_ItemCounter; + m_Stat.onRemoveNode( nHeight ); + m_Stat.onExtractMaxSuccess(); + } + else { + m_Stat.onExtractMaxFailed(); + pDel = nullptr; + } + } + } + + return pDel ? node_traits::to_value_ptr( pDel ) : nullptr; + } + + void increase_height( unsigned int nHeight ) + { + unsigned int nCur = m_nHeight.load( memory_model::memory_order_relaxed ); + if ( nCur < nHeight ) + m_nHeight.compare_exchange_strong( nCur, nHeight, memory_model::memory_order_release, atomics::memory_order_relaxed ); + } + + void destroy() + { + node_type* p = m_Head.head()->next( 0 ).load( atomics::memory_order_relaxed ).ptr(); + while ( p ) { + node_type* pNext = p->next( 0 ).load( atomics::memory_order_relaxed ).ptr(); + dispose_node( node_traits::to_value_ptr( p )); + p = pNext; + } + } + + //@endcond + }; + +}} // namespace cds::intrusive + + +#endif // #ifndef CDSLIB_INTRUSIVE_SKIP_LIST_RCU_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/split_list.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/split_list.h new file mode 100644 index 0000000..3f988bb --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/split_list.h @@ -0,0 +1,1464 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided 
that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_H +#define CDSLIB_INTRUSIVE_SPLIT_LIST_H + +#include +#include +#include + +namespace cds { namespace intrusive { + + /// Split-ordered list + /** @ingroup cds_intrusive_map + \anchor cds_intrusive_SplitListSet_hp + + Hash table implementation based on split-ordered list algorithm discovered by Ori Shalev and Nir Shavit, see + - [2003] Ori Shalev, Nir Shavit "Split-Ordered Lists - Lock-free Resizable Hash Tables" + - [2008] Nir Shavit "The Art of Multiprocessor Programming" + + The split-ordered list is a lock-free implementation of an extensible unbounded hash table. It uses original + recursive split-ordering algorithm discovered by Ori Shalev and Nir Shavit that allows to split buckets + without item moving on resizing. 
+ + \anchor cds_SplitList_algo_desc + Short description + [from [2003] Ori Shalev, Nir Shavit "Split-Ordered Lists - Lock-free Resizable Hash Tables"] + + The algorithm keeps all the items in one lock-free linked list, and gradually assigns the bucket pointers to + the places in the list where a sublist of 'correct' items can be found. A bucket is initialized upon first + access by assigning it to a new 'dummy' node (dashed contour) in the list, preceding all items that should be + in that bucket. A newly created bucket splits an older bucket's chain, reducing the access cost to its items. The + table uses a modulo 2**i hash (there are known techniques for 'pre-hashing' before a modulo 2**i hash + to overcome possible binary correlations among values). The table starts at size 2 and repeatedly doubles in size. + + Unlike moving an item, the operation of directing a bucket pointer can be done + in a single CAS operation, and since items are not moved, they are never 'lost'. + However, to make this approach work, one must be able to keep the items in the + list sorted in such a way that any bucket's sublist can be 'split' by directing a new + bucket pointer within it. This operation must be recursively repeatable, as every + split bucket may be split again and again as the hash table grows. To achieve this + goal the authors introduced recursive split-ordering, a new ordering on keys that keeps items + in a given bucket adjacent in the list throughout the repeated splitting process. + + Magically, yet perhaps not surprisingly, recursive split-ordering is achieved by + simple binary reversal: reversing the bits of the hash key so that the new key's + most significant bits (MSB) are those that were originally its least significant. + The split-order keys of regular nodes are exactly the bit-reverse image of the original + keys after turning on their MSB. 
For example, items 9 and 13 are in the 1 mod + 4 bucket, which can be recursively split in two by inserting a new node between + them. + + To insert (respectively delete or search for) an item in the hash table, hash its + key to the appropriate bucket using recursive split-ordering, follow the pointer to + the appropriate location in the sorted items list, and traverse the list until the key's + proper location in the split-ordering (respectively until the key or a key indicating + the item is not in the list is found). Because of the combinatorial structure induced + by the split-ordering, this will require traversal of no more than an expected constant number of items. + + The design is modular: to implement the ordered items list, you can use one of several + non-blocking list-based set algorithms: MichaelList, LazyList. + + Implementation + + Template parameters are: + - \p GC - Garbage collector. Note the \p GC must be the same as the \p GC used for \p OrderedList + - \p OrderedList - ordered list implementation used as a bucket for hash set, for example, \p MichaelList, \p LazyList. + The intrusive ordered list implementation specifies the type \p T stored in the split-list set, the comparison + functor for the type \p T and other features specific for the ordered list. + - \p Traits - split-list traits, default is \p split_list::traits. + Instead of defining \p Traits struct you can use option-based syntax provided by \p split_list::make_traits metafunction. + + There are several specialization of the split-list class for different \p GC: + - for \ref cds_urcu_gc "RCU type" include - see + \ref cds_intrusive_SplitListSet_rcu "RCU-based split-list" + - for cds::gc::nogc include - see + \ref cds_intrusive_SplitListSet_nogc "persistent SplitListSet". + + \anchor cds_SplitList_hash_functor + Hash functor + + Some member functions of split-ordered list accept the key parameter of type \p Q which differs from \p value_type. 
+ It is expected that type \p Q contains full key of \p value_type, and for equal keys of type \p Q and \p value_type + the hash values of these keys must be equal too. + The hash functor \p Traits::hash should accept parameters of both type: + \code + // Our node type + struct Foo { + std::string key_ ; // key field + // ... other fields + }; + + // Hash functor + struct fooHash { + size_t operator()( const std::string& s ) const + { + return std::hash( s ); + } + + size_t operator()( const Foo& f ) const + { + return (*this)( f.key_ ); + } + }; + \endcode + + How to use + + Split-list based on \p IterableList differs from split-list based on \p MichaelList or \p LazyList + because \p %IterableList stores data "as is" - it cannot use any hook. + + Suppose, your split-list contains values of type \p Foo. + For \p %MichaelList and \p %LazyList, \p Foo declaration should be based on ordered-list node: + - \p %MichaelList: + \code + struct Foo: public cds::intrusive::split_list::node< cds::intrusive::michael_list::node< cds::gc::HP > > + { + // ... field declarations + }; + \endcode + - \p %LazyList: + \code + struct Foo: public cds::intrusive::split_list::node< cds::intrusive::lazy_list::node< cds::gc::HP > > + { + // ... field declarations + }; + \endcode + + For \p %IterableList, \p Foo should be based on \p void: + \code + struct Foo: public cds::intrusive::split_list::node + { + // ... field declarations + }; + \endcode + + Everything else is the same. + Consider split-list based on \p MichaelList. + + First, you should choose ordered list type to use in your split-list set: + \code + // For gc::HP-based MichaelList implementation + #include + + // cds::intrusive::SplitListSet declaration + #include + + // Type of set items + // Note you should declare your struct based on cds::intrusive::split_list::node + // which is a wrapper for ordered-list node struct. 
+ // In our case, the node type for HP-based MichaelList is cds::intrusive::michael_list::node< cds::gc::HP > + struct Foo: public cds::intrusive::split_list::node< cds::intrusive::michael_list::node< cds::gc::HP > > + { + std::string key_ ; // key field + unsigned val_ ; // value field + // ... other value fields + }; + + // Declare comparator for the item + struct FooCmp + { + int operator()( const Foo& f1, const Foo& f2 ) const + { + return f1.key_.compare( f2.key_ ); + } + }; + + // Declare base ordered-list type for split-list + typedef cds::intrusive::MichaelList< cds::gc::HP, Foo, + typename cds::intrusive::michael_list::make_traits< + // hook option + cds::intrusive::opt::hook< cds::intrusive::michael_list::base_hook< cds::opt::gc< cds::gc::HP > > > + // item comparator option + ,cds::opt::compare< FooCmp > + >::type + > Foo_list; + \endcode + + Second, you should declare split-list set container: + \code + + // Declare hash functor + // Note, the hash functor accepts parameter type Foo and std::string + struct FooHash { + size_t operator()( const Foo& f ) const + { + return cds::opt::v::hash()( f.key_ ); + } + size_t operator()( const std::string& s ) const + { + return cds::opt::v::hash()( s ); + } + }; + + // Split-list set typedef + typedef cds::intrusive::SplitListSet< + cds::gc::HP + ,Foo_list + ,typename cds::intrusive::split_list::make_traits< + cds::opt::hash< FooHash > + >::type + > Foo_set; + \endcode + + Now, you can use \p Foo_set in your application. + \code + Foo_set fooSet; + Foo * foo = new Foo; + foo->key_ = "First"; + + fooSet.insert( *foo ); + + // and so on ... 
+ \endcode + */ + template < + class GC, + class OrderedList, +# ifdef CDS_DOXYGEN_INVOKED + class Traits = split_list::traits +# else + class Traits +# endif + > + class SplitListSet + { + public: + typedef GC gc; ///< Garbage collector + typedef Traits traits; ///< Set traits + + protected: + //@cond + typedef split_list::details::rebind_list_traits ordered_list_adapter; + //@endcond + + public: +# ifdef CDS_DOXYGEN_INVOKED + typedef OrderedList ordered_list; ///< type of ordered list used as a base for split-list +# else + typedef typename ordered_list_adapter::result ordered_list; +# endif + typedef typename ordered_list::value_type value_type; ///< type of value stored in the split-list + typedef typename ordered_list::key_comparator key_comparator; ///< key comparison functor + typedef typename ordered_list::disposer disposer; ///< Node disposer functor + + /// Hash functor for \p %value_type and all its derivatives you use + typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash; + + typedef typename traits::bit_reversal bit_reversal; ///< Bit reversal algorithm, see \p split_list::traits::bit_reversal + typedef typename traits::item_counter item_counter; ///< Item counter type + typedef typename traits::back_off back_off; ///< back-off strategy for spinning + typedef typename traits::memory_model memory_model; ///< Memory ordering. 
See \p cds::opt::memory_model option + typedef typename traits::stat stat; ///< Internal statistics, see \p spit_list::stat + typedef typename ordered_list::guarded_ptr guarded_ptr; ///< Guarded pointer + + /// Count of hazard pointer required + static constexpr const size_t c_nHazardPtrCount = ordered_list::c_nHazardPtrCount + 4; // +4 - for iterators + + protected: + //@cond + typedef split_list::node node_type; ///< split-list node type + typedef typename ordered_list_adapter::node_traits node_traits; + + /// Bucket table implementation + typedef typename split_list::details::bucket_table_selector< + traits::dynamic_bucket_table + , gc + , typename ordered_list_adapter::aux_node + , opt::allocator< typename traits::allocator > + , opt::memory_model< memory_model > + , opt::free_list< typename traits::free_list > + >::type bucket_table; + + typedef typename bucket_table::aux_node_type aux_node_type; ///< auxiliary node type + //@endcond + + protected: + //@cond + /// Ordered list wrapper to access protected members + class ordered_list_wrapper: public ordered_list + { + typedef ordered_list base_class; + typedef typename base_class::auxiliary_head bucket_head_type; + + public: + bool insert_at( aux_node_type* pHead, value_type& val ) + { + assert( pHead != nullptr ); + bucket_head_type h(pHead); + return base_class::insert_at( h, val ); + } + + template + bool insert_at( aux_node_type * pHead, value_type& val, Func f ) + { + assert( pHead != nullptr ); + bucket_head_type h(pHead); + return base_class::insert_at( h, val, f ); + } + + template + std::pair update_at( aux_node_type * pHead, value_type& val, Func func, bool bAllowInsert ) + { + assert( pHead != nullptr ); + bucket_head_type h(pHead); + return base_class::update_at( h, val, func, bAllowInsert ); + } + + template + typename std::enable_if< + std::is_same< Q, value_type>::value && is_iterable_list< ordered_list >::value, + std::pair + >::type + upsert_at( aux_node_type * pHead, Q& val, bool bAllowInsert 
) + { + assert( pHead != nullptr ); + bucket_head_type h( pHead ); + return base_class::upsert_at( h, val, bAllowInsert ); + } + + bool unlink_at( aux_node_type * pHead, value_type& val ) + { + assert( pHead != nullptr ); + bucket_head_type h(pHead); + return base_class::unlink_at( h, val ); + } + + template + typename std::enable_if< + std::is_same< Iterator, typename ordered_list::iterator>::value && is_iterable_list< ordered_list >::value, + bool + >::type + erase_at( Iterator iter ) + { + return base_class::erase_at( iter ); + } + + template + bool erase_at( aux_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp, Func f ) + { + assert( pHead != nullptr ); + bucket_head_type h(pHead); + return base_class::erase_at( h, val, cmp, f ); + } + + template + bool erase_at( aux_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp ) + { + assert( pHead != nullptr ); + bucket_head_type h(pHead); + return base_class::erase_at( h, val, cmp ); + } + + template + guarded_ptr extract_at( aux_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp ) + { + assert( pHead != nullptr ); + bucket_head_type h(pHead); + return base_class::extract_at( h, val, cmp ); + } + + template + bool find_at( aux_node_type * pHead, split_list::details::search_value_type& val, Compare cmp, Func f ) + { + assert( pHead != nullptr ); + bucket_head_type h(pHead); + return base_class::find_at( h, val, cmp, f ); + } + + template + bool find_at( aux_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp ) + { + assert( pHead != nullptr ); + bucket_head_type h(pHead); + return base_class::find_at( h, val, cmp ); + } + + template + typename std::enable_if< + std::is_same::value && is_iterable_list< ordered_list >::value, + typename base_class::iterator + >::type + find_iterator_at( aux_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp ) + { + assert( pHead != 
nullptr ); + bucket_head_type h( pHead ); + return base_class::find_iterator_at( h, val, cmp ); + } + + template + guarded_ptr get_at( aux_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp ) + { + assert( pHead != nullptr ); + bucket_head_type h(pHead); + return base_class::get_at( h, val, cmp ); + } + + bool insert_aux_node( aux_node_type * pNode ) + { + return base_class::insert_aux_node( pNode ); + } + bool insert_aux_node( aux_node_type * pHead, aux_node_type * pNode ) + { + bucket_head_type h(pHead); + return base_class::insert_aux_node( h, pNode ); + } + + template + void destroy( Predicate pred ) + { + base_class::destroy( pred ); + } + }; + //@endcond + + protected: + //@cond + template + class iterator_type + : public split_list::details::iterator_type + { + typedef split_list::details::iterator_type iterator_base_class; + typedef typename iterator_base_class::list_iterator list_iterator; + + friend class SplitListSet; + + public: + iterator_type() + : iterator_base_class() + {} + + iterator_type( iterator_type const& src ) + : iterator_base_class( src ) + {} + + // This ctor should be protected... + iterator_type( list_iterator itCur, list_iterator itEnd ) + : iterator_base_class( itCur, itEnd ) + {} + }; + //@endcond + + public: + ///@name Forward iterators + //@{ + /// Forward iterator + /** + The forward iterator is based on \p OrderedList forward iterator and has some features: + - it has no post-increment operator + - it iterates items in unordered fashion + - iterator cannot be moved across thread boundary because it may contain GC's guard that is thread-private GC data. + + Iterator thread safety depends on type of \p OrderedList: + - for \p MichaelList and \p LazyList: iterator guarantees safety even if you delete the item that iterator points to + because that item is guarded by hazard pointer. + However, in case of concurrent deleting operations it is no guarantee that you iterate all item in the set. 
+ Moreover, a crash is possible when you try to iterate the next element that has been deleted by concurrent thread. + Use this iterator on the concurrent container for debugging purpose only. + - for \p IterableList: iterator is thread-safe. You may use it freely in concurrent environment. + */ + typedef iterator_type iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a split-list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( m_List.begin(), m_List.end()); + } + + /// Returns an iterator that addresses the location succeeding the last element in a split-list + /** + Do not use the value returned by end function to access any item. + + The returned value can be used only to control reaching the end of the split-list. + For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator( m_List.end(), m_List.end()); + } + + /// Returns a forward const iterator addressing the first element in a split-list + const_iterator begin() const + { + return cbegin(); + } + /// Returns a forward const iterator addressing the first element in a split-list + const_iterator cbegin() const + { + return const_iterator( m_List.cbegin(), m_List.cend()); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a split-list + const_iterator end() const + { + return cend(); + } + /// Returns an const iterator that addresses the location succeeding the last element in a split-list + const_iterator cend() const + { + return const_iterator( m_List.cend(), m_List.cend()); + } + //@} + + public: + /// Initialize split-ordered list of default capacity + /** + The default capacity is defined in bucket table constructor. 
+ See \p split_list::expandable_bucket_table, \p split_list::static_bucket_table + which selects by \p split_list::dynamic_bucket_table option. + */ + SplitListSet() + : m_nBucketCountLog2(1) + , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor())) + { + init(); + } + + /// Initialize split-ordered list + SplitListSet( + size_t nItemCount ///< estimate average of item count + , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 8, default is 1. + ) + : m_Buckets( nItemCount, nLoadFactor ) + , m_nBucketCountLog2(1) + , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor())) + { + init(); + } + + /// Destroys split-list set + ~SplitListSet() + { + // list contains aux node that cannot be retired + // all aux nodes will be destroyed by bucket table dtor + m_List.destroy( + []( node_type * pNode ) -> bool { + return !pNode->is_dummy(); + } + ); + gc::force_dispose(); + } + + public: + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. + + Returns \p true if \p val is placed into the set, \p false otherwise. + */ + bool insert( value_type& val ) + { + size_t nHash = hash_value( val ); + aux_node_type * pHead = get_bucket( nHash ); + assert( pHead != nullptr ); + + node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); + + if ( m_List.insert_at( pHead, val )) { + inc_item_count(); + m_Stat.onInsertSuccess(); + return true; + } + m_Stat.onInsertFailed(); + return false; + } + + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. + + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. 
+ The user-defined functor is called only if the inserting is success. + + @warning For \ref cds_intrusive_MichaelList_hp "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". + \ref cds_intrusive_LazyList_hp "LazyList" provides exclusive access to inserted item and does not require any node-level + synchronization. + */ + template + bool insert( value_type& val, Func f ) + { + size_t nHash = hash_value( val ); + aux_node_type * pHead = get_bucket( nHash ); + assert( pHead != nullptr ); + + node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); + + if ( m_List.insert_at( pHead, val, f )) { + inc_item_count(); + m_Stat.onInsertSuccess(); + return true; + } + m_Stat.onInsertFailed(); + return false; + } + + /// Updates the node + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val is not found in the set, then \p val is inserted + iff \p bAllowInsert is \p true. + Otherwise, the functor \p func is called with item found. + + The functor signature depends of the type of \p OrderedList: + + for \p MichaelList, \p LazyList + \code + struct functor { + void operator()( bool bNew, value_type& item, value_type& val ); + }; + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p %update() function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refers to the same thing. + + The functor may change non-key fields of the \p item. + @warning For \ref cds_intrusive_MichaelList_hp "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". + \ref cds_intrusive_LazyList_hp "LazyList" provides exclusive access to inserted item and does not require any node-level + synchronization. 
+ + for \p IterableList + \code + void func( value_type& val, value_type * old ); + \endcode + where + - \p val - argument \p val passed into the \p %update() function + - \p old - old value that will be retired. If new item has been inserted then \p old is \p nullptr. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if new item has been added or \p false if the item with \p val + already is in the list. + */ + template + std::pair update( value_type& val, Func func, bool bAllowInsert = true ) + { + size_t nHash = hash_value( val ); + aux_node_type * pHead = get_bucket( nHash ); + assert( pHead != nullptr ); + + node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); + + std::pair bRet = m_List.update_at( pHead, val, func, bAllowInsert ); + if ( bRet.first && bRet.second ) { + inc_item_count(); + m_Stat.onUpdateNew(); + } + else + m_Stat.onUpdateExist(); + return bRet; + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( value_type& val, Func func ) + { + return update( val, func, true ); + } + //@endcond + + /// Inserts or updates the node (only for \p IterableList) + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val is not found in the set, then \p val is inserted iff \p bAllowInsert is \p true. + Otherwise, the current element is changed to \p val, the old element will be retired later + by call \p Traits::disposer. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if \p val has been added or \p false if the item with that key + already in the set. 
+ */ +#ifdef CDS_DOXYGEN_INVOKED + std::pair upsert( value_type& val, bool bAllowInsert = true ) +#else + template + typename std::enable_if< + std::is_same< Q, value_type>::value && is_iterable_list< ordered_list >::value, + std::pair + >::type + upsert( Q& val, bool bAllowInsert = true ) +#endif + { + size_t nHash = hash_value( val ); + aux_node_type * pHead = get_bucket( nHash ); + assert( pHead != nullptr ); + + node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); + + std::pair bRet = m_List.upsert_at( pHead, val, bAllowInsert ); + if ( bRet.first && bRet.second ) { + inc_item_count(); + m_Stat.onUpdateNew(); + } + else + m_Stat.onUpdateExist(); + return bRet; + } + + /// Unlinks the item \p val from the set + /** + The function searches the item \p val in the set and unlinks it from the set + if it is found and is equal to \p val. + + Difference between \ref erase and \p unlink functions: \p erase finds a key + and deletes the item found. \p unlink finds an item by key and deletes it + only if \p val is an item of that set, i.e. the pointer to item found + is equal to &val . + + The function returns \p true if success and \p false otherwise. + */ + bool unlink( value_type& val ) + { + size_t nHash = hash_value( val ); + aux_node_type * pHead = get_bucket( nHash ); + assert( pHead != nullptr ); + + if ( m_List.unlink_at( pHead, val )) { + --m_ItemCounter; + m_Stat.onEraseSuccess(); + return true; + } + m_Stat.onEraseFailed(); + return false; + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_SplitListSet_hp_erase + The function searches an item with key equal to \p key in the set, + unlinks it from the set, and returns \p true. + If the item with key equal to \p key is not found the function return \p false. + + Difference between \ref erase and \p unlink functions: \p erase finds a key + and deletes the item found. \p unlink finds an item by key and deletes it + only if \p key is an item of that set, i.e. 
the pointer to item found + is equal to &key . + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool erase( Q const& key ) + { + return erase_( key, key_comparator()); + } + + /// Deletes the item from the set with comparing functor \p pred + /** + + The function is an analog of \ref cds_intrusive_SplitListSet_hp_erase "erase(Q const&)" + but \p pred predicate is used for key comparing. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( const Q& key, Less pred ) + { + CDS_UNUSED( pred ); + return erase_( key, typename ordered_list_adapter::template make_compare_from_less()); + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_SplitListSet_hp_erase_func + The function searches an item with key equal to \p key in the set, + call \p f functor with item found, unlinks it from the set, and returns \p true. + The \ref disposer specified by \p OrderedList class template parameter is called + by garbage collector \p GC asynchronously. + + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + + If the item with key equal to \p key is not found the function return \p false. + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool erase( Q const& key, Func f ) + { + return erase_( key, key_comparator(), f ); + } + + /// Deletes the item from the set with comparing functor \p pred + /** + The function is an analog of \ref cds_intrusive_SplitListSet_hp_erase_func "erase(Q const&, Func)" + but \p pred predicate is used for key comparing. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. 
+ */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return erase_( key, typename ordered_list_adapter::template make_compare_from_less(), f ); + } + + /// Deletes the item pointed by iterator \p iter (only for \p IterableList based set) + /** + Returns \p true if the operation is successful, \p false otherwise. + The function can return \p false if the node the iterator points to has already been deleted + by other thread. + + The function does not invalidate the iterator, it remains valid and can be used for further traversing. + + @note \p %erase_at() is supported only for \p %SplitListSet based on \p IterableList. + */ +#ifdef CDS_DOXYGEN_INVOKED + bool erase_at( iterator const& iter ) +#else + template + typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, bool >::type + erase_at( Iterator const& iter ) +#endif + { + assert( iter != end()); + + if ( m_List.erase_at( iter.underlying_iterator())) { + --m_ItemCounter; + m_Stat.onEraseSuccess(); + return true; + } + return false; + } + + /// Extracts the item with specified \p key + /** \anchor cds_intrusive_SplitListSet_hp_extract + The function searches an item with key equal to \p key, + unlinks it from the set, and returns it as \p guarded_ptr. + If \p key is not found the function returns an empty guarded pointer. + + Note the compare functor should accept a parameter of type \p Q that may be not the same as \p value_type. + + The \p disposer specified in \p OrderedList class' template parameter is called automatically + by garbage collector \p GC when returned \p guarded_ptr object will be destroyed or released. + @note Each \p guarded_ptr object uses the GC's guard that can be limited resource. + + Usage: + \code + typedef cds::intrusive::SplitListSet< your_template_args > splitlist_set; + splitlist_set theSet; + // ... + { + splitlist_set::guarded_ptr gp( theSet.extract( 5 )); + if ( gp) { + // Deal with gp + // ... 
+ } + // Destructor of gp releases internal HP guard + } + \endcode + */ + template + guarded_ptr extract( Q const& key ) + { + return extract_( key ); + } + + /// Extracts the item using compare functor \p pred + /** + The function is an analog of \ref cds_intrusive_SplitListSet_hp_extract "extract(Q const&)" + but \p pred predicate is used for key comparing. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + guarded_ptr extract_with( Q const& key, Less pred ) + { + return extract_with_( key, pred ); + } + + /// Finds the key \p key + /** \anchor cds_intrusive_SplitListSet_hp_find_func + The function searches the item with key equal to \p key and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. + + The functor can change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function returns \p true if \p key is found, \p false otherwise. 
+ */ + template + bool find( Q& key, Func f ) + { + return find_( key, key_comparator(), f ); + } + //@cond + template + bool find( Q const& key, Func f ) + { + return find_( key, key_comparator(), f ); + } + //@endcond + + /// Finds \p key and returns iterator pointed to the item found (only for \p IterableList) + /** + If \p key is not found the function returns \p end(). + + @note This function is supported only for the set based on \p IterableList + */ + template +#ifdef CDS_DOXYGEN_INVOKED + iterator +#else + typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type +#endif + find( Q& key ) + { + return find_iterator_( key, key_comparator()); + } + //@cond + template + typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type + find( Q const& key ) + { + return find_iterator_( key, key_comparator()); + } + //@endcond + + + /// Finds the key \p key with \p pred predicate for comparing + /** + The function is an analog of \ref cds_intrusive_SplitListSet_hp_find_func "find(Q&, Func)" + but \p cmp is used for key compare. + \p Less has the interface like \p std::less. + \p cmp must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return find_( key, typename ordered_list_adapter::template make_compare_from_less(), f ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return find_( key, typename ordered_list_adapter::template make_compare_from_less(), f ); + } + //@endcond + + /// Finds \p key using \p pred predicate and returns iterator pointed to the item found (only for \p IterableList) + /** + The function is an analog of \p find(Q&) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. 
+ \p pred must imply the same element order as the comparator used for building the set. + + If \p key is not found the function returns \p end(). + + @note This function is supported only for the set based on \p IterableList + */ + template +#ifdef CDS_DOXYGEN_INVOKED + iterator +#else + typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type +#endif + find_with( Q& key, Less pred ) + { + CDS_UNUSED( pred ); + return find_iterator_( key, typename ordered_list_adapter::template make_compare_from_less()); + } + //@cond + template + typename std::enable_if< std::is_same::value && is_iterable_list< ordered_list >::value, iterator >::type + find_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return find_iterator_( key, typename ordered_list_adapter::template make_compare_from_less()); + } + //@endcond + + + /// Checks whether the set contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + Otherwise, you may use \p contains( Q const&, Less pred ) functions with explicit predicate for key comparing. + */ + template + bool contains( Q const& key ) + { + return find_( key, key_comparator()); + } + + /// Checks whether the set contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. 
+ */ + template + bool contains( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return find_( key, typename ordered_list_adapter::template make_compare_from_less()); + } + + /// Finds the key \p key and return the item found + /** \anchor cds_intrusive_SplitListSet_hp_get + The function searches the item with key equal to \p key + and returns the item found as \p guarded_ptr. + If \p key is not found the function returns an empty guarded pointer. + + The \p disposer specified in \p OrderedList class' template parameter is called + by garbage collector \p GC automatically when returned \p guarded_ptr object + will be destroyed or released. + @note Each \p guarded_ptr object uses one GC's guard which can be limited resource. + + Usage: + \code + typedef cds::intrusive::SplitListSet< your_template_params > splitlist_set; + splitlist_set theSet; + // ... + { + splitlist_set::guarded_ptr gp = theSet.get( 5 ); + if ( gp ) { + // Deal with gp + //... + } + // Destructor of guarded_ptr releases internal HP guard + } + \endcode + + Note the compare functor specified for \p OrderedList template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + guarded_ptr get( Q const& key ) + { + return get_( key ); + } + + /// Finds the key \p key and return the item found + /** + The function is an analog of \ref cds_intrusive_SplitListSet_hp_get "get( Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. 
+ */ + template + guarded_ptr get_with( Q const& key, Less pred ) + { + return get_with_( key, pred ); + } + + /// Returns item count in the set + size_t size() const + { + return m_ItemCounter; + } + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. + Thus, the correct item counting feature is an important part of split-list set implementation. + */ + bool empty() const + { + return size() == 0; + } + + /// Clears the set (non-atomic) + /** + The function unlink all items from the set. + The function is not atomic. After call the split-list can be non-empty. + + For each item the \p disposer is called after unlinking. + */ + void clear() + { + iterator it = begin(); + while ( it != end()) { + iterator i(it); + ++i; + unlink( *it ); + it = i; + } + } + + /// Returns internal statistics + stat const& statistics() const + { + return m_Stat; + } + + /// Returns internal statistics for \p OrderedList + typename OrderedList::stat const& list_statistics() const + { + return m_List.statistics(); + } + + protected: + //@cond + aux_node_type * alloc_aux_node( size_t nHash ) + { + m_Stat.onHeadNodeAllocated(); + aux_node_type* p = m_Buckets.alloc_aux_node(); + if ( p ) { + CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN; + // p->m_nHash is read-only data member + p->m_nHash = nHash; + CDS_TSAN_ANNOTATE_IGNORE_WRITES_END; +# ifdef CDS_DEBUG + cds_assert( !p->m_busy.load( atomics::memory_order_acquire )); + p->m_busy.store( true, atomics::memory_order_release ); +# endif + } + return p; + } + + void free_aux_node( aux_node_type * p ) + { +# ifdef CDS_DEBUG + cds_assert( p->m_busy.load( atomics::memory_order_acquire )); + p->m_busy.store( false, atomics::memory_order_release ); +# endif + + m_Buckets.free_aux_node( p ); + m_Stat.onHeadNodeFreed(); + } + + /// Calculates hash value of \p key + template + size_t hash_value( Q const& key ) const + { + return m_HashFunctor( key ); + } + + size_t bucket_no( size_t 
nHash ) const + { + return nHash & ((1 << m_nBucketCountLog2.load( memory_model::memory_order_relaxed )) - 1); + } + + static size_t parent_bucket( size_t nBucket ) + { + assert( nBucket > 0 ); + return nBucket & ~(1 << bitop::MSBnz( nBucket )); + } + + aux_node_type * init_bucket( size_t const nBucket ) + { + assert( nBucket > 0 ); + size_t nParent = parent_bucket( nBucket ); + + aux_node_type * pParentBucket = m_Buckets.bucket( nParent ); + if ( pParentBucket == nullptr ) { + pParentBucket = init_bucket( nParent ); + m_Stat.onRecursiveInitBucket(); + } + + assert( pParentBucket != nullptr ); + + // Allocate an aux node for new bucket + aux_node_type * pBucket = m_Buckets.bucket( nBucket ); + + back_off bkoff; + for ( ;; pBucket = m_Buckets.bucket( nBucket )) { + if ( pBucket ) + return pBucket; + + pBucket = alloc_aux_node( split_list::dummy_hash( nBucket )); + if ( pBucket ) { + if ( m_List.insert_aux_node( pParentBucket, pBucket )) { + m_Buckets.bucket( nBucket, pBucket ); + m_Stat.onNewBucket(); + return pBucket; + } + + // Another thread set the bucket. Wait while it done + free_aux_node( pBucket ); + m_Stat.onBucketInitContenton(); + break; + } + + // There are no free buckets. It means that the bucket table is full + // Wait while another thread set the bucket or a free bucket will be available + m_Stat.onBucketsExhausted(); + bkoff(); + } + + // Another thread set the bucket. 
Wait while it done + for ( pBucket = m_Buckets.bucket( nBucket ); pBucket == nullptr; pBucket = m_Buckets.bucket( nBucket )) { + bkoff(); + m_Stat.onBusyWaitBucketInit(); + } + + return pBucket; + } + + aux_node_type * get_bucket( size_t nHash ) + { + size_t nBucket = bucket_no( nHash ); + + aux_node_type * pHead = m_Buckets.bucket( nBucket ); + if ( pHead == nullptr ) + pHead = init_bucket( nBucket ); + + assert( pHead->is_dummy()); + + return pHead; + } + + void init() + { + // GC and OrderedList::gc must be the same + static_assert(std::is_same::value, "GC and OrderedList::gc must be the same"); + + // atomicity::empty_item_counter is not allowed as a item counter + static_assert(!std::is_same::value, + "cds::atomicity::empty_item_counter is not allowed as a item counter"); + + // Initialize bucket 0 + aux_node_type * pNode = alloc_aux_node( 0 /*split_list::dummy_hash(0)*/ ); + assert( pNode != nullptr ); + + // insert_aux_node cannot return false for empty list + CDS_VERIFY( m_List.insert_aux_node( pNode )); + + m_Buckets.bucket( 0, pNode ); + } + + static size_t max_item_count( size_t nBucketCount, size_t nLoadFactor ) + { + return nBucketCount * nLoadFactor; + } + + void inc_item_count() + { + size_t nMaxCount = m_nMaxItemCount.load( memory_model::memory_order_relaxed ); + if ( ++m_ItemCounter <= nMaxCount ) + return; + + size_t sz = m_nBucketCountLog2.load( memory_model::memory_order_relaxed ); + const size_t nBucketCount = static_cast(1) << sz; + if ( nBucketCount < m_Buckets.capacity()) { + // we may grow the bucket table + const size_t nLoadFactor = m_Buckets.load_factor(); + if ( nMaxCount < max_item_count( nBucketCount, nLoadFactor )) + return; // someone already have updated m_nBucketCountLog2, so stop here + + m_nMaxItemCount.compare_exchange_strong( nMaxCount, max_item_count( nBucketCount << 1, nLoadFactor ), + memory_model::memory_order_relaxed, atomics::memory_order_relaxed ); + m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, 
memory_model::memory_order_relaxed, atomics::memory_order_relaxed ); + } + else + m_nMaxItemCount.store( std::numeric_limits::max(), memory_model::memory_order_relaxed ); + } + + template + bool find_( Q& val, Compare cmp, Func f ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + aux_node_type * pHead = get_bucket( nHash ); + assert( pHead != nullptr ); + + return m_Stat.onFind( + m_List.find_at( pHead, sv, cmp, + [&f]( value_type& item, split_list::details::search_value_type& v ) { f( item, v.val ); } ) + ); + } + + template + bool find_( Q const& val, Compare cmp ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + aux_node_type * pHead = get_bucket( nHash ); + assert( pHead != nullptr ); + + return m_Stat.onFind( m_List.find_at( pHead, sv, cmp )); + } + + template + iterator find_iterator_( Q const& val, Compare cmp ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + aux_node_type * pHead = get_bucket( nHash ); + assert( pHead != nullptr ); + + return iterator( m_List.find_iterator_at( pHead, sv, cmp ), m_List.end()); + } + + template + guarded_ptr get_( Q const& val, Compare cmp ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + aux_node_type * pHead = get_bucket( nHash ); + assert( pHead != nullptr ); + + guarded_ptr gp = m_List.get_at( pHead, sv, cmp ); + m_Stat.onFind( !gp.empty()); + return gp; + } + + template + guarded_ptr get_( Q const& key ) + { + return get_( key, key_comparator()); + } + + template + guarded_ptr get_with_( Q const& key, Less ) + { + return get_( key, typename ordered_list_adapter::template make_compare_from_less()); + } + + template + bool erase_( Q const& val, Compare cmp, Func f ) + { + size_t nHash = hash_value( val ); + 
split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + aux_node_type * pHead = get_bucket( nHash ); + assert( pHead != nullptr ); + + if ( m_List.erase_at( pHead, sv, cmp, f )) { + --m_ItemCounter; + m_Stat.onEraseSuccess(); + return true; + } + m_Stat.onEraseFailed(); + return false; + } + + template + bool erase_( Q const& val, Compare cmp ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + aux_node_type * pHead = get_bucket( nHash ); + assert( pHead != nullptr ); + + if ( m_List.erase_at( pHead, sv, cmp )) { + --m_ItemCounter; + m_Stat.onEraseSuccess(); + return true; + } + m_Stat.onEraseFailed(); + return false; + } + + template + guarded_ptr extract_( Q const& val, Compare cmp ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + aux_node_type * pHead = get_bucket( nHash ); + assert( pHead != nullptr ); + + guarded_ptr gp = m_List.extract_at( pHead, sv, cmp ); + if ( gp ) { + --m_ItemCounter; + m_Stat.onExtractSuccess(); + } + else + m_Stat.onExtractFailed(); + return gp; + } + + template + guarded_ptr extract_( Q const& key ) + { + return extract_( key, key_comparator()); + } + + template + guarded_ptr extract_with_( Q const& key, Less ) + { + return extract_( key, typename ordered_list_adapter::template make_compare_from_less()); + } + //@endcond + + protected: + //@cond + static unsigned const c_padding = cds::opt::actual_padding< traits::padding >::value; + + typedef typename cds::details::type_padding< bucket_table, c_padding >::type padded_bucket_table; + padded_bucket_table m_Buckets; ///< bucket table + + typedef typename cds::details::type_padding< ordered_list_wrapper, c_padding >::type padded_ordered_list; + padded_ordered_list m_List; ///< Ordered list containing split-list items + + atomics::atomic m_nBucketCountLog2; ///< log2( current bucket count ) + 
atomics::atomic m_nMaxItemCount; ///< number of items container can hold, before we have to resize + hash m_HashFunctor; ///< Hash functor + item_counter m_ItemCounter; ///< Item counter + stat m_Stat; ///< Internal statistics + //@endcond + }; + +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/split_list_nogc.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/split_list_nogc.h new file mode 100644 index 0000000..966f5da --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/split_list_nogc.h @@ -0,0 +1,743 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_NOGC_H +#define CDSLIB_INTRUSIVE_SPLIT_LIST_NOGC_H + +#include + +#include +#include +#include + +namespace cds { namespace intrusive { + + /// Split-ordered list (template specialization for gc::nogc) + /** @ingroup cds_intrusive_map + \anchor cds_intrusive_SplitListSet_nogc + + This specialization is intended for so-called persistent usage when no item + reclamation may be performed. The class does not support deleting of list item. + + See \ref cds_intrusive_SplitListSet_hp "SplitListSet" for description of template parameters. 
+ The template parameter \p OrderedList should be any gc::nogc-derived ordered list, for example, + \ref cds_intrusive_MichaelList_nogc "persistent MichaelList", + \ref cds_intrusive_LazyList_nogc "persistent LazyList" + */ + template < + class OrderedList, +#ifdef CDS_DOXYGEN_INVOKED + class Traits = split_list::traits +#else + class Traits +#endif + > + class SplitListSet< cds::gc::nogc, OrderedList, Traits > + { + public: + typedef cds::gc::nogc gc; ///< Garbage collector + typedef Traits traits; ///< Traits template parameters + + /// Hash functor for \p value_type and all its derivatives that you use + typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash; + + protected: + //@cond + typedef split_list::details::rebind_list_traits ordered_list_adapter; + //@endcond + + public: +# ifdef CDS_DOXYGEN_INVOKED + typedef OrderedList ordered_list; ///< type of ordered list used as base for split-list +# else + typedef typename ordered_list_adapter::result ordered_list; +# endif + typedef typename ordered_list::value_type value_type; ///< type of value stored in the split-list + typedef typename ordered_list::key_comparator key_comparator; ///< key comparison functor + typedef typename ordered_list::disposer disposer; ///< Node disposer functor + + typedef typename traits::bit_reversal bit_reversal; ///< Bit reversal algorithm, see \p split_list::traits::bit_reversal + typedef typename traits::item_counter item_counter; ///< Item counter type + typedef typename traits::back_off back_off; ///< back-off strategy + typedef typename traits::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename traits::stat stat; ///< Internal statistics, see \p spit_list::stat + + // GC and OrderedList::gc must be the same + static_assert(std::is_same::value, "GC and OrderedList::gc must be the same"); + + // atomicity::empty_item_counter is not allowed as a item counter + static_assert(!std::is_same::value, + "cds::atomicity::empty_item_counter is not allowed as a item counter"); + + protected: + //@cond + typedef typename ordered_list::node_type list_node_type; ///< Node type as declared in ordered list + typedef split_list::node node_type; ///< split-list node type + + /// Split-list node traits + /** + This traits is intended for converting between underlying ordered list node type \ref list_node_type + and split-list node type \ref node_type + */ + typedef typename ordered_list_adapter::node_traits node_traits; + + /// Bucket table implementation + typedef typename split_list::details::bucket_table_selector< + traits::dynamic_bucket_table + , gc + , typename ordered_list_adapter::aux_node + , opt::allocator< typename traits::allocator > + , opt::memory_model< memory_model > + , opt::free_list< typename traits::free_list > + >::type bucket_table; + + typedef typename bucket_table::aux_node_type aux_node_type; ///< dummy node type + + typedef typename ordered_list::iterator list_iterator; + typedef typename ordered_list::const_iterator list_const_iterator; + //@endcond + + protected: + //@cond + /// Ordered list wrapper to access protected members + class ordered_list_wrapper: public ordered_list + { + typedef ordered_list base_class; + typedef typename base_class::auxiliary_head bucket_head_type; + + public: + list_iterator insert_at_( aux_node_type * pHead, value_type& val ) + { + assert( pHead != nullptr ); + bucket_head_type h(static_cast(pHead)); + return base_class::insert_at_( h, val ); + } + + template + std::pair update_at_( aux_node_type * pHead, value_type& val, Func func, bool bAllowInsert ) + { + 
assert( pHead != nullptr ); + bucket_head_type h(static_cast(pHead)); + return base_class::update_at_( h, val, func, bAllowInsert ); + } + + template + bool find_at( aux_node_type * pHead, split_list::details::search_value_type& val, Compare cmp, Func f ) + { + assert( pHead != nullptr ); + bucket_head_type h(static_cast(pHead)); + return base_class::find_at( h, val, cmp, f ); + } + + template + list_iterator find_at_( aux_node_type * pHead, split_list::details::search_value_type const & val, Compare cmp ) + { + assert( pHead != nullptr ); + bucket_head_type h(static_cast(pHead)); + return base_class::find_at_( h, val, cmp ); + } + + bool insert_aux_node( aux_node_type * pNode ) + { + return base_class::insert_aux_node( pNode ); + } + bool insert_aux_node( aux_node_type * pHead, aux_node_type * pNode ) + { + bucket_head_type h(static_cast(pHead)); + return base_class::insert_aux_node( h, pNode ); + } + + template + void erase_for( Predicate pred ) + { + return base_class::erase_for( pred ); + } + }; + //@endcond + + public: + /// Initialize split-ordered list of default capacity + /** + The default capacity is defined in bucket table constructor. + See split_list::expandable_bucket_table, split_list::static_ducket_table + which selects by split_list::dynamic_bucket_table option. + */ + SplitListSet() + : m_nBucketCountLog2(1) + , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor())) + { + init(); + } + + /// Initialize split-ordered list + SplitListSet( + size_t nItemCount ///< estimate average of item count + , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 10, default is 1. 
+ ) + : m_Buckets( nItemCount, nLoadFactor ) + , m_nBucketCountLog2(1) + , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor())) + { + init(); + } + + /// Destroys split-list + ~SplitListSet() + { + m_List.clear(); + } + public: + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. + + Returns \p true if \p val is placed into the set, \p false otherwise. + */ + bool insert( value_type& val ) + { + return insert_( val ) != end(); + } + + /// Updates the node + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val is not found in the set, then \p val is inserted + iff \p bAllowInsert is \p true. + Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + void func( bool bNew, value_type& item, value_type& val ); + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p update() function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refers to the same thing. + + The functor may change non-key fields of the \p item. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if new item has been added or \p false if the item with \p key + already is in the list. + + @warning For \ref cds_intrusive_MichaelList_hp "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". + \ref cds_intrusive_LazyList_hp "LazyList" provides exclusive access to inserted item and does not require any node-level + synchronization. 
+ */ + template + std::pair update( value_type& val, Func func, bool bAllowInsert = true ) + { + std::pair ret = update_( val, func, bAllowInsert ); + return std::make_pair( ret.first != end(), ret.second ); + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( value_type& val, Func func ) + { + return update( val, func, true ); + } + //@endcond + + /// Checks whether the set contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + Otherwise, you may use \p contains( Q const&, Less pred ) functions with explicit predicate for key comparing. + */ + template + value_type * contains( Q const& key ) + { + iterator it = find_( key ); + if ( it == end()) + return nullptr; + return &*it; + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + value_type * find( Q const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the set contains \p key using \p pred predicate for searching + /** + The function is similar to contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the list. + */ + template + value_type * contains( Q const& key, Less pred ) + { + iterator it = find_with_( key, pred ); + if ( it == end()) + return nullptr; + return &*it; + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + value_type * find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Finds the key \p key + /** \anchor cds_intrusive_SplitListSet_nogc_find_func + The function searches the item with key equal to \p key and calls the functor \p f for item found. 
+ The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. + + The functor can change non-key fields of \p item. + The functor does not serialize simultaneous access to the set \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( Q& key, Func f ) + { + return find_( key, key_comparator(), f ); + } + //@cond + template + bool find( Q const& key, Func f ) + { + return find_( key, key_comparator(), f ); + } + //@endcond + + /// Finds the key \p key with \p pred predicate for comparing + /** + The function is an analog of \ref cds_intrusive_SplitListSet_nogc_find_func "find(Q&, Func)" + but \p cmp is used for key compare. + \p Less has the interface like \p std::less. + \p cmp must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return find_( key, typename ordered_list_adapter::template make_compare_from_less(), f ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return find_( key, typename ordered_list_adapter::template make_compare_from_less(), f ); + } + //@endcond + + + /// Clears the set (non-atomic, not thread-safe) + /** + The function unlink all items from the set. + The function is not atomic. It cleans up each bucket and then resets the item counter to zero. 
+ If there are a thread that performs insertion while \p %clear() is working the result is undefined in general case: + empty() may return \p true but the set may contain item(s). + Therefore, \p %clear() may be used only for debugging purposes. + + For each item the \p disposer is called after unlinking. + */ + void clear() + { + m_List.erase_for( []( value_type const& val ) -> bool { return !node_traits::to_node_ptr( val )->is_dummy(); } ); + m_ItemCounter.reset(); + } + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. + Thus, the correct item counting feature is an important part of split-list implementation. + */ + bool empty() const + { + return size() == 0; + } + + /// Returns item count in the set + size_t size() const + { + return m_ItemCounter; + } + + /// Returns internal statistics + stat const& statistics() const + { + return m_Stat; + } + + /// Returns internal statistics for \p OrderedList + typename OrderedList::stat const& list_statistics() const + { + return m_List.statistics(); + } + + protected: + //@cond + template + class iterator_type + : public split_list::details::iterator_type + { + typedef split_list::details::iterator_type iterator_base_class; + typedef typename iterator_base_class::list_iterator list_iterator; + public: + iterator_type() + : iterator_base_class() + {} + + iterator_type( iterator_type const& src ) + : iterator_base_class( src ) + {} + + // This ctor should be protected... 
+ iterator_type( list_iterator itCur, list_iterator itEnd ) + : iterator_base_class( itCur, itEnd ) + {} + }; + //@endcond + + public: + ///@name Forward iterators + //@{ + /// Forward iterator + /** + The forward iterator for a split-list has some features: + - it has no post-increment operator + - it depends on iterator of underlying \p OrderedList + */ + typedef iterator_type iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a split-list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( m_List.begin(), m_List.end()); + } + + /// Returns an iterator that addresses the location succeeding the last element in a split-list + /** + Do not use the value returned by end function to access any item. + + The returned value can be used only to control reaching the end of the split-list. 
+ For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator( m_List.end(), m_List.end()); + } + + /// Returns a forward const iterator addressing the first element in a split-list + const_iterator begin() const + { + return const_iterator( m_List.begin(), m_List.end()); + } + + /// Returns a forward const iterator addressing the first element in a split-list + const_iterator cbegin() const + { + return const_iterator( m_List.cbegin(), m_List.cend()); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a split-list + const_iterator end() const + { + return const_iterator( m_List.end(), m_List.end()); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a split-list + const_iterator cend() const + { + return const_iterator( m_List.cend(), m_List.cend()); + } + //@} + + protected: + //@cond + iterator insert_( value_type& val ) + { + size_t nHash = hash_value( val ); + aux_node_type * pHead = get_bucket( nHash ); + assert( pHead != nullptr ); + + node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); + + list_iterator it = m_List.insert_at_( pHead, val ); + if ( it != m_List.end()) { + inc_item_count(); + m_Stat.onInsertSuccess(); + return iterator( it, m_List.end()); + } + m_Stat.onInsertFailed(); + return end(); + } + + template + std::pair update_( value_type& val, Func func, bool bAllowInsert ) + { + size_t nHash = hash_value( val ); + aux_node_type * pHead = get_bucket( nHash ); + assert( pHead != nullptr ); + + node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); + + std::pair ret = m_List.update_at_( pHead, val, func, bAllowInsert ); + if ( ret.first != m_List.end()) { + if ( ret.second ) { + inc_item_count(); + m_Stat.onUpdateNew(); + } + else + m_Stat.onUpdateExist(); + return std::make_pair( iterator(ret.first, m_List.end()), ret.second ); + } + return std::make_pair( end(), ret.second ); + 
} + + template + iterator find_with_( Q& val, Less pred ) + { + CDS_UNUSED( pred ); + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + aux_node_type * pHead = get_bucket( nHash ); + assert( pHead != nullptr ); + + auto it = m_List.find_at_( pHead, sv, typename ordered_list_adapter::template make_compare_from_less()); + m_Stat.onFind( it != m_List.end()); + return iterator( it, m_List.end()); + } + + template + iterator find_( Q const& val ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + aux_node_type * pHead = get_bucket( nHash ); + assert( pHead != nullptr ); + + auto it = m_List.find_at_( pHead, sv, key_comparator()); + m_Stat.onFind( it != m_List.end()); + return iterator( it, m_List.end()); + } + + template + bool find_( Q& val, Compare cmp, Func f ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + aux_node_type * pHead = get_bucket( nHash ); + assert( pHead != nullptr ); + return m_Stat.onFind( m_List.find_at( pHead, sv, cmp, + [&f](value_type& item, split_list::details::search_value_type& v){ f(item, v.val ); })); + } + + aux_node_type * alloc_aux_node( size_t nHash ) + { + m_Stat.onHeadNodeAllocated(); + aux_node_type* p = m_Buckets.alloc_aux_node(); + if ( p ) + p->m_nHash = nHash; + return p; + } + + void free_aux_node( aux_node_type * p ) + { + m_Buckets.free_aux_node( p ); + m_Stat.onHeadNodeFreed(); + } + + /// Calculates hash value of \p key + template + size_t hash_value( Q const& key ) const + { + return m_HashFunctor( key ); + } + + size_t bucket_no( size_t nHash ) const + { + return nHash & ((1 << m_nBucketCountLog2.load( memory_model::memory_order_relaxed )) - 1); + } + + static size_t parent_bucket( size_t nBucket ) + { + assert( nBucket > 0 ); + return nBucket & ~(1 << bitop::MSBnz( nBucket )); + } + + aux_node_type 
* init_bucket( size_t const nBucket ) + { + assert( nBucket > 0 ); + size_t nParent = parent_bucket( nBucket ); + + aux_node_type * pParentBucket = m_Buckets.bucket( nParent ); + if ( pParentBucket == nullptr ) { + pParentBucket = init_bucket( nParent ); + m_Stat.onRecursiveInitBucket(); + } + + assert( pParentBucket != nullptr ); + + // Allocate an aux node for new bucket + aux_node_type * pBucket = m_Buckets.bucket( nBucket ); + + back_off bkoff; + for ( ;; pBucket = m_Buckets.bucket( nBucket )) { + if ( pBucket ) + return pBucket; + + pBucket = alloc_aux_node( split_list::dummy_hash( nBucket )); + if ( pBucket ) { + if ( m_List.insert_aux_node( pParentBucket, pBucket )) { + m_Buckets.bucket( nBucket, pBucket ); + m_Stat.onNewBucket(); + return pBucket; + } + + // Another thread set the bucket. Wait while it done + free_aux_node( pBucket ); + m_Stat.onBucketInitContenton(); + break; + } + + // There are no free buckets. It means that the bucket table is full + // Wait while another thread set the bucket or a free bucket will be available + m_Stat.onBucketsExhausted(); + bkoff(); + } + + // Another thread set the bucket. 
Wait while it done + for ( pBucket = m_Buckets.bucket( nBucket ); pBucket == nullptr; pBucket = m_Buckets.bucket( nBucket )) { + bkoff(); + m_Stat.onBusyWaitBucketInit(); + } + + return pBucket; + } + + aux_node_type * get_bucket( size_t nHash ) + { + size_t nBucket = bucket_no( nHash ); + + aux_node_type * pHead = m_Buckets.bucket( nBucket ); + if ( pHead == nullptr ) + pHead = init_bucket( nBucket ); + + assert( pHead->is_dummy()); + + return pHead; + } + + void init() + { + // Initialize bucket 0 + aux_node_type * pNode = alloc_aux_node( 0 /*split_list::dummy_hash(0)*/ ); + + // insert_aux_node cannot return false for empty list + CDS_VERIFY( m_List.insert_aux_node( pNode )); + + m_Buckets.bucket( 0, pNode ); + } + + static size_t max_item_count( size_t nBucketCount, size_t nLoadFactor ) + { + return nBucketCount * nLoadFactor; + } + + void inc_item_count() + { + size_t nMaxCount = m_nMaxItemCount.load( memory_model::memory_order_relaxed ); + if ( ++m_ItemCounter <= nMaxCount ) + return; + + size_t sz = m_nBucketCountLog2.load( memory_model::memory_order_relaxed ); + const size_t nBucketCount = static_cast(1) << sz; + if ( nBucketCount < m_Buckets.capacity()) { + // we may grow the bucket table + const size_t nLoadFactor = m_Buckets.load_factor(); + if ( nMaxCount < max_item_count( nBucketCount, nLoadFactor )) + return; // someone already have updated m_nBucketCountLog2, so stop here + + m_nMaxItemCount.compare_exchange_strong( nMaxCount, max_item_count( nBucketCount << 1, nLoadFactor ), + memory_model::memory_order_relaxed, atomics::memory_order_relaxed ); + m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, memory_model::memory_order_relaxed, atomics::memory_order_relaxed ); + } + else + m_nMaxItemCount.store( std::numeric_limits::max(), memory_model::memory_order_relaxed ); + } + //@endcond + + protected: + //@cond + static unsigned const c_padding = cds::opt::actual_padding< traits::padding >::value; + + typedef typename cds::details::type_padding< 
bucket_table, c_padding >::type padded_bucket_table; + padded_bucket_table m_Buckets; ///< bucket table + + typedef typename cds::details::type_padding< ordered_list_wrapper, c_padding >::type padded_ordered_list; + padded_ordered_list m_List; ///< Ordered list containing split-list items + + atomics::atomic m_nBucketCountLog2; ///< log2( current bucket count ) + atomics::atomic m_nMaxItemCount; ///< number of items container can hold, before we have to resize + hash m_HashFunctor; ///< Hash functor + item_counter m_ItemCounter; ///< Item counter + stat m_Stat; ///< Internal statistics + //@endcond + }; + +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_NOGC_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/split_list_rcu.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/split_list_rcu.h new file mode 100644 index 0000000..2f13cfb --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/split_list_rcu.h @@ -0,0 +1,1129 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H +#define CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H + +#include + +#include +#include +#include + +namespace cds { namespace intrusive { + + /// Split-ordered list RCU specialization + /** @ingroup cds_intrusive_map + \anchor cds_intrusive_SplitListSet_rcu + + Hash table implementation based on split-ordered list algorithm discovered by Ori Shalev and Nir Shavit, see + - [2003] Ori Shalev, Nir Shavit "Split-Ordered Lists - Lock-free Resizable Hash Tables" + - [2008] Nir Shavit "The Art of Multiprocessor Programming" + + The split-ordered list is a lock-free implementation of an extensible unbounded hash table. It uses original + recursive split-ordering algorithm discovered by Ori Shalev and Nir Shavit that allows to split buckets + without moving an item on resizing, see \ref cds_SplitList_algo_desc "short algo description". + + Implementation + + Template parameters are: + - \p RCU - one of \ref cds_urcu_gc "RCU type" + - \p OrderedList - ordered list implementation used as bucket for hash set, for example, MichaelList, LazyList. 
+ The intrusive ordered list implementation specifies the type \p T stored in the hash-set, + the comparing functor for the type \p T and other features specific for the ordered list. + - \p Traits - set traits, default isd \p split_list::traits. + Instead of defining \p Traits struct you can use option-based syntax provided by \p split_list::make_traits metafunction. + + @note About required features of hash functor see \ref cds_SplitList_hash_functor "SplitList general description". + + \par How to use + Before including you should include appropriate RCU header file, + see \ref cds_urcu_gc "RCU type" for list of existing RCU class and corresponding header files. + For example, for \ref cds_urcu_general_buffered_gc "general-purpose buffered RCU" and + MichaelList-based split-list you should include: + \code + #include + #include + #include + + // Declare Michael's list for type Foo and default traits: + typedef cds::intrusive::MichaelList< cds::urcu::gc< cds::urcu::general_buffered<> >, Foo > rcu_michael_list; + + // Declare split-list based on rcu_michael_list + typedef cds::intrusive::SplitListSet< cds::urcu::gc< cds::urcu::general_buffered<> >, rcu_michael_list > rcu_split_list; + \endcode + + */ + template < + class RCU, + class OrderedList, +# ifdef CDS_DOXYGEN_INVOKED + class Traits = split_list::traits +# else + class Traits +# endif + > + class SplitListSet< cds::urcu::gc< RCU >, OrderedList, Traits > + { + public: + typedef cds::urcu::gc< RCU > gc; ///< RCU garbage collector + typedef Traits traits; ///< Traits template parameters + + /// Hash functor for \ref value_type and all its derivatives that you use + typedef typename cds::opt::v::hash_selector< typename traits::hash >::type hash; + + protected: + //@cond + typedef split_list::details::rebind_list_traits ordered_list_adapter; + //@endcond + + public: +# ifdef CDS_DOXYGEN_INVOKED + typedef OrderedList ordered_list; ///< type of ordered list used as base for split-list +# else + typedef typename 
ordered_list_adapter::result ordered_list; +# endif + typedef typename ordered_list::value_type value_type; ///< type of value stored in the split-list + typedef typename ordered_list::key_comparator key_comparator; ///< key compare functor + typedef typename ordered_list::disposer disposer; ///< Node disposer functor + typedef typename ordered_list::rcu_lock rcu_lock; ///< RCU scoped lock + typedef typename ordered_list::exempt_ptr exempt_ptr; ///< pointer to extracted node + typedef typename ordered_list::raw_ptr raw_ptr; ///< pointer to the node for \p get() function + /// Group of \p extract_xxx functions require external locking if underlying ordered list requires that + static constexpr const bool c_bExtractLockExternal = ordered_list::c_bExtractLockExternal; + + typedef typename traits::bit_reversal bit_reversal; ///< Bit reversal algorithm, see \p split_list::traits::bit_reversal + typedef typename traits::item_counter item_counter; ///< Item counter type + typedef typename traits::back_off back_off; ///< back-off strategy for spinning + typedef typename traits::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename traits::stat stat; ///< Internal statistics + + // GC and OrderedList::gc must be the same + static_assert( std::is_same::value, "GC and OrderedList::gc must be the same"); + + // atomicity::empty_item_counter is not allowed as a item counter + static_assert( !std::is_same::value, + "cds::atomicity::empty_item_counter is not allowed as a item counter"); + + protected: + //@cond + typedef typename ordered_list::node_type list_node_type; ///< Node type as declared in ordered list + typedef split_list::node node_type; ///< split-list node type + + /// Split-list node traits + /** + This traits is intended for converting between underlying ordered list node type \ref list_node_type + and split-list node type \ref node_type + */ + typedef typename ordered_list_adapter::node_traits node_traits; + + /// Bucket table implementation + typedef typename split_list::details::bucket_table_selector< + traits::dynamic_bucket_table + , gc + , typename ordered_list_adapter::aux_node + , opt::allocator< typename traits::allocator > + , opt::memory_model< memory_model > + , opt::free_list< typename traits::free_list > + >::type bucket_table; + + typedef typename bucket_table::aux_node_type aux_node_type; ///< auxiliary node type + + //@endcond + + protected: + //@cond + /// Ordered list wrapper to access protected members of OrderedList + class ordered_list_wrapper: public ordered_list + { + typedef ordered_list base_class; + typedef typename base_class::auxiliary_head bucket_head_type; + + public: + bool insert_at( aux_node_type * pHead, value_type& val ) + { + assert( pHead != nullptr ); + bucket_head_type h(pHead); + return base_class::insert_at( h, val ); + } + + template + bool insert_at( aux_node_type * pHead, value_type& val, Func f ) + { + assert( pHead != nullptr ); + bucket_head_type h(pHead); + return base_class::insert_at( h, val, f ); + } + + template + std::pair update_at( aux_node_type * pHead, value_type& val, Func 
func, bool bAllowInsert ) + { + assert( pHead != nullptr ); + bucket_head_type h(pHead); + return base_class::update_at( h, val, func, bAllowInsert ); + } + + bool unlink_at( aux_node_type * pHead, value_type& val ) + { + assert( pHead != nullptr ); + bucket_head_type h(pHead); + return base_class::unlink_at( h, val ); + } + + template + bool erase_at( aux_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp, Func f ) + { + assert( pHead != nullptr ); + bucket_head_type h(pHead); + return base_class::erase_at( h, val, cmp, f ); + } + + template + bool erase_at( aux_node_type * pHead, split_list::details::search_value_type const& val, Compare cmp ) + { + assert( pHead != nullptr ); + bucket_head_type h(pHead); + return base_class::erase_at( h, val, cmp ); + } + + template + value_type * extract_at( aux_node_type * pHead, split_list::details::search_value_type& val, Compare cmp ) + { + assert( pHead != nullptr ); + bucket_head_type h(pHead); + return base_class::extract_at( h, val, cmp ); + } + + template + bool find_at( aux_node_type * pHead, split_list::details::search_value_type& val, Compare cmp, Func f ) + { + assert( pHead != nullptr ); + bucket_head_type h(pHead); + return base_class::find_at( h, val, cmp, f ); + } + + template + bool find_at( aux_node_type * pHead, split_list::details::search_value_type const & val, Compare cmp ) + { + assert( pHead != nullptr ); + bucket_head_type h(pHead); + return base_class::find_at( h, val, cmp ); + } + + template + raw_ptr get_at( aux_node_type * pHead, split_list::details::search_value_type& val, Compare cmp ) + { + assert( pHead != nullptr ); + bucket_head_type h(pHead); + return base_class::get_at( h, val, cmp ); + } + + bool insert_aux_node( aux_node_type * pNode ) + { + return base_class::insert_aux_node( pNode ); + } + bool insert_aux_node( aux_node_type * pHead, aux_node_type * pNode ) + { + bucket_head_type h(pHead); + return base_class::insert_aux_node( h, pNode ); + } + }; + + 
template + struct less_wrapper: public cds::opt::details::make_comparator_from_less + { + typedef cds::opt::details::make_comparator_from_less base_wrapper; + + template + int operator()( split_list::details::search_value_type const& v1, Q2 const& v2 ) const + { + return base_wrapper::operator()( v1.val, v2 ); + } + + template + int operator()( Q1 const& v1, split_list::details::search_value_type const& v2 ) const + { + return base_wrapper::operator()( v1, v2.val ); + } + }; + //@endcond + + public: + /// Initialize split-ordered list of default capacity + /** + The default capacity is defined in bucket table constructor. + See split_list::expandable_bucket_table, split_list::static_ducket_table + which selects by split_list::dynamic_bucket_table option. + */ + SplitListSet() + : m_nBucketCountLog2(1) + , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor())) + { + init(); + } + + /// Initialize split-ordered list + SplitListSet( + size_t nItemCount ///< estimate average of item count + , size_t nLoadFactor = 1 ///< load factor - average item count per bucket. Small integer up to 8, default is 1. + ) + : m_Buckets( nItemCount, nLoadFactor ) + , m_nBucketCountLog2(1) + , m_nMaxItemCount( max_item_count(2, m_Buckets.load_factor())) + { + init(); + } + + /// Destroys split-list + ~SplitListSet() + { + m_List.clear(); + gc::force_dispose(); + } + + public: + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. + + The function makes RCU lock internally. + + Returns \p true if \p val is placed into the set, \p false otherwise. 
+ */ + bool insert( value_type& val ) + { + size_t nHash = hash_value( val ); + aux_node_type * pHead = get_bucket( nHash ); + assert( pHead != nullptr ); + + node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); + + if ( m_List.insert_at( pHead, val )) { + inc_item_count(); + m_Stat.onInsertSuccess(); + return true; + } + m_Stat.onInsertFailed(); + return false; + } + + /// Inserts new node + /** + This function is intended for derived non-intrusive containers. + + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. + + The function makes RCU lock internally. + + @warning For \ref cds_intrusive_MichaelList_rcu "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". + \ref cds_intrusive_LazyList_rcu "LazyList" provides exclusive access to inserted item and does not require any node-level + synchronization. + */ + template + bool insert( value_type& val, Func f ) + { + size_t nHash = hash_value( val ); + aux_node_type * pHead = get_bucket( nHash ); + assert( pHead != nullptr ); + + node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); + + if ( m_List.insert_at( pHead, val, f )) { + inc_item_count(); + m_Stat.onInsertSuccess(); + return true; + } + m_Stat.onInsertFailed(); + return false; + } + + /// Updates the node + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val is not found in the set, then \p val is inserted + iff \p bAllowInsert is \p true. + Otherwise, the functor \p func is called with item found. 
+ The functor signature is: + \code + void func( bool bNew, value_type& item, value_type& val ); + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p update() function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refers to the same stuff. + + The functor may change non-key fields of the \p item. + + The function applies RCU lock internally. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if new item has been added or \p false if the item with \p key + already is in the list. + + @warning For \ref cds_intrusive_MichaelList_rcu "MichaelList" as the bucket see \ref cds_intrusive_item_creating "insert item troubleshooting". + \ref cds_intrusive_LazyList_rcu "LazyList" provides exclusive access to inserted item and does not require any node-level + synchronization. + */ + template + std::pair update( value_type& val, Func func, bool bAllowInsert = true ) + { + size_t nHash = hash_value( val ); + aux_node_type * pHead = get_bucket( nHash ); + assert( pHead != nullptr ); + + node_traits::to_node_ptr( val )->m_nHash = split_list::regular_hash( nHash ); + + std::pair bRet = m_List.update_at( pHead, val, func, bAllowInsert ); + if ( bRet.first && bRet.second ) { + inc_item_count(); + m_Stat.onUpdateNew(); + } + else + m_Stat.onUpdateExist(); + return bRet; + } + //@cond + template + CDS_DEPRECATED("ensure() is deprecated, use update()") + std::pair ensure( value_type& val, Func func ) + { + return update( val, func, true ); + } + //@endcond + + /// Unlinks the item \p val from the set + /** + The function searches the item \p val in the set and unlinks it from the set + if it is found and is equal to \p val. + + Difference between \ref erase and \p unlink functions: \p erase finds a key + and deletes the item found. 
\p unlink finds an item by key and deletes it + only if \p val is an item of that set, i.e. the pointer to item found + is equal to &val . + + RCU \p synchronize method can be called, therefore, RCU should not be locked. + + The function returns \p true if success and \p false otherwise. + */ + bool unlink( value_type& val ) + { + size_t nHash = hash_value( val ); + aux_node_type * pHead = get_bucket( nHash ); + assert( pHead != nullptr ); + + if ( m_List.unlink_at( pHead, val )) { + --m_ItemCounter; + m_Stat.onEraseSuccess(); + return true; + } + m_Stat.onEraseFailed(); + return false; + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_SplitListSet_rcu_erase + The function searches an item with key equal to \p key in the set, + unlinks it from the set, and returns \p true. + If the item with key equal to \p key is not found the function return \p false. + + Difference between \ref erase and \p unlink functions: \p erase finds a key + and deletes the item found. \p unlink finds an item by key and deletes it + only if \p key is an item of that set, i.e. the pointer to item found + is equal to &key . + + RCU \p synchronize method can be called, therefore, RCU should not be locked. + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool erase( Q const& key ) + { + return erase_( key, key_comparator()); + } + + /// Deletes the item from the set using \p pred for searching + /** + The function is an analog of \ref cds_intrusive_SplitListSet_rcu_erase "erase(Q const&)" + but \p cmp is used for key compare. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. 
+ */ + template + bool erase_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return erase_( key, typename ordered_list_adapter::template make_compare_from_less()); + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_SplitListSet_rcu_erase_func + The function searches an item with key equal to \p key in the set, + call \p f functor with item found, unlinks it from the set, and returns \p true. + The \ref disposer specified by \p OrderedList class template parameter is called + by garbage collector \p GC asynchronously. + + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + + If the item with key equal to \p key is not found the function return \p false. + + RCU \p synchronize method can be called, therefore, RCU should not be locked. + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + bool erase( Q const& key, Func f ) + { + return erase_( key, key_comparator(), f ); + } + + /// Deletes the item from the set using \p pred for searching + /** + The function is an analog of \ref cds_intrusive_SplitListSet_rcu_erase_func "erase(Q const&, Func)" + but \p cmp is used for key compare. + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool erase_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return erase_( key, typename ordered_list_adapter::template make_compare_from_less(), f ); + } + + /// Extracts an item from the set + /** \anchor cds_intrusive_SplitListSet_rcu_extract + The function searches an item with key equal to \p key in the set, + unlinks it, and returns \ref cds::urcu::exempt_ptr "exempt_ptr" pointer to the item found. + If the item with the key equal to \p key is not found the function returns an empty \p exempt_ptr. 
+ + Depends on \p bucket_type you should or should not lock RCU before calling of this function: + - for the set based on \ref cds_intrusive_MichaelList_rcu "MichaelList" RCU should not be locked + - for the set based on \ref cds_intrusive_LazyList_rcu "LazyList" RCU should be locked + See ordered list implementation for details. + + \code + typedef cds::urcu::gc< general_buffered<> > rcu; + typedef cds::intrusive::MichaelList< rcu, Foo > rcu_michael_list; + typedef cds::intrusive::SplitListSet< rcu, rcu_michael_list, foo_traits > rcu_splitlist_set; + + rcu_splitlist_set theSet; + // ... + + rcu_splitlist_set::exempt_ptr p; + + // For MichaelList we should not lock RCU + + // Now, you can apply extract function + // Note that you must not delete the item found inside the RCU lock + p = theList.extract( 10 ); + if ( p ) { + // do something with p + ... + } + + // We may safely release p here + // release() passes the pointer to RCU reclamation cycle: + // it invokes RCU retire_ptr function with the disposer you provided for rcu_michael_list. + p.release(); + \endcode + */ + template + exempt_ptr extract( Q const& key ) + { + return exempt_ptr(extract_( key, key_comparator())); + } + + /// Extracts an item from the set using \p pred for searching + /** + The function is an analog of \p extract(Q const&) but \p pred is used for key compare. + \p Less functor has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + exempt_ptr extract_with( Q const& key, Less pred ) + { + return exempt_ptr( extract_with_( key, pred )); + } + + /// Finds the key \p key + /** \anchor cds_intrusive_SplitListSet_rcu_find_func + The function searches the item with key equal to \p key and calls the functor \p f for item found. 
+ The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& key ); + }; + \endcode + where \p item is the item found, \p key is the find function argument. + + The functor can change non-key fields of \p item. Note that the functor is only guarantee + that \p item cannot be disposed during functor is executing. + The functor does not serialize simultaneous access to the set \p item. If such access is + possible you must provide your own synchronization schema on item level to exclude unsafe item modifications. + + The \p key argument is non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function applies RCU lock internally. + + The function returns \p true if \p key is found, \p false otherwise. + */ + template + bool find( Q& key, Func f ) + { + return find_( key, key_comparator(), f ); + } + //@cond + template + bool find( Q const& key, Func f ) + { + return find_( key, key_comparator(), f ); + } + //@endcond + + /// Finds the key \p key with \p pred predicate for comparing + /** + The function is an analog of \ref cds_intrusive_SplitListSet_rcu_find_func "find(Q&, Func)" + but \p cmp is used for key compare. + \p Less has the interface like \p std::less. + \p cmp must imply the same element order as the comparator used for building the set. 
+ */ + template + bool find_with( Q& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return find_( key, typename ordered_list_adapter::template make_compare_from_less(), f ); + } + //@cond + template + bool find_with( Q const& key, Less pred, Func f ) + { + CDS_UNUSED( pred ); + return find_( key, typename ordered_list_adapter::template make_compare_from_less(), f ); + } + //@endcond + + /// Checks whether the set contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + Otherwise, you may use \p contains( Q const&, Less pred ) functions with explicit predicate for key comparing. + */ + template + bool contains( Q const& key ) + { + return find_value( key, key_comparator()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find( Q const& key ) + { + return contains( key ); + } + //@endcond + + /// Checks whether the set contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the list. + */ + template + bool contains( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return find_value( key, typename ordered_list_adapter::template make_compare_from_less()); + } + //@cond + template + CDS_DEPRECATED("deprecated, use contains()") + bool find_with( Q const& key, Less pred ) + { + return contains( key, pred ); + } + //@endcond + + /// Finds the key \p key and return the item found + /** \anchor cds_intrusive_SplitListSet_rcu_get + The function searches the item with key equal to \p key and returns the pointer to item found. + If \p key is not found it returns \p nullptr. 
+ + Note the compare functor should accept a parameter of type \p Q that can be not the same as \p value_type. + + RCU should be locked before call of this function. + Returned item is valid only while RCU is locked: + \code + typedef cds::intrusive::SplitListSet< your_template_parameters > set_class; + set_class theSet; + // ... + typename set_class::raw_ptr rp; + { + // Lock RCU + hash_set::rcu_lock lock; + + rp = theSet.get( 5 ); + if ( rp ) { + // Deal with rp + //... + } + // Unlock RCU by rcu_lock destructor + // rp can be retired by disposer at any time after RCU has been unlocked + } + \endcode + */ + template + raw_ptr get( Q const& key ) + { + return get_( key, key_comparator()); + } + + /// Finds the key \p key and return the item found + /** + The function is an analog of \ref cds_intrusive_SplitListSet_rcu_get "get(Q const&)" + but \p pred is used for comparing the keys. + + \p Less functor has the semantics like \p std::less but should take arguments of type \ref value_type and \p Q + in any order. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + raw_ptr get_with( Q const& key, Less pred ) + { + CDS_UNUSED( pred ); + return get_( key, typename ordered_list_adapter::template make_compare_from_less()); + } + + + /// Returns item count in the set + size_t size() const + { + return m_ItemCounter; + } + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. + Thus, the correct item counting feature is an important part of split-list set implementation. 
+ */ + bool empty() const + { + return size() == 0; + } + + /// Clears the set (not atomic) + void clear() + { + iterator it = begin(); + while ( it != end()) { + iterator i(it); + ++i; + unlink( *it ); + it = i; + } + } + + /// Returns internal statistics + stat const& statistics() const + { + return m_Stat; + } + + /// Returns internal statistics for \p OrderedList + typename OrderedList::stat const& list_statistics() const + { + return m_List.statistics(); + } + + protected: + //@cond + template + class iterator_type + :public split_list::details::iterator_type + { + typedef split_list::details::iterator_type iterator_base_class; + typedef typename iterator_base_class::list_iterator list_iterator; + public: + iterator_type() + : iterator_base_class() + {} + + iterator_type( iterator_type const& src ) + : iterator_base_class( src ) + {} + + // This ctor should be protected... + iterator_type( list_iterator itCur, list_iterator itEnd ) + : iterator_base_class( itCur, itEnd ) + {} + }; + //@endcond + + public: + ///@name Forward iterators (thread-safe under RCU lock) + //@{ + /// Forward iterator + /** + The forward iterator for a split-list has some features: + - it has no post-increment operator + - it depends on iterator of underlying \p OrderedList + + You may safely use iterators in multi-threaded environment only under RCU lock. + Otherwise, a crash is possible if another thread deletes the element the iterator points to. 
+ */ + typedef iterator_type iterator; + + /// Const forward iterator + /** + For iterator's features and requirements see \ref iterator + */ + typedef iterator_type const_iterator; + + /// Returns a forward iterator addressing the first element in a split-list + /** + For empty list \code begin() == end() \endcode + */ + iterator begin() + { + return iterator( m_List.begin(), m_List.end()); + } + + /// Returns an iterator that addresses the location succeeding the last element in a split-list + /** + Do not use the value returned by end function to access any item. + + The returned value can be used only to control reaching the end of the split-list. + For empty list \code begin() == end() \endcode + */ + iterator end() + { + return iterator( m_List.end(), m_List.end()); + } + + /// Returns a forward const iterator addressing the first element in a split-list + const_iterator begin() const + { + return cbegin(); + } + /// Returns a forward const iterator addressing the first element in a split-list + const_iterator cbegin() const + { + return const_iterator( m_List.cbegin(), m_List.cend()); + } + + /// Returns an const iterator that addresses the location succeeding the last element in a split-list + const_iterator end() const + { + return cend(); + } + /// Returns an const iterator that addresses the location succeeding the last element in a split-list + const_iterator cend() const + { + return const_iterator( m_List.cend(), m_List.cend()); + } + //@} + + protected: + //@cond + aux_node_type * alloc_aux_node( size_t nHash ) + { + m_Stat.onHeadNodeAllocated(); + aux_node_type* p = m_Buckets.alloc_aux_node(); + if ( p ) + p->m_nHash = nHash; + return p; + } + + void free_aux_node( aux_node_type * p ) + { + m_Buckets.free_aux_node( p ); + m_Stat.onHeadNodeFreed(); + } + + /// Calculates hash value of \p key + template + size_t hash_value( Q const& key ) const + { + return m_HashFunctor( key ); + } + + size_t bucket_no( size_t nHash ) const + { + return nHash & ( (1 
<< m_nBucketCountLog2.load(memory_model::memory_order_relaxed)) - 1 ); + } + + static size_t parent_bucket( size_t nBucket ) + { + assert( nBucket > 0 ); + return nBucket & ~( 1 << bitop::MSBnz( nBucket )); + } + + aux_node_type * init_bucket( size_t const nBucket ) + { + assert( nBucket > 0 ); + size_t nParent = parent_bucket( nBucket ); + + aux_node_type * pParentBucket = m_Buckets.bucket( nParent ); + if ( pParentBucket == nullptr ) { + pParentBucket = init_bucket( nParent ); + m_Stat.onRecursiveInitBucket(); + } + + assert( pParentBucket != nullptr ); + + // Allocate an aux node for new bucket + aux_node_type * pBucket = m_Buckets.bucket( nBucket ); + + back_off bkoff; + for ( ;; pBucket = m_Buckets.bucket( nBucket )) { + if ( pBucket ) + return pBucket; + + pBucket = alloc_aux_node( split_list::dummy_hash( nBucket )); + if ( pBucket ) { + if ( m_List.insert_aux_node( pParentBucket, pBucket )) { + m_Buckets.bucket( nBucket, pBucket ); + m_Stat.onNewBucket(); + return pBucket; + } + + // Another thread set the bucket. Wait while it done + free_aux_node( pBucket ); + m_Stat.onBucketInitContenton(); + break; + } + + // There are no free buckets. It means that the bucket table is full + // Wait while another thread set the bucket or a free bucket will be available + m_Stat.onBucketsExhausted(); + bkoff(); + } + + // Another thread set the bucket. 
Wait while it done + for ( pBucket = m_Buckets.bucket( nBucket ); pBucket == nullptr; pBucket = m_Buckets.bucket( nBucket )) { + bkoff(); + m_Stat.onBusyWaitBucketInit(); + } + + return pBucket; + } + + aux_node_type * get_bucket( size_t nHash ) + { + size_t nBucket = bucket_no( nHash ); + + aux_node_type * pHead = m_Buckets.bucket( nBucket ); + if ( pHead == nullptr ) + pHead = init_bucket( nBucket ); + + assert( pHead->is_dummy()); + + return pHead; + } + + void init() + { + // Initialize bucket 0 + aux_node_type * pNode = alloc_aux_node( 0 /*split_list::dummy_hash(0)*/ ); + + // insert_aux_node cannot return false for empty list + CDS_VERIFY( m_List.insert_aux_node( pNode )); + + m_Buckets.bucket( 0, pNode ); + } + + static size_t max_item_count( size_t nBucketCount, size_t nLoadFactor ) + { + return nBucketCount * nLoadFactor; + } + + void inc_item_count() + { + size_t nMaxCount = m_nMaxItemCount.load(memory_model::memory_order_relaxed); + if ( ++m_ItemCounter <= nMaxCount ) + return; + + size_t sz = m_nBucketCountLog2.load(memory_model::memory_order_relaxed); + const size_t nBucketCount = static_cast(1) << sz; + if ( nBucketCount < m_Buckets.capacity()) { + // we may grow the bucket table + const size_t nLoadFactor = m_Buckets.load_factor(); + if ( nMaxCount < max_item_count( nBucketCount, nLoadFactor )) + return; // someone already have updated m_nBucketCountLog2, so stop here + + m_nMaxItemCount.compare_exchange_strong( nMaxCount, max_item_count( nBucketCount << 1, nLoadFactor ), + memory_model::memory_order_relaxed, atomics::memory_order_relaxed ); + m_nBucketCountLog2.compare_exchange_strong( sz, sz + 1, memory_model::memory_order_relaxed, atomics::memory_order_relaxed ); + } + else + m_nMaxItemCount.store( std::numeric_limits::max(), memory_model::memory_order_relaxed ); + } + + template + bool find_( Q& val, Compare cmp, Func f ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); 
+ aux_node_type * pHead = get_bucket( nHash ); + assert( pHead != nullptr ); + + return m_Stat.onFind( m_List.find_at( pHead, sv, cmp, + [&f](value_type& item, split_list::details::search_value_type& v){ f(item, v.val ); })); + } + + template + bool find_value( Q const& val, Compare cmp ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + aux_node_type * pHead = get_bucket( nHash ); + assert( pHead != nullptr ); + + return m_Stat.onFind( m_List.find_at( pHead, sv, cmp )); + } + + template + raw_ptr get_( Q const& val, Compare cmp ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + aux_node_type * pHead = get_bucket( nHash ); + assert( pHead != nullptr ); + + raw_ptr p = m_List.get_at( pHead, sv, cmp ); + m_Stat.onFind( !!p ); + return p; + } + + template + value_type * extract_( Q const& val, Compare cmp ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + aux_node_type * pHead = get_bucket( nHash ); + assert( pHead != nullptr ); + + value_type * pNode = m_List.extract_at( pHead, sv, cmp ); + if ( pNode ) { + --m_ItemCounter; + m_Stat.onExtractSuccess(); + } + else + m_Stat.onExtractFailed(); + return pNode; + } + + template + value_type * extract_with_( Q const& val, Less pred ) + { + CDS_UNUSED( pred ); + return extract_( val, typename ordered_list_adapter::template make_compare_from_less()); + } + + template + bool erase_( const Q& val, Compare cmp ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + aux_node_type * pHead = get_bucket( nHash ); + assert( pHead != nullptr ); + + if ( m_List.erase_at( pHead, sv, cmp )) { + --m_ItemCounter; + m_Stat.onEraseSuccess(); + return true; + } + m_Stat.onEraseFailed(); + return false; + } + + template + bool erase_( Q 
const& val, Compare cmp, Func f ) + { + size_t nHash = hash_value( val ); + split_list::details::search_value_type sv( val, split_list::regular_hash( nHash )); + aux_node_type * pHead = get_bucket( nHash ); + assert( pHead != nullptr ); + + if ( m_List.erase_at( pHead, sv, cmp, f )) { + --m_ItemCounter; + m_Stat.onEraseSuccess(); + return true; + } + m_Stat.onEraseFailed(); + return false; + } + //@endcond + + protected: + //@cond + static unsigned const c_padding = cds::opt::actual_padding< traits::padding >::value; + + typedef typename cds::details::type_padding< bucket_table, c_padding >::type padded_bucket_table; + padded_bucket_table m_Buckets; ///< bucket table + + typedef typename cds::details::type_padding< ordered_list_wrapper, c_padding >::type padded_ordered_list; + padded_ordered_list m_List; ///< Ordered list containing split-list items + + atomics::atomic m_nBucketCountLog2; ///< log2( current bucket count ) + atomics::atomic m_nMaxItemCount; ///< number of items container can hold, before we have to resize + hash m_HashFunctor; ///< Hash functor + item_counter m_ItemCounter; ///< Item counter + stat m_Stat; ///< Internal statistics accumulator + //@endcond + }; + +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_SPLIT_LIST_RCU_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set.h new file mode 100644 index 0000000..d47947c --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set.h @@ -0,0 +1,909 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following 
conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_STRIPED_SET_H +#define CDSLIB_INTRUSIVE_STRIPED_SET_H + +#include +#include +#include + +namespace cds { namespace intrusive { + /// StripedSet related definitions + namespace striped_set { + + /** @defgroup cds_striped_resizing_policy Resizing policy for striped/refinable set/map + + Resizing policy for \p intrusive::StripedSet, \p container::StripedSet and \p container::StripedMap. + */ + + } // namespace striped_set + + /// Striped hash set + /** @ingroup cds_intrusive_map + + Source + - [2008] Maurice Herlihy, Nir Shavit "The Art of Multiprocessor Programming" + + Lock striping is very simple technique. + The set consists of the bucket table and the array of locks. + Initially, the capacity of lock array and bucket table is the same. 
+ When set is resized, bucket table capacity will be doubled but lock array will not. + The lock \p i protects each bucket \p j, where j = i mod L , + where \p L - the size of lock array. + + Template arguments: + - \p Container - the container class that is used as bucket table entry. The \p Container class should support + an uniform interface described below. + - \p Options - options + + The \p %StripedSet class does not exactly dictate the type of container that should be used as a \p Container bucket. + Instead, the class supports different intrusive container type for the bucket, for exampe, + \p boost::intrusive::list, \p boost::intrusive::set and others. + + Remember that \p %StripedSet class algorithm ensures sequential blocking access to its bucket through the mutex type you specify + among \p Options template arguments. + + The \p Options are: + - \p opt::mutex_policy - concurrent access policy. + Available policies: \p striped_set::striping, \p striped_set::refinable. + Default is \p %striped_set::striping. + - \p cds::opt::hash - hash functor. Default option value see opt::v::hash_selector + which selects default hash functor for your compiler. + - \p cds::opt::compare - key comparison functor. No default functor is provided. + If the option is not specified, the \p opt::less is used. + - \p cds::opt::less - specifies binary predicate used for key comparison. Default is \p std::less. + - \p cds::opt::item_counter - item counter type. Default is \p atomicity::item_counter since some operation on the counter is performed + without locks. Note that item counting is an essential part of the set algorithm, so dummy counter like \p atomicity::empty_item_counter + is not suitable. + - \p cds::opt::allocator - the allocator type using for memory allocation of bucket table and lock array. Default is \ref CDS_DEFAULT_ALLOCATOR. + - \p cds::opt::resizing_policy - the resizing policy - a functor that decides when to resize the hash set. 
+ Default option value depends on bucket container type: + for sequential containers like \p boost::intrusive::list the resizing policy is cds::container::striped_set::load_factor_resizing<4> ; + for other type of containers like \p boost::intrusive::set the resizing policy is cds::container::striped_set::no_resizing. + See \ref cds_striped_resizing_policy "available resizing policy". + Note that the choose of resizing policy depends of \p Container type: + for sequential containers like \p boost::intrusive::list the right policy can significantly improve performance. + For other, non-sequential types of \p Container (like a \p boost::intrusive::set) the resizing policy is not so important. + - \p cds::opt::buffer - an initialized buffer type used only for \p boost::intrusive::unordered_set. + Default is cds::opt::v::initialized_static_buffer< cds::any_type, 256 > . + + \p opt::compare or \p opt::less options are used in some \p Container class for ordering. + \p %opt::compare option has the highest priority: if \p %opt::compare is specified, \p %opt::less is not used. + + You can pass other option that would be passed to \p adapt metafunction, see below. + + Internal details + + The \p %StripedSet class cannot utilize the \p Container specified directly, but only its adapted variant which + supports an unified interface. Internally, the adaptation is made via \p intrusive::striped_set::adapt metafunction that wraps bucket container + and provides the unified bucket interface suitable for \p %StripedSet. Such adaptation is completely transparent for you - + you don't need to call \p adapt metafunction directly, \p %StripedSet class's internal machinery itself invokes appropriate + \p adapt metafunction specialization to adjust your \p Container container class to \p %StripedSet bucket's internal interface. + All you need is to include a right header before striped_set.h. 
+ + By default, intrusive::striped_set::adapt metafunction does not make any wrapping to \p AnyContainer, + so, the result intrusive::striped_set::adapt::type is the same as \p AnyContainer. + However, there are a lot of specializations of \p %intrusive::striped_set::adapt for \p boost::intrusive containers, see table below. + Any of this specialization wraps corresponding container making it suitable for the set's bucket. + Remember, you should include the proper header file for \p adapt before including striped_set.h. + + \note It is important to specify boost::intrusive::constant_time_size option + for all \p boost::intrusive container that supports this option. Fast item counting feature is essential part of + \p %StripedSet resizing algorithm. + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Container.h-file for \p adaptExampleNotes
\p boost::intrusive::list\code + #include + #include + typedef cds::intrusive::StripedSet< + boost::intrusive::list >, + cds::opt::less< std::less > + > striped_set; + \endcode + + The list is ordered. + Template argument pack \p Options must contain cds::opt::less or cds::opt::compare for type \p T stored in the list +
\p boost::intrusive::slist\code + #include + #include + typedef cds::intrusive::StripedSet< + boost::intrusive::slist >, + cds::opt::less< std::less > + > striped_set; + \endcode + + The list is ordered. + Template argument pack \p Options must contain \p cds::opt::less or \p cds::opt::compare for type \p T stored in the list +
\p boost::intrusive::set\code + #include + #include + typedef cds::intrusive::StripedSet< + boost::intrusive::set > + > striped_set; + \endcode + + Note that \p boost::intrusive::compare option using in \p boost::intrusive::set + should support \p T type stored in the set and any type \p Q that you can use + in \p erase() and \p find() member functions. +
\p boost::intrusive::unordered_set\code + #include + #include + typedef cds::intrusive::StripedSet< + boost::intrusive::unordered_set + ,boost::intrusive::hash< user_provided_hash_functor > + > + > striped_set; + \endcode + + You should provide two different hash function \p h1 and \p h2 - one for \p boost::intrusive::unordered_set + and other for \p %StripedSet. For the best result, \p h1 and \p h2 must be orthogonal i.e. h1(X) != h2(X) for any value \p X + + The option \p opt::buffer is used for \p boost::intrusive::bucket_traits. + Default is cds::opt::v::initialized_static_buffer< cds::any_type, 256 > . + The resizing policy should correlate with the buffer capacity. + The default resizing policy is cds::container::striped_set::load_factor_resizing<256> what gives load factor 1 for + default bucket buffer that is the best for \p boost::intrusive::unordered_set. +
\p boost::intrusive::avl_set\code + #include + #include + typedef cds::intrusive::StripedSet< + boost::intrusive::avl_set > + > striped_set; + \endcode + + Note that \p boost::intrusive::compare option using in \p boost::intrusive::avl_set + should support \p T type stored in the set and any type \p Q that you can use + in \p erase() and \p find() member functions. +
\p boost::intrusive::sg_set\code + #include + #include + typedef cds::intrusive::StripedSet< + boost::intrusive::sg_set > + > striped_set; + \endcode + + Note that \p boost::intrusive::compare option using in \p boost::intrusive::sg_set + should support \p T type stored in the set and any type \p Q that you can use + in \p erase() and \p find() member functions. +
\p boost::intrusive::splay_set\code + #include + #include + typedef cds::intrusive::StripedSet< + boost::intrusive::splay_set > + > striped_set; + \endcode + + Note that \p boost::intrusive::compare option using in \p boost::intrusive::splay_set + should support \p T type stored in the set and any type \p Q that you can use + in \p erase() and \p find() member functions. +
\p boost::intrusive::treap_set\code + #include + #include + typedef cds::intrusive::StripedSet< + boost::intrusive::treap_set > + > striped_set; + \endcode + + Note that \p boost::intrusive::compare option using in \p boost::intrusive::treap_set + should support \p T type stored in the set and any type \p Q that you can use + in \p erase() and \p find() member functions. +
+ + You can use another intrusive container type as striped set's bucket. + Suppose, you have a container class \p MyBestContainer and you want to integrate it with \p StripedSet as bucket type. + There are two possibility: + - either your \p MyBestContainer class has native support of bucket's interface; + in this case, you can use default \p intrusive::striped_set::adapt metafunction; + - or your \p MyBestContainer class does not support bucket's interface, which means, that you should create a specialization of + cds::intrusive::striped_set::adapt metafunction providing necessary interface. + + The intrusive::striped_set::adapt< Container, OptionPack > metafunction has two template argument: + - \p Container is the class that should be used as the bucket, for example, boost::intrusive::list< T >. + - \p OptionPack is the packed options from \p %StripedSet declaration. The \p adapt metafunction can use + any option from \p OptionPack for its internal use. For example, a \p compare option can be passed to \p adapt + metafunction via \p OptionPack argument of \p %StripedSet declaration. + + See \p intrusive::striped_set::adapt metafunction for the description of interface that the bucket container must provide + to be \p %StripedSet compatible. + */ + template + class StripedSet + { + public: + //@cond + struct default_options { + typedef striped_set::striping<> mutex_policy; + typedef typename cds::opt::v::hash_selector< cds::opt::none >::type hash; + typedef cds::atomicity::item_counter item_counter; + typedef CDS_DEFAULT_ALLOCATOR allocator; + typedef cds::opt::none resizing_policy; + typedef cds::opt::none compare; + typedef cds::opt::none less; + }; + + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< default_options, Options... >::type + ,Options... 
+ >::type options; + //@endcond + + typedef Container underlying_container_type ; ///< original intrusive container type for the bucket + typedef typename cds::intrusive::striped_set::adapt< underlying_container_type, Options... >::type bucket_type ; ///< container type adapted for hash set + typedef typename bucket_type::value_type value_type ; ///< value type stored in the set + + typedef typename options::hash hash ; ///< Hash functor + typedef typename options::item_counter item_counter ; ///< Item counter + typedef typename cds::opt::select_default< + typename options::resizing_policy, + typename bucket_type::default_resizing_policy + >::type resizing_policy ; ///< Resizing policy + typedef typename options::allocator allocator_type ; ///< allocator type specified in options. + typedef typename options::mutex_policy mutex_policy ; ///< Mutex policy + + typedef cds::details::Allocator< bucket_type, allocator_type > bucket_allocator; ///< bucket allocator type based on allocator_type + + protected: + bucket_type * m_Buckets; ///< Bucket table + atomics::atomic m_nBucketMask; ///< Bucket table size - 1. m_nBucketMask + 1 should be power of two. + item_counter m_ItemCounter; ///< Item counter + hash m_Hash; ///< Hash functor + + mutex_policy m_MutexPolicy ; ///< Mutex policy + resizing_policy m_ResizingPolicy; ///< Resizing policy + + static const size_t c_nMinimalCapacity = 16 ; ///< Minimal capacity + + protected: + //@cond + typedef typename mutex_policy::scoped_cell_lock scoped_cell_lock; + typedef typename mutex_policy::scoped_full_lock scoped_full_lock; + typedef typename mutex_policy::scoped_resize_lock scoped_resize_lock; + //@endcond + + protected: + //@cond + static size_t calc_init_capacity( size_t nCapacity ) + { + nCapacity = cds::beans::ceil2( nCapacity ); + return nCapacity < c_nMinimalCapacity ? 
c_nMinimalCapacity : nCapacity; + } + + void alloc_bucket_table( size_t nSize ) + { + assert( cds::beans::is_power2( nSize )); + m_nBucketMask.store( nSize - 1, atomics::memory_order_release ); + m_Buckets = bucket_allocator().NewArray( nSize ); + } + + static void free_bucket_table( bucket_type * pBuckets, size_t nSize ) + { + bucket_allocator().Delete( pBuckets, nSize ); + } + + template + size_t hashing( Q const& v ) const + { + return m_Hash( v ); + } + + bucket_type * bucket( size_t nHash ) const noexcept + { + return m_Buckets + (nHash & m_nBucketMask.load( atomics::memory_order_relaxed )); + } + + template + bool find_( Q& val, Func f ) + { + size_t nHash = hashing( val ); + + scoped_cell_lock sl( m_MutexPolicy, nHash ); + return bucket( nHash )->find( val, f ); + } + + template + bool find_with_( Q& val, Less pred, Func f ) + { + size_t nHash = hashing( val ); + scoped_cell_lock sl( m_MutexPolicy, nHash ); + return bucket( nHash )->find( val, pred, f ); + } + + void internal_resize( size_t nNewCapacity ) + { + // All locks are already locked! 
+ m_MutexPolicy.resize( nNewCapacity ); + + size_t nOldCapacity = bucket_count(); + bucket_type * pOldBuckets = m_Buckets; + + alloc_bucket_table( nNewCapacity ); + + typedef typename bucket_type::iterator bucket_iterator; + bucket_type * pEnd = pOldBuckets + nOldCapacity; + for ( bucket_type * pCur = pOldBuckets; pCur != pEnd; ++pCur ) { + bucket_iterator itEnd = pCur->end(); + bucket_iterator itNext; + for ( bucket_iterator it = pCur->begin(); it != itEnd; it = itNext ) { + itNext = it; + ++itNext; + bucket( m_Hash( *it ))->move_item( *pCur, it ); + } + pCur->clear(); + } + + free_bucket_table( pOldBuckets, nOldCapacity ); + + m_ResizingPolicy.reset(); + } + + void resize() + { + size_t nOldCapacity = bucket_count( atomics::memory_order_acquire ); + + scoped_resize_lock al( m_MutexPolicy ); + if ( al.success()) { + if ( nOldCapacity != bucket_count( atomics::memory_order_acquire )) { + // someone resized already + return; + } + + internal_resize( nOldCapacity * 2 ); + } + } + + //@endcond + + public: + /// Default ctor. The initial capacity is 16. + StripedSet() + : m_Buckets( nullptr ) + , m_nBucketMask( c_nMinimalCapacity - 1 ) + , m_MutexPolicy( c_nMinimalCapacity ) + { + alloc_bucket_table( bucket_count()); + } + + /// Ctor with initial capacity specified + StripedSet( + size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16. + ) + : m_Buckets( nullptr ) + , m_nBucketMask( calc_init_capacity(nCapacity) - 1 ) + , m_MutexPolicy( bucket_count()) + { + alloc_bucket_table( bucket_count()); + } + + /// Ctor with resizing policy (copy semantics) + /** + This constructor initializes m_ResizingPolicy member with copy of \p resizingPolicy parameter + */ + StripedSet( + size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16. + ,resizing_policy const& resizingPolicy ///< Resizing policy + ) + : m_Buckets( nullptr ) + , m_nBucketMask( ( nCapacity ? 
calc_init_capacity(nCapacity) : c_nMinimalCapacity ) - 1 ) + , m_MutexPolicy( bucket_count()) + , m_ResizingPolicy( resizingPolicy ) + { + alloc_bucket_table( bucket_count()); + } + + /// Ctor with resizing policy (move semantics) + /** + This constructor initializes m_ResizingPolicy member moving \p resizingPolicy parameter + Move semantics is used. + */ + StripedSet( + size_t nCapacity ///< Initial size of bucket table and lock array. Must be power of two, the minimum is 16. + ,resizing_policy&& resizingPolicy ///< Resizing policy + ) + : m_Buckets( nullptr ) + , m_nBucketMask( ( nCapacity ? calc_init_capacity(nCapacity) : c_nMinimalCapacity ) - 1 ) + , m_MutexPolicy( bucket_count()) + , m_ResizingPolicy( std::forward( resizingPolicy )) + { + alloc_bucket_table( bucket_count()); + } + + /// Destructor destroys internal data + ~StripedSet() + { + free_bucket_table( m_Buckets, bucket_count()); + } + + public: + /// Inserts new node + /** + The function inserts \p val in the set if it does not contain + an item with key equal to \p val. + + Returns \p true if \p val is placed into the set, \p false otherwise. + */ + bool insert( value_type& val ) + { + return insert( val, []( value_type& ) {} ); + } + + /// Inserts new node + /** + The function allows to split creating of new item into two part: + - create item with key only + - insert new item into the set + - if inserting is success, calls \p f functor to initialize value-field of \p val. + + The functor signature is: + \code + void func( value_type& val ); + \endcode + where \p val is the item inserted. 
+ */ + template + bool insert( value_type& val, Func f ) + { + bool bOk; + bool bResize; + size_t nHash = hashing( val ); + bucket_type * pBucket; + { + scoped_cell_lock sl( m_MutexPolicy, nHash ); + pBucket = bucket( nHash ); + bOk = pBucket->insert( val, f ); + bResize = bOk && m_ResizingPolicy( ++m_ItemCounter, *this, *pBucket ); + } + + if ( bResize ) + resize(); + return bOk; + } + + /// Updates the node + /** + The operation performs inserting or changing data with lock-free manner. + + If the item \p val is not found in the set, then \p val is inserted + iff \p bAllowInsert is \p true. + Otherwise, the functor \p func is called with item found. + The functor signature is: + \code + void func( bool bNew, value_type& item, value_type& val ); + \endcode + with arguments: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - item of the set + - \p val - argument \p val passed into the \p update() function + If new item has been inserted (i.e. \p bNew is \p true) then \p item and \p val arguments + refers to the same thing. + + The functor may change non-key fields of the \p item. + + Returns std::pair where \p first is \p true if operation is successful, + \p second is \p true if new item has been added or \p false if the item with \p val + already is in the set. 
+ */ + template + std::pair update( value_type& val, Func func, bool bAllowInsert = true ) + { + std::pair result; + bool bResize; + size_t nHash = hashing( val ); + bucket_type * pBucket; + { + scoped_cell_lock sl( m_MutexPolicy, nHash ); + pBucket = bucket( nHash ); + + result = pBucket->update( val, func, bAllowInsert ); + bResize = result.first && result.second && m_ResizingPolicy( ++m_ItemCounter, *this, *pBucket ); + } + + if ( bResize ) + resize(); + return result; + } + //@cond + template + std::pair ensure( value_type& val, Func func ) + { + return update( val, func, true ); + } + //@endcond + + /// Unlink the item \p val from the set + /** + The function searches the item \p val in the set and unlink it + if it is found and is equal to \p val (here, the equality means that + \p val belongs to the set: if \p item is an item found then + unlink is successful iif &val == &item) + + The function returns \p true if success and \p false otherwise. + */ + bool unlink( value_type& val ) + { + bool bOk; + size_t nHash = hashing( val ); + { + scoped_cell_lock sl( m_MutexPolicy, nHash ); + bOk = bucket( nHash )->unlink( val ); + } + + if ( bOk ) + --m_ItemCounter; + return bOk; + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_StripedSet_erase + The function searches an item with key equal to \p val in the set, + unlinks it from the set, and returns a pointer to unlinked item. + + If the item with key equal to \p val is not found the function return \p nullptr. + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + value_type * erase( Q const& val ) + { + return erase( val, [](value_type const&) {} ); + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_StripedSet_erase "erase(Q const&)" + but \p pred is used for key comparing + \p Less has the interface like \p std::less. 
+ \p pred must imply the same element order as the comparator used for building the set. + */ + template + value_type * erase_with( Q const& val, Less pred ) + { + return erase_with( val, pred, [](value_type const&) {} ); + } + + /// Deletes the item from the set + /** \anchor cds_intrusive_StripedSet_erase_func + + The function searches an item with key equal to \p val in the set, + call \p f functor with item found, unlinks it from the set, and returns a pointer to unlinked item. + + The \p Func interface is + \code + struct functor { + void operator()( value_type const& item ); + }; + \endcode + + If the item with key equal to \p val is not found the function return \p false. + + Note the hash functor should accept a parameter of type \p Q that can be not the same as \p value_type. + */ + template + value_type * erase( Q const& val, Func f ) + { + size_t nHash = hashing( val ); + value_type * pVal; + { + scoped_cell_lock sl( m_MutexPolicy, nHash ); + pVal = bucket( nHash )->erase( val, f ); + } + + if ( pVal ) + --m_ItemCounter; + return pVal; + } + + /// Deletes the item from the set using \p pred predicate for searching + /** + The function is an analog of \ref cds_intrusive_StripedSet_erase_func "erase(Q const&, Func)" + but \p pred is used for key comparing + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + value_type * erase_with( Q const& val, Less pred, Func f ) + { + size_t nHash = hashing( val ); + value_type * pVal; + { + scoped_cell_lock sl( m_MutexPolicy, nHash ); + pVal = bucket( nHash )->erase( val, pred, f ); + } + + if ( pVal ) + --m_ItemCounter; + return pVal; + } + + /// Find the key \p val + /** \anchor cds_intrusive_StripedSet_find_func + The function searches the item with key equal to \p val and calls the functor \p f for item found. 
+ The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + The functor may change non-key fields of \p item. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + may modify both arguments. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + + The function returns \p true if \p val is found, \p false otherwise. + */ + template + bool find( Q& val, Func f ) + { + return find_( val, f ); + } + + /// Find the key \p val using \p pred predicate + /** + The function is an analog of \ref cds_intrusive_StripedSet_find_func "find(Q&, Func)" + but \p pred is used for key comparing + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q& val, Less pred, Func f ) + { + return find_with_( val, pred, f ); + } + + /// Find the key \p val + /** \anchor cds_intrusive_StripedSet_find_cfunc + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q const& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + The functor may change non-key fields of \p item. + + The \p val argument is non-const since it can be used as \p f functor destination i.e., the functor + may modify both arguments. + + The function returns \p true if \p val is found, \p false otherwise. 
+ */ + template + bool find( Q const& val, Func f ) + { + return find_( val, f ); + } + + /// Find the key \p val using \p pred predicate + /** + The function is an analog of \ref cds_intrusive_StripedSet_find_cfunc "find(Q const&, Func)" + but \p pred is used for key comparing + \p Less has the interface like \p std::less. + \p pred must imply the same element order as the comparator used for building the set. + */ + template + bool find_with( Q const& val, Less pred, Func f ) + { + return find_with_( val, pred, f ); + } + + /// Checks whether the set contains \p key + /** + The function searches the item with key equal to \p key + and returns \p true if it is found, and \p false otherwise. + + Note the hash functor specified for class \p Traits template parameter + should accept a parameter of type \p Q that can be not the same as \p value_type. + Otherwise, you may use \p contains( Q const&, Less pred ) functions with explicit predicate for key comparing. + */ + template + bool contains( Q const& key ) + { + return find( key, [](value_type&, Q const& ) {} ); + } + //@cond + template + CDS_DEPRECATED("use contains()") + bool find( Q const& val ) + { + return contains( val ); + } + //@endcond + + /// Checks whether the set contains \p key using \p pred predicate for searching + /** + The function is an analog of contains( key ) but \p pred is used for key comparing. + \p Less functor has the interface like \p std::less. + \p Less must imply the same element order as the comparator used for building the set. + */ + template + bool contains( Q const& key, Less pred ) + { + return find_with( key, pred, [](value_type& , Q const& ) {} ); + } + //@cond + template + CDS_DEPRECATED("use contains()") + bool find_with( Q const& val, Less pred ) + { + return contains( val, pred ); + } + //@endcond + + /// Clears the set + /** + The function unlinks all items from the set. 
+ */ + void clear() + { + // locks entire array + scoped_full_lock sl( m_MutexPolicy ); + + size_t nBucketCount = bucket_count(); + bucket_type * pBucket = m_Buckets; + for ( size_t i = 0; i < nBucketCount; ++i, ++pBucket ) + pBucket->clear(); + m_ItemCounter.reset(); + } + + /// Clears the set and calls \p disposer for each item + /** + The function unlinks all items from the set calling \p disposer for each item. + \p Disposer functor interface is: + \code + struct Disposer{ + void operator()( value_type * p ); + }; + \endcode + */ + template + void clear_and_dispose( Disposer disposer ) + { + // locks entire array + scoped_full_lock sl( m_MutexPolicy ); + + size_t nBucketCount = bucket_count(); + bucket_type * pBucket = m_Buckets; + for ( size_t i = 0; i < nBucketCount; ++i, ++pBucket ) + pBucket->clear( disposer ); + m_ItemCounter.reset(); + } + + /// Checks if the set is empty + /** + Emptiness is checked by item counting: if item count is zero then the set is empty. + */ + bool empty() const + { + return size() == 0; + } + + /// Returns item count in the set + size_t size() const + { + return m_ItemCounter; + } + + /// Returns the size of hash table + /** + The hash table size is non-constant and can be increased via resizing. 
+ */ + size_t bucket_count() const + { + return m_nBucketMask.load( atomics::memory_order_relaxed ) + 1; + } + //@cond + size_t bucket_count( atomics::memory_order load_mo ) const + { + return m_nBucketMask.load( load_mo ) + 1; + } + //@endcond + + /// Returns lock array size + size_t lock_count() const + { + return m_MutexPolicy.lock_count(); + } + + /// Returns resizing policy object + resizing_policy& get_resizing_policy() + { + return m_ResizingPolicy; + } + + /// Returns resizing policy (const version) + resizing_policy const& get_resizing_policy() const + { + return m_ResizingPolicy; + } + }; +}} // namespace cds::itrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/adapter.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/adapter.h new file mode 100644 index 0000000..01d90bb --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/adapter.h @@ -0,0 +1,349 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_STRIPED_SET_ADAPTER_H +#define CDSLIB_INTRUSIVE_STRIPED_SET_ADAPTER_H + +#include +#include +#include +#include // cds::opt::details::make_comparator - for some adapt specializations + +namespace cds { namespace intrusive { + + /// StripedSet related definitions + namespace striped_set { + /// Default adapter for intrusive striped/refinable hash set + /** + By default, the metafunction does not make any transformation for container type \p Container. + \p Container should provide interface suitable for the hash set. + + The \p Options template argument contains option pack + that will be passed to \p cds::intrusive::StripedSet. + + Bucket interface + + The result of metafunction is a container (a bucket) that should support the following interface: + + Public typedefs that the bucket should provide: + - \p value_type - the type of the item in the bucket + - \p iterator - bucket's item iterator + - \p const_iterator - bucket's item constant iterator + - \p default_resizing_policy - default resizing policy preferable for the container. 
+ By default, the library defines cds::container::striped_set::load_factor_resizing<4> for sequential containers like + boost::intrusive::list, and cds::container::striped_set::no_resizing for ordered container like boost::intrusive::set. + + Insert value \p val of type \p Q + \code template bool insert( value_type& val, Func f ) ; \endcode + Inserts \p val into the container and, if inserting is successful, calls functor \p f + with \p val. + + The functor signature is: + \code + struct functor { + void operator()( value_type& item ); + }; + \endcode + where \p item is the item inserted. + + The user-defined functor \p f is called only if the inserting is success. +
+ + Updates the item in the container + \code template std::pair update( value_type& val, Func f, bool bAllowInsert = true ) \endcode + The operation performs inserting or changing data. + + If the \p val key not found in the container, then \p val is inserted iff \p bAllowInsert is \p true. + Otherwise, the functor \p f is called with the item found. + + The \p Func functor has the following interface: + \code + void func( bool bNew, value_type& item, value_type& val ); + \endcode + or like a functor: + \code + struct functor { + void operator()( bool bNew, value_type& item, value_type& val ); + }; + \endcode + + where arguments are: + - \p bNew - \p true if the item has been inserted, \p false otherwise + - \p item - container's item + - \p val - argument \p val passed into the \p update() function + + If \p val has been inserted (i.e. bNew == true) then \p item and \p val + are the same element: &item == &val. Otherwise, they are different. + + The functor can change non-key fields of the \p item. + + Returns std::pair where \p first is true if operation is successful, + \p second is true if new item has been added or \p false if the item with \p val key + already exists. +
+ + Unlink an item + \code bool unlink( value_type& val ) \endcode + Unlink \p val from the container if \p val belongs to it. +
+ + Erase \p key + \code template bool erase( Q const& key, Func f ) \endcode + The function searches an item with key \p key, calls \p f functor + and erases the item. If \p key is not found, the functor is not called. + + The functor \p Func interface is: + \code + struct functor { + void operator()(value_type& val); + }; + \endcode + + The type \p Q can differ from \ref value_type of items storing in the container. + Therefore, the \p value_type should be comparable with type \p Q. + + Return \p true if key is found and deleted, \p false otherwise +
+ + + Find the key \p val + \code + template bool find( Q& val, Func f ) + template bool find( Q& val, Compare cmp, Func f ) + \endcode + The function searches the item with key equal to \p val and calls the functor \p f for item found. + The interface of \p Func functor is: + \code + struct functor { + void operator()( value_type& item, Q& val ); + }; + \endcode + where \p item is the item found, \p val is the find function argument. + + The functor can change non-key fields of \p item. + The \p val argument may be non-const since it can be used as \p f functor destination i.e., the functor + can modify both arguments. + + The type \p Q can differ from \ref value_type of items storing in the container. + Therefore, the \p value_type should be comparable with type \p Q. + + The first form uses default \p compare function used for key ordering. + The second form allows to point specific \p Compare functor \p cmp + that can compare \p value_typwe and \p Q type. The interface of \p Compare is the same as \p std::less. + + The function returns \p true if \p val is found, \p false otherwise. +
+ + Clears the container + \code + void clear() + template void clear( Disposer disposer ) + \endcode + Second form calls \p disposer for each item in the container before clearing. +
+ + Get size of bucket + \code size_t size() const \endcode + This function may be required by some resizing policy +
+ + Iterators + \code + iterator begin(); + const_iterator begin() const; + iterator end(); + const_iterator end() const; + \endcode +
+ + Move item when resizing + \code void move_item( adapted_container& from, iterator it ) \endcode + This helper function is invented for the set resizing when the item + pointed by \p it iterator is copied from old bucket \p from to a new bucket + pointed by \p this. +
+ + */ + template < typename Container, typename... Options > + class adapt + { + public: + typedef Container type ; ///< adapted container type + typedef typename type::value_type value_type ; ///< value type stored in the container + }; + + //@cond + struct adapted_sequential_container + { + typedef striped_set::load_factor_resizing<4> default_resizing_policy; + }; + + struct adapted_container + { + typedef striped_set::no_resizing default_resizing_policy; + }; + //@endcond + + //@cond + namespace details { + template + class boost_intrusive_set_adapter: public cds::intrusive::striped_set::adapted_container + { + public: + typedef Set container_type; + + typedef typename container_type::value_type value_type ; ///< value type stored in the container + typedef typename container_type::iterator iterator ; ///< container iterator + typedef typename container_type::const_iterator const_iterator ; ///< container const iterator + + typedef typename container_type::key_compare key_comparator; + + private: + container_type m_Set; + + public: + boost_intrusive_set_adapter() + {} + + container_type& base_container() + { + return m_Set; + } + + template + bool insert( value_type& val, Func f ) + { + std::pair res = m_Set.insert( val ); + if ( res.second ) + f( val ); + return res.second; + } + + template + std::pair update( value_type& val, Func f, bool bAllowInsert ) + { + if ( bAllowInsert ) { + std::pair res = m_Set.insert( val ); + f( res.second, *res.first, val ); + return std::make_pair( true, res.second ); + } + else { + auto it = m_Set.find( val, key_comparator()); + if ( it == m_Set.end()) + return std::make_pair( false, false ); + f( false, *it, val ); + return std::make_pair( true, false ); + } + } + + bool unlink( value_type& val ) + { + iterator it = m_Set.find( val, key_comparator()); + if ( it == m_Set.end() || &(*it) != &val ) + return false; + m_Set.erase( it ); + return true; + } + + template + value_type * erase( Q const& key, Func f ) + { + iterator it = 
m_Set.find( key, key_comparator()); + if (it == m_Set.end()) + return nullptr; + value_type& val = *it; + f( val ); + m_Set.erase( it ); + return &val; + } + + template + value_type * erase( Q const& key, Less pred, Func f ) + { + iterator it = m_Set.find( key, pred ); + if (it == m_Set.end()) + return nullptr; + value_type& val = *it; + f( val ); + m_Set.erase( it ); + return &val; + } + + template + bool find( Q const& key, Func f ) + { + return find( key, key_comparator(), f ); + } + + template + bool find( Q const& key, Compare cmp, Func f ) + { + iterator it = m_Set.find( key, cmp ); + if ( it == m_Set.end()) + return false; + f( *it, key ); + return true; + } + + void clear() + { + m_Set.clear(); + } + + template + void clear( Disposer disposer ) + { + m_Set.clear_and_dispose( disposer ); + } + + iterator begin() { return m_Set.begin(); } + const_iterator begin() const { return m_Set.begin(); } + iterator end() { return m_Set.end(); } + const_iterator end() const { return m_Set.end(); } + + size_t size() const + { + return (size_t) m_Set.size(); + } + + void move_item( boost_intrusive_set_adapter& from, iterator itWhat ) + { + value_type& val = *itWhat; + from.base_container().erase( itWhat ); + insert( val, []( value_type& ) {} ); + } + }; + } // namespace details + //@endcond + + } // namespace striped_set +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_avl_set.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_avl_set.h new file mode 100644 index 0000000..e4d4854 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_avl_set.h @@ -0,0 +1,65 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + 
Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_AVL_SET_ADAPTER_H +#define CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_AVL_SET_ADAPTER_H + +#include +#include + +//@cond +namespace cds { namespace intrusive { namespace striped_set { + +#if CDS_COMPILER == CDS_COMPILER_INTEL && CDS_COMPILER_VERSION <= 1500 + template + class adapt< boost::intrusive::avl_set< T, P1, P2, P3, P4, P5 >, Options... 
> + { + public: + typedef boost::intrusive::avl_set< T, P1, P2, P3, P4, P5 > container_type; ///< underlying intrusive container type + + public: + typedef details::boost_intrusive_set_adapter type; ///< Result of the metafunction + }; +#else + template + class adapt< boost::intrusive::avl_set< T, BIOptons... >, Options... > + { + public: + typedef boost::intrusive::avl_set< T, BIOptons... > container_type ; ///< underlying intrusive container type + + public: + typedef details::boost_intrusive_set_adapter type ; ///< Result of the metafunction + }; +#endif + +}}} // namespace cds::intrusive::striped_set +//@endcond + +#endif // #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_AVL_SET_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_list.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_list.h new file mode 100644 index 0000000..25e32b2 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_list.h @@ -0,0 +1,244 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_LIST_ADAPTER_H +#define CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_LIST_ADAPTER_H + +#include +#include + +//@cond +namespace cds { namespace intrusive { namespace striped_set { + + namespace details { + template + class adapt_boost_list + { + public: + typedef List container_type; ///< underlying intrusive container type + + private: + /// Adapted intrusive container + class adapted_container : public cds::intrusive::striped_set::adapted_sequential_container + { + public: + typedef typename container_type::value_type value_type; ///< value type stored in the container + typedef typename container_type::iterator iterator; ///< container iterator + typedef typename container_type::const_iterator const_iterator; ///< container const iterator + typedef typename cds::opt::details::make_comparator_from_option_list< value_type, Options... 
>::type key_comparator; + + private: + struct find_predicate + { + bool operator()( value_type const& i1, value_type const& i2 ) const + { + return key_comparator()(i1, i2) < 0; + } + + template + bool operator()( Q const& i1, value_type const& i2 ) const + { + return key_comparator()(i1, i2) < 0; + } + + template + bool operator()( value_type const& i1, Q const& i2 ) const + { + return key_comparator()(i1, i2) < 0; + } + }; + + template + iterator find_key( Q const& key, Pred pred ) + { + iterator itEnd = m_List.end(); + iterator it; + for ( it = m_List.begin(); it != itEnd; ++it ) { + if ( !pred( *it, key )) + break; + } + return it; + } + + private: + container_type m_List; + + public: + adapted_container() + {} + + container_type& base_container() + { + return m_List; + } + + template + bool insert( value_type& val, Func f ) + { + iterator it = find_key( val, find_predicate()); + if ( it == m_List.end() || key_comparator()(val, *it) != 0 ) { + m_List.insert( it, val ); + f( val ); + + return true; + } + + // key already exists + return false; + } + + template + std::pair update( value_type& val, Func f, bool bAllowInsert ) + { + iterator it = find_key( val, find_predicate()); + if ( it == m_List.end() || key_comparator()(val, *it) != 0 ) { + // insert new + if ( !bAllowInsert ) + return std::make_pair( false, false ); + + m_List.insert( it, val ); + f( true, val, val ); + return std::make_pair( true, true ); + } + else { + // already exists + f( false, *it, val ); + return std::make_pair( true, false ); + } + } + + bool unlink( value_type& val ) + { + iterator it = find_key( val, find_predicate()); + if ( it == m_List.end() || &(*it) != &val ) + return false; + + m_List.erase( it ); + return true; + } + + template + value_type * erase( Q const& key, Func f ) + { + iterator it = find_key( key, find_predicate()); + if ( it == m_List.end() || key_comparator()(key, *it) != 0 ) + return nullptr; + + // key exists + value_type& val = *it; + f( val ); + m_List.erase( 
it ); + + return &val; + } + + template + value_type * erase( Q const& key, Less pred, Func f ) + { + iterator it = find_key( key, pred ); + if ( it == m_List.end() || pred( key, *it ) || pred( *it, key )) + return nullptr; + + // key exists + value_type& val = *it; + f( val ); + m_List.erase( it ); + + return &val; + } + + template + bool find( Q& key, Func f ) + { + return find( key, find_predicate(), f ); + } + + template + bool find( Q& key, Less pred, Func f ) + { + iterator it = find_key( key, pred ); + if ( it == m_List.end() || pred( key, *it ) || pred( *it, key )) + return false; + + // key exists + f( *it, key ); + return true; + } + + void clear() + { + m_List.clear(); + } + + template + void clear( Disposer disposer ) + { + m_List.clear_and_dispose( disposer ); + } + + iterator begin() { return m_List.begin(); } + const_iterator begin() const { return m_List.begin(); } + iterator end() { return m_List.end(); } + const_iterator end() const { return m_List.end(); } + + size_t size() const + { + return (size_t)m_List.size(); + } + + void move_item( adapted_container& from, iterator itWhat ) + { + value_type& val = *itWhat; + from.base_container().erase( itWhat ); + insert( val, []( value_type& ) {} ); + } + + }; + public: + typedef adapted_container type; ///< Result of the metafunction + }; + } // namespace details + +#if CDS_COMPILER == CDS_COMPILER_INTEL && CDS_COMPILER_VERSION <= 1500 + template + class adapt< boost::intrusive::list< T, P1, P2, P3, P4 >, Options... > + : public details::adapt_boost_list< boost::intrusive::list< T, P1, P2, P3, P4 >, Options... > + {}; +#else + template + class adapt< boost::intrusive::list< T, BIOptions... >, Options... > + : public details::adapt_boost_list< boost::intrusive::list< T, BIOptions... >, Options... 
> + {}; +#endif + +}}} // namespace cds::intrusive::striped_set +//@endcond + +#endif // #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_LIST_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_set.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_set.h new file mode 100644 index 0000000..802e594 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_set.h @@ -0,0 +1,65 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_SET_ADAPTER_H +#define CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_SET_ADAPTER_H + +#include +#include + +//@cond +namespace cds { namespace intrusive { namespace striped_set { + +#if CDS_COMPILER == CDS_COMPILER_INTEL && CDS_COMPILER_VERSION <= 1500 + template + class adapt< boost::intrusive::set< T, O1, O2, O3, O4 >, Options... > + { + public: + typedef boost::intrusive::set< T, O1, O2, O3, O4 > container_type; ///< underlying intrusive container type + + public: + typedef details::boost_intrusive_set_adapter type; ///< Result of the metafunction + }; +#else + template + class adapt< boost::intrusive::set< T, BIOptons... >, Options... > + { + public: + typedef boost::intrusive::set< T, BIOptons... 
> container_type ; ///< underlying intrusive container type + + public: + typedef details::boost_intrusive_set_adapter type ; ///< Result of the metafunction + }; +#endif + +}}} // namespace cds::intrusive::striped_set +//@endcond + +#endif // #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_SET_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_sg_set.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_sg_set.h new file mode 100644 index 0000000..7ffd142 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_sg_set.h @@ -0,0 +1,64 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_SG_SET_ADAPTER_H +#define CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_SG_SET_ADAPTER_H + +#include +#include + +//@cond +namespace cds { namespace intrusive { namespace striped_set { + +#if CDS_COMPILER == CDS_COMPILER_INTEL && CDS_COMPILER_VERSION <= 1500 + template + class adapt< boost::intrusive::sg_set< T, O1, O2, O3, O4 >, Options... > + { + public: + typedef boost::intrusive::sg_set< T, O1, O2, O3, O4 > container_type; ///< underlying intrusive container type + + public: + typedef details::boost_intrusive_set_adapter type; ///< Result of the metafunction + }; +#else + template + class adapt< boost::intrusive::sg_set< T, BIOptons... >, Options... > + { + public: + typedef boost::intrusive::sg_set< T, BIOptons... 
> container_type ; ///< underlying intrusive container type + + public: + typedef details::boost_intrusive_set_adapter type ; ///< Result of the metafunction + }; +#endif +}}} // namespace cds::intrusive::striped_set +//@endcond + +#endif // #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_SG_SET_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_slist.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_slist.h new file mode 100644 index 0000000..2d7596d --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_slist.h @@ -0,0 +1,260 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_SLIST_ADAPTER_H +#define CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_SLIST_ADAPTER_H + +#include +#include + +//@cond +namespace cds { namespace intrusive { namespace striped_set { + + namespace details { + template + class adapt_boost_slist + { + public: + typedef List container_type; ///< underlying intrusive container type + + private: + /// Adapted intrusive container + class adapted_container : public cds::intrusive::striped_set::adapted_sequential_container + { + public: + typedef typename container_type::value_type value_type; ///< value type stored in the container + typedef typename container_type::iterator iterator; ///< container iterator + typedef typename container_type::const_iterator const_iterator; ///< container const iterator + + typedef typename cds::opt::details::make_comparator_from_option_list< value_type, Options... 
>::type key_comparator; + + private: + + template + std::pair< iterator, bool > find_prev_item( Q const& key, Less pred ) + { + iterator itPrev = m_List.before_begin(); + iterator itEnd = m_List.end(); + for ( iterator it = m_List.begin(); it != itEnd; ++it ) { + if ( pred( key, *it )) + itPrev = it; + else if ( pred( *it, key )) + break; + else + return std::make_pair( itPrev, true ); + } + return std::make_pair( itPrev, false ); + } + + template + std::pair< iterator, bool > find_prev_item( Q const& key ) + { + return find_prev_item_cmp( key, key_comparator()); + } + + template + std::pair< iterator, bool > find_prev_item_cmp( Q const& key, Compare cmp ) + { + iterator itPrev = m_List.before_begin(); + iterator itEnd = m_List.end(); + for ( iterator it = m_List.begin(); it != itEnd; ++it ) { + int nCmp = cmp( key, *it ); + if ( nCmp < 0 ) + itPrev = it; + else if ( nCmp > 0 ) + break; + else + return std::make_pair( itPrev, true ); + } + return std::make_pair( itPrev, false ); + } + + template + value_type * erase_( Q const& key, Compare cmp, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item_cmp( key, cmp ); + if ( !pos.second ) + return nullptr; + + // key exists + iterator it = pos.first; + value_type& val = *(++it); + f( val ); + m_List.erase_after( pos.first ); + + return &val; + } + + private: + container_type m_List; + + public: + adapted_container() + {} + + container_type& base_container() + { + return m_List; + } + + template + bool insert( value_type& val, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item( val ); + if ( !pos.second ) { + m_List.insert_after( pos.first, val ); + f( val ); + return true; + } + + // key already exists + return false; + } + + template + std::pair update( value_type& val, Func f, bool bAllowInsert ) + { + std::pair< iterator, bool > pos = find_prev_item( val ); + if ( !pos.second ) { + // insert new + if ( !bAllowInsert ) + return std::make_pair( false, false ); + + m_List.insert_after( pos.first, 
val ); + f( true, val, val ); + return std::make_pair( true, true ); + } + else { + // already exists + f( false, *(++pos.first), val ); + return std::make_pair( true, false ); + } + } + + bool unlink( value_type& val ) + { + std::pair< iterator, bool > pos = find_prev_item( val ); + if ( !pos.second ) + return false; + + ++pos.first; + if ( &(*pos.first) != &val ) + return false; + + m_List.erase( pos.first ); + return true; + } + + template + value_type * erase( Q const& key, Func f ) + { + return erase_( key, key_comparator(), f ); + } + + template + value_type * erase( Q const& key, Less /*pred*/, Func f ) + { + return erase_( key, cds::opt::details::make_comparator_from_less(), f ); + } + + template + bool find( Q& key, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item( key ); + if ( !pos.second ) + return false; + + // key exists + f( *(++pos.first), key ); + return true; + } + + template + bool find( Q& key, Less pred, Func f ) + { + std::pair< iterator, bool > pos = find_prev_item( key, pred ); + if ( !pos.second ) + return false; + + // key exists + f( *(++pos.first), key ); + return true; + } + + void clear() + { + m_List.clear(); + } + + template + void clear( Disposer disposer ) + { + m_List.clear_and_dispose( disposer ); + } + + iterator begin() { return m_List.begin(); } + const_iterator begin() const { return m_List.begin(); } + iterator end() { return m_List.end(); } + const_iterator end() const { return m_List.end(); } + + size_t size() const + { + return (size_t)m_List.size(); + } + + void move_item( adapted_container& from, iterator itWhat ) + { + value_type& val = *itWhat; + from.base_container().erase( itWhat ); + insert( val, []( value_type& ) {} ); + } + + }; + public: + typedef adapted_container type; ///< Result of the metafunction + }; + } // namespace details + +#if CDS_COMPILER == CDS_COMPILER_INTEL && CDS_COMPILER_VERSION <= 1500 + template + class adapt< boost::intrusive::slist< T, P1, P2, P3, P4, P5 >, Options... 
> + : public details::adapt_boost_slist< boost::intrusive::slist< T, P1, P2, P3, P4, P5 >, Options... > + {}; +#else + template + class adapt< boost::intrusive::slist< T, BIOptions... >, Options... > + : public details::adapt_boost_slist< boost::intrusive::slist< T, BIOptions... >, Options... > + {}; +#endif + +}}} // namespace cds::intrusive::striped_set +//@endcond + +#endif // #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_SLIST_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_splay_set.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_splay_set.h new file mode 100644 index 0000000..8c0167f --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_splay_set.h @@ -0,0 +1,66 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_SPLAY_SET_ADAPTER_H +#define CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_SPLAY_SET_ADAPTER_H + +#include +#include + +//@cond +namespace cds { namespace intrusive { namespace striped_set { + +#if CDS_COMPILER == CDS_COMPILER_INTEL && CDS_COMPILER_VERSION <= 1500 + template + class adapt< boost::intrusive::splay_set< T, O1, O2, O3, O4 >, Options... > + { + public: + typedef boost::intrusive::splay_set< T, O1, O2, O3, O4 > container_type; ///< underlying intrusive container type + + public: + typedef details::boost_intrusive_set_adapter type; ///< Result of the metafunction + + }; +#else + template + class adapt< boost::intrusive::splay_set< T, BIOptons... >, Options... > + { + public: + typedef boost::intrusive::splay_set< T, BIOptons... 
> container_type ; ///< underlying intrusive container type + + public: + typedef details::boost_intrusive_set_adapter type ; ///< Result of the metafunction + + }; +#endif +}}} // namespace cds::intrusive::striped_set +//@endcond + +#endif // #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_SPLAY_SET_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_treap_set.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_treap_set.h new file mode 100644 index 0000000..307b204 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_treap_set.h @@ -0,0 +1,64 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_TREAP_SET_ADAPTER_H +#define CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_TREAP_SET_ADAPTER_H + +#include +#include + +//@cond +namespace cds { namespace intrusive { namespace striped_set { + +#if CDS_COMPILER == CDS_COMPILER_INTEL && CDS_COMPILER_VERSION <= 1500 + template + class adapt< boost::intrusive::treap_set< T, O1, O2, O3, O4 >, Options... > + { + public: + typedef boost::intrusive::treap_set< T, O1, O2, O3, O4 > container_type ; ///< underlying intrusive container type + + public: + typedef details::boost_intrusive_set_adapter type ; ///< Result of the metafunction + }; +#else + template + class adapt< boost::intrusive::treap_set< T, BIOptons... >, Options... > + { + public: + typedef boost::intrusive::treap_set< T, BIOptons... 
> container_type; ///< underlying intrusive container type + + public: + typedef details::boost_intrusive_set_adapter type; ///< Result of the metafunction + }; +#endif +}}} // namespace cds::intrusive::striped_set +//@endcond + +#endif // #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_TREAP_SET_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_unordered_set.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_unordered_set.h new file mode 100644 index 0000000..ae2cb8b --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/boost_unordered_set.h @@ -0,0 +1,236 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_UNORDERED_SET_ADAPTER_H +#define CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_UNORDERED_SET_ADAPTER_H + +#include +#include +#include + +//@cond +namespace cds { namespace intrusive { namespace striped_set { + + namespace details { + template + class adapt_boost_unordered_set + { + public: + typedef Set container_type; ///< underlying intrusive container type + + private: + class adapted_container + { + public: + typedef typename container_type::value_type value_type; ///< value type stored in the container + typedef typename container_type::iterator iterator; ///< container iterator + typedef typename container_type::const_iterator const_iterator; ///< container const iterator + + typedef typename opt::value < + typename opt::find_option < + opt::buffer< opt::v::initialized_static_buffer< cds::any_type, 256 > >, + Options... 
+ > ::type + > ::buffer initial_buffer_type; + typedef typename initial_buffer_type::template rebind< typename container_type::bucket_type >::other buffer_type; + typedef cds::intrusive::striped_set::load_factor_resizing<256> default_resizing_policy; + + private: + template + struct equal_from_compare + { + Compare& m_cmp; + equal_from_compare( Compare& cmp ) + : m_cmp( cmp ) + {} + + equal_from_compare( equal_from_compare const& src ) + : m_cmp( src.m_cmp ) + {} + + template + bool operator()( A& a, B& b ) const + { + return !m_cmp( a, b ) && !m_cmp( b, a ); + } + + template + bool operator()( A& a, B& b ) + { + return !m_cmp( a, b ) && !m_cmp( b, a ); + } + }; + + buffer_type m_Buckets; // buffer should be declared first since it is used in m_Set ctor. + container_type m_Set; + + public: + adapted_container() + : m_Set( typename container_type::bucket_traits( m_Buckets.buffer(), m_Buckets.capacity())) + {} + + container_type& base_container() + { + return m_Set; + } + + template + bool insert( value_type& val, Func f ) + { + std::pair res = m_Set.insert( val ); + if ( res.second ) + f( val ); + return res.second; + } + + template + std::pair update( value_type& val, Func f, bool bAllowInsert ) + { + if ( bAllowInsert ) { + std::pair res = m_Set.insert( val ); + f( res.second, *res.first, val ); + return std::make_pair( true, res.second ); + } + else { + auto it = m_Set.find( val ); + if ( it == m_Set.end()) + return std::make_pair( false, false ); + f( false, *it, val ); + return std::make_pair( true, false ); + } + } + + bool unlink( value_type& val ) + { + iterator it = m_Set.find( value_type( val )); + if ( it == m_Set.end() || &(*it) != &val ) + return false; + m_Set.erase( it ); + return true; + } + + template + value_type * erase( Q const& key, Func f ) + { + iterator it = m_Set.find( key, typename container_type::hasher(), typename container_type::key_equal()); + if ( it == m_Set.end()) + return nullptr; + value_type& val = *it; + f( val ); + m_Set.erase( 
it ); + return &val; + } + + template + value_type * erase( Q const& key, Less pred, Func f ) + { + iterator it = m_Set.find( key, typename container_type::hasher(), equal_from_compare( pred )); + if ( it == m_Set.end()) + return nullptr; + value_type& val = *it; + f( val ); + m_Set.erase( it ); + return &val; + } + + template + bool find( Q& key, Func f ) + { + iterator it = m_Set.find( key, typename container_type::hasher(), typename container_type::key_equal()); + if ( it == m_Set.end()) + return false; + f( *it, key ); + return true; + } + + template + bool find( Q& key, Less pred, Func f ) + { + iterator it = m_Set.find( key, typename container_type::hasher(), equal_from_compare( pred )); + if ( it == m_Set.end()) + return false; + f( *it, key ); + return true; + } + + void clear() + { + m_Set.clear(); + } + + template + void clear( Disposer disposer ) + { + m_Set.clear_and_dispose( disposer ); + } + + iterator begin() { return m_Set.begin(); } + const_iterator begin() const { return m_Set.begin(); } + iterator end() { return m_Set.end(); } + const_iterator end() const { return m_Set.end(); } + + size_t size() const + { + return (size_t)m_Set.size(); + } + + void move_item( adapted_container& from, iterator itWhat ) + { + value_type& val = *itWhat; + from.base_container().erase( itWhat ); + insert( val, []( value_type& ) {} ); + } + }; + + public: + typedef adapted_container type; ///< Result of the metafunction + }; + } // namespace details + +#if CDS_COMPILER == CDS_COMPILER_INTEL && CDS_COMPILER_VERSION <= 1500 + template + class adapt < boost::intrusive::unordered_set< T, O1, O2, O3, O4, O5, O6, O7, O8, O9, O10 >, Options... > + : public details::adapt_boost_unordered_set < boost::intrusive::unordered_set< T, O1, O2, O3, O4, O5, O6, O7, O8, O9, O10 >, Options... > + {}; +#else + template + class adapt < boost::intrusive::unordered_set< T, BIOptons... >, Options... 
> + : public details::adapt_boost_unordered_set < boost::intrusive::unordered_set< T, BIOptons... >, Options... > + {}; +#endif + +}}} // namespace cds::intrusive::striped_set +//@endcond + +#endif // #ifndef CDSLIB_INTRUSIVE_STRIPED_SET_BOOST_UNORDERED_SET_ADAPTER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/resizing_policy.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/resizing_policy.h new file mode 100644 index 0000000..72eaaa8 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/resizing_policy.h @@ -0,0 +1,293 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_STRIPED_SET_RESIZING_POLICY_H +#define CDSLIB_INTRUSIVE_STRIPED_SET_RESIZING_POLICY_H + +#include + +namespace cds { namespace intrusive { namespace striped_set { + + /// Load factor based resizing policy + /** @ingroup cds_striped_resizing_policy + When total item count in a container exceeds + container.bucket_count() * LoadFactor + then resizing is needed. + + This policy is stateless. + + The reset() function is called after the resizing is done. + The function is intended for resetting internal state of the policy. + */ + template + struct load_factor_resizing + { + /// Main policy operator returns \p true when resizing is needed + template + bool operator ()( + size_t nSize, ///< Current item count of \p container + Container const& container, ///< Container + Bucket const& /*bucket*/ ///< reference to a container's bucket (not used) + ) const + { + return nSize > container.bucket_count() * LoadFactor; + } + + /// Resets internal state of the policy (does nothing) + void reset() + {} + }; + + /// Load factor based resizing policy, stateful specialization + /** @ingroup cds_striped_resizing_policy + This specialization allows to specify a load factor at runtime. 
+ */ + template <> + struct load_factor_resizing<0> + { + ///@cond + const size_t m_nLoadFactor; + //@endcond + public: + /// Default ctor, load factor is 4 + load_factor_resizing() + : m_nLoadFactor(4) + {} + + /// Ctor with explicitly defined \p nLoadFactor + explicit load_factor_resizing( size_t nLoadFactor ) + : m_nLoadFactor( nLoadFactor ) + {} + + /// Copy ctor + load_factor_resizing( load_factor_resizing const& src ) + : m_nLoadFactor( src.m_nLoadFactor ) + {} + + /// Move ctor + load_factor_resizing( load_factor_resizing&& src ) + : m_nLoadFactor( src.m_nLoadFactor ) + {} + + /// Main policy operator returns \p true when resizing is needed + template + bool operator ()( + size_t nSize, ///< Current item count of \p container + Container const& container, ///< Container + Bucket const& /*bucket*/ ///< reference to a container's bucket (not used) + ) + { + return nSize > container.bucket_count() * m_nLoadFactor; + } + + /// Resets internal state of the policy (does nothing) + void reset() + {} + }; + + /// Rational load factor resizing policy + /** @ingroup cds_striped_resizing_policy + When total item count in a container exceeds + container.bucket_count() * Numerator / Denominator + then resizing is needed. 
#include <cstddef> // size_t

namespace cds { namespace intrusive { namespace striped_set {

    /// Rational load factor resizing policy
    /** @ingroup cds_striped_resizing_policy
        Resizing is requested when the total item count in the container exceeds
        <tt>container.bucket_count() * Numerator / Denominator</tt>.

        This policy is stateless: \p Numerator and \p Denominator are specified
        at compile time as template arguments.
    */
    template <size_t Numerator, size_t Denominator = 1>  // NOTE(review): parameter list restored; stripped in the patch
    struct rational_load_factor_resizing
    {
        static_assert( Denominator != 0, "Denominator must not be zero" );

        /// Main policy operator: returns \p true when resizing is needed
        template <typename Container, typename Bucket>
        bool operator()(
            size_t nSize,               ///< Current item count of \p container
            Container const& container, ///< Container
            Bucket const& /*bucket*/    ///< Reference to a container's bucket (not used)
        ) const
        {
            // Cross-multiplied form of nSize > bucket_count() * Numerator / Denominator,
            // avoiding integer-division truncation.
            return nSize * Denominator > container.bucket_count() * Numerator;
        }

        /// Resets internal state of the policy (stateless - does nothing)
        void reset()
        {}
    };

    /// Rational load factor resizing policy, stateful specialization
    /** @ingroup cds_striped_resizing_policy
        The numerator and denominator are specified at construction time.
    */
    template <size_t Denominator>
    struct rational_load_factor_resizing<0, Denominator>
    {
        //@cond
        const size_t m_nNumerator;   // runtime numerator; immutable after construction
        const size_t m_nDenominator; // runtime denominator; immutable after construction
        //@endcond
    public:
        /// Default ctor, load factor is 1/2
        rational_load_factor_resizing()
            : m_nNumerator( 1 ), m_nDenominator( 2 )
        {}

        /// Ctor with explicitly defined ratio \p nNumerator / \p nDenominator
        rational_load_factor_resizing( size_t nNumerator, size_t nDenominator )
            : m_nNumerator( nNumerator ), m_nDenominator( nDenominator )
        {}

        /// Copy ctor
        rational_load_factor_resizing( rational_load_factor_resizing const& src )
            : m_nNumerator( src.m_nNumerator ), m_nDenominator( src.m_nDenominator )
        {}

        /// Move ctor (members are scalar, so identical to copy)
        rational_load_factor_resizing( rational_load_factor_resizing&& src )
            : m_nNumerator( src.m_nNumerator ), m_nDenominator( src.m_nDenominator )
        {}

        /// Main policy operator: returns \p true when resizing is needed
        template <typename Container, typename Bucket>
        bool operator()(
            size_t nSize,               ///< Current item count of \p container
            Container const& container, ///< Container
            Bucket const& /*bucket*/    ///< Reference to a container's bucket (not used)
        )
        {
            return nSize * m_nDenominator > container.bucket_count() * m_nNumerator;
        }

        /// Resets internal state of the policy (no mutable state - does nothing)
        void reset()
        {}
    };

    /// Single bucket threshold resizing policy
    /** @ingroup cds_striped_resizing_policy
        Resizing is requested when any single bucket size exceeds the global \p Threshold.

        This policy is stateless.
    */
    template <size_t Threshold>  // NOTE(review): parameter list restored; stripped in the patch
    struct single_bucket_size_threshold
    {
        /// Main policy operator: returns \p true when the bucket size exceeds \p Threshold
        template <typename Container, typename Bucket>
        bool operator()(
            size_t /*nSize*/,                ///< Current item count of \p container (not used)
            Container const& /*container*/,  ///< Container (not used)
            Bucket const& bucket             ///< Reference to a container's bucket
        ) const
        {
            return bucket.size() > Threshold;
        }

        /// Resets internal state of the policy (stateless - does nothing)
        void reset()
        {}
    };

}}} // namespace cds::intrusive::striped_set
+ */ + template <> + struct single_bucket_size_threshold<0> + { + size_t m_nThreshold ; ///< The bucket size threshold + + /// Default ctor, the threshold is 4 + single_bucket_size_threshold() + : m_nThreshold(4) + {} + + /// Ctor with explicitly defined \p nThreshold + explicit single_bucket_size_threshold( size_t nThreshold ) + : m_nThreshold( nThreshold ) + {} + + /// Copy ctor + single_bucket_size_threshold( single_bucket_size_threshold const& src ) + : m_nThreshold( src.m_nThreshold ) + {} + + /// Move ctor + single_bucket_size_threshold( single_bucket_size_threshold&& src ) + : m_nThreshold( src.m_nThreshold ) + {} + + /// Main policy operator returns \p true when resizing is needed + template + bool operator ()( + size_t /*nSize*/, ///< Current item count of \p container (not used) + Container const& /*container*/, ///< Container (not used) + Bucket const& bucket ///< reference to a container's bucket + ) const + { + return bucket.size() > m_nThreshold; + } + + /// Resets internal state of the policy (does nothing) + void reset() + {} + }; + + /// Dummy resizing policy + /** @ingroup cds_striped_resizing_policy + This policy is dummy and always returns \p false that means no resizing is needed. + + This policy is stateless. 
+ */ + struct no_resizing + { + /// Main policy operator always returns \p false + template + bool operator ()( + size_t /*nSize*/, ///< Current item count of \p container (not used) + Container const& /*container*/, ///< Container (not used) + Bucket const& /*bucket*/ ///< reference to a container's bucket (not used) + ) const + { + return false; + } + + /// Resets internal state of the policy (does nothing) + void reset() + {} + }; + +}}} // namespace cds::intrusive::striped_set + +#endif // #define CDSLIB_INTRUSIVE_STRIPED_SET_RESIZING_POLICY_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/striping_policy.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/striping_policy.h new file mode 100644 index 0000000..4047963 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/striped_set/striping_policy.h @@ -0,0 +1,389 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_STRIPED_SET_STRIPING_POLICY_H +#define CDSLIB_INTRUSIVE_STRIPED_SET_STRIPING_POLICY_H + +#include +#include +#include +#include +#include + +namespace cds { namespace intrusive { namespace striped_set { + + /// Lock striping concurrent access policy + /** + This is one of available opt::mutex_policy option type for StripedSet + + Lock striping is very simple technique. + The set consists of the bucket table and the array of locks. + Initially, the capacity of lock array and bucket table is the same. + When set is resized, bucket table capacity will be doubled but lock array will not. + The lock \p i protects each bucket \p j, where j = i mod L , + where \p L - the size of lock array. + + The policy contains an internal array of \p Lock locks. + + Template arguments: + - \p Lock - the type of mutex. The default is \p std::mutex. The mutex type should be default-constructible. + Note that a spin-lock is not so good suitable for lock striping for performance reason. + - \p Alloc - allocator type used for lock array memory allocation. Default is \p CDS_DEFAULT_ALLOCATOR. 
+ */ + template + class striping + { + public: + typedef Lock lock_type ; ///< lock type + typedef Alloc allocator_type ; ///< allocator type + + typedef cds::sync::lock_array< lock_type, cds::sync::pow2_select_policy, allocator_type > lock_array_type ; ///< lock array type + + protected: + //@cond + lock_array_type m_Locks; + //@endcond + + public: + //@cond + class scoped_cell_lock { + std::unique_lock< lock_array_type > m_guard; + + public: + scoped_cell_lock( striping& policy, size_t nHash ) + : m_guard( policy.m_Locks, nHash ) + {} + }; + + class scoped_full_lock { + std::unique_lock< lock_array_type > m_guard; + public: + scoped_full_lock( striping& policy ) + : m_guard( policy.m_Locks ) + {} + }; + + class scoped_resize_lock: public scoped_full_lock { + public: + scoped_resize_lock( striping& policy ) + : scoped_full_lock( policy ) + {} + + bool success() const + { + return true; + } + }; + //@endcond + + public: + /// Constructor + striping( + size_t nLockCount ///< The size of lock array. Must be power of two. + ) + : m_Locks( nLockCount, cds::sync::pow2_select_policy( nLockCount )) + {} + + /// Returns lock array size + /** + Lock array size is unchanged during \p striped object lifetime + */ + size_t lock_count() const + { + return m_Locks.size(); + } + + //@cond + void resize( size_t /*nNewCapacity*/ ) + {} + //@endcond + }; + + + /// Refinable concurrent access policy + /** + This is one of available opt::mutex_policy option type for StripedSet + + Refining is like a striping technique (see striped_set::striping) + but it allows growing the size of lock array when resizing the hash table. + So, the sizes of hash table and lock array are equal. + + Template arguments: + - \p RecursiveLock - the type of mutex. Reentrant (recursive) mutex is required. + The default is \p std::recursive_mutex. The mutex type should be default-constructible. + - \p BackOff - back-off strategy. 
Default is cds::backoff::yield + - \p Alloc - allocator type used for lock array memory allocation. Default is \p CDS_DEFAULT_ALLOCATOR. + */ + template < + class RecursiveLock = std::recursive_mutex, + typename BackOff = cds::backoff::yield, + class Alloc = CDS_DEFAULT_ALLOCATOR> + class refinable + { + public: + typedef RecursiveLock lock_type ; ///< lock type + typedef BackOff back_off ; ///< back-off strategy used + typedef Alloc allocator_type; ///< allocator type + + protected: + //@cond + typedef cds::sync::trivial_select_policy lock_selection_policy; + + class lock_array_type + : public cds::sync::lock_array< lock_type, lock_selection_policy, allocator_type > + , public std::enable_shared_from_this< lock_array_type > + { + typedef cds::sync::lock_array< lock_type, lock_selection_policy, allocator_type > lock_array_base; + public: + lock_array_type( size_t nCapacity ) + : lock_array_base( nCapacity ) + {} + }; + typedef std::shared_ptr< lock_array_type > lock_array_ptr; + typedef cds::details::Allocator< lock_array_type, allocator_type > lock_array_allocator; + + typedef unsigned long long owner_t; + typedef cds::OS::ThreadId threadId_t; + + typedef cds::sync::spin spinlock_type; + typedef std::unique_lock< spinlock_type > scoped_spinlock; + //@endcond + + protected: + //@cond + static owner_t const c_nOwnerMask = (((owner_t) 1) << (sizeof(owner_t) * 8 - 1)) - 1; + + lock_array_ptr m_arrLocks ; ///< Lock array. The capacity of array is specified in constructor. 
+ atomics::atomic< owner_t > m_Owner ; ///< owner mark (thread id + boolean flag) + atomics::atomic m_nCapacity ; ///< Lock array capacity + spinlock_type m_access ; ///< access to m_arrLocks + //@endcond + + protected: + //@cond + struct lock_array_disposer { + void operator()( lock_array_type * pArr ) + { + // Seems, there is a false positive in std::shared_ptr deallocation in uninstrumented libc++ + // see, for example, https://groups.google.com/forum/#!topic/thread-sanitizer/eHu4dE_z7Cc + // https://reviews.llvm.org/D21609 + CDS_TSAN_ANNOTATE_IGNORE_WRITES_BEGIN; + lock_array_allocator().Delete( pArr ); + CDS_TSAN_ANNOTATE_IGNORE_WRITES_END; + } + }; + + lock_array_ptr create_lock_array( size_t nCapacity ) + { + m_nCapacity.store( nCapacity, atomics::memory_order_relaxed ); + return lock_array_ptr( lock_array_allocator().New( nCapacity ), lock_array_disposer()); + } + + lock_type& acquire( size_t nHash ) + { + owner_t me = (owner_t) cds::OS::get_current_thread_id(); + owner_t who; + + back_off bkoff; + while ( true ) { + // wait while resizing + while ( true ) { + who = m_Owner.load( atomics::memory_order_acquire ); + if ( !( who & 1 ) || (who >> 1) == (me & c_nOwnerMask)) + break; + bkoff(); + } + + lock_array_ptr pLocks; + { + scoped_spinlock sl(m_access); + pLocks = m_arrLocks; + } + + lock_type& lock = pLocks->at( nHash & (pLocks->size() - 1)); + lock.lock(); + + who = m_Owner.load( atomics::memory_order_acquire ); + if ( ( !(who & 1) || (who >> 1) == (me & c_nOwnerMask)) && m_arrLocks == pLocks ) + return lock; + lock.unlock(); + } + } + + lock_array_ptr acquire_all() + { + owner_t me = (owner_t) cds::OS::get_current_thread_id(); + owner_t who; + + back_off bkoff; + while ( true ) { + // wait while resizing + while ( true ) { + who = m_Owner.load( atomics::memory_order_acquire ); + if ( !( who & 1 ) || (who >> 1) == (me & c_nOwnerMask)) + break; + bkoff(); + } + + lock_array_ptr pLocks; + { + scoped_spinlock sl(m_access); + pLocks = m_arrLocks; + } + + 
pLocks->lock_all(); + + who = m_Owner.load( atomics::memory_order_acquire ); + if ( ( !(who & 1) || (who >> 1) == (me & c_nOwnerMask)) && m_arrLocks == pLocks ) + return pLocks; + + pLocks->unlock_all(); + } + } + + void release_all( lock_array_ptr p ) + { + p->unlock_all(); + } + + bool acquire_resize() + { + owner_t me = (owner_t) cds::OS::get_current_thread_id(); + + back_off bkoff; + for (unsigned int nAttempts = 0; nAttempts < 32; ++nAttempts ) { + owner_t ownNull = 0; + if ( m_Owner.compare_exchange_strong( ownNull, (me << 1) | 1, atomics::memory_order_acquire, atomics::memory_order_relaxed )) { + lock_array_ptr pOldLocks = m_arrLocks; + size_t const nLockCount = pOldLocks->size(); + for ( size_t i = 0; i < nLockCount; ++i ) { + typename lock_array_type::lock_type& lock = pOldLocks->at(i); + bkoff.reset(); + while ( !lock.try_lock()) + bkoff(); + lock.unlock(); + } + return true; + } + else + bkoff(); + } + return false; + } + + void release_resize() + { + m_Owner.store( 0, atomics::memory_order_release ); + } + //@endcond + public: + //@cond + class scoped_cell_lock { + std::unique_lock< lock_type > m_guard; + + public: + scoped_cell_lock( refinable& policy, size_t nHash ) + : m_guard( policy.acquire( nHash ), std::adopt_lock_t()) + {} + }; + + class scoped_full_lock { + refinable& m_Policy; + lock_array_ptr m_Locks; + public: + scoped_full_lock( refinable& policy ) + : m_Policy( policy ) + { + m_Locks = policy.acquire_all(); + } + ~scoped_full_lock() + { + m_Policy.release_all( m_Locks ); + } + }; + + class scoped_resize_lock { + refinable& m_Policy; + bool m_bSucceess; + + public: + scoped_resize_lock( refinable& policy ) + : m_Policy( policy ) + { + m_bSucceess = policy.acquire_resize(); + } + + ~scoped_resize_lock() + { + if ( m_bSucceess ) + m_Policy.release_resize(); + } + + bool success() const + { + return m_bSucceess; + } + }; + //@endcond + + public: + /// Constructor + refinable( + size_t nLockCount ///< Initial size of lock array. 
Must be power of two. + ) + : m_Owner(0) + , m_nCapacity( nLockCount ) + { + assert( cds::beans::is_power2( nLockCount )); + m_arrLocks = create_lock_array( nLockCount ); + } + + /// Returns lock array size + /** + Lock array size is not a constant for \p refinable policy and can be changed when the set is resized. + */ + size_t lock_count() const + { + return m_nCapacity.load( atomics::memory_order_relaxed ); + } + + /// Resize for new capacity + void resize( size_t nNewCapacity ) + { + // Expect the access is locked by scoped_resize_lock!!! + lock_array_ptr pNewArr = create_lock_array( nNewCapacity ); + scoped_spinlock sl(m_access); + m_arrLocks.swap( pNewArr ); + } + }; + +}}} // namespace cds::intrusive::striped_set + +#endif diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/treiber_stack.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/treiber_stack.h new file mode 100644 index 0000000..d1ad96a --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/treiber_stack.h @@ -0,0 +1,862 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_TREIBER_STACK_H +#define CDSLIB_INTRUSIVE_TREIBER_STACK_H + +#include +#include // unique_lock +#include +#include +#include +#include +#include + +namespace cds { namespace intrusive { + + /// TreiberStack related definitions + /** @ingroup cds_intrusive_helper + */ + namespace treiber_stack { + + /// Stack node + /** + Template parameters: + - GC - garbage collector used + - Tag - a \ref cds_intrusive_hook_tag "tag" + */ + template + using node = cds::intrusive::single_link::node< GC, Tag >; + + /// Base hook + /** + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - a \ref cds_intrusive_hook_tag "tag" + */ + template < typename... Options > + using base_hook = cds::intrusive::single_link::base_hook< Options...>; + + /// Member hook + /** + \p MemberOffset specifies offset in bytes of \ref node member into your structure. + Use \p offsetof macro to define \p MemberOffset + + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - a \ref cds_intrusive_hook_tag "tag" + */ + template < size_t MemberOffset, typename... Options > + using member_hook = cds::intrusive::single_link::member_hook< MemberOffset, Options... 
>; + + /// Traits hook + /** + \p NodeTraits defines type traits for node. + See \ref node_traits for \p NodeTraits interface description + + \p Options are: + - opt::gc - garbage collector used. + - opt::tag - a \ref cds_intrusive_hook_tag "tag" + */ + template + using traits_hook = cds::intrusive::single_link::traits_hook< NodeTraits, Options... >; + + //@cond + /// Operation id for the \ref cds_elimination_description "elimination back-off" + enum operation_id { + op_push, ///< push op id + op_pop ///< pop op id + }; + + /// Operation descriptor for the \ref cds_elimination_description "elimination back-off" + template + struct operation: public cds::algo::elimination::operation_desc + { + operation_id idOp; ///< Op id + T * pVal; ///< for push: pointer to argument; for pop: accepts a return value + atomics::atomic nStatus; ///< Internal elimination status + + operation() + : pVal( nullptr ) + , nStatus( 0 /*op_free*/ ) + {} + }; + //@endcond + + /// Stack internal statistics. May be useful for debugging or profiling + /** + Template argument \p Counter defines type of counter. + Default is cds::atomicity::event_counter. 
+ You may use stronger type of counter like as cds::atomicity::item_counter, + or even an integral type, for example, \p int + */ + template + struct stat + { + typedef Counter counter_type ; ///< Counter type + + counter_type m_PushCount ; ///< Push call count + counter_type m_PopCount ; ///< Pop call count + counter_type m_PushRace ; ///< Count of push race conditions encountered + counter_type m_PopRace ; ///< Count of pop race conditions encountered + counter_type m_ActivePushCollision ; ///< Count of active push collision for elimination back-off + counter_type m_ActivePopCollision ; ///< Count of active pop collision for elimination back-off + counter_type m_PassivePushCollision ; ///< Count of passive push collision for elimination back-off + counter_type m_PassivePopCollision ; ///< Count of passive pop collision for elimination back-off + counter_type m_EliminationFailed ; ///< Count of unsuccessful elimination back-off + + //@cond + void onPush() { ++m_PushCount; } + void onPop() { ++m_PopCount; } + void onPushRace() { ++m_PushRace; } + void onPopRace() { ++m_PopRace; } + void onActiveCollision( operation_id opId ) + { + if ( opId == treiber_stack::op_push ) + ++m_ActivePushCollision; + else + ++m_ActivePopCollision; + } + void onPassiveCollision( operation_id opId ) + { + if ( opId == treiber_stack::op_push ) + ++m_PassivePushCollision; + else + ++m_PassivePopCollision; + } + void onEliminationFailed() + { + ++m_EliminationFailed; + } + //@endcond + }; + + /// Empty (no overhead) stack statistics. 
Support interface like treiber_stack::stat + struct empty_stat + { + //@cond + void onPush() {} + void onPop() {} + void onPushRace() {} + void onPopRace() {} + void onActiveCollision( operation_id ) {} + void onPassiveCollision( operation_id ) {} + void onEliminationFailed() {} + //@endcond + }; + + /// TreiberStack default type traits + struct traits + { + /// Back-off strategy + typedef cds::backoff::Default back_off; + + /// Hook, possible types are \p treiber_stack::base_hook, \p treiber_stack::member_hook, \p treiber_stack::traits_hook + typedef treiber_stack::base_hook<> hook; + + /// The functor used for dispose removed items. Default is \p opt::v::empty_disposer. This option is used only in \p TreiberStack::clear() function + typedef opt::v::empty_disposer disposer; + + /// Item counting feature; by default, disabled. Use \p cds::atomicity::item_counter to enable item counting + typedef cds::atomicity::empty_item_counter item_counter; + + /// C++ memory ordering model + /** + Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + */ + typedef opt::v::relaxed_ordering memory_model; + + /// Internal statistics (by default, disabled) + /** + Possible option value are: \p treiber_stack::stat, \p treiber_stack::empty_stat (the default), + user-provided class that supports \p %treiber_stack::stat interface. 
+ */ + typedef treiber_stack::empty_stat stat; + + /// Link checking, see \p cds::opt::link_checker + static constexpr const opt::link_check_type link_checker = opt::debug_check_link; + + /** @name Elimination back-off traits + The following traits is used only if elimination enabled + */ + ///@{ + + /// Enable elimination back-off; by default, it is disabled + static constexpr const bool enable_elimination = false; + + /// Back-off strategy to wait for elimination, default is \p cds::backoff::delay<> + typedef cds::backoff::delay<> elimination_backoff; + + /// Buffer type for elimination array + /** + Possible types are \p opt::v::initialized_static_buffer, \p opt::v::initialized_dynamic_buffer. + The buffer can be any size: \p Exp2 template parameter of those classes can be \p false. + The size should be selected empirically for your application and hardware, there are no common rules for that. + Default is %opt::v::initialized_static_buffer< any_type, 4 > . + */ + typedef opt::v::initialized_static_buffer< int, 4 > buffer; + + /// Random engine to generate a random position in elimination array + typedef opt::v::c_rand random_engine; + + /// Lock type used in elimination, default is cds::sync::spin + typedef cds::sync::spin lock_type; + + ///@} + }; + + /// Metafunction converting option list to \p treiber_stack::traits + /** + Supported \p Options are: + - \p opt::hook - hook used. Possible hooks are: \p treiber_stack::base_hook, \p treiber_stack::member_hook, \p treiber_stack::traits_hook. + If the option is not specified, \p %treiber_stack::base_hook<> is used. + - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used. + - \p opt::disposer - the functor used for dispose removed items. Default is \p opt::v::empty_disposer. This option is used only + in \p TreiberStack::clear function. + - \p opt::link_checker - the type of node's link fields checking. Default is \ref opt::debug_check_link. 
+ - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consisnent memory model). + - \p opt::item_counter - the type of item counting feature. Default is \p cds::atomicity::empty_item_counter, i.e. + no item counting. Use \p cds::atomicity::item_counter to enable item counting. + - \p opt::stat - the type to gather internal statistics. + Possible option value are: \p treiber_stack::stat, \p treiber_stack::empty_stat (the default), + user-provided class that supports \p treiber_stack::stat interface. + - \p opt::enable_elimination - enable elimination back-off for the stack. Default value is \p false. + + If elimination back-off is enabled, additional options can be specified: + - \p opt::buffer - a buffer type for elimination array, see \p opt::v::initialized_static_buffer, \p opt::v::initialized_dynamic_buffer. + The buffer can be any size: \p Exp2 template parameter of those classes can be \p false. + The size should be selected empirically for your application and hardware, there are no common rules for that. + Default is %opt::v::initialized_static_buffer< any_type, 4 > . + - \p opt::random_engine - a random engine to generate a random position in elimination array. + Default is \p opt::v::c_rand. 
+ - \p opt::elimination_backoff - back-off strategy to wait for elimination, default is \p cds::backoff::delay<> + - \p opt::lock_type - a lock type used in elimination back-off, default is \p cds::sync::spin + + Example: declare \p %TreiberStack with elimination enabled and internal statistics + \code + typedef cds::intrusive::TreiberStack< cds::gc::HP, Foo, + typename cds::intrusive::treiber_stack::make_traits< + cds::opt::enable_elimination< true >, + cds::opt::stat< cds::intrusive::treiber_stack::stat<> > + >::type + > myStack; + \endcode + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + , Options... + >::type type; +# endif + }; + + + //@cond + namespace details { + + template + class elimination_backoff; + + template + class elimination_backoff + { + typedef typename Traits::back_off back_off; + + struct wrapper + { + back_off m_bkoff; + + void reset() + { + m_bkoff.reset(); + } + + template + bool backoff( treiber_stack::operation< T >&, Stat& ) + { + m_bkoff(); + return false; + } + }; + + public: + elimination_backoff() + {} + + elimination_backoff( size_t ) + {} + + typedef wrapper type; + type init() + { + return wrapper(); + } + }; + + template + class elimination_backoff + { + typedef typename Traits::back_off back_off; + + /// Back-off for elimination (usually delay) + typedef typename Traits::elimination_backoff elimination_backoff_type; + /// Lock type used in elimination back-off + typedef typename Traits::lock_type elimination_lock_type; + /// Random engine used in elimination back-off + typedef typename Traits::random_engine elimination_random_engine; + + /// Per-thread elimination record + typedef cds::algo::elimination::record elimination_rec; + + /// Collision array record + struct collision_array_record { + elimination_rec * pRec; + 
elimination_lock_type lock; + }; + + /// Collision array used in elimination-backoff; each item is optimized for cache-line size + typedef typename Traits::buffer::template rebind< + typename cds::details::type_padding::type + >::other collision_array; + + /// Operation descriptor used in elimination back-off + typedef treiber_stack::operation< T > operation_desc; + + /// Elimination back-off data + struct elimination_data { + mutable elimination_random_engine randEngine; ///< random engine + collision_array collisions; ///< collision array + + elimination_data() + { + //TODO: check Traits::buffer must be static! + } + elimination_data( size_t nCollisionCapacity ) + : collisions( nCollisionCapacity ) + {} + }; + + elimination_data m_Elimination; + + enum operation_status { + op_free = 0, + op_waiting = 1, + op_collided = 2 + }; + + typedef std::unique_lock< elimination_lock_type > slot_scoped_lock; + + template + typename std::enable_if< Exp2, size_t >::type slot_index() const + { + return m_Elimination.randEngine() & (m_Elimination.collisions.capacity() - 1); + } + + template + typename std::enable_if< !Exp2, size_t >::type slot_index() const + { + return m_Elimination.randEngine() % m_Elimination.collisions.capacity(); + } + + public: + elimination_backoff() + { + m_Elimination.collisions.zeroize(); + } + + elimination_backoff( size_t nCollisionCapacity ) + : m_Elimination( nCollisionCapacity ) + { + m_Elimination.collisions.zeroize(); + } + + typedef elimination_backoff& type; + + type init() + { + return *this; + } + + void reset() + {} + + template + bool backoff( operation_desc& op, Stat& stat ) + { + elimination_backoff_type bkoff; + op.nStatus.store( op_waiting, atomics::memory_order_relaxed ); + + elimination_rec * myRec = cds::algo::elimination::init_record( op ); + + collision_array_record& slot = m_Elimination.collisions[ slot_index() ]; + { + slot.lock.lock(); + elimination_rec * himRec = slot.pRec; + if ( himRec ) { + operation_desc * himOp = 
static_cast( himRec->pOp ); + assert( himOp ); + if ( himOp->idOp != op.idOp ) { + + if ( op.idOp == treiber_stack::op_push ) + himOp->pVal = op.pVal; + else + op.pVal = himOp->pVal; + + slot.pRec = nullptr; + himOp->nStatus.store( op_collided, atomics::memory_order_release ); + slot.lock.unlock(); + + cds::algo::elimination::clear_record(); + stat.onActiveCollision( op.idOp ); + return true; + } + //himOp->nStatus.store( op_free, atomics::memory_order_release ); + } + slot.pRec = myRec; + slot.lock.unlock(); + } + + // Wait for colliding operation + bkoff( [&op]() noexcept -> bool { return op.nStatus.load( atomics::memory_order_acquire ) != op_waiting; } ); + + { + slot_scoped_lock l( slot.lock ); + if ( slot.pRec == myRec ) + slot.pRec = nullptr; + } + + bool bCollided = op.nStatus.load( atomics::memory_order_relaxed ) == op_collided; + + if ( !bCollided ) + stat.onEliminationFailed(); + else + stat.onPassiveCollision( op.idOp ); + + cds::algo::elimination::clear_record(); + return bCollided; + } + }; + + } // namespace details + //@endcond + } // namespace treiber_stack + + /// Treiber intrusive stack + /** @ingroup cds_intrusive_stack + Intrusive implementation of well-known Treiber's stack algorithm: + - R. K. Treiber. Systems programming: Coping with parallelism. Technical Report RJ 5118, IBM Almaden Research Center, April 1986. + + \ref cds_elimination_description "Elimination back-off technique" can be used optionally. + The idea of elimination algorithm is taken from: + - [2004] Danny Hendler, Nir Shavit, Lena Yerushalmi "A Scalable Lock-free Stack Algorithm" + + The elimination algorithm uses a single elimination array as a back-off schema + on a shared lock-free stack. If the threads fail on the stack, they attempt to eliminate + on the array, and if they fail in eliminating, they attempt to access the stack again and so on. + + @note Hendler's et al paper describes a lock-free implementation of elimination back-off which is quite complex. 
+ The main difficulty is the managing life-time of elimination record. + This implementation uses simplified lock-based (spin-based) approach which allows + the elimination record allocation on thread's stack. + This approach demonstrates sufficient performance under high load. + + Template arguments: + - \p GC - garbage collector type: \p gc::HP, \p gc::DHP. + Garbage collecting schema must be the same as \p treiber_stack::node GC. + - \p T - a type the stack contains. A value of type \p T must be derived + from \p treiber_stack::node for \p treiber_stack::base_hook, + or it should have a member of type \p %treiber_stack::node for \p treiber_stack::member_hook, + or it should be convertible to \p %treiber_stack::node for \p treiber_stack::traits_hook. + - \p Traits - stack traits, default is \p treiber_stack::traits. You can use \p treiber_stack::make_traits + metafunction to make your traits or just derive your traits from \p %treiber_stack::traits: + \code + struct myTraits: public cds::intrusive::treiber_stack::traits { + typedef cds::intrusive::treiber_stack::stat<> stat; + }; + typedef cds::intrusive::TreiberStack< cds::gc::HP, Foo, myTraits > myStack; + + // Equivalent make_traits example: + typedef cds::intrusive::TreiberStack< cds::gc::HP, Foo, + typename cds::intrusive::treiber_stack::make_traits< + cds::opt::stat< cds::intrusive::treiber_stack::stat<> > + >::type + > myStack; + \endcode + + @note Be careful when you want destroy an item popped, see \ref cds_intrusive_item_destroying "Destroying items of intrusive containers". + + @anchor cds_intrusive_TreiberStack_examples + \par Examples + + Example of how to use \p treiber_stack::base_hook. + Your class that objects will be pushed on \p %TreiberStack should be based on \p treiber_stack::node class + \code + #include + #include + + namespace ci = cds::intrusive; + typedef cds::gc::HP gc; + + struct myData: public ci::treiber_stack::node< gc > + { + // ... 
+ }; + + // Stack type + typedef ci::TreiberStack< gc, + myData, + typename cds::intrusive::treiber_stack::make_traits< + ci::opt::hook< ci::treiber_stack::base_hook< gc > > + >::type + > stack_t; + + // Stack with elimination back-off enabled + typedef ci::TreiberStack< gc, + myData, + typename ci::treiber_stack::make_traits< + ci::opt::hook< ci::treiber_stack::base_hook< gc > >, + cds::opt::enable_elimination< true > + >::type + > elimination_stack_t; + \endcode + + Example of how to use \p treiber_stack::base_hook with different tags. + \code + #include + #include + + namespace ci = cds::intrusive; + typedef cds::gc::HP gc; + + // It is not necessary to declare complete type for tags + struct tag1; + struct tag2; + + struct myData + : public ci::treiber_stack::node< gc, tag1 > + , public ci::treiber_stack::node< gc, tag2 > + { + // ... + }; + + typedef ci::TreiberStack< gc, + myData, + typename ci::treiber_stack::make_traits< + ci::opt::hook< ci::treiber_stack::base_hook< gc, tag1 > > + >::type + > stack1_t; + + typedef ci::TreiberStack< gc, + myData, + typename ci::treiber_stack::make_traits< + ci::opt::hook< ci::treiber_stack::base_hook< gc, tag2 > > + >::type + > stack2_t; + + // You may add myData objects into stack1_t and stack2_t independently + void foo() { + stack1_t s1; + stack2_t s2; + + myData i1, i2; + s1.push( i1 ); + s2.push( i2 ); + s2.push( i1 ) ; // i1 is now contained in s1 and s2. + + myData * p; + + p = s1.pop() ; // pop i1 from s1 + p = s1.pop() ; // p == nullptr, s1 is empty + p = s2.pop() ; // pop i1 from s2 + p = s2.pop() ; // pop i2 from s2 + p = s2.pop() ; // p == nullptr, s2 is empty + } + \endcode + + Example of how to use \p treiber_stack::member_hook. + Your class should have a member of type \p treiber_stack::node + \code + #include // offsetof macro + #include + #include + + namespace ci = cds::intrusive; + typedef cds::gc::HP gc; + + struct myData + { + // ... + ci::treiber_stack::node< gc > member_hook_; + // ... 
+ }; + + typedef ci::TreiberStack< gc, + myData, + typename ci::treiber_stack::make_traits< + ci::opt::hook< ci::treiber_stack::member_hook< offsetof(myData, member_hook_), gc >> + >::type + > stack_t; + \endcode + */ + template < + typename GC, + typename T, + typename Traits = treiber_stack::traits + > + class TreiberStack + { + public: + /// Rebind template arguments + template + struct rebind { + typedef TreiberStack< GC2, T2, Traits2 > other ; ///< Rebinding result + }; + + public: + typedef GC gc; ///< Garbage collector + typedef T value_type; ///< type of value stored in the stack + typedef Traits traits; ///< Stack traits + + typedef typename traits::hook hook; ///< hook type + typedef typename hook::node_type node_type; ///< node type + typedef typename traits::disposer disposer; ///< disposer used + typedef typename get_node_traits< value_type, node_type, hook>::type node_traits ; ///< node traits + typedef typename single_link::get_link_checker< node_type, traits::link_checker >::type link_checker ; ///< link checker + typedef typename traits::memory_model memory_model; ///< Memory ordering. 
See \p cds::opt::memory_model option + typedef typename traits::item_counter item_counter; ///< Item counter class + typedef typename traits::stat stat; ///< Internal statistics + typedef typename traits::back_off back_off; ///< back-off strategy + + /// How many Hazard pointers is required for Treiber's stack implementation + static constexpr size_t const c_nHazardPtrCount = 1; + + public: // related to elimination back-off + + /// Elimination back-off is enabled or not + static constexpr const bool enable_elimination = traits::enable_elimination; + /// back-off strategy used to wait for elimination + typedef typename traits::elimination_backoff elimination_backoff_type; + /// Lock type used in elimination back-off + typedef typename traits::lock_type elimination_lock_type; + /// Random engine used in elimination back-off + typedef typename traits::random_engine elimination_random_engine; + + protected: + typename node_type::atomic_node_ptr m_Top; ///< Top of the stack + item_counter m_ItemCounter; ///< Item counter + stat m_stat; ///< Internal statistics + + //@cond + typedef treiber_stack::details::elimination_backoff elimination_backoff; + elimination_backoff m_Backoff; + + typedef treiber_stack::operation< value_type > operation_desc; + + // GC and node_type::gc must be the same + static_assert( std::is_same::value, "GC and node_type::gc must be the same"); + + static_assert( !enable_elimination || std::is_same::value, + "Random engine result type must be unsigned int"); + //@endcond + + protected: + //@cond + void clear_links( node_type * pNode ) noexcept + { + pNode->m_pNext.store( nullptr, memory_model::memory_order_relaxed ); + } + + template + struct elimination_backoff_impl; + //@endcond + + public: + /// Constructs empty stack + TreiberStack() + : m_Top( nullptr ) + {} + + /// Constructs empty stack and initializes elimination back-off data + /** + This form should be used if you use elimination back-off with dynamically allocated collision array, i.e + 
\p Traits contains typedef cds::opt::v::initialized_dynamic_buffer buffer. + \p nCollisionCapacity parameter specifies the capacity of collision array. + */ + TreiberStack( size_t nCollisionCapacity ) + : m_Top( nullptr ) + , m_Backoff( nCollisionCapacity ) + {} + + /// \p %TreiberStack is not copy-constructible + TreiberStack( TreiberStack const& ) = delete; + + /// Destructor calls \ref cds_intrusive_TreiberStack_clear "clear" member function + ~TreiberStack() + { + clear(); + } + + /// Push the item \p val on the stack + /** + No copying is made since it is intrusive stack. + */ + bool push( value_type& val ) + { + node_type * pNew = node_traits::to_node_ptr( val ); + link_checker::is_empty( pNew ); + + typename elimination_backoff::type bkoff = m_Backoff.init(); + + operation_desc op; + if ( enable_elimination ) { + op.idOp = treiber_stack::op_push; + op.pVal = &val; + } + + node_type * t = m_Top.load( memory_model::memory_order_relaxed ); + while ( true ) { + pNew->m_pNext.store( t, memory_model::memory_order_relaxed ); + if ( m_Top.compare_exchange_weak( t, pNew, memory_model::memory_order_release, atomics::memory_order_acquire )) { + ++m_ItemCounter; + m_stat.onPush(); + return true; + } + m_stat.onPushRace(); + + if ( bkoff.backoff( op, m_stat )) + return true; + } + } + + /// Pop an item from the stack + /** + If stack is empty, returns \p nullptr. + The disposer is not called for popped item. + See \ref cds_intrusive_item_destroying "Destroying items of intrusive containers". 
+ */ + value_type * pop() + { + typename elimination_backoff::type bkoff = m_Backoff.init(); + typename gc::Guard guard; + + operation_desc op; + if ( enable_elimination ) { + op.idOp = treiber_stack::op_pop; + } + + while ( true ) { + node_type * t = guard.protect( m_Top, + []( node_type * p ) -> value_type * { + return node_traits::to_value_ptr( p ); + }); + if ( t == nullptr ) + return nullptr; // stack is empty + + node_type * pNext = t->m_pNext.load(memory_model::memory_order_relaxed); + if ( m_Top.compare_exchange_weak( t, pNext, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { + clear_links( t ); + --m_ItemCounter; + m_stat.onPop(); + return node_traits::to_value_ptr( *t ); + } + + m_stat.onPopRace(); + if ( bkoff.backoff( op, m_stat )) { + // may return nullptr if stack is empty + return op.pVal; + } + } + } + + /// Check if stack is empty + bool empty() const + { + return m_Top.load( memory_model::memory_order_relaxed ) == nullptr; + } + + /// Clear the stack + /** @anchor cds_intrusive_TreiberStack_clear + For each removed item the disposer is called. + + @note It is possible that after clear() the empty() returns \p false + if some other thread pushes an item into the stack during \p clear works + */ + void clear() + { + back_off bkoff; + node_type * pTop; + while ( true ) { + pTop = m_Top.load( memory_model::memory_order_relaxed ); + if ( pTop == nullptr ) + return; + if ( m_Top.compare_exchange_weak( pTop, nullptr, memory_model::memory_order_acquire, atomics::memory_order_relaxed )) { + m_ItemCounter.reset(); + break; + } + bkoff(); + } + + while( pTop ) { + node_type * p = pTop; + pTop = p->m_pNext.load(memory_model::memory_order_relaxed); + clear_links( p ); + gc::template retire( node_traits::to_value_ptr( *p )); + } + } + + /// Returns stack's item count + /** + The value returned depends on opt::item_counter option. For atomicity::empty_item_counter, + this function always returns 0. 
+ + @warning Even if you use real item counter and it returns 0, this fact is not mean that the stack + is empty. To check emptyness use \ref empty() method. + */ + size_t size() const + { + return m_ItemCounter.value(); + } + + /// Returns reference to internal statistics + stat const& statistics() const + { + return m_stat; + } + }; + +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_TREIBER_STACK_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/vyukov_mpmc_cycle_queue.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/vyukov_mpmc_cycle_queue.h new file mode 100644 index 0000000..8c98e36 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/intrusive/vyukov_mpmc_cycle_queue.h @@ -0,0 +1,258 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_INTRUSIVE_VYUKOV_MPMC_CYCLE_QUEUE_H +#define CDSLIB_INTRUSIVE_VYUKOV_MPMC_CYCLE_QUEUE_H + +#include +#include + +namespace cds { namespace intrusive { + + /// VyukovMPMCCycleQueue related definitions + /** @ingroup cds_intrusive_helper + */ + namespace vyukov_queue { + + /// VyukovMPMCCycleQueue traits + struct traits : public cds::container::vyukov_queue::traits + { + /// The functor used for dispose removed items. Default is \p opt::v::empty_disposer. This option is used only in \p clear() + typedef opt::v::empty_disposer disposer; + }; + + /// Metafunction converting option list to \p vyukov_queue::traits + /** + Supported \p Options are: + - \p opt::buffer - an uninitialized buffer type for internal cyclic array. Possible types are: + \p opt::v::uninitialized_dynamic_buffer (the default), \p opt::v::uninitialized_static_buffer. The type of + element in the buffer is not important: it will be changed via \p rebind metafunction. + - \p opt::disposer - the functor used for dispose removed items. Default is \p opt::v::empty_disposer. + This option is used only in \p clear() member function. + - \p opt::item_counter - the type of item counting feature. Default is \p cds::atomicity::empty_item_counter (item counting disabled) + To enable item counting use \p cds::atomicity::item_counter + - \p opt::back_off - back-off strategy used. If the option is not specified, the \p cds::backoff::Default is used. 
+ - \p opt::padding - padding for internal critical atomic data. Default is \p opt::cache_line_padding + - \p opt::memory_model - C++ memory ordering model. Can be \p opt::v::relaxed_ordering (relaxed memory model, the default) + or \p opt::v::sequential_consistent (sequentially consistent memory model). + + Example: declare \p %VyukovMPMCCycleQueue with item counting and static internal buffer of size 1024: + \code + typedef cds::intrusive::VyukovMPMCCycleQueue< Foo, + typename cds::intrusive::vyukov_queue::make_traits< + cds::opt::buffer< cds::opt::v::uninitialized_static_buffer< void *, 1024 >, + cds::opt::item_counter< cds::atomicity::item_counter > + >::type + > myQueue; + \endcode + */ + template + struct make_traits { +# ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type; ///< Metafunction result +# else + typedef typename cds::opt::make_options< + typename cds::opt::find_type_traits< traits, Options... >::type + , Options... + >::type type; +# endif + }; + + } // namespace vyukov_queue + + /// Vyukov's MPMC bounded queue + /** @ingroup cds_intrusive_queue + This algorithm is developed by Dmitry Vyukov (see http://www.1024cores.net) + + Implementation of intrusive version is based on container::VyukovMPMCCycleQueue. + + Template parameters: + - \p T - type stored in queue. + - \p Traits - queue traits, default is \p vyukov_queue::traits. 
You can use \p vyukov_queue::make_traits + metafunction to make your traits or just derive your traits from \p %vyukov_queue::traits: + \code + struct myTraits: public cds::intrusive::vyukov_queue::traits { + typedef cds::atomicity::item_counter item_counter; + }; + typedef cds::intrusive::VyukovMPMCCycleQueue< Foo, myTraits > myQueue; + + // Equivalent make_traits example: + typedef cds::intrusive::VyukovMPMCCycleQueue< cds::gc::HP, Foo, + typename cds::intrusive::vyukov_queue::make_traits< + cds::opt::item_counter< cds::atomicity::item_counter > + >::type + > myQueue; + \endcode + + Instead of saving copy of enqueued data, the intrusive implementation stores pointer to passed data. + + \par Examples: + \code + #include + + struct Foo { + ... + }; + + // Queue of Foo pointers, capacity is 1024, statically allocated buffer: + typedef cds::intrusive::VyukovMPMCCycleQueue< Foo, + typename cds::intrusive::vyukov_queue::make_traits< + cds::opt::buffer< cds::opt::v::uninitialized_static_buffer< Foo, 1024 > > + >::type + > static_queue; + static_queue stQueue; + + // Queue of Foo pointers, capacity is 1024, dynamically allocated buffer: + struct queue_traits: public cds::intrusive::vyukov_queue::traits + { + typedef cds::opt::v::uninitialized_dynamic_buffer< Foo > buffer; + }; + typedef cds::intrusive::VyukovMPMCCycleQueue< Foo, queue_traits > dynamic_queue; + dynamic_queue dynQueue( 1024 ); + \endcode + */ + template + class VyukovMPMCCycleQueue + : private container::VyukovMPMCCycleQueue< T*, Traits > + { + //@cond + typedef container::VyukovMPMCCycleQueue< T*, Traits > base_class; + //@endcond + public: + typedef T value_type; ///< type of data to be stored in the queue + typedef Traits traits; ///< Queue traits + typedef typename traits::item_counter item_counter; ///< Item counter type + typedef typename traits::memory_model memory_model; ///< Memory ordering. 
See cds::opt::memory_model option + typedef typename traits::disposer disposer; ///< Item disposer + typedef typename traits::back_off back_off; ///< back-off strategy + + public: + /// Rebind template arguments + template + struct rebind { + typedef VyukovMPMCCycleQueue< T2, Traits2> other ; ///< Rebinding result + }; + + public: + /// Constructs the queue of capacity \p nCapacity + /** + For \p cds::opt::v::uninitialized_static_buffer the \p nCapacity parameter is ignored. + */ + VyukovMPMCCycleQueue( size_t nCapacity = 0 ) + : base_class( nCapacity ) + {} + + /// Enqueues \p data to queue + /** + @note The intrusive queue stores pointer to \p data passed, not the copy of \p data. + */ + bool enqueue( value_type& data ) + { + return base_class::enqueue( &data ); + } + + /// Dequeues an item from queue + /** + \p Traits::disposer is not called. You may manually delete the returned pointer. + + If queue is empty, returns \p nullptr. + */ + value_type * dequeue() + { + value_type * p = nullptr; + return base_class::dequeue( p ) ? p : nullptr; + } + + /// Synonym for \p enqueue() + bool push( value_type& data ) + { + return enqueue( data ); + } + + /// Synonym for \p dequeue() + value_type * pop() + { + return dequeue(); + } + + /// Clears queue in lock-free manner. + /** + \p f parameter is a functor to dispose removed items. + The interface of \p Disposer is: + \code + struct myDisposer { + void operator ()( T * val ); + }; + \endcode + The disposer will be called immediately for each item. + */ + template + void clear( Disposer f ) + { + value_type * pv; + while ( (pv = pop()) != nullptr ) { + f( pv ); + } + } + + /// Clears the queue + /** + This function uses the disposer that is specified in \p Traits. 
+ */ + void clear() + { + clear( disposer()); + } + + /// Checks if the queue is empty + bool empty() const + { + return base_class::empty(); + } + + /// Returns queue's item count + /** + The value returned depends on \p vyukov_queue::traits::item_counter option. + For \p atomicity::empty_item_counter, this function always returns 0. + */ + size_t size() const + { + return base_class::size(); + } + + /// Returns capacity of the queue + size_t capacity() const + { + return base_class::capacity(); + } + }; +}} // namespace cds::intrusive + +#endif // #ifndef CDSLIB_INTRUSIVE_VYUKOV_MPMC_CYCLE_QUEUE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/lock/array.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/lock/array.h new file mode 100644 index 0000000..6d3a896 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/lock/array.h @@ -0,0 +1,58 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_LOCK_ARRAY_H +#define CDSLIB_LOCK_ARRAY_H + +#if CDS_COMPILER == CDS_COMPILER_MSVC +# pragma message("cds/lock/array.h is deprecated, use cds/sync/lock_array.h instead") +#else +# warning "cds/lock/array.h is deprecated, use cds/sync/lock_array.h instead" +#endif + +#include + +//@cond +namespace cds { namespace lock { + + using cds::sync::trivial_select_policy; + using cds::sync::mod_select_policy; + using cds::sync::pow2_select_policy; + + template + using array = cds::sync::lock_array< Lock, SelectPolicy, Alloc >; + +}} // namespace cds::lock +//@endcond + +#endif // #ifndef CDSLIB_LOCK_ARRAY_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/lock/spinlock.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/lock/spinlock.h new file mode 100644 index 0000000..2289785 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/lock/spinlock.h @@ -0,0 +1,84 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_LOCK_SPINLOCK_H +#define CDSLIB_LOCK_SPINLOCK_H + +#if CDS_COMPILER == CDS_COMPILER_MSVC +# pragma message("cds/lock/spinlock.h is deprecated, use cds/sync/spinlock.h instead") +#else +# warning "cds/lock/spinlock.h is deprecated, use cds/sync/spinlock.h instead" +#endif + +#include + +//@cond +namespace cds { + /// Synchronization primitives (deprecated namespace, use \p cds::sync namespace instead) + namespace lock { + + /// Alias for \p cds::sync::spin_lock for backward compatibility + template + using Spinlock = cds::sync::spin_lock< Backoff >; + + /// Spin-lock implementation default for the current platform + typedef cds::sync::spin_lock< backoff::LockDefault> Spin; + + /// Alias for \p cds::sync::reentrant_spin_lock for backward compatibility + template + using ReentrantSpinT = cds::sync::reentrant_spin_lock< Integral, Backoff >; + + /// Recursive 32bit spin-lock + typedef cds::sync::reentrant_spin32 ReentrantSpin32; + + /// Recursive 64bit spin-lock + typedef 
cds::sync::reentrant_spin64 ReentrantSpin64; + + /// Default recursive spin-lock type + typedef ReentrantSpin32 ReentrantSpin; + + } // namespace lock + + /// Standard (best for the current platform) spin-lock implementation + typedef lock::Spin SpinLock; + + /// Standard (best for the current platform) recursive spin-lock implementation + typedef lock::ReentrantSpin RecursiveSpinLock; + + /// 32bit recursive spin-lock shortcut + typedef lock::ReentrantSpin32 RecursiveSpinLock32; + + /// 64bit recursive spin-lock shortcut + typedef lock::ReentrantSpin64 RecursiveSpinLock64; + +} // namespace cds +//@endcond + +#endif // #ifndef CDSLIB_LOCK_SPINLOCK_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/memory/pool_allocator.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/memory/pool_allocator.h new file mode 100644 index 0000000..ccc4c12 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/memory/pool_allocator.h @@ -0,0 +1,150 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_MEMORY_POOL_ALLOCATOR_H +#define CDSLIB_MEMORY_POOL_ALLOCATOR_H + +#include +#include + +namespace cds { namespace memory { + + ///@defgroup cds_memory_pool Simple memory pool + + /// Pool allocator adapter + /** + This class is an adapter for an object pool. It gives \p std::allocator interface + for the @ref cds_memory_pool "pool". + + Template arguments: + - \p T - value type + - \p Accessor - a functor to access to the pool object. The pool has the following interface: + \code + template + class pool { + typedef T value_type ; // Object type maintained by pool + T * allocate( size_t n ) ; // Allocate an array of object of type T + void deallocate( T * p, size_t n ) ; // Deallocate the array p of size n + }; + \endcode + + Usage + + Suppose, we have a pool with interface above. Usually, the pool is a static object: + \code + static pool thePool; + \endcode + + The \p %pool_allocator gives \p std::allocator interface for the pool. + It is needed to declare an accessor functor to access to \p thePool: + \code + struct pool_accessor { + typedef typename pool::value_type value_type; + + pool& operator()() const + { + return thePool; + } + }; + \endcode + + Now, cds::memory::pool_allocator< T, pool_accessor > can be used instead of \p std::allocator. 
+ */ + template + class pool_allocator + { + //@cond + public: + typedef Accessor accessor_type; + + typedef size_t size_type; + typedef ptrdiff_t difference_type; + typedef T* pointer; + typedef const T* const_pointer; + typedef T& reference; + typedef const T& const_reference; + typedef T value_type; + + template struct rebind { + typedef pool_allocator other; + }; + + public: + pool_allocator() noexcept + {} + + pool_allocator(const pool_allocator&) noexcept + {} + template pool_allocator(const pool_allocator&) noexcept + {} + ~pool_allocator() + {} + + pointer address(reference x) const noexcept + { + return &x; + } + const_pointer address(const_reference x) const noexcept + { + return &x; + } + pointer allocate( size_type n, void const * /*hint*/ = 0) + { + static_assert( sizeof(value_type) <= sizeof(typename accessor_type::value_type), "Incompatible type" ); + + return reinterpret_cast( accessor_type()().allocate( n )); + } + void deallocate(pointer p, size_type n) noexcept + { + accessor_type()().deallocate( reinterpret_cast( p ), n ); + } + size_type max_size() const noexcept + { + return size_t(-1) / sizeof(value_type); + } + + template + void construct(U* p, Args&&... 
args) + { + new((void *)p) U( std::forward(args)...); + } + + template + void destroy(U* p) + { + p->~U(); + } + //@endcond + }; + +}} // namespace cds::memory + + +#endif // #ifndef CDSLIB_MEMORY_POOL_ALLOCATOR_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/memory/vyukov_queue_pool.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/memory/vyukov_queue_pool.h new file mode 100644 index 0000000..432a7f0 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/memory/vyukov_queue_pool.h @@ -0,0 +1,549 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_MEMORY_VYUKOV_QUEUE_ALLOCATOR_H +#define CDSLIB_MEMORY_VYUKOV_QUEUE_ALLOCATOR_H + +#include +#include +#include + +namespace cds { namespace memory { + + /// \p vyukov_queue_pool traits + /** @ingroup cds_memory_pool + */ + struct vyukov_queue_pool_traits : public cds::intrusive::vyukov_queue::traits + { + /// Allocator type + typedef CDS_DEFAULT_ALLOCATOR allocator; + }; + + /// Free-list based on bounded lock-free queue \p cds::intrusive::VyukovMPMCCycleQueue + /** @ingroup cds_memory_pool + Template parameters: + - \p T - the type of object maintaining by free-list. \p T must be default constructible. + - \p Traits - traits for \p cds::intrusive::VyukovMPMCCycleQueue class plus + \p cds::opt::allocator option, defaul is \p vyukov_queue_pool_traits + + \b Internals + + This free-list is very simple. + At construction time, the free-list allocates the array of N items + and stores them into queue, where N is the queue capacity. + When allocating the free-list tries to pop an object from + internal queue i.e. from preallocated pool. If success the popped object is returned. + Otherwise a new one is allocated. When deallocating, the free-list checks whether + the object is from preallocated pool. If so, the object is pushed into queue, otherwise + it is deallocated by using the allocator provided. + The pool can manage more than \p N items but only \p N items is contained in the free-list. 
+ + \b Usage + + \p %vyukov_queue_pool should be used together with \ref pool_allocator. + You should declare an static object of type \p %vyukov_queue_pool, provide + an accessor to that object and use \p pool_allocator as an allocator: + \code + #include + #include + + // Pool of Foo object of size 1024. + struct pool_traits: public cds::memory::vyukov_queue_pool_traits + { + typedef cds::opt::v::uninitialized_static_buffer< Foo, 1024 > buffer; + }; + typedef cds::memory::vyukov_queue_pool< Foo, pool_traits > pool_type; + static pool_type thePool; + + struct pool_accessor { + typedef typename pool_type::value_type value_type; + + pool_type& operator()() const + { + return thePool; + } + }; + + // Declare pool allocator + typedef cds::memory::pool_allocator< Foo, pool_accessor > pool_allocator; + + // Use pool_allocator + // Allocate an object + Foo * p = pool_allocator().allocate( 1 ); + + // construct object + new(p) Foo; + + //... + + // Destruct object + p->~Foo(); + + // Deallocate object + pool_allocator().deallocate( p , 1 ); + \endcode + */ + template + class vyukov_queue_pool + { + public: + typedef cds::intrusive::VyukovMPMCCycleQueue< T, Traits > queue_type ; ///< Queue type + + public: + typedef T value_type ; ///< Value type + typedef Traits traits; ///< Traits type + typedef typename traits::allocator::template rebind::other allocator_type ; ///< allocator type + typedef typename traits::back_off back_off; ///< back-off strategy + + protected: + //@cond + typedef cds::details::Allocator< value_type, allocator_type > cxx_allocator; + typedef typename cxx_allocator::allocator_type std_allocator; + + queue_type m_Queue; + value_type * m_pFirst; + value_type * m_pLast; + //@endcond + + protected: + //@cond + void preallocate_pool() + { + m_pFirst = std_allocator().allocate( m_Queue.capacity()); + m_pLast = m_pFirst + m_Queue.capacity(); + + for ( value_type * p = m_pFirst; p < m_pLast; ++p ) { + CDS_VERIFY( m_Queue.push( *p )) ; // must be true + } + } + 
+ bool from_pool( value_type * p ) const + { + return m_pFirst <= p && p < m_pLast; + } + //@endcond + + public: + /// Preallocates the pool of object + /** + \p nCapacity argument is the queue capacity. It should be passed + if the queue is based on dynamically-allocated buffer. + See \p cds::intrusive::VyukovMPMCCycleQueue for explanation. + */ + vyukov_queue_pool( size_t nCapacity = 0 ) + : m_Queue( nCapacity ) + { + preallocate_pool(); + } + + /// Deallocates the pool. + ~vyukov_queue_pool() + { + m_Queue.clear(); + std_allocator().deallocate( m_pFirst, m_Queue.capacity()); + } + + /// Allocates an object from pool + /** + The pool supports allocation only single object (\p n = 1). + If \p n > 1 the behavior is undefined. + + If the queue is not empty, the popped value is returned. + Otherwise, a new value allocated. + */ + value_type * allocate( size_t n ) + { + assert( n == 1 ); + CDS_UNUSED(n); + + value_type * p = m_Queue.pop(); + if ( p ) { + assert( from_pool(p)); + return new( p ) value_type; + } + // The pool is empty - allocate new from the heap + return cxx_allocator().New(); + } + + /// Deallocated the object \p p + /** + The pool supports allocation only single object (\p n = 1). + If \p n > 1 the behavior is undefined. + + If \p p is from preallocated pool, it pushes into the queue. + Otherwise, \p p is deallocated by allocator provided. + */ + void deallocate( value_type * p, size_t n ) + { + assert( n == 1 ); + CDS_UNUSED(n); + + if ( p ) { + if ( from_pool(p)) { + p->~value_type(); + // The queue can notify about false fullness state + // so we push in loop + back_off bkoff; + while ( !m_Queue.push( *p )) + bkoff(); + } + else + cxx_allocator().Delete( p ); + } + } + }; + + + /// Lazy free-list based on bounded lock-free queue \p cds::intrusive::VyukovMPMCCycleQueue + /** @ingroup cds_memory_pool + Template parameters: + - \p T - the type of object maintaining by free-list. 
\p T must be default constructible + - \p Traits - traits for \p cds::intrusive::VyukovMPMCCycleQueue class plus + \p cds::opt::allocator option, default is \p vyukov_queue_pool_traits + + \b Internals + + This free-list is very simple. + At construction time the pool is empty. + When allocating the free-list tries to pop an object from + internal queue. If success the popped object is returned. + Otherwise a new one is allocated. + When deallocating, the free-list tries to push the object into the pool. + If internal queue is full, the object is deallocated by using the allocator provided. + The pool can manage more than \p N items but only \p N items is placed in the free-list. + + \b Usage + + \p %lazy_vyukov_queue_pool should be used together with \ref pool_allocator. + You should declare an static object of type \p %lazy_vyukov_queue_pool, provide + an accessor functor to this object and use \p pool_allocator as an allocator: + \code + #include + #include + + // Pool of Foo object of size 1024. + typedef cds::memory::lazy_vyukov_queue_pool< Foo > pool_type; + static pool_type thePool( 1024 ); + + struct pool_accessor { + typedef typename pool_type::value_type value_type; + + pool_type& operator()() const + { + return thePool; + } + }; + + // Declare pool allocator + typedef cds::memory::pool_allocator< Foo, pool_accessor > pool_allocator; + + // Use pool_allocator + // Allocate an object + Foo * p = pool_allocator().allocate( 1 ); + + // construct object + new(p) Foo; + + //... 
+ + // Destruct object + p->~Foo(); + + // Deallocate object + pool_allocator().deallocate( p , 1 ); + \endcode + + */ + template + class lazy_vyukov_queue_pool + { + public: + typedef cds::intrusive::VyukovMPMCCycleQueue< T, Traits > queue_type ; ///< Queue type + + public: + typedef T value_type ; ///< Value type + typedef Traits traits; ///< Pool traits + typedef typename traits::allocator::template rebind::other allocator_type ; ///< allocator type + + protected: + //@cond + typedef cds::details::Allocator< value_type, allocator_type > cxx_allocator; + typedef typename cxx_allocator::allocator_type std_allocator; + + queue_type m_Queue; + //@endcond + + public: + /// Constructs empty pool + lazy_vyukov_queue_pool( size_t nCapacity = 0 ) + : m_Queue( nCapacity ) + {} + + /// Deallocates all objects from the pool + ~lazy_vyukov_queue_pool() + { + std_allocator a; + while ( !m_Queue.empty()) + a.deallocate( m_Queue.pop(), 1 ); + } + + /// Allocates an object from pool + /** + The pool supports allocation only single object (\p n = 1). + If \p n > 1 the behavior is undefined. + + If the queue is not empty, the popped value is returned. + Otherwise, a new value allocated. + */ + value_type * allocate( size_t n ) + { + assert( n == 1 ); + CDS_UNUSED(n); + + value_type * p = m_Queue.pop(); + if ( p ) + return new( p ) value_type; + + return cxx_allocator().New(); + } + + /// Deallocates the object \p p + /** + The pool supports allocation only single object (\p n = 1). + If \p n > 1 the behaviour is undefined. + + If the queue is not full, \p p is pushed into the queue. + Otherwise, \p p is deallocated by allocator provided. 
+ */ + void deallocate( value_type * p, size_t n ) + { + assert( n == 1 ); + CDS_UNUSED(n); + + if ( p ) { + p->~value_type(); + // Here we ignore false fullness state of the queue + if ( !m_Queue.push( *p )) + std_allocator().deallocate( p, 1 ); + } + } + + }; + + /// Bounded free-list based on bounded lock-free queue \p cds::intrusive::VyukovMPMCCycleQueue + /** @ingroup cds_memory_pool + Template parameters: + - \p T - the type of object maintaining by free-list. \p T must be default-constructible + - \p Traits - traits for \p cds::intrusive::VyukovMPMCCycleQueue class plus + \p cds::opt::allocator option, defaul is \p vyukov_queue_pool_traits + + \b Internals + + At construction time, the free-list allocates the array of N items + and stores them into the queue, where N is the queue capacity. + When allocating the free-list tries to pop an object from + internal queue i.e. from preallocated pool. If success the popped object is returned. + Otherwise a \p std::bad_alloc exception is raised. + So, the pool can contain up to \p N items. + When deallocating, the object is pushed into the queue. + In debug mode \p deallocate() member function asserts + that the pointer is from preallocated pool. + + \b Usage + + \p %bounded_vyukov_queue_pool should be used together with \ref pool_allocator. + You should declare an static object of type \p %bounded_vyukov_queue_pool, provide + an accessor functor to this object and use \p pool_allocator as an allocator: + \code + #include + #include + + // Pool of Foo object of size 1024. 
+ struct pool_traits: public cds::memory::vyukov_queue_pool_traits + { + typedef cds::opt::v::uninitialized_static_buffer< Foo, 1024 > buffer; + }; + typedef cds::memory::bounded_vyukov_queue_pool< Foo, pool_traits > pool_type; + static pool_type thePool; + + struct pool_accessor { + typedef typename pool_type::value_type value_type; + + pool_type& operator()() const + { + return thePool; + } + }; + + // Declare pool allocator + typedef cds::memory::pool_allocator< Foo, pool_accessor > pool_allocator; + + // Use pool_allocator + // Allocate an object + Foo * p = pool_allocator().allocate( 1 ); + + // construct object + new(p) Foo; + + //... + + // Destruct object + p->~Foo(); + + // Deallocate object + pool_allocator().deallocate( p , 1 ); + \endcode + */ + template + class bounded_vyukov_queue_pool + { + //@cond + struct internal_traits : public Traits { + typedef cds::atomicity::item_counter item_counter; + }; + //@endcond + public: + typedef cds::intrusive::VyukovMPMCCycleQueue< T, internal_traits > queue_type ; ///< Queue type + + public: + typedef T value_type; ///< Value type + typedef Traits traits; ///< Pool traits + typedef typename traits::allocator::template rebind::other allocator_type ; ///< allocator type + typedef typename traits::back_off back_off; ///< back-off strategy + + protected: + //@cond + typedef cds::details::Allocator< value_type, allocator_type > cxx_allocator; + typedef typename cxx_allocator::allocator_type std_allocator; + + queue_type m_Queue; + value_type * m_pFirst; + value_type * m_pLast; + //@endcond + + protected: + //@cond + void preallocate_pool() + { + size_t const nCount = m_Queue.capacity(); + m_pFirst = std_allocator().allocate( nCount ); + m_pLast = m_pFirst + nCount; + + for ( value_type * p = m_pFirst; p < m_pLast; ++p ) + CDS_VERIFY( m_Queue.push( *p )) ; // must be true + } + + bool from_pool( value_type * p ) const + { + return m_pFirst <= p && p < m_pLast; + } + //@endcond + + public: + /// Preallocates the pool of 
object + /** + \p nCapacity argument is the queue capacity. It should be passed + if the queue is based on dynamically-allocated buffer. + See \p cds::intrusive::VyukovMPMCCycleQueue for explanation. + */ + bounded_vyukov_queue_pool( size_t nCapacity = 0 ) + : m_Queue( nCapacity ) + { + preallocate_pool(); + } + + /// Deallocates the pool. + ~bounded_vyukov_queue_pool() + { + m_Queue.clear(); + std_allocator().deallocate( m_pFirst, m_Queue.capacity()); + } + + /// Allocates an object from pool + /** + The pool supports allocation only single object (\p n = 1). + If \p n > 1 the behaviour is undefined. + + If the queue is not empty, the popped value is returned. + Otherwise, a \p std::bad_alloc exception is raised. + */ + value_type * allocate( size_t n ) + { + assert( n == 1 ); + CDS_UNUSED( n ); + + value_type * p = m_Queue.pop(); + + if ( !p ) { + back_off bkoff; + while ( m_Queue.size()) { + p = m_Queue.pop(); + if ( p ) + goto ok; + bkoff(); + } + + // The pool is empty + CDS_THROW_EXCEPTION( std::bad_alloc()); + } + + ok: + assert( from_pool(p)); + return p; + } + + /// Deallocates the object \p p + /** + The pool supports allocation only single object (\p n = 1). + If \p n > 1 the behaviour is undefined. + + \p p should be from preallocated pool. 
+ */ + void deallocate( value_type * p, size_t n ) + { + assert( n == 1 ); + CDS_UNUSED( n ); + + if ( p ) { + assert( from_pool( p )); + back_off bkoff; + // The queue can notify it is full but that is false fullness state + // So, we push in loop + while ( !m_Queue.push(*p)) + bkoff(); + } + } + }; + + +}} // namespace cds::memory + + +#endif // #ifndef CDSLIB_MEMORY_VYUKOV_QUEUE_ALLOCATOR_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/opt/buffer.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/opt/buffer.h new file mode 100644 index 0000000..c06b4cc --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/opt/buffer.h @@ -0,0 +1,576 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_OPT_BUFFER_H +#define CDSLIB_OPT_BUFFER_H + +#include +#include +#include +#include +#include + +namespace cds { namespace opt { + + /// [type-option] Option setter for user-provided plain buffer + /** + This option is used by some container as a random access array for storing + container's item; for example, a bounded queue may use + this option to define underlying buffer implementation. + + The template parameter \p Type should be rebindable. + + Implementations: + - \p opt::v::initialized_static_buffer + - \p opt::v::uninitialized_static_buffer + - \p opt::v::initialized_dynamic_buffer + - \p opt::v::uninitialized_dynamic_buffer + + Uninitialized buffer is just an array of uninitialized elements. + Each element should be manually constructed, for example with a placement new operator. + When the uninitialized buffer is destroyed the destructor of its element is not called. + + Initialized buffer contains default-constructed elements. Element destructor is called automatically + when the buffer is destroyed. + + @note Usually, initialized and uninitialized buffers are not interchangeable. + */ + template + struct buffer { + //@cond + template struct pack: public Base + { + typedef Type buffer; + }; + //@endcond + }; + + namespace v { + + /// Static uninitialized buffer + /** + One of available type for \p opt::buffer option. + + This buffer maintains static array of uninitialized elements. 
+ You should manually construct each element when needed. + No dynamic memory allocation performed. + + \par Template parameters: + - \p T - item type the buffer stores + - \p Capacity - the capacity of buffer. The value must be power of two if \p Exp2 is \p true + - \p Exp2 - a boolean flag. If it is \p true the buffer capacity must be power of two. + Otherwise it can be any positive number. Usually, it is required that the buffer has + size of a power of two. + */ + template + class uninitialized_static_buffer + { + public: + typedef T value_type; ///< value type + static constexpr const size_t c_nCapacity = Capacity; ///< Capacity + static constexpr const bool c_bExp2 = Exp2; ///< \p Exp2 flag + + /// Rebind buffer for other template parameters + template + struct rebind { + typedef uninitialized_static_buffer other; ///< Rebind result type + }; + + // Capacity must be power of 2 + static_assert(!c_bExp2 || (c_nCapacity & (c_nCapacity - 1)) == 0, "Capacity must be power of two"); + + private: + //@cond + union element { + value_type v; + char c; + + element() + {} + }; + + element m_buffer[c_nCapacity]; + //@endcond + public: + /// Construct static buffer + uninitialized_static_buffer() noexcept + {} + + /// Construct buffer of given capacity + /** + This ctor ignores \p nCapacity argument. 
The capacity of static buffer + is defined by template argument \p Capacity + */ + uninitialized_static_buffer( size_t nCapacity ) noexcept + { + CDS_UNUSED( nCapacity ); + } + + uninitialized_static_buffer( const uninitialized_static_buffer& ) = delete; + uninitialized_static_buffer& operator =( const uninitialized_static_buffer& ) = delete; + + /// Get item \p i + value_type& operator []( size_t i ) + { + assert( i < capacity()); + return m_buffer[i].v; + } + + /// Get item \p i, const version + const value_type& operator []( size_t i ) const + { + assert( i < capacity()); + return m_buffer[i].v; + } + + /// Returns buffer capacity + constexpr size_t capacity() const noexcept + { + return c_nCapacity; + } + + /// Zeroize the buffer + void zeroize() + { + memset( m_buffer, 0, capacity() * sizeof(m_buffer[0])); + } + + /// Returns pointer to buffer array + value_type * buffer() noexcept + { + return &( m_buffer[0].v ); + } + + /// Returns pointer to buffer array + value_type * buffer() const noexcept + { + return &( m_buffer[0].v ); + } + + /// Returns idx % capacity() + /** + If the buffer size is a power of two, binary arithmethics is used + instead of modulo arithmetics + */ + size_t mod( size_t idx ) + { + constexpr_if ( c_bExp2 ) + return idx & ( capacity() - 1 ); + else + return idx % capacity(); + } + + //@cond + template + typename std::enable_if< sizeof(I) != sizeof(size_t), size_t >::type mod( I idx ) + { + constexpr_if ( c_bExp2 ) + return static_cast( idx & static_cast( capacity() - 1 )); + else + return static_cast( idx % capacity()); + } + //@endcond + }; + + /// Static initialized buffer + /** + One of available type for \p opt::buffer option. + + This buffer maintains static array of default-constructed elements. + No dynamic memory allocation performed. + + \par Template parameters: + - \p T - item type the buffer stores + - \p Capacity - the capacity of buffer. The value must be power of two if \p Exp2 is \p true + - \p Exp2 - a boolean flag. 
If it is \p true the buffer capacity must be power of two. + Otherwise it can be any positive number. Usually, it is required that the buffer has + size of a power of two. + */ + template + class initialized_static_buffer + { + public: + typedef T value_type; ///< value type + static constexpr const size_t c_nCapacity = Capacity; ///< Capacity + static constexpr const bool c_bExp2 = Exp2; ///< \p Exp2 flag + + /// Rebind buffer for other template parameters + template + struct rebind { + typedef initialized_static_buffer other; ///< Rebind result type + }; + + // Capacity must be power of 2 + static_assert(!c_bExp2 || (c_nCapacity & (c_nCapacity - 1)) == 0, "Capacity must be power of two"); + + private: + //@cond + value_type m_buffer[c_nCapacity]; + //@endcond + public: + /// Construct static buffer + initialized_static_buffer() noexcept + {} + + /// Construct buffer of given capacity + /** + This ctor ignores \p nCapacity argument. The capacity of static buffer + is defined by template argument \p Capacity + */ + initialized_static_buffer( size_t nCapacity ) noexcept + { + CDS_UNUSED( nCapacity ); + } + + initialized_static_buffer( const initialized_static_buffer& ) = delete; + initialized_static_buffer& operator =( const initialized_static_buffer& ) = delete; + + /// Get item \p i + value_type& operator []( size_t i ) + { + assert( i < capacity()); + return m_buffer[i]; + } + + /// Get item \p i, const version + const value_type& operator []( size_t i ) const + { + assert( i < capacity()); + return m_buffer[i]; + } + + /// Returns buffer capacity + constexpr size_t capacity() const noexcept + { + return c_nCapacity; + } + + /// Zeroize the buffer + void zeroize() + { + memset( m_buffer, 0, capacity() * sizeof(m_buffer[0])); + } + + /// Returns pointer to buffer array + value_type * buffer() noexcept + { + return m_buffer; + } + + /// Returns pointer to buffer array + value_type * buffer() const noexcept + { + return m_buffer; + } + + /// Returns idx % capacity() 
+ /** + If the buffer size is a power of two, binary arithmethics is used + instead of modulo arithmetics + */ + size_t mod( size_t idx ) + { + constexpr_if ( c_bExp2 ) + return idx & ( capacity() - 1 ); + else + return idx % capacity(); + } + + //@cond + template + typename std::enable_if< sizeof( I ) != sizeof( size_t ), size_t >::type mod( I idx ) + { + constexpr_if ( c_bExp2 ) + return static_cast( idx & static_cast( capacity() - 1 )); + else + return static_cast( idx % capacity()); + } + //@endcond + }; + + /// Dynamically allocated uninitialized buffer + /** + One of available type for \p opt::buffer option. + + This buffer maintains dynamically allocated array of uninitialized elements. + You should manually construct each element when needed. + Allocation is performed at construction time. + + \par Template parameters: + - \p T - item type storing in the buffer + - \p Alloc - an allocator used for allocating internal buffer (\p std::allocator interface) + - \p Exp2 - a boolean flag. If it is \p true the buffer capacity must be power of two. + Otherwise it can be any positive number. Usually, it is required that the buffer has + size of a power of two. + */ + template + class uninitialized_dynamic_buffer + { + public: + typedef T value_type; ///< Value type + typedef Alloc allocator; ///< Allocator type; + static constexpr const bool c_bExp2 = Exp2; ///< \p Exp2 flag + + /// Rebind buffer for other template parameters + template + struct rebind { + typedef uninitialized_dynamic_buffer other; ///< Rebinding result type + }; + + //@cond + typedef typename allocator::template rebind::other allocator_type; + //@endcond + + private: + //@cond + value_type * m_buffer; + size_t const m_nCapacity; + //@endcond + public: + /// Allocates dynamic buffer of given \p nCapacity + /** + If \p Exp2 class template parameter is \p true then actual capacity + of allocating buffer is nearest upper to \p nCapacity power of two. 
+ */ + uninitialized_dynamic_buffer( size_t nCapacity ) + : m_nCapacity( c_bExp2 ? beans::ceil2(nCapacity) : nCapacity ) + { + assert( m_nCapacity >= 2 ); + // Capacity must be power of 2 + assert( !c_bExp2 || (m_nCapacity & (m_nCapacity - 1)) == 0 ); + + m_buffer = allocator_type().allocate( m_nCapacity ); + } + + /// Destroys dynamically allocated buffer + ~uninitialized_dynamic_buffer() + { + allocator_type().deallocate( m_buffer, m_nCapacity ); + } + + uninitialized_dynamic_buffer( const uninitialized_dynamic_buffer& ) = delete; + uninitialized_dynamic_buffer& operator =( const uninitialized_dynamic_buffer& ) = delete; + + /// Get item \p i + value_type& operator []( size_t i ) + { + assert( i < capacity()); + return m_buffer[i]; + } + + /// Get item \p i, const version + const value_type& operator []( size_t i ) const + { + assert( i < capacity()); + return m_buffer[i]; + } + + /// Returns buffer capacity + size_t capacity() const noexcept + { + return m_nCapacity; + } + + /// Zeroize the buffer + void zeroize() + { + memset( m_buffer, 0, capacity() * sizeof(m_buffer[0])); + } + + /// Returns pointer to buffer array + value_type * buffer() noexcept + { + return m_buffer; + } + + /// Returns pointer to buffer array + value_type * buffer() const noexcept + { + return m_buffer; + } + + /// Returns idx % capacity() + /** + If the buffer size is a power of two, binary arithmethics is used + instead of modulo arithmetics + */ + size_t mod( size_t idx ) + { + constexpr_if ( c_bExp2 ) + return idx & ( capacity() - 1 ); + else + return idx % capacity(); + } + + //@cond + template + typename std::enable_if< sizeof( I ) != sizeof( size_t ), size_t >::type mod( I idx ) + { + constexpr_if ( c_bExp2 ) + return static_cast( idx & static_cast( capacity() - 1 )); + else + return static_cast( idx % capacity()); + } + //@endcond + }; + + + /// Dynamically allocated initialized buffer + /** + One of available type for \p opt::buffer option. 
+ + This buffer maintains dynamically allocated array of initialized default-constructed elements. + Allocation is performed at construction time. + + \par Template parameters: + - \p T - item type storing in the buffer + - \p Alloc - an allocator used for allocating internal buffer (\p std::allocator interface) + - \p Exp2 - a boolean flag. If it is \p true the buffer capacity must be power of two. + Otherwise it can be any positive number. Usually, it is required that the buffer has + size of a power of two. + */ + template + class initialized_dynamic_buffer + { + public: + typedef T value_type; ///< Value type + typedef Alloc allocator; ///< Allocator type + static constexpr const bool c_bExp2 = Exp2; ///< \p Exp2 flag + + /// Rebind buffer for other template parameters + template + struct rebind { + typedef initialized_dynamic_buffer other; ///< Rebinding result type + }; + + //@cond + typedef cds::details::Allocator allocator_type; + //@endcond + + private: + //@cond + value_type * m_buffer; + size_t const m_nCapacity; + //@endcond + public: + /// Allocates dynamic buffer of given \p nCapacity + /** + If \p Exp2 class template parameter is \p true then actual capacity + of allocating buffer is nearest upper to \p nCapacity power of two. + */ + initialized_dynamic_buffer( size_t nCapacity ) + : m_nCapacity( c_bExp2 ? 
beans::ceil2(nCapacity) : nCapacity ) + { + assert( m_nCapacity >= 2 ); + // Capacity must be power of 2 + assert( !c_bExp2 || (m_nCapacity & (m_nCapacity - 1)) == 0 ); + + allocator_type a; + m_buffer = a.NewArray( m_nCapacity ); + } + + /// Destroys dynamically allocated buffer + ~initialized_dynamic_buffer() + { + allocator_type a; + a.Delete( m_buffer, m_nCapacity ); + } + + initialized_dynamic_buffer( const initialized_dynamic_buffer& ) = delete; + initialized_dynamic_buffer& operator =( const initialized_dynamic_buffer& ) = delete; + + /// Get item \p i + value_type& operator []( size_t i ) + { + assert( i < capacity()); + return m_buffer[i]; + } + + /// Get item \p i, const version + const value_type& operator []( size_t i ) const + { + assert( i < capacity()); + return m_buffer[i]; + } + + /// Returns buffer capacity + size_t capacity() const noexcept + { + return m_nCapacity; + } + + /// Zeroize the buffer + void zeroize() + { + memset( m_buffer, 0, capacity() * sizeof(m_buffer[0])); + } + + /// Returns pointer to buffer array + value_type * buffer() noexcept + { + return m_buffer; + } + + /// Returns pointer to buffer array + value_type * buffer() const noexcept + { + return m_buffer; + } + + /// Returns idx % capacity() + /** + If the buffer size is a power of two, binary arithmethics is used + instead of modulo arithmetics + */ + size_t mod( size_t idx ) + { + constexpr_if ( c_bExp2 ) + return idx & ( capacity() - 1 ); + else + return idx % capacity(); + } + + //@cond + template + typename std::enable_if< sizeof( I ) != sizeof( size_t ), size_t >::type mod( I idx ) + { + constexpr_if ( c_bExp2 ) + return static_cast( idx & static_cast( capacity() - 1 )); + else + return static_cast( idx % capacity()); + } + //@endcond + }; + + } // namespace v + +}} // namespace cds::opt + +#endif // #ifndef CDSLIB_OPT_BUFFER_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/opt/compare.h 
b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/opt/compare.h new file mode 100644 index 0000000..23c9cbd --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/opt/compare.h @@ -0,0 +1,336 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_OPT_COMPARE_H +#define CDSLIB_OPT_COMPARE_H + +/* + Editions: + 2011.05.05 khizmax Created +*/ + +#include +#include +#include +#include + +namespace cds { namespace opt { + + /// [type-option] Option setter for key comparing + /** + The option sets a type of a functor to compare keys. + For comparing two keys \p k1 and \p k2 the functor must return: + - 1 if k1 > k2 + - 0 if k1 == k2 + - -1 if k1 < k2 + + \p Functor is a functor with following interface: + \code + template + struct Comparator { + int operator ()(const T& r1, const T& r2) + { + // Comparator body + } + }; + \endcode + Note that the functor must return \p int, not a \p bool value. + + There are predefined type for \p Functor: + - the functor \p opt::v::less_comparator that implements comparing functor through \p std::less predicate. + - the specialization of \p opt::v::less_comparator functor intended for the string comparison + + You may implement your own comparing functor that satisfies \p Functor interface. + + About relation between \p %opt::less and \p %opt::compare option setters see \p opt::less description. + */ + template + struct compare { + //@cond + template struct pack: public Base + { + typedef Functor compare; + }; + //@endcond + }; + + namespace v { + + /// Comparator based on \p std::less predicate + /** + This functor is predefined type for \p opt::compare option setter. + It is based on \p std::less predicate. + */ + template + struct less_comparator { + /// Operator that compares two value of type \p T + int operator()(T const& v1, T const& v2) + { + if ( std::less()( v1, v2 )) + return -1; + if ( std::less()( v2, v1 )) + return 1; + return 0; + } + }; + + /// Comparator specialization for \p std::string + /** + This functor uses \p std::string::compare() method instead of \p std::less predicate. 
+ */ + template + struct less_comparator< std::basic_string > + { + //@cond + typedef std::basic_string string_type; + int operator()(string_type const& v1, string_type const& v2) + { + return v1.compare( v2 ); + } + //@endcond + }; + } // namespace v + + /// [type-option] Option setter for \p less predicate + /** + The option sets a binary predicate that tests whether a value of a specified type is less than another value of that type. + \p Functor interface is similar to \p std::less predicate interface. + The standard predicate \p std::less can act as \p Functor: + \code typedef cds::opt::less< std::less< int > > opt_less \endcode + + In addition, the option setter may sets non-standard 2-type predicate (\p std::binary_function): + \code + + struct foo { + int n; + }; + + template + struct pred_less { + bool operator ()( const T& t, const Q& q ) + { return t.n < q ; } + bool operator ()( const Q& q, const T& t ) + { return q < t.n ; } + bool operator ()( const T& t1, const T& t2 ) + { return t1.n < t2.n ; } + bool operator ()( const Q& q1, const Q& q2 ) + { return q1 < q2 ; } + }; + + typedef cds::opt::less< pred_less< foo, int > > opt_less; + \endcode + + Generally, the default type for \p Functor is \p std::less but it depends on the container used. + + \par Relation between opt::less and opt::compare option setters + Unless otherwise specified, \p opt::compare option setter has high priority. + If \p %opt::compare and \p %opt::less options are specified for a container, the \p %opt::compare option is used: + \code + // Suppose, a hypothetical map_type allows to specify + // cds::opt::less and cds::opt::compare options + + typedef map_type< std::string, int, + cds::opt::compare< cds::opt::v::less_comparator< std::string > >, + cds::opt::less< std::less< std::string > > + > my_map_type; + + // For my_map_type, the cds::opt::compare comparator will be used, + // the cds::opt::less option is ignored without any warnings. 
+ \endcode + */ + template + struct less { + //@cond + template struct pack: public Base + { + typedef Functor less; + }; + //@endcond + }; + + //@cond + namespace details { + template + struct make_comparator_from_less + { + typedef Less less_functor; + + template + int operator ()( T const& t, Q const& q ) const + { + less_functor f; + if ( f( t, q )) + return -1; + if ( f( q, t )) + return 1; + return 0; + } + }; + + template > > + struct make_comparator_from + { + typedef typename Traits::compare compare; + typedef typename Traits::less less; + + typedef typename std::conditional< + std::is_same< compare, opt::none >::value, + typename std::conditional< + std::is_same< less, opt::none >::value, + DefaultCmp, + make_comparator_from_less< less > + >::type, + compare + >::type type; + }; + + + template + using make_comparator = make_comparator_from< T, Traits, + typename std::conditional< + Forced, + make_comparator_from_less< std::less>, + opt::none + >::type >; + + template + struct make_comparator_from_option_list + { + struct default_traits { + typedef opt::none compare; + typedef opt::none less; + }; + + typedef typename make_comparator< T, + typename opt::make_options< + typename opt::find_type_traits< default_traits, Options... >::type + ,Options... + >::type + >::type type; + }; + } // namespace details + //@endcond + + /// [type-option] Option setter for \p opt::equal_to predicate + /** + The option sets a binary predicate that tests whether a value of a specified type is equal to another value of that type. + \p Functor interface is similar to \p std::equal_to predicate interface. 
+ The standard predicate \p std::equal_to can act as \p Functor: + \code typedef cds::opt::equal_to< std::equal_to< int > > opt_equal_to \endcode + + In addition, the option setter may sets non-standard 2-type (or even N-type) predicate (\p std::binary_function): + \code + + struct foo { + int n; + }; + + template + struct pred_equal_to { + bool operator ()( const T& t, const Q& q ) + { return t.n == q ; } + bool operator ()( const Q& q, const T& t ) + { return q == t.n ; } + bool operator ()( const T& t1, const T& t2 ) + { return t1.n == t2.n ; } + bool operator ()( const Q& q1, const Q& q2 ) + { return q1 == q2 ; } + }; + + typedef cds::opt::equal_to< pred_equal_to< foo, int > > opt_equal_to; + \endcode + + Generally, the default type for \p Functor is \p std::equal_to but it depends on the container used. + */ + template + struct equal_to { + //@cond + template struct pack: public Base + { + typedef Functor equal_to; + }; + //@endcond + }; + + //@cond + namespace details { + template + struct make_equal_to_from_compare + { + typedef Compare compare_functor; + + template + bool operator()( T const& t, Q const& q ) const + { + return compare_functor()(t, q) == 0; + } + }; + + template + struct make_equal_to_from_less + { + typedef Less less_functor; + + template + bool operator()( T const& t, Q const& q ) const + { + less_functor less; + return !less(t, q) && !less(q, t); + } + }; + + template + struct make_equal_to + { + typedef typename Traits::equal_to equal_to; + typedef typename Traits::compare compare; + typedef typename Traits::less less; + + typedef typename std::conditional< + std::is_same< equal_to, opt::none >::value, + typename std::conditional< + std::is_same< compare, opt::none >::value, + typename std::conditional< + std::is_same< less, opt::none >::value, + typename std::conditional< + Forced, + std::equal_to, + opt::none >::type, + make_equal_to_from_less< less > >::type, + make_equal_to_from_compare< compare > >::type, + equal_to + >::type type; 
+ }; + } + //@endcond + +}} // namespace cds::opt + +#endif // #ifndef CDSLIB_OPT_COMPARE_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/opt/hash.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/opt/hash.h new file mode 100644 index 0000000..2949b50 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/opt/hash.h @@ -0,0 +1,195 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef CDSLIB_OPT_HASH_H +#define CDSLIB_OPT_HASH_H + +#include +#include +#include + +namespace cds { namespace opt { + + /// [type-option] Option setter for a hash function + /** + This option setter specifies hash functor used in unordered containers. + + The default value of template argument \p Functor is \p cds::opt::v::hash + that is synonym for std::hash implementation of standard library. + */ + template + struct hash { + //@cond + template struct pack: public Base + { + typedef Functor hash; + }; + //@endcond + }; + + namespace v { + //@cond + using std::hash; + + /// Metafunction selecting default hash implementation + /** + The metafunction selects appropriate hash functor implementation. + If \p Hash is not equal to opt::none, then result of metafunction is \p Hash. + Otherwise, the result is std::hash . + + Note that default hash function like std::hash + is generally not suitable for complex type \p Q and its derivatives. + You should manually provide particular hash functor for such types. 
+ */ + template + struct hash_selector + { + typedef Hash type; ///< resulting implementation of hash functor + }; + + template <> + struct hash_selector + { + struct type { + template + size_t operator()( Q const& key ) const + { + return std::hash()( key ); + } + }; + }; + //@endcond + } // namespace v + + //@cond + namespace details { + template struct hash_list; + template + struct hash_list< std::tuple > + { + static size_t const size = sizeof...(Functors); + typedef size_t values[size]; + typedef std::tuple hash_tuple_type; + + hash_tuple_type hash_tuple; + + hash_list() + {} + + hash_list( hash_tuple_type const& t) + : hash_tuple( t ) + {} + hash_list( hash_tuple_type&& t) + : hash_tuple( std::forward(t)) + {} + + template + typename std::enable_if< (I == sizeof...(Functors)) >::type apply( size_t * /*dest*/, T const& /*v*/ ) const + {} + + template + typename std::enable_if< (I < sizeof...(Functors)) >::type apply( size_t * dest, T const& v ) const + { + dest[I] = std::get( hash_tuple )( v ); + apply( dest, v ); + } + + template + void operator()( size_t * dest, T const& v ) const + { + apply<0>( dest, v ); + } + }; + } // namespace details + //@endcond + + /// Declare tuple for hash functors \p Functors + template + using hash_tuple = details::hash_list< std::tuple< Functors... >>; + + //@cond + // At least, two functors must be provided. 
Single functor is not supported + template struct hash< std::tuple >; + //@endcond + + /// Multi-functor hash option setter - specialization for \p std::tuple + template + struct hash< std::tuple > + { + //@cond + template struct pack: public Base + { + typedef details::hash_list< std::tuple > hash; + }; + //@endcond + }; + + + //@cond + namespace details { + + template + struct hash_list_wrapper { + typedef HashList hash_list; + typedef WrappedType wrapped_type; + typedef Wrapper wrapper_type; + + typedef typename hash_list::hash_tuple_type hash_tuple_type; + static size_t const size = hash_list::size; + + hash_list m_wrappedList; + + hash_list_wrapper() + {} + hash_list_wrapper( hash_tuple_type const& t) + : m_wrappedList( t ) + {} + hash_list_wrapper( hash_tuple_type&& t) + : m_wrappedList( std::forward(t)) + {} + + void operator()( size_t * dest, wrapped_type const& what ) const + { + m_wrappedList( dest, wrapper_type()( what )); + } + + template + void operator()( size_t * dest, Q const& what) const + { + m_wrappedList( dest, what ); + } + }; + + } // namespace details + //@endcond + +}} // namespace cds::opt + +#endif // #ifndef CDSLIB_OPT_HASH_H diff --git a/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/opt/options.h b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/opt/options.h new file mode 100644 index 0000000..37c3e08 --- /dev/null +++ b/gdax-orderbook-hpp/demo/dependencies/libcds-2.3.2/cds/opt/options.h @@ -0,0 +1,1219 @@ +/* + This file is a part of libcds - Concurrent Data Structures library + + (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2017 + + Source code repo: http://github.com/khizmax/libcds/ + Download: http://sourceforge.net/projects/libcds/files/ + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this + list of conditions and the following 
disclaimer. + + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef CDSLIB_OPT_OPTIONS_H +#define CDSLIB_OPT_OPTIONS_H + +/* + Framework to define template options + + Editions: + 2011.01.23 khizmax Created +*/ + +#include // rand, srand + +#include +#include +#include +#include + +namespace cds { + +/// Framework to define template options +/** + There are two kind of options: + - \p type-option - option that determines a data type. The template argument \p Type of the option is a type. + - \p value-option - option that determines a value. The template argument \p Value of the option is a value. 
+*/ +namespace opt { + + /// Type indicates that an option is not specified and the default one should be used + struct none + { + //@cond + template struct pack: public Base + {}; + //@endcond + }; + + /// Metafunction for selecting default option value + /** + Template parameters: + - \p Option - option value + - \p Default - default option value + - \p Value - option value if \p Option is not opt::none + + If \p Option is opt::none, the metafunction result is \p Default, otherwise + the result is \p Value. + + Examples: + \code + // default_spin is cds::sync::spin + typedef typename cds::opt::select_default< cds::opt::none, cds::sync::spin >::type default_spin; + + // spin_32bit is cds::sync::reentrant_spin32 + typedef typename cds::opt::select_default< cds::opt::none, cds::sync::reentrant_spin32 >::type spin_32bit; + \endcode + */ + template + struct select_default + { + typedef Value type ; ///< metafunction result + }; + //@cond + template + struct select_default< none, Default > + { + typedef Default type; + }; + //@endcond + + /// Metafunction to select option value + /** + This metafunction is intended for extracting the value of the \p Option option. + For example, + \code + #include + #include // only for testing purpose (static_assert) + + struct tag_a; + + // Define option + typedef cds::opt::tag< tag_a > tag_option; + + // What is the value of the tag_option? + // How we can extract tag_a from tag_option? + // Here is a solution: + typedef cds::opt::value< tag_option >::tag tag_option_value; + + // tag_option_value is the same as tag_a + static_assert( std::is_same< tag_option_value, tag_a >::value, "Error: tag_option_value != tag_a" ); + + \endcode + */ + template + struct value: public Option::template pack + {}; + + + /// [type-option] Option setter specifies a tag + /** + Suppose, you have a struct + \code + struct Feature + { .... 
}; + \endcode + and you want that your class \p X would be derived from several \p Feature: + \code + class X: public Feature, public Feature + { .... }; + \endcode + + How can you distinguish one \p Feature from another? + You may use a tag option: + \code + template + struct Feature + { .... }; + + class tag_a; + class tag_b; + class X: public Feature< tag_a >, public Feature< tag_b > + { .... }; + \endcode + Now you can distinguish one \p Feature from another: + \code + X x; + Feature& fa = static_cast< Feature >( x ); + Feature& fb = static_cast< Feature >( x ); + \endcode + + \p tag option setter allows you to do things like this for an option-centric approach: + \code + template + struct Feature + { .... }; + + class tag_a; + class tag_b; + class X: public Feature< tag >, public Feature< tag > + { .... }; + \endcode + + This option setter is widely used in cds::intrusive containers to distinguish + between different intrusive part of container's node. + + An incomplete type can serve as a \p Tag. + */ + template + struct tag { + //@cond + template struct pack: public Base + { + typedef Tag tag; + }; + //@endcond + }; + + /// [type-option] Option setter specifies lock class + /** + Specification of the \p Type class is: + \code + struct Lock { + void lock(); + void unlock(); + }; + \endcode + */ + template + struct lock_type { + //@cond + template struct pack: public Base + { + typedef Type lock_type; + }; + //@endcond + }; + + /// [type-option] @ref cds_sync_monitor "Monitor" type setter + /** + This option setter specifyes @ref cds_sync_monitor "synchronization monitor" + for blocking container. + */ + template + struct sync_monitor { + //@cond + template struct pack : public Base + { + typedef Type sync_monitor; + }; + //@endcond + }; + + /// [type-option] Back-off strategy option setter + /** + Back-off strategy used in some algorithm. + See cds::backoff namespace for back-off explanation and supported interface. 
+ */ + template + struct back_off { + //@cond + template struct pack: public Base + { + typedef Type back_off; + }; + //@endcond + }; + + /// [type-option] Option setter for garbage collecting schema used + /** + Possible values of \p GC template parameter are: + - cds::gc::HP - Hazard Pointer garbage collector + - cds::gc::DHP - Dynamic Hazard Pointer garbage collector + - cds::gc::none::GC - No garbage collector (not supported for some containers) + */ + template + struct gc { + //@cond + template struct pack: public Base + { + typedef GC gc; + }; + //@endcond + }; + + /// [type-option] Option setter for an allocator + /** + \p Type is allocator with \p std::allocator interface. Default is value of macro CDS_DEFAULT_ALLOCATOR + that, in turn, is \p std::allocator. + + The \p libcds containers actively use rebinding to convert an allocator of one type to another. Thus, + you may specify any valid type as std::allocator's template parameter. + + See also opt::node_allocator + */ + template + struct allocator { + //@cond + template struct pack: public Base + { + typedef Type allocator; + }; + //@endcond + }; + + /// [type-option] Option setter for node allocator + /** + \p Type is allocator with \p std::allocator interface. Default is value of macro CDS_DEFAULT_ALLOCATOR + that, in turn, is \p std::allocator. + + Many node-base containers require an allocator for maintaining data (container's node) and for internal use. + Sometimes, this types of allocator should be different for performance reason. + For example, we should like to allocate the node from a pool of preallocated nodes. + Such pool can be seen as the node allocator. + + Usually, if a container supports \p opt::allocator and \p %opt::node_allocator options + and \p opt::node_allocator is not specified the \p %opt::allocator option is used for maintaining the nodes. + + The \p libcds containers actively use rebinding to convert an allocator of one type to another. 
Thus, + you may specify any valid type as std::allocator's template parameter. + */ + template + struct node_allocator { + //@cond + template struct pack: public Base + { + typedef Type node_allocator; + }; + //@endcond + }; + + /// [type-option] Option setter for item counting + /** + Some data structure (for example, queues) has additional feature for item counting. + This option allows to set up appropriate item counting policy for that data structure. + + Predefined option \p Type: + - \p atomicity::empty_item_counter - no item counting performed. It is default policy for many + containers + - \p atomicity::item_counter - the class that provides atomic item counting + - \p atomicity::cache_friendly_item_counter - cache-friendly atomic item counter + - \p opt::v::sequential_item_counter - simple non-atomic item counter. This counter is not intended for + concurrent containers and may be used only if it is explicitly noted. + + You may provide other implementation of \p atomicity::item_counter interface for your needs. + + Note, the item counting in lock-free containers cannot be exact; for example, if + item counter for a container returns zero it is not mean that the container is empty. + So, the item counter may be used for statistical purposes only. + */ + template + struct item_counter { + //@cond + template struct pack: public Base + { + typedef Type item_counter; + }; + //@endcond + }; + + /// Special alignment constants for \ref cds::opt::alignment option + enum special_alignment { + no_special_alignment = 0, ///< no special alignment + cache_line_alignment = 1 ///< use cache line size defined in cds/user_setup/cache_line.h + }; + + /// [value-option] Alignment option setter + /** + Alignment for some internal data of containers. May be useful to solve false sharing problem. + \p Value defines desired alignment and it may be power of two integer or predefined values from + \ref special_alignment enum. 
+ */ + template + struct alignment { + //@cond + template struct pack: public Base + { + enum { alignment = Value }; + }; + //@endcond + }; + + //@cond + namespace details { + template + struct alignment_setter { + typedef typename cds::details::aligned_type< Type, Alignment >::type type; + }; + + template + struct alignment_setter { + typedef Type type; + }; + + template + struct alignment_setter { + typedef typename cds::details::aligned_type< Type, c_nCacheLineSize >::type type; + }; + } // namespace details + //@endcond + + /// Special padding constants for \p cds::opt::padding option + enum special_padding { + no_special_padding = 0, ///< no special padding + cache_line_padding = 1, ///< use cache line size defined in cds/user_setup/cache_line.h + + /// Apply padding only for tiny data when data size is less than required padding + /** + The flag means that if your data size is less than the cacheline size, the padding is applyed. + Otherwise no padding will be applyed. + + This flag is applyed for padding value: + \code + cds::opt::padding< cds::opt::cache_line_padding | cds::opt::padding_tiny_data_only >; + cds::opt::padding< 256 | cds::opt::padding_tiny_data_only >; + \endcode + */ + padding_tiny_data_only = 0x80000000, + + //@cond + padding_flags = padding_tiny_data_only + //@endcond + }; + + /// [value-option] Padding option setter + /** + The padding for the internal data of some containers. May be useful to solve false sharing problem. + \p Value defines desired padding and it may be power of two integer or predefined values from + \p special_padding enum. 
+ */ + template + struct padding { + //@cond + template struct pack: public Base + { + enum { padding = Value }; + }; + //@endcond + }; + + //@cond + template + struct actual_padding + { + enum { value = Padding & ~padding_flags }; + }; + + template <> + struct actual_padding + { + enum { value = cds::c_nCacheLineSize }; + }; + + template <> + struct actual_padding + { + enum { value = cds::c_nCacheLineSize }; + }; + //@endcond + + //@cond + namespace details { + enum padding_vs_datasize { + padding_datasize_less, + padding_datasize_equal, + padding_datasize_greater + }; + + template < typename T, unsigned int Padding, bool NoPadding, padding_vs_datasize Relation, bool TinyOnly > + struct apply_padding_helper; + + template + struct apply_padding_helper < T, 0, true, Relation, TinyOnly > + { + struct type { + T data; + }; + typedef void padding_type; + }; + + template + struct apply_padding_helper < T, Padding, false, padding_datasize_equal, TinyOnly > + { + struct type { + T data; + }; + typedef void padding_type; + }; + + template + struct apply_padding_helper < T, Padding, false, padding_datasize_less, TinyOnly > + { + typedef uint8_t padding_type[Padding - sizeof( T )]; + struct type { + T data; + padding_type pad_; + }; + + }; + + template + struct apply_padding_helper < T, Padding, false, padding_datasize_greater, false > + { + typedef uint8_t padding_type[Padding - sizeof( T ) % Padding]; + struct type { + T data; + padding_type pad_; + }; + }; + + template + struct apply_padding_helper < T, Padding, false, padding_datasize_greater, true > + { + struct type { + T data; + }; + typedef void padding_type; + }; + + template + struct apply_padding + { + private: + enum { padding = Padding & ~padding_flags }; + + public: + static constexpr const size_t c_nPadding = + static_cast(padding) == static_cast(cache_line_padding) ? cds::c_nCacheLineSize : + static_cast(padding) == static_cast(no_special_padding) ? 
0 : padding; + + static_assert( (c_nPadding & (c_nPadding - 1)) == 0, "Padding must be a power-of-two number" ); + + typedef apply_padding_helper< T, + c_nPadding, + c_nPadding == 0, + sizeof( T ) < c_nPadding ? padding_datasize_less : sizeof( T ) == c_nPadding ? padding_datasize_equal : padding_datasize_greater, + (Padding & padding_tiny_data_only) != 0 + > result; + + typedef typename result::type type; + + typedef typename std::conditional< + std::is_same< typename result::padding_type, void >::value, + unsigned int, + typename result::padding_type + >::type padding_type; + }; + + } // namespace details + //@endcond + + + /// [type-option] Generic option setter for statisitcs + /** + This option sets a type to gather statistics. + The option is generic - no predefined type(s) is provided. + The particular \p Type of statistics depends on internal structure of the object. + */ + template + struct stat { + //@cond + template struct pack: public Base + { + typedef Type stat; + }; + //@endcond + }; + + /// [type-option] Option setter for C++ memory model + /** + The cds library supports following memory ordering constraints for atomic operations in container implementation: + - \p v::relaxed_ordering - relaxed C++ memory model. This mode supports full set of memory ordering constraints: + \p memory_order_relaxed, \p memory_order_acquire, \p memory_order_release and so on. + - \p v::sequential_consistent - sequentially consistent C++ memory model (default memory ordering for C++). In + this mode any memory ordering constraint maps to \p memory_order_seq_cst. + + The \p Type template parameter can be \p v::relaxed_ordering or \p v::sequential_consistent. + + You may mix different memory ordering options for different containers: one declare as sequentially consistent, + another declare as relaxed. + Usually, \p v::relaxed_ordering is the default memory ordering for libcds containers. 
+ */ + template + struct memory_model { + //@cond + template struct pack: public Base + { + typedef Type memory_model; + }; + //@endcond + }; + + /// [type-option] Base type traits option setter + /** + This option setter is intended generally for internal use for type rebinding. + */ + template + struct type_traits { + //@cond + template struct pack: public Base + { + typedef Type type_traits; + }; + //@endcond + }; + + /// Resizing policy option + /** + This option specifies the resizing policy that decides when to resize a container. + Used in some containers, for example, in container::StripedHashSet, intrusive::StripedHashSet. + + The real resizing policy specified by \p Type does strongly depend on a container + that supports this option, see container documentation about possibly \p Type values. + */ + template + struct resizing_policy { + //@cond + template struct pack: public Base + { + typedef Type resizing_policy; + }; + //@endcond + }; + + /// Copy policy option + /** + The copy policy defines an item copying algorithm which is used, for example, when a container is resized. + It is very specific algorithm depending on type of the container. + */ + template + struct copy_policy { + //@cond + template struct pack: public Base + { + typedef Type copy_policy; + }; + //@endcond + }; + + /// Swap policy option + /** + The swap policy specifies an algorithm for swapping two objects. + Usually, the default policy is \p std::swap (see opt::v::default_swap_policy): + + @code + struct std_swap { + template + void operator ()( T& v1, T& v2 ) + { + std::swap( v1, v2 ); + } + }; + @endcode + */ + template + struct swap_policy { + //@cond + template struct pack: public Base + { + typedef Type swap_policy; + }; + //@endcond + }; + + /// Move policy option + /** + The move policy specifies an algorithm for moving object content. + In trivial case, it can be simple assignment. 
+ + The move interface is: + \code + template + struct move_policy { + void operator()( T& dest, T& src ); + }; + \endcode + + Note that in move algorithm the \p src source argument can be changed too. + So you can use move semantics. + + Usually, the default move policy is opt::v::assignment_move_policy + */ + template + struct move_policy { + //@cond + template struct pack: public Base + { + typedef Type move_policy; + }; + //@endcond + }; + + /// [value-option] Enable sorting + /** + This option enables (Enable = true) or disables (Enable == false) + sorting of a container. + */ + template + struct sort { + //@cond + template struct pack: public Base + { + static bool const sort = Enable; + }; + //@endcond + }; + + /// [type-option] Concurrent access policy + /** + This option specifies synchronization strategy for fine-grained lock-based containers. + The option has no predefined \p Policy type. + For each container that accepts this option the range of available \p Policy types + is unique. + */ + template + struct mutex_policy { + //@cond + template struct pack: public Base + { + typedef Policy mutex_policy; + }; + //@endcond + }; + + + /// [type-option] Random number generator + /** + The option specifies a random number generator. + \p Random can be any STL random number generator producing + unsigned integer: \p std::linear_congruential_engine, + \p std::mersenne_twister_engine, \p std::subtract_with_carry_engine + and so on, or \p opt::v::c_rand. 
+ + */ + template + struct random_engine { + //@cond + template struct pack: public Base + { + typedef Random random_engine; + }; + //@endcond + }; + + /// [type-option] Free-list implementation + /** + See \p cds::intrusive::FreeList for free-list interface + */ + template + struct free_list { + //@cond + template struct pack: public Base + { + typedef FreeList free_list; + }; + //@endcond + }; + + //@cond + // For internal use + template + struct key_accessor { + template struct pack: public Base + { + typedef Accessor key_accessor; + }; + }; + + template + struct replace_key_accessor { + typedef typename std::conditional< + std::is_same< typename Traits::key_accessor, WhatReplace >::value, + typename opt::key_accessor< ReplaceWith >::template pack< Traits >, + Traits + >::type type; + }; + //@endcond + +}} // namespace cds::opt + + +// **************************************************** +// Options predefined types and values + +namespace cds { namespace opt { + + /// Predefined options value + namespace v { + + /// Sequential non-atomic item counter + /** + This type of \p opt::item_counter option is not intended for concurrent containers + and may be used only if it is explicitly noted. + */ + class sequential_item_counter + { + public: + typedef size_t counter_type ; ///< Counter type + protected: + counter_type m_nCounter ; ///< Counter + + public: + sequential_item_counter() + : m_nCounter(0) + {} + + /// Returns current value of the counter + counter_type value() const + { + return m_nCounter; + } + + /// Same as \ref value() with relaxed memory ordering + operator counter_type() const + { + return value(); + } + + /// Increments the counter. Semantics: postincrement + counter_type inc() + { + return m_nCounter++; + } + + /// Decrements the counter. 
Semantics: postdecrement + counter_type dec() + { + return m_nCounter--; + } + + /// Preincrement + counter_type operator ++() + { + return inc() + 1; + } + /// Postincrement + counter_type operator ++(int) + { + return inc(); + } + + /// Predecrement + counter_type operator --() + { + return dec() - 1; + } + /// Postdecrement + counter_type operator --(int) + { + return dec(); + } + + /// Resets count to 0 + void reset() + { + m_nCounter = 0; + } + }; + + /// Relaxed memory ordering \p opt::memory_model + /** + In this ordering the memory constraints are defined according to C++ Memory Model specification: + each constraint is mapped to \p std::memory_order constraints one-to-one + */ + struct relaxed_ordering { + //@cond + static const atomics::memory_order memory_order_relaxed = atomics::memory_order_relaxed; + static const atomics::memory_order memory_order_consume = atomics::memory_order_consume; + static const atomics::memory_order memory_order_acquire = atomics::memory_order_acquire; + static const atomics::memory_order memory_order_release = atomics::memory_order_release; + static const atomics::memory_order memory_order_acq_rel = atomics::memory_order_acq_rel; + static const atomics::memory_order memory_order_seq_cst = atomics::memory_order_seq_cst; + //@endcond + }; + + /// Sequential consistent \p opt::memory_memory ordering + /** + In this memory model any memory constraint is equivalent to \p std::memory_order_seq_cst. 
+ */ + struct sequential_consistent { + //@cond + static const atomics::memory_order memory_order_relaxed = atomics::memory_order_seq_cst; + static const atomics::memory_order memory_order_consume = atomics::memory_order_seq_cst; + static const atomics::memory_order memory_order_acquire = atomics::memory_order_seq_cst; + static const atomics::memory_order memory_order_release = atomics::memory_order_seq_cst; + static const atomics::memory_order memory_order_acq_rel = atomics::memory_order_seq_cst; + static const atomics::memory_order memory_order_seq_cst = atomics::memory_order_seq_cst; + //@endcond + }; + + //@cond + /// Totally relaxed \p opt::memory_model ordering (do not use!) + /** + In this memory model any memory constraint is equivalent to \p std::memory_order_relaxed. + @warning Do not use this model! It intended for testing purposes only + to verify debugging instruments like Thread Sanitizer. + */ + struct total_relaxed_ordering { + static const atomics::memory_order memory_order_relaxed = atomics::memory_order_relaxed; + static const atomics::memory_order memory_order_consume = atomics::memory_order_relaxed; + static const atomics::memory_order memory_order_acquire = atomics::memory_order_relaxed; + static const atomics::memory_order memory_order_release = atomics::memory_order_relaxed; + static const atomics::memory_order memory_order_acq_rel = atomics::memory_order_relaxed; + static const atomics::memory_order memory_order_seq_cst = atomics::memory_order_relaxed; + }; + //@endcond + + + /// Default swap policy for \p opt::swap_policy option + /** + The default swap policy is wrappr around \p std::swap algorithm. 
+ */ + struct default_swap_policy { + /// Performs swapping of \p v1 and \p v2 using \p std::swap algo + template + void operator()( T& v1, T& v2 ) const + { + std::swap( v1, v2 ); + } + }; + + /// \p opt::move_policy based on move-assignment operator + struct assignment_move_policy + { + /// dest = std::move( src ) + template + void operator()( T& dest, T&& src ) const + { + dest = std::move( src ); + } + }; + + /// \p rand() -base random number generator for \p opt::random_engine + /** + This generator returns a pseudorandom integer in the range 0 to \p RAND_MAX (32767). + */ + struct c_rand { + typedef unsigned int result_type; ///< Result type + + /// Constructor initializes object calling \p std::srand() + c_rand() + { + std::srand(1); + } + + /// Returns next random number calling \p std::rand() + result_type operator()() + { + return (result_type) std::rand(); + } + }; + } // namespace v + +}} // namespace cds::opt + + +// **************************************************** +// Options metafunctions + +namespace cds { namespace opt { + + //@cond + namespace details { + template + struct do_pack + { + // Use "pack" member template to pack options + typedef typename Option::template pack type; + }; + + template class typelist; + + template struct typelist_head; + template + struct typelist_head< typelist > { + typedef Head type; + }; + template + struct typelist_head< typelist > { + typedef Head type; + }; + + template struct typelist_tail; + template + struct typelist_tail< typelist > { + typedef typelist type; + }; + template + struct typelist_tail< typelist > { + typedef typelist<> type; + }; + + template + struct make_options_impl { + typedef typename make_options_impl< + typename do_pack< + OptionList, + typename typelist_head< Typelist >::type + >::type, + typename typelist_tail::type + >::type type; + }; + + template + struct make_options_impl > { + typedef OptionList type; + }; + } // namespace details + //@endcond + + /// make_options metafunction + 
/** @headerfile cds/opt/options.h + + The metafunction converts option list \p Options to traits structure. + The result of metafunction is \p type. + + Template parameter \p OptionList is default option set (default traits). + \p Options is option list. + */ + template + struct make_options { +#ifdef CDS_DOXYGEN_INVOKED + typedef implementation_defined type ; ///< Result of the metafunction +#else + typedef typename details::make_options_impl< OptionList, details::typelist >::type type; +#endif + }; + + + // ***************************************************************** + // find_type_traits metafunction + // ***************************************************************** + + //@cond + namespace details { + template + struct find_type_traits_option; + + template <> + struct find_type_traits_option<> { + typedef cds::opt::none type; + }; + + template + struct find_type_traits_option< Any > { + typedef cds::opt::none type; + }; + + template + struct find_type_traits_option< cds::opt::type_traits< Any > > { + typedef Any type; + }; + + template + struct find_type_traits_option< cds::opt::type_traits< Any >, Options... > { + typedef Any type; + }; + + template + struct find_type_traits_option< Any, Options... > { + typedef typename find_type_traits_option< Options... >::type type; + }; + } // namespace details + //@endcond + + /// Metafunction to find opt::type_traits option in \p Options list + /** @headerfile cds/opt/options.h + + If \p Options contains \p opt::type_traits option then it is the metafunction result. + Otherwise the result is \p DefaultOptons. 
+ */ + template + struct find_type_traits { + typedef typename select_default< typename details::find_type_traits_option::type, DefaultOptions>::type type ; ///< Metafunction result + }; + + + // ***************************************************************** + // find_option metafunction + // ***************************************************************** + + //@cond + namespace details { + template + struct find_option; + + struct compare_ok; + struct compare_fail; + + template + struct compare_option + { + typedef compare_fail type; + }; + + template