/*
 * Copyright 2017 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// Test configuration: turn on hazptr debug printing (DEBUG_PRINT) and stats
// collection, and lower the scan threshold so reclamation scans trigger even
// under these small test workloads.
// NOTE(review): these must precede the hazptr includes below to take effect.
16 #define HAZPTR_DEBUG true
17 #define HAZPTR_STATS true
18 #define HAZPTR_SCAN_THRESHOLD 10
20 #include <folly/experimental/hazptr/debug.h>
21 #include <folly/experimental/hazptr/example/LockFreeLIFO.h>
22 #include <folly/experimental/hazptr/example/MWMRSet.h>
23 #include <folly/experimental/hazptr/example/SWMRList.h>
24 #include <folly/experimental/hazptr/example/WideCAS.h>
25 #include <folly/experimental/hazptr/hazptr.h>
26 #include <folly/experimental/hazptr/test/HazptrUse1.h>
27 #include <folly/experimental/hazptr/test/HazptrUse2.h>
29 #include <folly/portability/GFlags.h>
30 #include <folly/portability/GTest.h>
// Command-line knobs shared by the multi-threaded tests below.
34 DEFINE_int32(num_threads, 5, "Number of threads");
35 DEFINE_int64(num_reps, 1, "Number of test reps");
36 DEFINE_int64(num_ops, 10, "Number of ops or pairs of ops per rep");
// Bring hazptr names (hazptr_holder, hazptr_domain, retire, ...) into scope.
38 using namespace folly::hazptr;
// Test fixture: its only job is to log start/end markers delimiting each
// test's scope, so hazptr debug output can be correlated with a test run.
// NOTE(review): source appears truncated here -- the access specifier and
// the class's closing braces are not visible in this chunk.
40 class HazptrTest : public testing::Test {
42 HazptrTest() : Test() {
43 DEBUG_PRINT("========== start of test scope");
45 ~HazptrTest() override {
46 DEBUG_PRINT("========== end of test scope");
// Basic single-threaded hazptr exercise with Node1: placement-new over
// malloc'd storage, try_protect against three domains (default, a plain
// custom domain, and one backed by a user memory resource), then retire.
// NOTE(review): source appears truncated -- e.g. hptr0's declaration
// (orig. line 82), n0's retire call, and closing braces are not visible.
50 TEST_F(HazptrTest, Test1) {
// Four nodes allocated with malloc + placement new.
52 Node1* node0 = (Node1*)malloc(sizeof(Node1));
53 node0 = new (node0) Node1;
54 DEBUG_PRINT("=== malloc node0 " << node0 << " " << sizeof(*node0));
55 Node1* node1 = (Node1*)malloc(sizeof(Node1));
56 node1 = new (node1) Node1;
57 DEBUG_PRINT("=== malloc node1 " << node1 << " " << sizeof(*node1));
58 Node1* node2 = (Node1*)malloc(sizeof(Node1));
59 node2 = new (node2) Node1;
60 DEBUG_PRINT("=== malloc node2 " << node2 << " " << sizeof(*node2));
61 Node1* node3 = (Node1*)malloc(sizeof(Node1));
62 node3 = new (node3) Node1;
63 DEBUG_PRINT("=== malloc node3 " << node3 << " " << sizeof(*node3));
// Shared atomic pointers that the hazard pointers will protect.
67 std::atomic<Node1*> shared0 = {node0};
68 std::atomic<Node1*> shared1 = {node1};
69 std::atomic<Node1*> shared2 = {node2};
70 std::atomic<Node1*> shared3 = {node3};
// A custom memory resource and two custom domains (myDomain1 uses myMr).
72 MyMemoryResource myMr;
73 DEBUG_PRINT("=== myMr " << &myMr);
74 hazptr_domain myDomain0;
75 DEBUG_PRINT("=== myDomain0 " << &myDomain0);
76 hazptr_domain myDomain1(&myMr);
77 DEBUG_PRINT("=== myDomain1 " << &myDomain1);
// Hazard pointer holders bound to the different domains.
81 DEBUG_PRINT("=== hptr0");
83 DEBUG_PRINT("=== hptr1");
84 hazptr_holder hptr1(myDomain0);
85 DEBUG_PRINT("=== hptr2");
86 hazptr_holder hptr2(myDomain1);
87 DEBUG_PRINT("=== hptr3");
// Snapshot the shared pointers, then protect the snapshots.
92 Node1* n0 = shared0.load();
93 Node1* n1 = shared1.load();
94 Node1* n2 = shared2.load();
95 Node1* n3 = shared3.load();
97 CHECK(hptr0.try_protect(n0, shared0));
98 CHECK(hptr1.try_protect(n1, shared1));
// Clearing a holder releases its protection.
100 hptr1.reset(nullptr);
102 CHECK(hptr2.try_protect(n3, shared3));
// Retire the nodes through their respective domains; reclamation is
// deferred for any node still protected by a hazard pointer.
108 DEBUG_PRINT("=== retire n0 " << n0);
110 DEBUG_PRINT("=== retire n1 " << n1);
111 n1->retire(default_hazptr_domain());
112 DEBUG_PRINT("=== retire n2 " << n2);
113 n2->retire(myDomain0);
114 DEBUG_PRINT("=== retire n3 " << n3);
115 n3->retire(myDomain1);
// Same shape as Test1 but with Node2 and explicit reclamation functions
// passed to retire(): node0 is new'd (reclaimed via mineReclaimFnDelete),
// the rest are malloc + placement-new'd (reclaimed via mineReclaimFnFree).
// NOTE(review): source appears truncated -- hptr0's declaration and some
// intervening lines are not visible.
118 TEST_F(HazptrTest, Test2) {
119 Node2* node0 = new Node2;
120 DEBUG_PRINT("=== new node0 " << node0 << " " << sizeof(*node0));
121 Node2* node1 = (Node2*)malloc(sizeof(Node2));
122 node1 = new (node1) Node2;
123 DEBUG_PRINT("=== malloc node1 " << node1 << " " << sizeof(*node1));
124 Node2* node2 = (Node2*)malloc(sizeof(Node2));
125 node2 = new (node2) Node2;
126 DEBUG_PRINT("=== malloc node2 " << node2 << " " << sizeof(*node2));
127 Node2* node3 = (Node2*)malloc(sizeof(Node2));
128 node3 = new (node3) Node2;
129 DEBUG_PRINT("=== malloc node3 " << node3 << " " << sizeof(*node3));
// Shared atomic pointers to protect.
133 std::atomic<Node2*> shared0 = {node0};
134 std::atomic<Node2*> shared1 = {node1};
135 std::atomic<Node2*> shared2 = {node2};
136 std::atomic<Node2*> shared3 = {node3};
// Custom memory resource and domains (mineDomain1 uses mineMr).
138 MineMemoryResource mineMr;
139 DEBUG_PRINT("=== mineMr " << &mineMr);
140 hazptr_domain mineDomain0;
141 DEBUG_PRINT("=== mineDomain0 " << &mineDomain0);
142 hazptr_domain mineDomain1(&mineMr);
143 DEBUG_PRINT("=== mineDomain1 " << &mineDomain1);
// Holders bound to the different domains.
147 DEBUG_PRINT("=== hptr0");
149 DEBUG_PRINT("=== hptr1");
150 hazptr_holder hptr1(mineDomain0);
151 DEBUG_PRINT("=== hptr2");
152 hazptr_holder hptr2(mineDomain1);
153 DEBUG_PRINT("=== hptr3");
// Snapshot and protect.
158 Node2* n0 = shared0.load();
159 Node2* n1 = shared1.load();
160 Node2* n2 = shared2.load();
161 Node2* n3 = shared3.load();
163 CHECK(hptr0.try_protect(n0, shared0));
164 CHECK(hptr1.try_protect(n1, shared1));
167 CHECK(hptr2.try_protect(n3, shared3));
// Retire with explicit reclaim functions matching each node's allocation:
// delete for the new'd node0, free for the malloc'd nodes.
173 DEBUG_PRINT("=== retire n0 " << n0);
174 n0->retire(default_hazptr_domain(), &mineReclaimFnDelete);
175 DEBUG_PRINT("=== retire n1 " << n1);
176 n1->retire(default_hazptr_domain(), &mineReclaimFnFree);
177 DEBUG_PRINT("=== retire n2 " << n2);
178 n2->retire(mineDomain0, &mineReclaimFnFree);
179 DEBUG_PRINT("=== retire n3 " << n3);
180 n3->retire(mineDomain1, &mineReclaimFnFree);
// Multi-threaded stress of the LockFreeLIFO example: ops are striped across
// threads by thread id, each thread spinning on pop until its op succeeds.
// NOTE(review): source appears truncated -- the stack declaration, the push
// call, the join call, and closing braces are not visible.
183 TEST_F(HazptrTest, LIFO) {
185 CHECK_GT(FLAGS_num_threads, 0);
186 for (int i = 0; i < FLAGS_num_reps; ++i) {
187 DEBUG_PRINT("========== start of rep scope");
189 std::vector<std::thread> threads(FLAGS_num_threads);
190 for (int tid = 0; tid < FLAGS_num_threads; ++tid) {
191 threads[tid] = std::thread([&s, tid]() {
// Thread tid handles ops j = tid, tid + num_threads, tid + 2*num_threads...
192 for (int j = tid; j < FLAGS_num_ops; j += FLAGS_num_threads) {
// Spin until a pop succeeds.
195 while (!s.pop(res)) {
201 for (auto& t : threads) {
204 DEBUG_PRINT("========== end of rep scope");
// Stress of the single-writer multi-reader list example: reader threads run
// striped lookups while the main thread (the single writer) performs two
// batches of 10 list operations.
// NOTE(review): source appears truncated -- the list declaration, the
// reader/writer operation calls, the join, and closing braces are missing.
208 TEST_F(HazptrTest, SWMRLIST) {
211 CHECK_GT(FLAGS_num_threads, 0);
212 for (int i = 0; i < FLAGS_num_reps; ++i) {
213 DEBUG_PRINT("========== start of rep scope");
215 std::vector<std::thread> threads(FLAGS_num_threads);
216 for (int tid = 0; tid < FLAGS_num_threads; ++tid) {
217 threads[tid] = std::thread([&s, tid]() {
// Reader thread: ops striped by thread id.
218 for (int j = tid; j < FLAGS_num_ops; j += FLAGS_num_threads) {
// Writer (main thread): two batches of 10 operations on the list.
223 for (int j = 0; j < 10; ++j) {
226 for (int j = 0; j < 10; ++j) {
229 for (auto& t : threads) {
232 DEBUG_PRINT("========== end of rep scope");
// Stress of the multi-writer multi-reader set example, with the same
// thread-striping pattern as the LIFO/SWMRLIST tests plus two batches of 10
// operations from the main thread.
// NOTE(review): source appears truncated -- the set declaration, operation
// calls, join, and closing braces are not visible.
236 TEST_F(HazptrTest, MWMRSet) {
239 CHECK_GT(FLAGS_num_threads, 0);
240 for (int i = 0; i < FLAGS_num_reps; ++i) {
241 DEBUG_PRINT("========== start of rep scope");
243 std::vector<std::thread> threads(FLAGS_num_threads);
244 for (int tid = 0; tid < FLAGS_num_threads; ++tid) {
245 threads[tid] = std::thread([&s, tid]() {
// Worker thread: ops striped by thread id.
246 for (int j = tid; j < FLAGS_num_ops; j += FLAGS_num_threads) {
// Main thread: two batches of 10 set operations.
253 for (int j = 0; j < 10; ++j) {
256 for (int j = 0; j < 10; ++j) {
259 for (auto& t : threads) {
262 DEBUG_PRINT("========== end of rep scope");
// Wide (multi-word) CAS example: swaps string values through s.cas().
// NOTE(review): heavily truncated -- the declarations of s and u and the
// checks on ret are not visible in this chunk.
266 TEST_F(HazptrTest, WIDECAS) {
269 std::string v = "11112222";
270 auto ret = s.cas(u, v);
// Retires hazptr_obj_base-derived objects in a loop and checks that a
// protected object's field still holds its value (bar->a == i).
// NOTE(review): heavily truncated -- the struct body, the assignment to
// bar->a, the protection, and the retire call are not visible.
286 TEST_F(HazptrTest, VirtualTest) {
287 struct Thing : public hazptr_obj_base<Thing> {
289 DEBUG_PRINT("this: " << this << " &a: " << &a << " a: " << a);
293 for (int i = 0; i < 100; i++) {
294 auto bar = new Thing;
300 EXPECT_EQ(bar->a, i);
// Builds a 2000-node singly linked list of Things, then retires only the
// head; each Thing's destructor retires its successor into the same domain
// (line with next->retire below), so reclamation of the head chains through
// the whole list.
// NOTE(review): source appears truncated -- Thing's remaining members, the
// destructor's signature/guard, and closing braces are not visible.
304 void destructionTest(hazptr_domain& domain) {
305 struct Thing : public hazptr_obj_base<Thing> {
307 hazptr_domain* domain;
309 Thing(int v, Thing* n, hazptr_domain* d) : next(n), domain(d), val(v) {}
311 DEBUG_PRINT("this: " << this << " val: " << val << " next: " << next);
// On destruction, hand the successor to the domain for retirement.
313 next->retire(*domain);
317 Thing* last{nullptr};
// Build the list front-to-back; `last` ends up as the head.
318 for (int i = 0; i < 2000; i++) {
319 last = new Thing(i, last, &domain);
// Kick off the chain by retiring the head.
321 last->retire(domain);
// Runs destructionTest against both a local domain (whose objects are
// reclaimed when the domain is destroyed at scope exit) and the default
// global domain.
324 TEST_F(HazptrTest, DestructionTest) {
326 hazptr_domain myDomain0;
327 destructionTest(myDomain0);
329 destructionTest(default_hazptr_domain());
// Verifies hazptr_holder move semantics: move construction, self-move
// assignment (a no-op), and move assignment must all keep the originally
// protected object protected.
// NOTE(review): source appears truncated -- Foo's body, the initial
// protection via hptr0, and the retire/verification tail are not visible.
332 TEST_F(HazptrTest, Move) {
333 struct Foo : hazptr_obj_base<Foo> {
336 for (int i = 0; i < 100; ++i) {
344 // Move constructor - still protected
345 hazptr_holder hptr1(std::move(hptr0));
346 // Self move is no-op - still protected
347 hazptr_holder* phptr1 = &hptr1;
348 CHECK_EQ(phptr1, &hptr1);
349 hptr1 = std::move(*phptr1);
// A holder constructed with nullptr is empty (owns no hazard pointer).
351 hazptr_holder hptr2(nullptr);
352 // Move assignment - still protected
353 hptr2 = std::move(hptr1);
356 // Unprotect object - hptr2 is nonempty
// hazptr_array: bulk acquisition of hazard pointers -- a default-constructed
// array of 10, an empty (nullptr) array, and one sized past the thread
// cache (HAZPTR_TC_SIZE + 1) to exercise the non-cached path.
// NOTE(review): source appears truncated -- the protection calls and
// assertions between these lines are not visible.
361 TEST_F(HazptrTest, Array) {
362 struct Foo : hazptr_obj_base<Foo> {
365 for (int i = 0; i < 100; ++i) {
368 hazptr_array<10> hptr;
372 hazptr_array<10> h(nullptr);
377 // Unprotect object - hptr2 is nonempty
// Exceeding HAZPTR_TC_SIZE forces acquisition beyond the thread cache.
382 hazptr_array<HAZPTR_TC_SIZE + 1> h;
// hazptr_local: like the Array test but for the local (scoped) hazard
// pointer container, including a size past the thread cache.
// NOTE(review): source appears truncated -- protection calls and assertions
// between these lines are not visible.
386 TEST_F(HazptrTest, Local) {
387 struct Foo : hazptr_obj_base<Foo> {
390 for (int i = 0; i < 100; ++i) {
393 hazptr_local<10> hptr;
398 // Unprotect object - hptr2 is nonempty
// Exceeding HAZPTR_TC_SIZE forces acquisition beyond the thread cache.
403 hazptr_local<HAZPTR_TC_SIZE + 1> h;
407 /* Test ref counting */
// Global counters checked by the refcount tests to verify that every
// constructed Foo is eventually destroyed exactly once.
409 std::atomic<int> constructed;
410 std::atomic<int> destroyed;
// Ref-counted linked-list node. NOTE(review): truncated -- the fields, the
// destructor head, and the ctor body are not fully visible; the visible
// fragment releases the ref on next in the destructor path.
412 struct Foo : hazptr_obj_base_refcounted<Foo> {
416 Foo(int v, Foo* n) : val_(v), marked_(false), next_(n) {
428 if (!next->release_ref()) {
// Trivial retirable object; presumably retired in bulk by the tests below
// to push the domain past the scan threshold -- see the "retire enough
// objects" loops in basic_refcount/mt_refcount.
439 struct Dummy : hazptr_obj_base<Dummy> {};
// Single-threaded refcount test: build a list of Foos taking extra safe
// refs, traverse and validate values, release refs (head's release must not
// be the last reference yet), then retire filler objects to force
// reclamation and check construction/destruction counts balance.
// NOTE(review): source appears truncated -- list construction, the retire
// calls in the filler loop, and closing braces are not visible.
441 TEST_F(HazptrTest, basic_refcount) {
442 constructed.store(0);
447 for (int i = 0; i < num; ++i) {
// Extra reference taken while single-threaded (safe variant).
450 p->acquire_ref_safe();
457 for (auto q = p->next_; q; q = q->next_) {
461 for (auto q = p; q; q = q->next_) {
464 CHECK_EQ(q->val_, v);
// Releasing the head's ref must not drop the last reference yet...
466 CHECK(!p->release_ref());
// ...so nothing has been destroyed at this point.
467 CHECK_EQ(constructed.load(), num);
468 CHECK_EQ(destroyed.load(), 0);
470 CHECK_EQ(constructed.load(), num);
471 CHECK_EQ(destroyed.load(), 0);
474 /* retire enough objects to guarantee reclamation of Foo objects */
475 for (int i = 0; i < 100; ++i) {
// After forced reclamation, every constructed Foo must be destroyed.
480 CHECK_EQ(constructed.load(), num);
481 CHECK_EQ(destroyed.load(), num);
// Multi-threaded refcount test: reader threads protect the list head with
// get_protected and traverse while the main thread concurrently removes
// nodes; no Foo may be destroyed until the last reference is released, and
// all must be destroyed after forced reclamation.
// NOTE(review): source appears truncated -- the hptr declaration in the
// reader lambda, list publication, removal code, and joins are not visible.
484 TEST_F(HazptrTest, mt_refcount) {
485 constructed.store(0);
// ready: gate for readers; setHazptrs: count of readers that have
// protected the head; head: the shared list.
488 std::atomic<bool> ready(false);
489 std::atomic<int> setHazptrs(0);
490 std::atomic<Foo*> head;
494 std::vector<std::thread> thr(nthr);
495 for (int i = 0; i < nthr; ++i) {
496 thr[i] = std::thread([&] {
// Reader: spin until the main thread publishes the list...
497 while (!ready.load()) {
// ...then protect the head and traverse, concurrently with removal.
501 auto p = hptr.get_protected(head);
503 /* Concurrent with removal */
505 for (auto q = p; q; q = q->next_) {
508 CHECK_EQ(q->val_, v);
// Main thread: build the list with extra safe refs...
515 for (int i = 0; i < num; ++i) {
517 p->acquire_ref_safe();
// ...and wait until every reader has set its hazard pointer.
523 while (setHazptrs.load() < nthr) {
527 /* this is concurrent with traversal by reader */
529 for (auto q = p; q; q = q->next_) {
// While references are still held, nothing may have been destroyed.
532 DEBUG_PRINT("Foo should not be destroyed");
533 CHECK_EQ(constructed.load(), num);
534 CHECK_EQ(destroyed.load(), 0);
536 DEBUG_PRINT("Foo may be destroyed after releasing the last reference");
537 if (p->release_ref()) {
541 /* retire enough objects to guarantee reclamation of Foo objects */
542 for (int i = 0; i < 100; ++i) {
547 for (int i = 0; i < nthr; ++i) {
// Final balance check: every Foo constructed was destroyed.
551 CHECK_EQ(constructed.load(), num);
552 CHECK_EQ(destroyed.load(), num);
// Free-function retire API: hazptr_retire with a custom deleter lambda, and
// domain.retire() of an object that records its own destruction.
// NOTE(review): source appears truncated -- foo2's allocation, the delret
// struct's destructor (presumably sets *retired_; verify -- EXPECT_TRUE
// below depends on it), and any synchronization before the final check are
// not visible.
555 TEST_F(HazptrTest, FreeFunctionRetire) {
559 hazptr_retire(foo2, [](int* obj) { delete obj; });
561 bool retired = false;
// Local domain: objects retired into it are reclaimed no later than the
// domain's destruction at scope exit.
563 hazptr_domain myDomain0;
566 delret(bool* retire) : retired_(retire) {}
571 auto foo3 = new delret(&retired);
572 myDomain0.retire(foo3);
574 EXPECT_TRUE(retired);