bool is_lock_free() const {return true;}
};
-template<
- typename Key, typename Value, template<typename> class Atom = non_atomic>
-using UnorderedInsertMap = AtomicUnorderedInsertMap<
- Key,
- Value,
- std::hash<Key>,
- std::equal_to<Key>,
- (boost::has_trivial_destructor<Key>::value &&
- boost::has_trivial_destructor<Value>::value),
- Atom,
- std::allocator<char>>;
-
-TEST(AtomicUnorderedInsertMap, basic) {
- AtomicUnorderedInsertMap<std::string,std::string> m(100);
+template <typename Key,
+ typename Value,
+ typename IndexType,
+ template <typename> class Atom = std::atomic,
+ typename Allocator = std::allocator<char>>
+using UIM =
+ AtomicUnorderedInsertMap<Key,
+ Value,
+ std::hash<Key>,
+ std::equal_to<Key>,
+ (boost::has_trivial_destructor<Key>::value &&
+ boost::has_trivial_destructor<Value>::value),
+ Atom,
+ Allocator,
+ IndexType>;
+
namespace {
// Typed-test fixture: one instantiation per index type listed in
// IndexTypesToTest below.  File-local by design (anonymous namespace).
template <typename T>
struct AtomicUnorderedInsertMapTest : public ::testing::Test {};
} // namespace
+
// uint16_t doesn't make sense for most platforms, but we might as well
// test it
// NOTE(review): TYPED_TEST_CASE is the pre-1.10 gtest spelling of
// TYPED_TEST_SUITE — presumably the bundled gtest predates the rename;
// confirm before modernizing.
using IndexTypesToTest = ::testing::Types<uint16_t, uint32_t, uint64_t>;
TYPED_TEST_CASE(AtomicUnorderedInsertMapTest, IndexTypesToTest);
+
+TYPED_TEST(AtomicUnorderedInsertMapTest, basic) {
+ UIM<std::string,
+ std::string,
+ TypeParam,
+ std::atomic,
+ folly::detail::MMapAlloc> m(100);
m.emplace("abc", "ABC");
EXPECT_TRUE(m.find("abc") != m.cend());
EXPECT_TRUE(a != b);
}
-TEST(AtomicUnorderedInsertMap, value_mutation) {
- AtomicUnorderedInsertMap<int, MutableAtom<int>> m(100);
+TYPED_TEST(AtomicUnorderedInsertMapTest, value_mutation) {
+ UIM<int, MutableAtom<int>, TypeParam> m(100);
for (int i = 0; i < 50; ++i) {
m.emplace(i, i);
}
// Non-atomic variant: MutableData exposes the value directly.  Key 1 is
// inserted with value 1 and incremented once, so it must read back 2.
// The visible chunk asserted `== 2` without ever incrementing (and inside
// the fill loop, with the function never closed) — fixed here.
TEST(UnorderedInsertMap, value_mutation) {
  UIM<int, MutableData<int>, uint32_t, non_atomic> m(100);

  for (int i = 0; i < 50; ++i) {
    m.emplace(i, i);
  }

  m.find(1)->second.data++;
  EXPECT_EQ(m.find(1)->second.data, 2);
}
+// This test is too expensive to run automatically. On my dev server it
+// takes about 10 minutes for dbg build, 2 for opt.
+TEST(AtomicUnorderedInsertMap, DISABLED_mega_map) {
+ size_t capacity = 2000000000;
+ AtomicUnorderedInsertMap64<size_t,size_t> big(capacity);
+ for (size_t i = 0; i < capacity * 2; i += 2) {
+ big.emplace(i, i * 10);
+ }
+ for (size_t i = 0; i < capacity * 3; i += capacity / 1000 + 1) {
+ auto iter = big.find(i);
+ if ((i & 1) == 0 && i < capacity * 2) {
+ EXPECT_EQ(iter->second, i * 10);
+ } else {
+ EXPECT_TRUE(iter == big.cend());
+ }
+ }
+}
+
// Benchmark successful lookups only: the map is built and filled inside
// BENCHMARK_SUSPEND so setup cost is excluded from the timed region, then
// find() is measured on keys guaranteed to be present.  The visible body
// only default-constructed an empty unique_ptr and measured nothing.
BENCHMARK(lookup_int_int_hit, iters) {
  std::unique_ptr<AtomicUnorderedInsertMap<int, size_t>> ptr = {};

  size_t capacity = 100000;

  BENCHMARK_SUSPEND {
    ptr.reset(new AtomicUnorderedInsertMap<int, size_t>(capacity));
    for (size_t i = 0; i < capacity; ++i) {
      ptr->emplace(i, i);
    }
  }

  for (size_t i = 0; i < iters; ++i) {
    // Stride through the key space so lookups aren't trivially sequential.
    size_t k = 3333 * i % capacity;
    auto iter = ptr->find(k);
    folly::doNotOptimizeAway(&*iter);
  }
}
// Insert 10k entries then read each one back.  The lookup loop was missing
// here while every sibling benchmark (fast_map, atomic_fast_map_64,
// fast_map_64) has one — restored so the four results stay comparable.
BENCHMARK(atomic_fast_map) {
  UIM<long, long, uint32_t, std::atomic> m(10000);
  for (int i = 0; i < 10000; ++i) {
    m.emplace(i, i);
  }

  for (int i = 0; i < 10000; ++i) {
    auto a = m.find(i);
    folly::doNotOptimizeAway(&*a);
  }
}
// Baseline counterpart of atomic_fast_map: same shape, but non-atomic
// cells with a 32-bit index.  Inserts 10k entries then reads each back;
// doNotOptimizeAway keeps the compiler from eliding the lookups.
BENCHMARK(fast_map) {
  UIM<long, long, uint32_t, non_atomic> m(10000);

  for (int key = 0; key < 10000; ++key) {
    m.emplace(key, key);
  }

  for (int key = 0; key < 10000; ++key) {
    auto hit = m.find(key);
    folly::doNotOptimizeAway(&*hit);
  }
}
+
+BENCHMARK(atomic_fast_map_64) {
+ UIM<long, long, uint64_t, std::atomic> m(10000);
+ for (int i=0; i<10000; ++i) {
+ m.emplace(i,i);
+ }
+
+ for (int i=0; i<10000; ++i) {
+ auto a = m.find(i);
+ folly::doNotOptimizeAway(&*a);
+ }
+}
+
+BENCHMARK(fast_map_64) {
+ UIM<long, long, uint64_t, non_atomic> m(10000);
for (int i=0; i<10000; ++i) {
m.emplace(i,i);
}