Move AtomicUnorderedInsertMap to folly.
diff --git a/folly/detail/AtomicUnorderedMapUtils.h b/folly/detail/AtomicUnorderedMapUtils.h
new file mode 100644
index 0000000..f573797
--- /dev/null
+++ b/folly/detail/AtomicUnorderedMapUtils.h
@@ -0,0 +1,65 @@
+#pragma once
+
+#include <atomic>
+#include <cassert>
+#include <cerrno>
+#include <system_error>
+#include <type_traits>
+
+#include <stdint.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+namespace folly { namespace detail {
+
+class MMapAlloc {
+ private:
+  size_t computeSize(size_t size) {
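+    // Round size up to a whole number of pages, since mmap() deals in
+    // page-sized chunks (e.g. 5000 bytes -> 8192 with 4096-byte pages).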
+    long pagesize = sysconf(_SC_PAGESIZE);
+    size_t mmapLength = ((size - 1) & ~(pagesize - 1)) + pagesize;
+    assert(size <= mmapLength && mmapLength < size + pagesize);
+    assert((mmapLength % pagesize) == 0);
+    return mmapLength;
+  }
+
+ public:
+  void* allocate(size_t size) {
+    auto len = computeSize(size);
+
+    // MAP_HUGETLB is a perf win, but requires cooperation from the
+    // deployment environment (and a change to computeSize()).
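+    // MAP_POPULATE prefaults the mapping's page tables, so the first
+    // writes to the region don't stall on soft page faults.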
+    void* mem = mmap(
+        nullptr,
+        len,
+        PROT_READ | PROT_WRITE,
+        MAP_PRIVATE | MAP_ANONYMOUS | MAP_POPULATE,
+        -1,
+        0);
+    if (mem == MAP_FAILED) {
+      throw std::system_error(errno, std::system_category());
+    }
+
+    return mem;
+  }
+
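+  // size must be the value that was passed to allocate(), so that
+  // computeSize() reproduces the original mapping length.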
+  void deallocate(void* p, size_t size) {
+    auto len = computeSize(size);
+    munmap(p, len);
+  }
+};
+
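+// Trait that advertises whether an allocator returns memory that is
+// already zero-filled, letting callers skip an explicit clear.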
+template<typename Allocator>
+struct GivesZeroFilledMemory : public std::false_type {};
+
+template<>
+struct GivesZeroFilledMemory<MMapAlloc> : public std::true_type {};
+
+}}
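
For reference, a minimal standalone sketch (not part of the commit) showing how MMapAlloc and the GivesZeroFilledMemory trait can be exercised directly; the test program and its 5000-byte request size are illustrative assumptions:

#include <folly/detail/AtomicUnorderedMapUtils.h>

#include <cassert>
#include <cstring>

int main() {
  folly::detail::MMapAlloc alloc;

  // Request a little over one page; computeSize() rounds the actual
  // mapping up to the next page boundary.
  size_t n = 5000;
  void* p = alloc.allocate(n);

  // Anonymous mmap memory is zero-filled by the kernel; the trait
  // specialization above advertises exactly that property.
  static_assert(
      folly::detail::GivesZeroFilledMemory<folly::detail::MMapAlloc>::value,
      "MMapAlloc should report zero-filled memory");
  assert(static_cast<unsigned char*>(p)[0] == 0);

  std::memset(p, 0xff, n);  // use the mapping
  alloc.deallocate(p, n);   // size must match the allocate() call
  return 0;
}

The trait is what lets a consumer such as AtomicUnorderedInsertMap detect at compile time that freshly allocated memory is already zeroed and skip an explicit clear.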