+FOLLY_MALLOC_NOINLINE inline bool usingJEMalloc() noexcept {
+ // Checking for rallocx != nullptr is not sufficient; we may be in a
+ // dlopen()ed module that depends on libjemalloc, so rallocx is resolved, but
+ // the main program might be using a different memory allocator.
+ // How do we determine that we're using jemalloc? In the hackiest
+ // way possible. We allocate memory using malloc() and see if the
+ // per-thread counter of allocated memory increases. This makes me
+ // feel dirty inside. Also note that this requires jemalloc to have
+ // been compiled with --enable-stats.
+ //
+ // C++11 "magic static" initialization guarantees the probe below runs
+ // exactly once per process (thread-safely); every later call just returns
+ // the cached result.
+ static const bool result = [] () noexcept {
+ // Some platforms (*cough* OSX *cough*) require weak symbol checks to be
+ // in the form if (mallctl != nullptr). Not if (mallctl) or if (!mallctl)
+ // (!!). http://goo.gl/xpmctm
+ // If any of the jemalloc non-standard entry points is unresolved, jemalloc
+ // cannot be the active allocator.
+ if (mallocx == nullptr || rallocx == nullptr || xallocx == nullptr
+ || sallocx == nullptr || dallocx == nullptr || sdallocx == nullptr
+ || nallocx == nullptr || mallctl == nullptr
+ || mallctlnametomib == nullptr || mallctlbymib == nullptr) {
+ return false;
+ }
+
+ // "volatile" because gcc optimizes out the reads from *counter, because
+ // it "knows" malloc doesn't modify global state...
+ /* nolint */ volatile uint64_t* counter;
+ size_t counterLen = sizeof(uint64_t*);
+
+ // Ask jemalloc for a pointer to this thread's cumulative allocated-bytes
+ // counter ("thread.allocatedp"). A non-zero return means the control is
+ // unavailable (non-jemalloc allocator, or a build without stats), so we
+ // conclude we are not using jemalloc.
+ if (mallctl("thread.allocatedp", static_cast<void*>(&counter), &counterLen,
+ nullptr, 0) != 0) {
+ return false;
+ }
+
+ // Sanity check: the reported out-size must match the pointer-sized slot
+ // we asked mallctl to fill; anything else means a protocol mismatch.
+ if (counterLen != sizeof(uint64_t*)) {
+ return false;
+ }
+
+ // Snapshot the counter before the probe allocation.
+ uint64_t origAllocated = *counter;
+
+ // Static because otherwise clever compilers will find out that
+ // the ptr is not used and does not escape the scope, so they will
+ // just optimize away the malloc.
+ // This single byte is intentionally leaked (once per process).
+ static const void* ptr = malloc(1);
+ if (!ptr) {
+ // wtf, failing to allocate 1 byte
+ return false;
+ }
+
+ // If the per-thread counter advanced, jemalloc's stats accounting
+ // observed our malloc(), so jemalloc is the active allocator.
+ return (origAllocated != *counter);
+ }();
+
+ return result;