From: Christopher Dykes
Date: Thu, 16 Feb 2017 21:08:47 +0000 (-0800)
Subject: Use std::this_thread::yield rather than sched_yield
X-Git-Tag: v2017.03.06.00~32
X-Git-Url: http://plrg.eecs.uci.edu/git/?p=folly.git;a=commitdiff_plain;h=79b78eaaf88ead9fa6543822c193b4e86d0e2395

Use std::this_thread::yield rather than sched_yield

Summary: They do the same thing, and the first is portable.

Reviewed By: yfeldblum

Differential Revision: D4569649

fbshipit-source-id: db0434766f674a7789d6e59335e122b4d2131e06
---

diff --git a/folly/RWSpinLock.h b/folly/RWSpinLock.h
index c4905d1d..63dac2f5 100644
--- a/folly/RWSpinLock.h
+++ b/folly/RWSpinLock.h
@@ -156,11 +156,11 @@ pthread_rwlock_t Read 728698 24us 101ns 7.28ms 194us
 #undef RW_SPINLOCK_USE_SSE_INSTRUCTIONS_
 #endif
 
+#include
 #include
 #include
-#include <sched.h>
+#include <thread>
 
-#include
 #include
 #include
 
@@ -194,7 +194,7 @@ class RWSpinLock {
   void lock() {
     int count = 0;
     while (!LIKELY(try_lock())) {
-      if (++count > 1000) sched_yield();
+      if (++count > 1000) std::this_thread::yield();
     }
   }
 
@@ -208,7 +208,7 @@
   void lock_shared() {
     int count = 0;
     while (!LIKELY(try_lock_shared())) {
-      if (++count > 1000) sched_yield();
+      if (++count > 1000) std::this_thread::yield();
     }
   }
 
@@ -226,7 +226,7 @@
   void lock_upgrade() {
     int count = 0;
     while (!try_lock_upgrade()) {
-      if (++count > 1000) sched_yield();
+      if (++count > 1000) std::this_thread::yield();
     }
   }
 
@@ -238,7 +238,7 @@
   void unlock_upgrade_and_lock() {
     int64_t count = 0;
     while (!try_unlock_upgrade_and_lock()) {
-      if (++count > 1000) sched_yield();
+      if (++count > 1000) std::this_thread::yield();
     }
   }
 
@@ -601,7 +601,7 @@ class RWTicketSpinLockT {
    * turns.
    */
   void writeLockAggressive() {
-    // sched_yield() is needed here to avoid a pathology if the number
+    // std::this_thread::yield() is needed here to avoid a pathology if the number
     // of threads attempting concurrent writes is >= the number of real
     // cores allocated to this process. This is less likely than the
     // corresponding situation in lock_shared(), but we still want to
@@ -610,7 +610,7 @@
     QuarterInt val = __sync_fetch_and_add(&ticket.users, 1);
     while (val != load_acquire(&ticket.write)) {
       asm_volatile_pause();
-      if (UNLIKELY(++count > 1000)) sched_yield();
+      if (UNLIKELY(++count > 1000)) std::this_thread::yield();
     }
   }
 
@@ -623,7 +623,7 @@
     // there are a lot of competing readers. The aggressive spinning
     // can help to avoid starving writers.
    //
-    // We don't worry about sched_yield() here because the caller
+    // We don't worry about std::this_thread::yield() here because the caller
     // has already explicitly abandoned fairness.
     while (!try_lock()) {}
   }
 
@@ -653,13 +653,13 @@
   }
 
   void lock_shared() {
-    // sched_yield() is important here because we can't grab the
+    // std::this_thread::yield() is important here because we can't grab the
     // shared lock if there is a pending writeLockAggressive, so we
     // need to let threads that already have a shared lock complete
     int count = 0;
     while (!LIKELY(try_lock_shared())) {
       asm_volatile_pause();
-      if (UNLIKELY((++count & 1023) == 0)) sched_yield();
+      if (UNLIKELY((++count & 1023) == 0)) std::this_thread::yield();
     }
   }
 
diff --git a/folly/configure.ac b/folly/configure.ac
index a38ea92c..f08d6c2f 100644
--- a/folly/configure.ac
+++ b/folly/configure.ac
@@ -564,7 +564,6 @@ AC_CHECK_FUNCS([getdelim \
                 memset \
                 pow \
                 strerror \
-                sched_yield \
                 malloc_size \
                 malloc_usable_size \
                 memrchr \
diff --git a/folly/io/async/EventBase.cpp b/folly/io/async/EventBase.cpp
index a493d928..941cdeec 100644
--- a/folly/io/async/EventBase.cpp
+++ b/folly/io/async/EventBase.cpp
@@ -27,6 +27,7 @@
 #include
 #include
 #include
+#include <thread>
 
 namespace folly {
 
@@ -231,7 +232,7 @@ getTimeDelta(std::chrono::steady_clock::time_point* prev) {
 
 void EventBase::waitUntilRunning() {
   while (!isRunning()) {
-    sched_yield();
+    std::this_thread::yield();
   }
 }
 
diff --git a/folly/test/CacheLocalityBenchmark.cpp b/folly/test/CacheLocalityBenchmark.cpp
index 9522be68..dbb2d6ac 100644
--- a/folly/test/CacheLocalityBenchmark.cpp
+++ b/folly/test/CacheLocalityBenchmark.cpp
@@ -16,11 +16,12 @@
 
 #include
 
-#include <sched.h>
 #include
 #include
 #include
+
 #include
+
 #include
 
 using namespace folly::detail;
@@ -167,7 +168,7 @@ static void contentionAtWidth(size_t iters, size_t stripes, size_t work) {
 
       ready++;
       while (!go.load()) {
-        sched_yield();
+        std::this_thread::yield();
       }
       std::atomic localWork(0);
       for (size_t i = iters; i > 0; --i) {
@@ -187,7 +188,7 @@
   }
 
   while (ready < numThreads) {
-    sched_yield();
+    std::this_thread::yield();
   }
   braces.dismiss();
   go = true;
@@ -208,7 +209,7 @@ static void atomicIncrBaseline(size_t iters,
   while (threads.size() < numThreads) {
     threads.push_back(std::thread([&]() {
       while (!go.load()) {
-        sched_yield();
+        std::this_thread::yield();
       }
       std::atomic localCounter(0);
       std::atomic localWork(0);
diff --git a/folly/test/SynchronizedTestLib-inl.h b/folly/test/SynchronizedTestLib-inl.h
index 4b1409d6..99706950 100644
--- a/folly/test/SynchronizedTestLib-inl.h
+++ b/folly/test/SynchronizedTestLib-inl.h
@@ -498,7 +498,7 @@ template void testConcurrency() {
     // Test lock()
     for (size_t n = 0; n < itersPerThread; ++n) {
       v.contextualLock()->push_back((itersPerThread * threadIdx) + n);
-      sched_yield();
+      std::this_thread::yield();
     }
   };
   runParallel(numThreads, pushNumbers);
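
A note on the pattern being edited: every call site in this diff uses the same
spin-then-yield backoff, which is why the change is a drop-in rename. As the
summary says, the two calls do the same thing: std::this_thread::yield()
(declared in <thread>) hints the scheduler to run other threads, and on POSIX
implementations it typically just calls sched_yield(); unlike sched_yield(),
it is also available on non-POSIX platforms such as Windows. The sketch below
shows the backoff loop in isolation; the SpinLock class, its flag_ member, and
the 1000-iteration threshold mirror the diff but are illustrative only, not
folly code:

    #include <atomic>
    #include <thread>

    // Spin briefly so the uncontended path stays cheap; after ~1000 failed
    // attempts, give up the rest of this thread's time slice so the thread
    // actually holding the lock (or a queued writer) gets a chance to run.
    class SpinLock {
     public:
      void lock() {
        int count = 0;
        while (flag_.test_and_set(std::memory_order_acquire)) {
          if (++count > 1000) std::this_thread::yield();
        }
      }
      void unlock() { flag_.clear(std::memory_order_release); }

     private:
      std::atomic_flag flag_ = ATOMIC_FLAG_INIT;
    };

Yielding only after many failed iterations avoids the pathology the comments
in writeLockAggressive() and lock_shared() describe: when there are more
runnable threads than cores, a pure spin loop can starve the very thread it
is waiting on.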