projects
/
folly.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
Don't declare caught exceptions when not actually used
[folly.git]
/
folly
/
MicroLock.cpp
diff --git a/folly/MicroLock.cpp b/folly/MicroLock.cpp
index d6656dce4c32c2fc374eb596200c508a0215a04b..06e8de08768bb01bbb69636dffed07b198738108 100644
(file)
--- a/folly/MicroLock.cpp
+++ b/folly/MicroLock.cpp
@@ -1,5 +1,5 @@
/*
/*
- * Copyright 2016 Facebook, Inc.
+ * Copyright 2017 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -15,6 +15,9 @@
*/
#include <folly/MicroLock.h>
*/
#include <folly/MicroLock.h>
+#include <thread>
+
+#include <folly/portability/Asm.h>
namespace folly {
namespace folly {
@@ -23,7 +26,7 @@
void MicroLockCore::lockSlowPath(uint32_t oldWord,
uint32_t slotHeldBit,
unsigned maxSpins,
unsigned maxYields) {
uint32_t slotHeldBit,
unsigned maxSpins,
unsigned maxYields) {
-  unsigned newWord;
+  uint32_t newWord;
unsigned spins = 0;
uint32_t slotWaitBit = slotHeldBit << 1;
unsigned spins = 0;
uint32_t slotWaitBit = slotHeldBit << 1;
@@ -45,7 +48,10 @@ retry:
}
(void)wordPtr->futexWait(newWord, slotHeldBit);
} else if (spins > maxSpins) {
}
(void)wordPtr->futexWait(newWord, slotHeldBit);
} else if (spins > maxSpins) {
- sched_yield();
+ // sched_yield(), but more portable
+ std::this_thread::yield();
+ } else {
+ folly::asm_pause();
}
oldWord = wordPtr->load(std::memory_order_relaxed);
goto retry;
}
oldWord = wordPtr->load(std::memory_order_relaxed);
goto retry;
@@ -54,14 +60,9 @@ retry:
newWord = oldWord | slotHeldBit;
if (!wordPtr->compare_exchange_weak(oldWord,
newWord,
newWord = oldWord | slotHeldBit;
if (!wordPtr->compare_exchange_weak(oldWord,
newWord,
-                                      std::memory_order_relaxed,
+                                      std::memory_order_acquire,
std::memory_order_relaxed)) {
goto retry;
}
std::memory_order_relaxed)) {
goto retry;
}
-
- // Locks are traditionally memory barriers, so we emit a full fence
- // even though we were happy using relaxed atomics for the
- // lock itself.
- std::atomic_thread_fence(std::memory_order_seq_cst);
}
}
}
}