Summary:
As pointed out by orbitcowboy at
https://github.com/facebook/folly/issues/6
Test Plan: No
Reviewers: jdelong, aalexandre
Reviewed By: jdelong
CC: folly@lists, bagashe
Differential Revision: https://phabricator.fb.com/D486754
* constructor.) This is in order to avoid needing to default
* construct a bunch of value_type when we first start up: if you
* have an expensive default constructor for the value type this can
* constructor.) This is in order to avoid needing to default
* construct a bunch of value_type when we first start up: if you
* have an expensive default constructor for the value type this can
- * noticably speed construction time for an AHA.
+ * noticeably speed construction time for an AHA.
*/
FOR_EACH_RANGE(i, 0, map->capacity_) {
cellKeyPtr(map->cells_[i])->store(map->kEmptyKey_,
*/
FOR_EACH_RANGE(i, 0, map->capacity_) {
cellKeyPtr(map->cells_[i])->store(map->kEmptyKey_,
}
// Replaces at most n1 chars of *this, starting with pos, with n2
}
// Replaces at most n1 chars of *this, starting with pos, with n2
//
// consolidated with
//
//
// consolidated with
//
static_cast<typename basic_fbstring<E1, T, A, S>::size_type>(-1);
#ifndef _LIBSTDCXX_FBSTRING
static_cast<typename basic_fbstring<E1, T, A, S>::size_type>(-1);
#ifndef _LIBSTDCXX_FBSTRING
-// basic_string compatiblity routines
+// basic_string compatibility routines
template <typename E, class T, class A, class S>
inline
template <typename E, class T, class A, class S>
inline
reference operator*() const { return *get(); }
reference operator[](std::ptrdiff_t i) const { return get()[i]; }
reference operator*() const { return *get(); }
reference operator[](std::ptrdiff_t i) const { return get()[i]; }
- // Syncronization (logically const, even though this mutates our
+ // Synchronization (logically const, even though this mutates our
// locked state: you can lock a const PackedSyncPtr<T> to read it).
void lock() const { data_.lock(); }
void unlock() const { data_.unlock(); }
// locked state: you can lock a const PackedSyncPtr<T> to read it).
void lock() const { data_.lock(); }
void unlock() const { data_.unlock(); }
the approximate number of elements you'll be inserting into the map, you
probably shouldn't use this class.
the approximate number of elements you'll be inserting into the map, you
probably shouldn't use this class.
-* Must manage syncronization externally in order to modify values in the map
+* Must manage synchronization externally in order to modify values in the map
after insertion. Lock pools are a common way to do this, or you may
consider using `folly::PackedSyncPtr<T>` as your `ValueT`.
after insertion. Lock pools are a common way to do this, or you may
consider using `folly::PackedSyncPtr<T>` as your `ValueT`.
-// multi-thread benchmarking
+// multithreaded benchmarking
BENCHMARK_PARAM(BM_ContentionStdSet, 1024);
BENCHMARK_PARAM(BM_ContentionCSL, 1024);
BENCHMARK_PARAM(BM_ContentionStdSet, 1024);
BENCHMARK_PARAM(BM_ContentionCSL, 1024);
- // Make sure every thread succesfully inserted it's ID into every vec
+ // Make sure every thread successfully inserted it's ID into every vec
std::set<intptr_t> idsFound;
for (auto& elem : kv.second) {
EXPECT_TRUE(idsFound.insert(elem).second); // check for dups
std::set<intptr_t> idsFound;
for (auto& elem : kv.second) {
EXPECT_TRUE(idsFound.insert(elem).second); // check for dups
__thread int64_t global__thread64;
__thread int32_t global__thread32;
__thread int64_t global__thread64;
__thread int32_t global__thread32;
-// Alternate lock-free implementation. Acheives about the same performance,
+// Alternate lock-free implementation. Achieves about the same performance,
// but uses about 20x more memory than ThreadCachedInt with 24 threads.
struct ShardedAtomicInt {
static const int64_t kBuckets_ = 2048;
// but uses about 20x more memory than ThreadCachedInt with 24 threads.
struct ShardedAtomicInt {
static const int64_t kBuckets_ = 2048;