AtomicHashArray<KeyT, ValueT, HashFcn, EqualFcn,
Allocator, ProbeFcn, KeyConvertFcn>::
AtomicHashArray(size_t capacity, KeyT emptyKey, KeyT lockedKey,
- KeyT erasedKey, double _maxLoadFactor, size_t cacheSize)
+ KeyT erasedKey, double _maxLoadFactor, uint32_t cacheSize)
: capacity_(capacity),
maxEntries_(size_t(_maxLoadFactor * capacity_ + 0.5)),
kEmptyKey_(emptyKey), kLockedKey_(lockedKey), kErasedKey_(erasedKey),
* deleter to make sure everything is cleaned up properly.
*/
struct Config {
- KeyT emptyKey;
- KeyT lockedKey;
- KeyT erasedKey;
+ KeyT emptyKey;
+ KeyT lockedKey;
+ KeyT erasedKey;
double maxLoadFactor;
double growthFactor;
- int entryCountThreadCacheSize;
+ uint32_t entryCountThreadCacheSize;
size_t capacity; // if positive, overrides maxLoadFactor
public:
numPendingEntries_.setCacheSize(newSize);
}
- int getEntryCountThreadCacheSize() const {
+ uint32_t getEntryCountThreadCacheSize() const {
return numEntries_.getCacheSize();
}
// Force constructor/destructor private since create/destroy should be
// used externally instead
- AtomicHashArray(size_t capacity, KeyT emptyKey, KeyT lockedKey,
- KeyT erasedKey, double maxLoadFactor, size_t cacheSize);
+ AtomicHashArray(
+ size_t capacity,
+ KeyT emptyKey,
+ KeyT lockedKey,
+ KeyT erasedKey,
+ double maxLoadFactor,
+ uint32_t cacheSize);
~AtomicHashArray() = default;
std::atomic<SubMap*> subMaps_[kNumSubMaps_];
std::atomic<uint32_t> numMapsAllocated_;
// Hunk: widen the map index from int to unsigned int (presumably part of the
// patch-wide signed/unsigned cleanup — TODO confirm against commit message).
- inline bool tryLockMap(int idx) {
+ inline bool tryLockMap(unsigned int idx) {
// Claims subMaps_[idx] only if it is still nullptr, installing the
// kLockedPtr_ sentinel with acquire ordering; returns false if already taken.
SubMap* val = nullptr;
return subMaps_[idx].compare_exchange_strong(val, (SubMap*)kLockedPtr_,
std::memory_order_acquire);
}
private:
// Hunk: give the retry-limit enum an explicit IndexType underlying type so the
// loop counter comparison in allocateNear (see `IndexType tries` below) is a
// same-type comparison.
-
- enum {
+ enum : IndexType {
kMaxAllocationTries = 1000, // after this we throw
};
/// Allocates a slot and returns its index. Tries to put it near
/// slots_[start].
IndexType allocateNear(IndexType start) {
- for (auto tries = 0; tries < kMaxAllocationTries; ++tries) {
+ for (IndexType tries = 0; tries < kMaxAllocationTries; ++tries) {
auto slot = allocationAttempt(start, tries);
auto prev = slots_[slot].headAndState_.load(std::memory_order_acquire);
if ((prev & 3) == EMPTY &&
DEFINE_int64(
bm_max_iters,
- 1L << 30L,
+ 1 << 30,
"Maximum # of iterations we'll try for each benchmark.");
DEFINE_int32(
template<typename T>
class SkipListNode : private boost::noncopyable {
// Hunk: fix the underlying type of the node flag bits to uint16_t.
// These flags are read/written via getFlags()/setFlags() (see copyHead below).
- enum {
+ enum : uint16_t {
IS_HEAD_NODE = 1,
MARKED_FOR_REMOVAL = (1 << 1),
FULLY_LINKED = (1 << 2),
};
+
public:
typedef T value_type;
SkipListNode* copyHead(SkipListNode* node) {
DCHECK(node != nullptr && height_ > node->height_);
setFlags(node->getFlags());
- for (int i = 0; i < node->height_; ++i) {
+ for (uint8_t i = 0; i < node->height_; ++i) {
setSkip(i, node->skip(i));
}
return this;
Expected<Tgt, ConversionCode>>::type
convertTo(const Src& value) noexcept {
/* static */ if (
- std::numeric_limits<Tgt>::max() < std::numeric_limits<Src>::max()) {
+ folly::_t<std::make_unsigned<Tgt>>(std::numeric_limits<Tgt>::max()) <
+ folly::_t<std::make_unsigned<Src>>(std::numeric_limits<Src>::max())) {
if (greater_than<Tgt, std::numeric_limits<Tgt>::max()>(value)) {
return makeUnexpected(ConversionCode::ARITH_POSITIVE_OVERFLOW);
}
return makeUnexpected(ConversionCode::ARITH_NEGATIVE_OVERFLOW);
}
}
- return boost::implicit_cast<Tgt>(value);
+ return static_cast<Tgt>(value);
}
/**
* Get the 1-based type index of T in Types.
*/
template <typename T>
// Hunk: narrow the return type from size_t to uint16_t with an explicit cast
// — consistent with index() packing the type index into the top 16 bits of
// data_. NOTE(review): assumes GetTypeIndex always fits in 16 bits — confirm.
- size_t typeIndex() const {
- return dptr_detail::GetTypeIndex<T, Types...>::value;
+ uint16_t typeIndex() const {
+ return uint16_t(dptr_detail::GetTypeIndex<T, Types...>::value);
}
// Unchanged context: extracts the 16-bit type index stored in the top bits of
// the 64-bit data_ word (shift by 48).
uint16_t index() const { return data_ >> 48; }
}
// 2+: for null terminator and optional sign shenanigans.
- constexpr size_t bufLen =
+ constexpr int bufLen =
2 + constexpr_max(
2 + DoubleToStringConverter::kMaxFixedDigitsBeforePoint +
DoubleToStringConverter::kMaxFixedDigitsAfterPoint,
constexpr_max(8 + DoubleToStringConverter::kMaxExponentialDigits,
7 + DoubleToStringConverter::kMaxPrecisionDigits));
char buf[bufLen];
- StringBuilder builder(buf + 1, static_cast<int> (sizeof(buf) - 1));
+ StringBuilder builder(buf + 1, bufLen - 1);
char plusSign;
switch (arg.sign) {
prefixLen = 1;
}
- piece = fbstring(p, len);
+ piece = fbstring(p, size_t(len));
}
/// constructed, but delays element construction. This means that only
/// elements that are actually returned to the caller get paged into the
/// process's resident set (RSS).
-template <typename T,
- int NumLocalLists_ = 32,
- int LocalListLimit_ = 200,
- template<typename> class Atom = std::atomic,
- bool EagerRecycleWhenTrivial = false,
- bool EagerRecycleWhenNotTrivial = true>
+template <
+ typename T,
+ uint32_t NumLocalLists_ = 32,
+ uint32_t LocalListLimit_ = 200,
+ template <typename> class Atom = std::atomic,
+ bool EagerRecycleWhenTrivial = false,
+ bool EagerRecycleWhenNotTrivial = true>
struct IndexedMemPool : boost::noncopyable {
typedef T value_type;
/// Destroys all of the contained elements
~IndexedMemPool() {
if (!eagerRecycle()) {
- for (size_t i = size_; i > 0; --i) {
+ for (uint32_t i = size_; i > 0; --i) {
slots_[i].~Slot();
}
}
/// simultaneously allocated and not yet recycled. Because of the
/// local lists it is possible that more elements than this are returned
/// successfully
// Hunk: narrow the return type to uint32_t to match actualCapacity_ (changed
// to uint32_t elsewhere in this patch); simply delegates to
// capacityForMaxIndex.
- size_t capacity() {
+ uint32_t capacity() {
return capacityForMaxIndex(actualCapacity_);
}
////////// fields
+ /// the number of bytes allocated from mmap, which is a multiple of
+ /// the page size of the machine
+ size_t mmapLength_;
+
/// the actual number of slots that we will allocate, to guarantee
/// that we will satisfy the capacity requested at construction time.
/// They will be numbered 1..actualCapacity_ (note the 1-based counting),
/// and occupy slots_[1..actualCapacity_].
- size_t actualCapacity_;
-
- /// the number of bytes allocated from mmap, which is a multiple of
- /// the page size of the machine
- size_t mmapLength_;
+ uint32_t actualCapacity_;
/// this records the number of slots that have actually been constructed.
/// To allow use of atomic ++ instead of CAS, we let this overflow.
///////////// private methods
- size_t slotIndex(uint32_t idx) const {
+ uint32_t slotIndex(uint32_t idx) const {
assert(0 < idx &&
idx <= actualCapacity_ &&
idx <= size_.load(std::memory_order_acquire));
if (external_) {
enum { kUnixPathMax = sizeof(storage_.un.addr->sun_path) };
const char *path = storage_.un.addr->sun_path;
- size_t pathLength = storage_.un.pathLength();
+ auto pathLength = storage_.un.pathLength();
// TODO: this probably could be made more efficient
- for (unsigned int n = 0; n < pathLength; ++n) {
- boost::hash_combine(seed, folly::hash::twang_mix64(path[n]));
+ for (off_t n = 0; n < pathLength; ++n) {
+ boost::hash_combine(seed, folly::hash::twang_mix64(uint64_t(path[n])));
}
}
// abstract namespace. honor the specified length
} else {
// Call strnlen(), just in case the length was overspecified.
- socklen_t maxLength = addrlen - offsetof(struct sockaddr_un, sun_path);
+ size_t maxLength = addrlen - offsetof(struct sockaddr_un, sun_path);
size_t pathLength = strnlen(storage_.un.addr->sun_path, maxLength);
storage_.un.len =
socklen_t(offsetof(struct sockaddr_un, sun_path) + pathLength);
//
// Note that this still meets the requirements for a strict weak
// ordering, so we can use this operator<() with standard C++ containers.
- size_t thisPathLength = storage_.un.pathLength();
+ auto thisPathLength = storage_.un.pathLength();
if (thisPathLength == 0) {
return false;
}
- size_t otherPathLength = other.storage_.un.pathLength();
+ auto otherPathLength = other.storage_.un.pathLength();
if (otherPathLength == 0) {
return true;
}
// where k is the number of bits in the fingerprint (and deg(P)) and
// Q(X) = q7*X^7 + q6*X^6 + ... + q1*X + q0 is a degree-7 polyonomial
// whose coefficients are the bits of q.
- for (int x = 0; x < 256; x++) {
+ for (uint16_t x = 0; x < 256; x++) {
FingerprintPolynomial<DEG> t;
t.setHigh8Bits(uint8_t(x));
for (int i = 0; i < 8; i++) {
// a sub-optimal ordering, but it won't crash
auto& lhsEquiv = equivClassesByCpu[lhs];
auto& rhsEquiv = equivClassesByCpu[rhs];
- for (int i = int(std::min(lhsEquiv.size(), rhsEquiv.size())) - 1;
+ for (ssize_t i = ssize_t(std::min(lhsEquiv.size(), rhsEquiv.size())) - 1;
i >= 0;
--i) {
- if (lhsEquiv[i] != rhsEquiv[i]) {
- return lhsEquiv[i] < rhsEquiv[i];
+ auto idx = size_t(i);
+ if (lhsEquiv[idx] != rhsEquiv[idx]) {
+ return lhsEquiv[idx] < rhsEquiv[idx];
}
}
ba[byteIndex] = one[byteIndex];
++byteIndex;
}
- auto bitIndex = std::min(mask, (uint8_t)(byteIndex * 8));
+ auto bitIndex = std::min(mask, uint8_t(byteIndex * 8));
+ uint8_t bI = uint8_t(bitIndex / 8);
+ uint8_t bM = uint8_t(bitIndex % 8);
// Compute the bit up to which the two byte arrays match in the
// unmatched byte.
// Here the check is bitIndex < mask since the 0th mask entry in
// kMasks array holds the mask for masking the MSb in this byte.
// We could instead make it hold so that no 0th entry masks no
// bits but thats a useless iteration.
- while (bitIndex < mask && ((one[bitIndex / 8] & kMasks[bitIndex % 8]) ==
- (two[bitIndex / 8] & kMasks[bitIndex % 8]))) {
- ba[bitIndex / 8] = one[bitIndex / 8] & kMasks[bitIndex % 8];
+ while (bitIndex < mask &&
+ ((one[bI] & kMasks[bM]) == (two[bI] & kMasks[bM]))) {
+ ba[bI] = uint8_t(one[bI] & kMasks[bM]);
++bitIndex;
+ bI = uint8_t(bitIndex / 8);
+ bM = uint8_t(bitIndex % 8);
}
return {ba, bitIndex};
}
}
IntegralType powerToPrint = 1;
- for (int i = 1; i < DigitCount; ++i) {
+ for (IntegralType i = 1; i < DigitCount; ++i) {
powerToPrint *= Base;
}
// This load is safe because needles.size() >= 16
auto arr2 = _mm_loadu_si128(
reinterpret_cast<const __m128i*>(needles.data()));
- size_t b =
+ auto b =
_mm_cmpestri(arr2, 16, arr1, int(haystack.size() - blockStartIdx), 0);
size_t j = nextAlignedIndex(needles.data());
arr1,
int(haystack.size() - blockStartIdx),
0);
- b = std::min<size_t>(index, b);
+ b = std::min(index, b);
}
if (b < 16) {
return true;
}
- size_t upperValue = (value >> numLowerBits_);
- size_t upperSkip = upperValue - upper_.value();
+ ValueType upperValue = (value >> numLowerBits_);
+ ValueType upperSkip = upperValue - upper_.value();
// The average density of ones in upper bits is 1/2.
// LIKELY here seems to make things worse, even for small skips.
if (upperSkip < 2 * kLinearScanThreshold) {
if (!taskAdded) {
manager.addTask([&]() {
std::vector<std::function<std::unique_ptr<int>()>> funcs;
- for (size_t i = 0; i < 3; ++i) {
+ for (int i = 0; i < 3; ++i) {
funcs.push_back([i, &pendingFibers]() {
await([&pendingFibers](Promise<int> promise) {
pendingFibers.push_back(std::move(promise));
typename std::iterator_traits<InputIterator>::value_type::value_type T;
struct CollectAllContext {
// Hunk: take the element count as size_t instead of int; pre-sizes `results`.
- CollectAllContext(int n) : results(n) {}
+ CollectAllContext(size_t n) : results(n) {}
// Unchanged context: fulfills the promise with the accumulated results when
// the context is destroyed (i.e. when the last outstanding reference drops).
~CollectAllContext() {
p.setValue(std::move(results));
}
Nothing,
std::vector<Optional<T>>>::type;
// Hunk: same size_t widening as CollectAllContext; pre-sizes `result`.
- explicit CollectContext(int n) : result(n) {}
+ explicit CollectContext(size_t n) : result(n) {}
~CollectContext() {
if (!threw.exchange(true)) {
// map Optional<T> -> T
void openConsumer() { ++consumers_; }
void closeInputProducer() {
- int64_t producers = producers_--;
+ size_t producers = producers_--;
CHECK(producers);
if (producers == 1) { // last producer
wakeConsumer_.notifyAll();
}
void closeOutputConsumer() {
- int64_t consumers = consumers_--;
+ size_t consumers = consumers_--;
CHECK(consumers);
if (consumers == 1) { // last consumer
wakeProducer_.notifyAll();
auto p = in.find_first_of(kCRLF);
if (p != std::string::npos) {
const auto in_start = in.data();
- auto delim_len = 1;
+ size_t delim_len = 1;
in.advance(p);
// Either remove an MS-DOS CR-LF 2-byte newline, or eat 1 byte at a time.
if (in.removePrefix(kCRLF)) {
if (ret < 0) {
throw std::runtime_error("error formatting printf() data");
}
+ auto len = size_t(ret);
// vsnprintf() returns the number of characters that would be printed,
// not including the terminating nul.
- if (size_t(ret) < length()) {
+ if (len < length()) {
// All of the data was successfully written.
- append(ret);
+ append(len);
return;
}
// There wasn't enough room for the data.
// Allocate more room, and then retry.
- ensure(ret + 1);
+ ensure(len + 1);
ret = vsnprintf(reinterpret_cast<char*>(writableData()), length(),
fmt, apCopy);
if (ret < 0) {
throw std::runtime_error("error formatting printf() data");
}
- if (size_t(ret) >= length()) {
+ len = size_t(ret);
+ if (len >= length()) {
// This shouldn't ever happen.
throw std::runtime_error("unexpectedly out of buffer space on second "
"vsnprintf() attmept");
}
- append(ret);
+ append(len);
}
}} // folly::io
if (zero_return(error, bytes)) {
return ReadResult(bytes);
}
- long errError = ERR_get_error();
+ auto errError = ERR_get_error();
VLOG(6) << "AsyncSSLSocket(fd=" << fd_ << ", "
<< "state=" << state_ << ", "
<< "sslState=" << sslState_ << ", "
if ((addr4 != nullptr || addr6 != nullptr) && name->type == GEN_IPADD) {
// Extra const-ness for paranoia
unsigned char const* const rawIpStr = name->d.iPAddress->data;
- int const rawIpLen = name->d.iPAddress->length;
+ size_t const rawIpLen = size_t(name->d.iPAddress->length);
if (rawIpLen == 4 && addr4 != nullptr) {
if (::memcmp(rawIpStr, &addr4->sin_addr, rawIpLen) == 0) {
void OpenSSLUtils::setBioFd(BIO* b, int fd, int flags) {
#ifdef _WIN32
- SOCKET sock = portability::sockets::fd_to_socket(fd);
+ SOCKET socket = portability::sockets::fd_to_socket(fd);
+ // Internally OpenSSL uses this as an int for reasons completely
+ // beyond any form of sanity, so we do the cast ourselves to avoid
+ // the warnings that would be generated.
+ int sock = int(socket);
#else
int sock = fd;
#endif
append(head, "hello");
Appender app(head.get(), 10);
- uint32_t cap = head->capacity();
- uint32_t len1 = app.length();
+ auto cap = head->capacity();
+ auto len1 = app.length();
EXPECT_EQ(cap - 5, len1);
app.ensure(len1); // won't grow
EXPECT_EQ(len1, app.length());
};
Initializer initializer;
-unique_ptr<IOBuf>
-stringToIOBuf(const char* s, uint32_t len) {
+unique_ptr<IOBuf> stringToIOBuf(const char* s, size_t len) {
unique_ptr<IOBuf> buf = IOBuf::create(len);
memcpy(buf->writableTail(), s, len);
buf->append(len);
serialization_opts const& opts_;
};
// Hunk: moves ParseError from an indented (namespace-internal) scope to
// column 0 and reduces it to a single constructor taking
// (line, context, expected); the `line` parameter is widened int -> unsigned.
// NOTE(review): the old overloads ParseError(int) and
// ParseError(std::string const&) are REMOVED by this hunk — verify no callers
// of those overloads remain elsewhere in the tree.
- //////////////////////////////////////////////////////////////////////
-
- struct ParseError : std::runtime_error {
- explicit ParseError(int line)
- : std::runtime_error(to<std::string>("json parse error on line ", line))
- {}
-
- explicit ParseError(int line, std::string const& context,
- std::string const& expected)
- : std::runtime_error(to<std::string>("json parse error on line ", line,
- !context.empty() ? to<std::string>(" near `", context, '\'')
- : "",
- ": ", expected))
- {}
-
- explicit ParseError(std::string const& msg)
- : std::runtime_error("json parse error: " + msg)
- {}
- };
+//////////////////////////////////////////////////////////////////////
+
+struct ParseError : std::runtime_error {
+ explicit ParseError(
+ unsigned int line,
+ std::string const& context,
+ std::string const& expected)
+ : std::runtime_error(to<std::string>(
+ "json parse error on line ",
+ line,
+ !context.empty() ? to<std::string>(" near `", context, '\'') : "",
+ ": ",
+ expected)) {}
+};
// Wraps our input buffer with some helper functions.
struct Input {
va_start(args, fmt);
SCOPE_EXIT { va_end(args); };
- int len = vsnprintf(nullptr, 0, fmt, args);
- if (len <= 0) {
+ int ret = vsnprintf(nullptr, 0, fmt, args);
+ if (ret <= 0) {
return -1;
}
+ size_t len = size_t(ret);
char* buf = new char[len + 1];
SCOPE_EXIT { delete[] buf; };
- if (vsnprintf(buf, len + 1, fmt, args) == len && write(fd, buf, len) == len) {
- return len;
+ if (size_t(vsnprintf(buf, len + 1, fmt, args)) == len &&
+ write(fd, buf, len) == ssize_t(len)) {
+ return ret;
}
return -1;
if (len <= 0) {
return -1;
}
- char* buf = *dest = (char*)malloc(len + 1);
- if (vsnprintf(buf, len + 1, format, ap) == len) {
+ char* buf = *dest = (char*)malloc(size_t(len + 1));
+ if (vsnprintf(buf, size_t(len + 1), format, ap) == len) {
return len;
}
free(buf);
bool BucketedTimeSeries<VT, CT>::addValue(
TimePoint now,
const ValueType& val,
- int64_t times) {
- return addValueAggregated(now, val * times, times);
+ uint64_t times) {
+ return addValueAggregated(now, val * ValueType(times), times);
}
template <typename VT, typename CT>
bool BucketedTimeSeries<VT, CT>::addValueAggregated(
TimePoint now,
const ValueType& total,
- int64_t nsamples) {
+ uint64_t nsamples) {
if (isAllTime()) {
if (UNLIKELY(empty())) {
firstTime_ = now;
/*
* Adds the value 'val' the given number of 'times' at time 'now'
*/
- bool addValue(TimePoint now, const ValueType& val, int64_t times);
+ bool addValue(TimePoint now, const ValueType& val, uint64_t times);
/*
* Adds the value 'total' as the sum of 'nsamples' samples
*/
bool
- addValueAggregated(TimePoint now, const ValueType& total, int64_t nsamples);
+ addValueAggregated(TimePoint now, const ValueType& total, uint64_t nsamples);
/*
* Updates the container to the specified time, doing all the necessary
// Unchanged context: single-sample convenience overload; forwards to the
// aggregated form with a sample count of 1.
bool addValue(Duration now, const ValueType& val) {
return addValueAggregated(TimePoint(now), val, 1);
}
// Hunk: sample count widened int64_t -> uint64_t. Adds `val` repeated `times`
// times by forwarding the pre-multiplied total (val * times) to the
// aggregated form.
- bool addValue(Duration now, const ValueType& val, int64_t times) {
+ bool addValue(Duration now, const ValueType& val, uint64_t times) {
return addValueAggregated(TimePoint(now), val * ValueType(times), times);
}
// Hunk: nsamples widened int64_t -> uint64_t; Duration overload simply wraps
// `now` in a TimePoint and forwards.
bool
- addValueAggregated(Duration now, const ValueType& total, int64_t nsamples) {
+ addValueAggregated(Duration now, const ValueType& total, uint64_t nsamples) {
return addValueAggregated(TimePoint(now), total, nsamples);
}
size_t update(Duration now) {
// or flush() is called.
TimePoint cachedTime_;
ValueType cachedSum_;
- int cachedCount_;
+ uint64_t cachedCount_;
};
} // folly
}
/* Total sum of values at the given timeseries level (all buckets). */
- ValueType sum(int level) const {
+ ValueType sum(size_t level) const {
ValueType total = ValueType();
for (size_t b = 0; b < buckets_.getNumBuckets(); ++b) {
total += buckets_.getByIndex(b).sum(level);
/* Average of values at the given timeseries level (all buckets). */
template <typename ReturnType = double>
- ReturnType avg(int level) const {
+ ReturnType avg(size_t level) const {
auto total = ValueType();
uint64_t nsamples = 0;
computeAvgData(&total, &nsamples, level);
static std::mutex futexLock;
DeterministicSchedule::DeterministicSchedule(
- const std::function<int(int)>& scheduler)
+ const std::function<size_t(size_t)>& scheduler)
: scheduler_(scheduler), nextThreadId_(1), step_(0) {
assert(tls_sem == nullptr);
assert(tls_sched == nullptr);
beforeThreadExit();
}
// Hunk: scheduler signature changes from int(int) to size_t(size_t) and the
// seed from long to uint64_t. Returns a closure that owns a shared ranlux48
// engine seeded with `seed` and, given the number of active threads, picks a
// uniformly random index in [0, numActive - 1].
-std::function<int(int)> DeterministicSchedule::uniform(long seed) {
+std::function<size_t(size_t)> DeterministicSchedule::uniform(uint64_t seed) {
auto rand = std::make_shared<std::ranlux48>(seed);
return [rand](size_t numActive) {
- auto dist = std::uniform_int_distribution<int>(0, numActive - 1);
+ auto dist = std::uniform_int_distribution<size_t>(0, numActive - 1);
return dist(*rand);
};
}
struct UniformSubset {
- UniformSubset(long seed, int subsetSize, int stepsBetweenSelect)
+ UniformSubset(uint64_t seed, size_t subsetSize, size_t stepsBetweenSelect)
: uniform_(DeterministicSchedule::uniform(seed)),
subsetSize_(subsetSize),
stepsBetweenSelect_(stepsBetweenSelect),
}
private:
- std::function<int(int)> uniform_;
+ std::function<size_t(size_t)> uniform_;
const size_t subsetSize_;
- const int stepsBetweenSelect_;
+ const size_t stepsBetweenSelect_;
- int stepsLeft_;
+ size_t stepsLeft_;
// only the first subsetSize_ is properly randomized
- std::vector<int> perm_;
+ std::vector<size_t> perm_;
void adjustPermSize(size_t numActive) {
if (perm_.size() > numActive) {
// Hunk: loop-local `j` widened int -> size_t to match uniform_'s new return
// type. Performs a partial Fisher–Yates shuffle: randomizes only the first
// min(perm_.size() - 1, subsetSize_) entries of perm_.
void shufflePrefix() {
for (size_t i = 0; i < std::min(perm_.size() - 1, subsetSize_); ++i) {
- int j = uniform_(perm_.size() - i) + i;
+ size_t j = uniform_(perm_.size() - i) + i;
std::swap(perm_[i], perm_[j]);
}
}
};
// Hunk: same int(int) -> size_t(size_t) signature migration as uniform(),
// with seed long -> uint64_t and n/m int -> size_t. Returns a closure sharing
// a UniformSubset(seed, n, m) state object; each call forwards numActive to it.
-std::function<int(int)> DeterministicSchedule::uniformSubset(long seed,
- int n,
- int m) {
+std::function<size_t(size_t)>
+DeterministicSchedule::uniformSubset(uint64_t seed, size_t n, size_t m) {
auto gen = std::make_shared<UniformSubset>(seed, n, m);
return [=](size_t numActive) { return (*gen)(numActive); };
}
sem_post(sched->sems_[sched->scheduler_(sched->sems_.size())]);
}
-int DeterministicSchedule::getRandNumber(int n) {
+size_t DeterministicSchedule::getRandNumber(size_t n) {
if (tls_sched) {
return tls_sched->scheduler_(n);
}
* DeterministicSchedule::thread on a thread participating in this
* schedule) to participate in a deterministic schedule.
*/
- explicit DeterministicSchedule(const std::function<int(int)>& scheduler);
+ explicit DeterministicSchedule(
+ const std::function<size_t(size_t)>& scheduler);
/** Completes the schedule. */
~DeterministicSchedule();
* inter-thread communication are random variables following a poisson
* distribution.
*/
- static std::function<int(int)> uniform(long seed);
+ static std::function<size_t(size_t)> uniform(uint64_t seed);
/**
* Returns a scheduling function that chooses a subset of the active
* runnable thread. The subset is chosen with size n, and the choice
* is made every m steps.
*/
- static std::function<int(int)> uniformSubset(long seed,
- int n = 2,
- int m = 64);
+ static std::function<size_t(size_t)>
+ uniformSubset(uint64_t seed, size_t n = 2, size_t m = 64);
/** Obtains permission for the current thread to perform inter-thread
* communication. */
/** Used scheduler_ to get a random number b/w [0, n). If tls_sched is
* not set-up it falls back to std::rand() */
- static int getRandNumber(int n);
+ static size_t getRandNumber(size_t n);
/** Deterministic implemencation of getcpu */
static int getcpu(unsigned* cpu, unsigned* node, void* unused);
static thread_local AuxAct tls_aux_act;
static AuxChk aux_chk;
- std::function<int(int)> scheduler_;
+ std::function<size_t(size_t)> scheduler_;
std::vector<sem_t*> sems_;
std::unordered_set<std::thread::id> active_;
unsigned nextThreadId_;
TEST(IndexedMemPool, no_starvation) {
const int count = 1000;
- const int poolSize = 100;
+ const uint32_t poolSize = 100;
typedef DeterministicSchedule Sched;
Sched sched(Sched::uniform(0));
}
// Hunk: test helper type; `count` (thread-local live-construction counter)
// and `elem_` widened int -> size_t, matching the size_t-returning
// std::string::length() used below.
struct NonTrivialStruct {
- static FOLLY_TLS int count;
+ static FOLLY_TLS size_t count;
- int elem_;
+ size_t elem_;
// Both constructors bump the per-thread count so tests can observe
// construction/destruction balance.
NonTrivialStruct() {
elem_ = 0;
++count;
}
- NonTrivialStruct(std::unique_ptr<std::string>&& arg1, int arg2) {
+ NonTrivialStruct(std::unique_ptr<std::string>&& arg1, size_t arg2) {
elem_ = arg1->length() + arg2;
++count;
}
// NOTE(review): the braces below suggest a member (likely the destructor)
// elided between hunks — confirm against the full file.
}
};
// Out-of-line definition of the thread-local counter, widened to size_t.
-FOLLY_TLS int NonTrivialStruct::count;
+FOLLY_TLS size_t NonTrivialStruct::count;
TEST(IndexedMemPool, eager_recycle) {
typedef IndexedMemPool<NonTrivialStruct> Pool;
b.begin(), b.end(),
std::back_inserter(c));
EXPECT_EQ(8, c.size());
- for (int i = 0; i < 8; ++i) {
+ for (size_t i = 0; i < 8; ++i) {
EXPECT_EQ(i, c[i]);
}
}