// AtomicHashMap constructor -- Atomic wrapper that allows growth
// This class has a lot of overhead (184 Bytes) so only use for big maps
-template <typename KeyT,
- typename ValueT,
- typename HashFcn,
- typename EqualFcn,
- typename Allocator,
- typename ProbeFcn,
- typename KeyConvertFcn>
-AtomicHashMap<KeyT, ValueT, HashFcn, EqualFcn,
- Allocator, ProbeFcn, KeyConvertFcn>::
-AtomicHashMap(size_t finalSizeEst, const Config& config)
- : kGrowthFrac_(config.growthFactor < 0 ?
- 1.0 - config.maxLoadFactor : config.growthFactor) {
- CHECK(config.maxLoadFactor > 0.0 && config.maxLoadFactor < 1.0);
+template <
+ typename KeyT,
+ typename ValueT,
+ typename HashFcn,
+ typename EqualFcn,
+ typename Allocator,
+ typename ProbeFcn,
+ typename KeyConvertFcn>
+AtomicHashMap<
+ KeyT,
+ ValueT,
+ HashFcn,
+ EqualFcn,
+ Allocator,
+ ProbeFcn,
+ KeyConvertFcn>::AtomicHashMap(size_t finalSizeEst, const Config& config)
+ : kGrowthFrac_(
+ config.growthFactor < 0 ? 1.0f - config.maxLoadFactor
+ : config.growthFactor) {
+ CHECK(config.maxLoadFactor > 0.0f && config.maxLoadFactor < 1.0f);
subMaps_[0].store(SubMap::create(finalSizeEst, config).release(),
std::memory_order_relaxed);
auto subMapCount = kNumSubMaps_;
size_t numCellsAllocated = (size_t)
(primarySubMap->capacity_ *
std::pow(1.0 + kGrowthFrac_, nextMapIdx - 1));
- size_t newSize = (int) (numCellsAllocated * kGrowthFrac_);
+ size_t newSize = size_t(numCellsAllocated * kGrowthFrac_);
DCHECK(subMaps_[nextMapIdx].load(std::memory_order_relaxed) ==
(SubMap*)kLockedPtr_);
// create a new map using the settings stored in the first map
if (LIKELY(ret.idx != primaryMap->capacity_)) {
return SimpleRetT(0, ret.idx, ret.success);
}
- int const numMaps = numMapsAllocated_.load(std::memory_order_acquire);
+ const unsigned int numMaps =
+ numMapsAllocated_.load(std::memory_order_acquire);
FOR_EACH_RANGE(i, 1, numMaps) {
// Check each map successively. If one succeeds, we're done!
SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
explicit AtomicHashMap(size_t finalSizeEst, const Config& c = Config());
~AtomicHashMap() {
- const int numMaps = numMapsAllocated_.load(std::memory_order_relaxed);
+ const unsigned int numMaps =
+ numMapsAllocated_.load(std::memory_order_relaxed);
FOR_EACH_RANGE (i, 0, numMaps) {
SubMap* thisMap = subMaps_[i].load(std::memory_order_relaxed);
DCHECK(thisMap);
const Allocator& alloc = Allocator())
: allocator_(alloc)
{
- size_t capacity = maxSize / std::min(1.0f, maxLoadFactor) + 128;
+ size_t capacity = size_t(maxSize / std::min(1.0f, maxLoadFactor) + 128);
size_t avail = size_t{1} << (8 * sizeof(IndexType) - 2);
if (capacity > avail && maxSize < avail) {
// we'll do our best
/// can specialize it differently during deterministic testing
IndexType allocationAttempt(IndexType start, IndexType tries) const {
if (LIKELY(tries < 8 && start + tries < numSlots_)) {
- return start + tries;
+ return IndexType(start + tries);
} else {
IndexType rv;
if (sizeof(IndexType) <= 4) {
- rv = folly::Random::rand32(numSlots_);
+ rv = IndexType(folly::Random::rand32(numSlots_));
} else {
- rv = folly::Random::rand64(numSlots_);
+ rv = IndexType(folly::Random::rand64(numSlots_));
}
assert(rv < numSlots_);
return rv;
CHECK_EQ(0, ts.tv_sec) << "Clock sucks.";
CHECK_LT(0, ts.tv_nsec) << "Clock too fast for its own good.";
CHECK_EQ(1, ts.tv_nsec) << "Clock too coarse, upgrade your kernel.";
- resolutionInNs = ts.tv_nsec;
+ resolutionInNs = uint64_t(ts.tv_nsec);
}
// We choose a minimum minimum (sic) of 100,000 nanoseconds, but if
// the clock resolution is worse than that, it will be larger. In
// essence we're aiming at making the quantization noise 0.01%.
- static const auto minNanoseconds =
- max<uint64_t>(FLAGS_bm_min_usec * 1000UL,
- min<uint64_t>(resolutionInNs * 100000, 1000000000ULL));
+ static const auto minNanoseconds = max<uint64_t>(
+ uint64_t(FLAGS_bm_min_usec) * 1000ULL,
+ min<uint64_t>(resolutionInNs * 100000ULL, 1000000000ULL));
// We do measurements in several epochs and take the minimum, to
// account for jitter.
size_t actualEpochs = 0;
for (; actualEpochs < epochs; ++actualEpochs) {
- const auto maxIters = FLAGS_bm_max_iters;
- for (unsigned int n = FLAGS_bm_min_iters; n < maxIters; n *= 2) {
- auto const nsecsAndIter = fun(n);
+ const auto maxIters = uint32_t(FLAGS_bm_max_iters);
+ for (auto n = uint32_t(FLAGS_bm_min_iters); n < maxIters; n *= 2) {
+ auto const nsecsAndIter = fun(static_cast<unsigned int>(n));
if (nsecsAndIter.first < minNanoseconds) {
continue;
}
inline uint64_t timespecDiff(timespec end, timespec start) {
if (end.tv_sec == start.tv_sec) {
assert(end.tv_nsec >= start.tv_nsec);
- return end.tv_nsec - start.tv_nsec;
+ return uint64_t(end.tv_nsec - start.tv_nsec);
}
assert(end.tv_sec > start.tv_sec);
auto diff = uint64_t(end.tv_sec - start.tv_sec);
- assert(diff <
- std::numeric_limits<uint64_t>::max() / 1000000000UL);
- return diff * 1000000000UL
- + end.tv_nsec - start.tv_nsec;
+ assert(diff < std::numeric_limits<uint64_t>::max() / 1000000000ULL);
+ return diff * 1000000000ULL + end.tv_nsec - start.tv_nsec;
}
/**
sizeof(T) <= sizeof(unsigned int)),
unsigned int>::type
findFirstSet(T x) {
- return __builtin_ffs(x);
+ return static_cast<unsigned int>(__builtin_ffs(static_cast<int>(x)));
}
template <class T>
sizeof(T) <= sizeof(unsigned long)),
unsigned int>::type
findFirstSet(T x) {
- return __builtin_ffsl(x);
+ return static_cast<unsigned int>(__builtin_ffsl(static_cast<long>(x)));
}
template <class T>
sizeof(T) <= sizeof(unsigned long long)),
unsigned int>::type
findFirstSet(T x) {
- return __builtin_ffsll(x);
+ return static_cast<unsigned int>(__builtin_ffsll(static_cast<long long>(x)));
}
template <class T>
sizeof(T) <= sizeof(unsigned int)),
size_t>::type
popcount(T x) {
- return detail::popcount(x);
+ return size_t(detail::popcount(x));
}
template <class T>
sizeof(T) <= sizeof(unsigned long long)),
size_t>::type
popcount(T x) {
- return detail::popcountll(x);
+ return size_t(detail::popcountll(x));
}
/**
void advance(ssize_t n) {
size_t bpb = bitsPerBlock();
- ssize_t blocks = n / bpb;
+ ssize_t blocks = n / ssize_t(bpb);
bitOffset_ += n % bpb;
if (bitOffset_ >= bpb) {
bitOffset_ -= bpb;
}
ssize_t distance_to(const BitIterator& other) const {
- return
- (other.base_reference() - this->base_reference()) * bitsPerBlock() +
- other.bitOffset_ - bitOffset_;
+ return ssize_t(
+ (other.base_reference() - this->base_reference()) * bitsPerBlock() +
+ other.bitOffset_ - bitOffset_);
}
size_t bitOffset_;
height * sizeof(std::atomic<SkipListNode*>);
auto* node = static_cast<SkipListNode*>(alloc.allocate(size));
// do placement new
- new (node) SkipListNode(height, std::forward<U>(data), isHead);
+ new (node) SkipListNode(uint8_t(height), std::forward<U>(data), isHead);
return node;
}
bool isHeadNode() const { return getFlags() & IS_HEAD_NODE; }
void setIsHeadNode() {
- setFlags(getFlags() | IS_HEAD_NODE);
+ setFlags(uint16_t(getFlags() | IS_HEAD_NODE));
}
void setFullyLinked() {
- setFlags(getFlags() | FULLY_LINKED);
+ setFlags(uint16_t(getFlags() | FULLY_LINKED));
}
void setMarkedForRemoval() {
- setFlags(getFlags() | MARKED_FOR_REMOVAL);
+ setFlags(uint16_t(getFlags() | MARKED_FOR_REMOVAL));
}
private:
}
int max_layer = maxLayer();
for (int i = 0; i < max_layer; ++i) {
- hints_[i] = i + 1;
+ hints_[i] = uint8_t(i + 1);
}
hints_[max_layer] = max_layer;
}
}
bool result;
- size_t len = e - b;
+ size_t len = size_t(e - b);
switch (*b) {
case '0':
case '1': {
// want to raise an error; length will point past the last character
// that was processed, so we need to check if that character was
// whitespace or not.
- if (length == 0 || (result == 0.0 && std::isspace((*src)[length - 1]))) {
+ if (length == 0 ||
+ (result == 0.0 && std::isspace((*src)[size_t(length) - 1]))) {
return makeUnexpected(ConversionCode::EMPTY_INPUT_STRING);
}
- src->advance(length);
+ src->advance(size_t(length));
return Tgt(result);
}
// There must be non-whitespace, otherwise we would have caught this above
assert(b < e);
- size_t size = e - b;
+ size_t size = size_t(e - b);
bool negative = false;
if (*b == '-') {
Expected<T, ConversionCode> finalize(U value) {
T rv;
if (negative_) {
- rv = -value;
+ rv = T(-value);
if (UNLIKELY(rv > 0)) {
return makeUnexpected(ConversionCode::NEGATIVE_OVERFLOW);
}
} else {
- rv = value;
+ rv = T(value);
if (UNLIKELY(rv < 0)) {
return makeUnexpected(ConversionCode::POSITIVE_OVERFLOW);
}
return makeUnexpected(err);
}
- size_t size = e - b;
+ size_t size = size_t(e - b);
/* Although the string is entirely made of digits, we still need to
* check for overflow.
return Tgt(0); // just zeros, e.g. "0000"
}
if (*b != '0') {
- size = e - b;
+ size = size_t(e - b);
break;
}
}
auto res = sgn.finalize(tmp.value());
if (res.hasValue()) {
- src->advance(m - src->data());
+ src->advance(size_t(m - src->data()));
}
return res;
template <class... Args>
void checkKernelError(ssize_t ret, Args&&... args) {
if (UNLIKELY(ret < 0)) {
- throwSystemErrorExplicit(-ret, std::forward<Args>(args)...);
+ throwSystemErrorExplicit(int(-ret), std::forward<Args>(args)...);
}
}
template <class ForwardIterator>
fbvector(ForwardIterator first, ForwardIterator last,
const Allocator& a, std::forward_iterator_tag)
- : impl_(std::distance(first, last), a)
+ : impl_(size_type(std::distance(first, last)), a)
{ M_uninitialized_copy_e(first, last); }
template <class InputIterator>
template <class ForwardIterator>
void assign(ForwardIterator first, ForwardIterator last,
std::forward_iterator_tag) {
- const size_t newSize = std::distance(first, last);
+ const auto newSize = size_type(std::distance(first, last));
if (newSize > capacity()) {
impl_.reset(newSize);
M_uninitialized_copy_e(first, last);
return;
}
if (impl_.b_)
- M_deallocate(impl_.b_, impl_.z_ - impl_.b_);
+ M_deallocate(impl_.b_, size_type(impl_.z_ - impl_.b_));
impl_.z_ = newB + newCap;
impl_.e_ = newB + (impl_.e_ - impl_.b_);
impl_.b_ = newB;
void make_window(iterator position, size_type n) {
// The result is guaranteed to be non-negative, so use an unsigned type:
- size_type tail = std::distance(position, impl_.e_);
+ size_type tail = size_type(std::distance(position, impl_.e_));
if (tail <= n) {
relocate_move(position + n, position, impl_.e_);
assert(isValid(cpos)); \
} \
T* position = const_cast<T*>(cpos); \
- size_type idx = std::distance(impl_.b_, position); \
+ size_type idx = size_type(std::distance(impl_.b_, position)); \
T* b; \
size_type newCap; /* intentionally uninitialized */ \
\
template <class FIt>
iterator insert(const_iterator cpos, FIt first, FIt last,
std::forward_iterator_tag) {
- size_type n = std::distance(first, last);
+ size_type n = size_type(std::distance(first, last));
FOLLY_FBVECTOR_INSERT_PRE(cpos, n)
FOLLY_FBVECTOR_INSERT_START(cpos, n)
D_uninitialized_copy_a(start, first, last);
int sz = static_cast<int>(sp.size());
if (arg.precision != FormatArg::kDefaultPrecision) {
sz = std::min(arg.precision, sz);
- sp.reset(sp.data(), sz);
+ sp.reset(sp.data(), size_t(sz));
arg.precision -= sz;
}
if (!sp.empty()) {
void format(FormatArg& arg, FormatCallback& cb) const {
int key = arg.splitIntKey();
arg.enforce(key >= 0, "tuple index must be non-negative");
- doFormat(key, arg, cb);
+ doFormat(size_t(key), arg, cb);
}
private:
auto end = fullArgString.end();
// Parse key
- auto p = static_cast<const char*>(memchr(b, ':', end - b));
+ auto p = static_cast<const char*>(memchr(b, ':', size_t(end - b)));
if (!p) {
key_ = StringPiece(b, end);
return;
bool next(type* val) {
if (pos_ == count_) {
// refill
- size_t rem = end_ - p_;
+ size_t rem = size_t(end_ - p_);
if (rem == 0 || remaining_ == 0) {
return false;
}
}
} else {
// Can't decode a full group
- count_ = Base::partialCount(p_, end_ - p_);
+ count_ = Base::partialCount(p_, size_t(end_ - p_));
if (remaining_ >= count_) {
remaining_ -= count_;
p_ = end_;
CHECK(pos_ == count_ && (p_ == end_ || remaining_ == 0));
// p_ may point to the internal buffer (tmp_), but we want
// to return subpiece of the original data
- size_t size = end_ - p_;
+ size_t size = size_t(end_ - p_);
return StringPiece(rrest_ - size, rrest_);
}
"'"));
}
IPAddress subnet(vec.at(0));
- uint8_t cidr =
- (defaultCidr > -1) ? uint8_t(defaultCidr) : (subnet.isV4() ? 32 : 128);
+ auto cidr =
+ uint8_t((defaultCidr > -1) ? defaultCidr : (subnet.isV4() ? 32 : 128));
if (elemCount == 2) {
try {
// See RFC 4291 sections 2.5.1, 2.5.6, and Appendix A
const auto* macBytes = mac.bytes();
memcpy(&bytes_.front(), "\xfe\x80\x00\x00\x00\x00\x00\x00", 8);
- bytes_[8] = macBytes[0] ^ 0x02;
+ bytes_[8] = uint8_t(macBytes[0] ^ 0x02);
bytes_[9] = macBytes[1];
bytes_[10] = macBytes[2];
bytes_[11] = 0xff;
// convert two uint8_t bytes into a uint16_t as hibyte.lobyte
static inline uint16_t unpack(uint8_t lobyte, uint8_t hibyte) {
- return ((uint16_t)hibyte << 8) | (uint16_t)lobyte;
+ return uint16_t((uint16_t(hibyte) << 8) | lobyte);
}
// given a src string, unpack count*2 bytes into dest
uint8_t IPAddressV6::getMulticastFlags() const {
DCHECK(isMulticast());
- return ((addr_.bytes_[1] >> 4) & 0xf);
+ return uint8_t((addr_.bytes_[1] >> 4) & 0xf);
}
uint8_t IPAddressV6::getMulticastScope() const {
DCHECK(isMulticast());
- return (addr_.bytes_[1] & 0xf);
+ return uint8_t(addr_.bytes_[1] & 0xf);
}
IPAddressV6 IPAddressV6::getSolicitedNodeAddress() const {
// of bits required to hold indices from a pool, given its capacity
static constexpr uint32_t maxIndexForCapacity(uint32_t capacity) {
- // index of uint32_t(-1) == UINT32_MAX is reserved for isAllocated tracking
+ // index of std::numeric_limits<uint32_t>::max() is reserved for isAllocated
+ // tracking
return uint32_t(std::min(
uint64_t(capacity) + (NumLocalLists - 1) * LocalListLimit,
- uint64_t(uint32_t(-1) - 1)));
+ uint64_t(std::numeric_limits<uint32_t>::max() - 1)));
}
static constexpr uint32_t capacityForMaxIndex(uint32_t maxIndex) {
, globalHead_(TaggedPtr{})
{
const size_t needed = sizeof(Slot) * (actualCapacity_ + 1);
- size_t pagesize = sysconf(_SC_PAGESIZE);
+ size_t pagesize = size_t(sysconf(_SC_PAGESIZE));
mmapLength_ = ((needed - 1) & ~(pagesize - 1)) + pagesize;
assert(needed <= mmapLength_ && mmapLength_ < needed + pagesize);
assert((mmapLength_ % pagesize) == 0);
* in any queue) are also counted.
*/
ssize_t sizeGuess() const noexcept {
- return (std::get<0>(stages_).writeCount() * kAmplification -
- std::get<sizeof...(Stages)>(stages_).readCount());
+ return ssize_t(
+ std::get<0>(stages_).writeCount() * kAmplification -
+ std::get<sizeof...(Stages)>(stages_).readCount());
}
private:
ticket = numPushes;
const auto numPops = popTicket_.load(std::memory_order_acquire); // B
// n will be negative if pops are pending
- const int64_t n = numPushes - numPops;
+ const int64_t n = int64_t(numPushes - numPops);
if (n >= static_cast<ssize_t>(capacity_)) {
// Full, linearize at B. We don't need to recheck the read we
// performed at A, because if numPushes was stale at B then the
template <typename N, typename D>
inline constexpr detail::IdivResultType<N, D> divFloor(N num, D denom) {
using R = decltype(num / denom);
- return kIntegerDivisionGivesRemainder && std::is_signed<R>::value
- ? detail::divFloorBranchless<R>(num, denom)
- : detail::divFloorBranchful<R>(num, denom);
+ return detail::IdivResultType<N, D>(
+ kIntegerDivisionGivesRemainder && std::is_signed<R>::value
+ ? detail::divFloorBranchless<R>(num, denom)
+ : detail::divFloorBranchful<R>(num, denom));
}
/**
template <typename N, typename D>
inline constexpr detail::IdivResultType<N, D> divCeil(N num, D denom) {
using R = decltype(num / denom);
- return kIntegerDivisionGivesRemainder && std::is_signed<R>::value
- ? detail::divCeilBranchless<R>(num, denom)
- : detail::divCeilBranchful<R>(num, denom);
+ return detail::IdivResultType<N, D>(
+ kIntegerDivisionGivesRemainder && std::is_signed<R>::value
+ ? detail::divCeilBranchless<R>(num, denom)
+ : detail::divCeilBranchful<R>(num, denom));
}
/**
*/
template <typename N, typename D>
inline constexpr detail::IdivResultType<N, D> divTrunc(N num, D denom) {
- return num / denom;
+ return detail::IdivResultType<N, D>(num / denom);
}
/**
template <typename N, typename D>
inline constexpr detail::IdivResultType<N, D> divRoundAway(N num, D denom) {
using R = decltype(num / denom);
- return kIntegerDivisionGivesRemainder && std::is_signed<R>::value
- ? detail::divRoundAwayBranchless<R>(num, denom)
- : detail::divRoundAwayBranchful<R>(num, denom);
+ return detail::IdivResultType<N, D>(
+ kIntegerDivisionGivesRemainder && std::is_signed<R>::value
+ ? detail::divRoundAwayBranchless<R>(num, denom)
+ : detail::divRoundAwayBranchful<R>(num, denom));
}
} // namespace folly
(options_.writable ? PROT_WRITE : 0));
}
- unsigned char* start = static_cast<unsigned char*>(
- mmap(options_.address, mapLength_, prot, flags, file_.fd(), offset));
+ unsigned char* start = static_cast<unsigned char*>(mmap(
+ options_.address, size_t(mapLength_), prot, flags, file_.fd(), offset));
PCHECK(start != MAP_FAILED)
<< " offset=" << offset
<< " length=" << mapLength_;
mapStart_ = start;
- data_.reset(start + skipStart, length);
+ data_.reset(start + skipStart, size_t(length));
}
}
// chunks breaks the locking into intervals and lets other threads do memory
// operations of their own.
- size_t chunkSize = memOpChunkSize(off_t(bufSize), pageSize);
+ size_t chunkSize = size_t(memOpChunkSize(off_t(bufSize), pageSize));
char* addr = static_cast<char*>(mem);
amountSucceeded = 0;
bool MemoryMapping::mlock(LockMode lock) {
size_t amountSucceeded = 0;
- locked_ = memOpInChunks(::mlock, mapStart_, mapLength_, options_.pageSize,
- amountSucceeded);
+ locked_ = memOpInChunks(
+ ::mlock,
+ mapStart_,
+ size_t(mapLength_),
+ options_.pageSize,
+ amountSucceeded);
if (locked_) {
return true;
}
if (!locked_) return;
size_t amountSucceeded = 0;
- if (!memOpInChunks(::munlock, mapStart_, mapLength_, options_.pageSize,
- amountSucceeded)) {
+ if (!memOpInChunks(
+ ::munlock,
+ mapStart_,
+ size_t(mapLength_),
+ options_.pageSize,
+ amountSucceeded)) {
PLOG(WARNING) << "munlock()";
}
if (mapLength_ && dontneed &&
- ::madvise(mapStart_, mapLength_, MADV_DONTNEED)) {
+ ::madvise(mapStart_, size_t(mapLength_), MADV_DONTNEED)) {
PLOG(WARNING) << "madvise()";
}
locked_ = false;
MemoryMapping::~MemoryMapping() {
if (mapLength_) {
size_t amountSucceeded = 0;
- if (!memOpInChunks(::munmap, mapStart_, mapLength_, options_.pageSize,
- amountSucceeded)) {
+ if (!memOpInChunks(
+ ::munmap,
+ mapStart_,
+ size_t(mapLength_),
+ options_.pageSize,
+ amountSucceeded)) {
PLOG(FATAL) << folly::format("munmap({}) failed at {}",
mapLength_, amountSucceeded);
}
}
}
-void MemoryMapping::advise(int advice) const { advise(advice, 0, mapLength_); }
+void MemoryMapping::advise(int advice) const {
+ advise(advice, 0, size_t(mapLength_));
+}
void MemoryMapping::advise(int advice, size_t offset, size_t length) const {
CHECK_LE(offset + length, size_t(mapLength_))
iterator end() {
auto it = iterator(c_.end());
if (lastCount_ != Node::kElementCount) {
- it -= (Node::kElementCount - lastCount_);
+ it -= difference_type(Node::kElementCount - lastCount_);
}
return it;
}
*/
void init(IntType initialValue = 0) {
CHECK(!(initialValue & kLockBitMask_));
- lock_ = initialValue;
+ lock_ = UIntType(initialValue);
}
/*
*/
void setData(IntType w) {
CHECK(!(w & kLockBitMask_));
- lock_ = (lock_ & kLockBitMask_) | w;
+ lock_ = UIntType((lock_ & kLockBitMask_) | w);
}
/*
typedef typename std::remove_reference<
typename std::iterator_traits<Iter>::reference>::type
value_type;
+ using difference_type = typename std::iterator_traits<Iter>::difference_type;
typedef typename std::iterator_traits<Iter>::reference reference;
/**
return size_type(e_ - b_);
}
constexpr size_type walk_size() const {
- return std::distance(b_, e_);
+ return size_type(std::distance(b_, e_));
}
constexpr bool empty() const {
return b_ == e_;
auto i = find(delimiter);
Range result(b_, i == std::string::npos ? size() : i);
- b_ = result.end() == e_ ? e_ : std::next(result.end(), delimiter.size());
+ b_ = result.end() == e_
+ ? e_
+ : std::next(
+ result.end(),
+ typename std::iterator_traits<Iter>::difference_type(
+ delimiter.size()));
return result;
}
storage_.un.init();
}
external_ = true;
- memcpy(storage_.un.addr, address, addrlen);
+ memcpy(storage_.un.addr, address, size_t(addrlen));
updateUnixAddressLength(addrlen);
// Fill the rest with 0s, just for safety
}
if (storage_.un.addr->sun_path[0] == '\0') {
// abstract namespace
- return std::string(storage_.un.addr->sun_path, storage_.un.pathLength());
+ return std::string(
+ storage_.un.addr->sun_path, size_t(storage_.un.pathLength()));
}
- return std::string(storage_.un.addr->sun_path,
- strnlen(storage_.un.addr->sun_path,
- storage_.un.pathLength()));
+ return std::string(
+ storage_.un.addr->sun_path,
+ strnlen(storage_.un.addr->sun_path, size_t(storage_.un.pathLength())));
}
std::string SocketAddress::describe() const {
return "<abstract unix address>";
}
- return std::string(storage_.un.addr->sun_path,
- strnlen(storage_.un.addr->sun_path,
- storage_.un.pathLength()));
+ return std::string(
+ storage_.un.addr->sun_path,
+ strnlen(storage_.un.addr->sun_path, size_t(storage_.un.pathLength())));
}
switch (getFamily()) {
case AF_UNSPEC:
if (storage_.un.len != other.storage_.un.len) {
return false;
}
- int cmp = memcmp(storage_.un.addr->sun_path,
- other.storage_.un.addr->sun_path,
- storage_.un.pathLength());
+ int cmp = memcmp(
+ storage_.un.addr->sun_path,
+ other.storage_.un.addr->sun_path,
+ size_t(storage_.un.pathLength()));
return cmp == 0;
}
if (thisPathLength != otherPathLength) {
return thisPathLength < otherPathLength;
}
- int cmp = memcmp(storage_.un.addr->sun_path,
- other.storage_.un.addr->sun_path,
- thisPathLength);
+ int cmp = memcmp(
+ storage_.un.addr->sun_path,
+ other.storage_.un.addr->sun_path,
+ size_t(thisPathLength));
return cmp < 0;
}
switch (getFamily()) {
sa_family_t getFamily() const {
DCHECK(external_ || AF_UNIX != storage_.addr.family());
- return external_ ? AF_UNIX : storage_.addr.family();
+ return external_ ? sa_family_t(AF_UNIX) : storage_.addr.family();
}
bool empty() const {
static constexpr uint64_t kMagic = 0x1234faceb00c;
socklen_t pathLength() const {
- return len - offsetof(struct sockaddr_un, sun_path);
+ return socklen_t(len - offsetof(struct sockaddr_un, sun_path));
}
void init() {
addr = new sockaddr_un;
magic = kMagic;
len = other.len;
- memcpy(addr, other.addr, len);
+ memcpy(addr, other.addr, size_t(len));
// Fill the rest with 0s, just for safety
memset(reinterpret_cast<char*>(addr) + len, 0,
sizeof(struct sockaddr_un) - len);
void copy(const ExternalUnixAddr &other) {
CHECK(magic == kMagic);
len = other.len;
- memcpy(addr, other.addr, len);
+ memcpy(addr, other.addr, size_t(len));
}
void free() {
CHECK(magic == kMagic);
if (e == 'P') { // printable
++p;
} else if (e == 'O') { // octal
- out.append(&*last, p - last);
+ out.append(&*last, size_t(p - last));
esc[1] = '0' + ((v >> 6) & 7);
esc[2] = '0' + ((v >> 3) & 7);
esc[3] = '0' + (v & 7);
++p;
last = p;
} else { // special 1-character escape
- out.append(&*last, p - last);
+ out.append(&*last, size_t(p - last));
esc[1] = e;
out.append(esc, 2);
++p;
last = p;
}
}
- out.append(&*last, p - last);
+ out.append(&*last, size_t(p - last));
}
namespace detail {
if (LIKELY(discriminator <= minEncode)) {
++p;
} else if (mode == UriEscapeMode::QUERY && discriminator == 3) {
- out.append(&*last, p - last);
+ out.append(&*last, size_t(p - last));
out.push_back('+');
++p;
last = p;
} else {
- out.append(&*last, p - last);
+ out.append(&*last, size_t(p - last));
esc[1] = hexValues[v >> 4];
esc[2] = hexValues[v & 0x0f];
out.append(esc, 3);
last = p;
}
}
- out.append(&*last, p - last);
+ out.append(&*last, size_t(p - last));
}
template <class String>
if (UNLIKELY(h1 == 16 || h2 == 16)) {
throw std::invalid_argument("invalid percent encode sequence");
}
- out.append(&*last, p - last);
+ out.append(&*last, size_t(p - last));
out.push_back((h1 << 4) | h2);
p += 3;
last = p;
}
case '+':
if (mode == UriEscapeMode::QUERY) {
- out.append(&*last, p - last);
+ out.append(&*last, size_t(p - last));
out.push_back(' ');
++p;
last = p;
break;
}
}
- out.append(&*last, p - last);
+ out.append(&*last, size_t(p - last));
}
namespace detail {
}
if (static_cast<size_t>(bytes_used) < inline_buffer.size()) {
- output.append(inline_buffer.data(), bytes_used);
+ output.append(inline_buffer.data(), size_t(bytes_used));
return;
}
// Couldn't fit. Heap allocate a buffer, oh well.
- std::unique_ptr<char[]> heap_buffer(new char[bytes_used + 1]);
- int final_bytes_used =
- stringAppendfImplHelper(heap_buffer.get(), bytes_used + 1, format, args);
+ std::unique_ptr<char[]> heap_buffer(new char[size_t(bytes_used + 1)]);
+ int final_bytes_used = stringAppendfImplHelper(
+ heap_buffer.get(), size_t(bytes_used + 1), format, args);
// The second call can take fewer bytes if, for example, we were printing a
// string buffer with null-terminating char using a width specifier -
// vsnprintf("%.*s", buf.size(), buf)
CHECK(bytes_used >= final_bytes_used);
// We don't keep the trailing '\0' in our output string
- output.append(heap_buffer.get(), final_bytes_used);
+ output.append(heap_buffer.get(), size_t(final_bytes_used));
}
} // anon namespace
"Unable to parse suffix \"",
prettyString->toString(), "\""));
}
- prettyString->advance(longestPrefixLen);
+ prettyString->advance(size_t(longestPrefixLen));
return suffixes[bestPrefixId].val ? value * suffixes[bestPrefixId].val :
value;
}
// by adding 0x20.
// Step 1: Clear the high order bit. We'll deal with it in Step 5.
- unsigned char rotated = c & 0x7f;
+ uint8_t rotated = uint8_t(c & 0x7f);
// Currently, the value of rotated, as a function of the original c is:
// below 'A': 0- 64
// 'A'-'Z': 65- 90
// At this point, rotated is 0x20 if c is 'A'-'Z' and 0x00 otherwise
// Step 7: Add rotated to c
- c += rotated;
+ c += char(rotated);
}
void toLowerAscii32(uint32_t& c) {
ThreadCachedInt& operator-=(IntT inc) { increment(-inc); return *this; }
// pre-increment (we don't support post-increment)
ThreadCachedInt& operator++() { increment(1); return *this; }
- ThreadCachedInt& operator--() { increment(-1); return *this; }
+ ThreadCachedInt& operator--() {
+ increment(IntT(-1));
+ return *this;
+ }
// Thread-safe set function.
// This is a best effort implementation. In some edge cases, there could be
val >>= 7;
}
*p++ = uint8_t(val);
- return p - buf;
+ return size_t(p - buf);
}
template <class T>
// whose coefficients are the bits of q.
for (int x = 0; x < 256; x++) {
FingerprintPolynomial<DEG> t;
- t.setHigh8Bits(x);
+ t.setHigh8Bits(uint8_t(x));
for (int i = 0; i < 8; i++) {
t.mulXkmod(8, poly);
t.write(&(table[i][x][0]));
// wiggle room
numCpus = 32;
}
- return CacheLocality::uniform(numCpus);
+ return CacheLocality::uniform(size_t(numCpus));
}
template <>
// Multiply by X. The actual degree must be < DEG.
void mulX() {
- CHECK_EQ(0, val_[0] & (1ULL<<63));
+ CHECK_EQ(0u, val_[0] & (1ULL << 63));
uint64_t b = 0;
for (int i = size()-1; i >= 0; i--) {
uint64_t nb = val_[i] >> 63;
std::size_t asize = a.size();
std::array<uint8_t, N> ba{{0}};
for (std::size_t i = 0; i < asize; i++) {
- ba[i] = a[i] & b[i];
+ ba[i] = uint8_t(a[i] & b[i]);
}
return ba;
}
template <
class IntegralType,
IntegralType DigitCount,
- IntegralType Base = 10,
+ IntegralType Base = IntegralType(10),
bool PrintAllDigits = false,
class = typename std::enable_if<
std::is_integral<IntegralType>::value &&
bool found = PrintAllDigits;
while (powerToPrint) {
if (found || powerToPrint <= val) {
- IntegralType value = val / powerToPrint;
+ IntegralType value = IntegralType(val / powerToPrint);
if (Base == 10 || value < 10) {
value += '0';
} else {
auto index =
_mm_cmpestri(arr2, int(needles.size()), arr1, int(haystack.size()), 0);
if (index < 16) {
- return index;
+ return size_t(index);
}
// Now, we can do aligned loads hereafter...
if (idx < 0 || idx >= parray->size()) {
return nullptr;
}
- return &(*parray)[idx.asInt()];
+ return &(*parray)[size_t(idx.asInt())];
} else if (auto* pobject = get_nothrow<ObjectImpl>()) {
auto it = pobject->find(idx);
if (it == pobject->end()) {
if (idx < 0 || idx >= parray->size()) {
std::__throw_out_of_range("out of range in dynamic array");
}
- return (*parray)[idx.asInt()];
+ return (*parray)[size_t(idx.asInt())];
} else if (auto* pobject = get_nothrow<ObjectImpl>()) {
auto it = pobject->find(idx);
if (it == pobject->end()) {
private:
static void* create() {
auto ptr = mmap(nullptr, 1, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- checkUnixError(reinterpret_cast<uintptr_t>(ptr), "mmap");
+ checkUnixError(reinterpret_cast<ssize_t>(ptr), "mmap");
// Optimistically try to lock the page so it stays resident. Could make
// the heavy barrier faster.
if (begin == end) {
return MutableCompressedList();
}
- BitVectorEncoder encoder(end - begin, *(end - 1));
+ BitVectorEncoder encoder(size_t(end - begin), *(end - 1));
for (; begin != end; ++begin) {
encoder.add(*begin);
}
size_t countInThisBlock = bitsPerBlock - offset;
size_t countInNextBlock = count - countInThisBlock;
- UnderlyingType thisBlock = value & ((one << countInThisBlock) - 1);
- UnderlyingType nextBlock = value >> countInThisBlock;
+ UnderlyingType thisBlock =
+ UnderlyingType(value & ((one << countInThisBlock) - 1));
+ UnderlyingType nextBlock = UnderlyingType(value >> countInThisBlock);
if (std::is_signed<UnderlyingType>::value) {
nextBlock &= ones(countInNextBlock);
}
return ::free(data.data());
}
- size_t upperSize() const { return data.end() - upper; }
+ size_t upperSize() const {
+ return size_t(data.end() - upper);
+ }
size_t size = 0;
uint8_t numLowerBits = 0;
return 0;
}
// floor(log(upperBound / size));
- return folly::findLastSet(upperBound / size) - 1;
+ return uint8_t(folly::findLastSet(upperBound / size) - 1);
}
// Requires: input range (begin, end) is sorted (encoding
if (begin == end) {
return MutableCompressedList();
}
- EliasFanoEncoderV2 encoder(end - begin, *(end - 1));
+ EliasFanoEncoderV2 encoder(size_t(end - begin), *(end - 1));
for (; begin != end; ++begin) {
encoder.add(*begin);
}
/* static */ if (skipQuantum != 0) {
while ((skipPointersSize_ + 1) * skipQuantum <= upperBits) {
// Store the number of preceding 1-bits.
- skipPointers_[skipPointersSize_++] = size_;
+ skipPointers_[skipPointersSize_++] = SkipValue(size_);
}
}
void reset() {
block_ = start_ != nullptr ? folly::loadUnaligned<block_t>(start_) : 0;
outer_ = 0;
- inner_ = -1;
- position_ = -1;
+ inner_ = std::numeric_limits<size_t>::max();
+ position_ = std::numeric_limits<size_t>::max();
value_ = 0;
}
}
++position_;
- inner_ = Instructions::ctz(block_);
+ inner_ = size_t(Instructions::ctz(block_));
block_ = Instructions::blsr(block_);
return setValue();
if (Encoder::forwardQuantum == 0 || n <= Encoder::forwardQuantum) {
reset();
} else {
- position_ = -1; // Avoid reading the head, skip() will reposition.
+ position_ = size_t(-1); // Avoid reading the head, skip() will reposition.
}
return skip(n);
}
lastValue_ = 0;
return;
}
- ValueType lastUpperValue = 8 * list.upperSize() - size_;
+ ValueType lastUpperValue = ValueType(8 * list.upperSize() - size_);
auto it = list.upper + list.upperSize() - 1;
DCHECK_NE(*it, 0);
lastUpperValue -= 8 - folly::findLastSet(*it);
return true;
}
static FOLLY_ALWAYS_INLINE uint64_t popcount(uint64_t value) {
- return __builtin_popcountll(value);
+ return uint64_t(__builtin_popcountll(value));
}
static FOLLY_ALWAYS_INLINE int ctz(uint64_t value) {
DCHECK_GT(value, 0u);
if (value.type() != type_) {
return none;
}
- if (!Comparison()(size_t(length_), value.size())) {
+ if (!Comparison()(length_, int64_t(value.size()))) {
return makeError("different length string/array/object", value);
}
return none;
std::string filename = file_.path().string();
// Yes, I know that I could just keep the file open instead. So sue me.
folly::File f(openNoInt(filename.c_str(), O_RDONLY), true);
- auto size = lseek(f.fd(), 0, SEEK_END) - readOffset_;
+ auto size = size_t(lseek(f.fd(), 0, SEEK_END) - readOffset_);
std::unique_ptr<char[]> buf(new char[size]);
auto bytes_read = folly::preadFull(f.fd(), buf.get(), size, readOffset_);
- PCHECK(size == bytes_read);
- readOffset_ += size;
+ PCHECK(ssize_t(size) == bytes_read);
+ readOffset_ += off_t(size);
chunkCob_(StringPiece(buf.get(), buf.get() + size));
return std::string(buf.get(), size);
}
static void bserEncodeString(folly::StringPiece str, QueueAppender& appender) {
appender.write((int8_t)BserType::String);
- bserEncodeInt(str.size(), appender);
+ bserEncodeInt(int64_t(str.size()), appender);
appender.push((uint8_t*)str.data(), str.size());
}
QueueAppender& appender,
const serialization_opts& opts) {
appender.write((int8_t)BserType::Array);
- bserEncodeInt(dyn.size(), appender);
+ bserEncodeInt(int64_t(dyn.size()), appender);
for (const auto& ele : dyn) {
bserEncode(ele, appender, opts);
}
bserEncodeArraySimple(*templ, appender, opts);
// The number of objects in the array
- bserEncodeInt(dyn.size(), appender);
+ bserEncodeInt(int64_t(dyn.size()), appender);
// For each object in the array
for (const auto& ele : dyn) {
QueueAppender& appender,
const serialization_opts& opts) {
appender.write((int8_t)BserType::Object);
- bserEncodeInt(dyn.size(), appender);
+ bserEncodeInt(int64_t(dyn.size()), appender);
if (opts.sort_keys) {
std::vector<std::pair<dynamic, dynamic>> sorted(dyn.items().begin(),
if (len < 0) {
throw std::range_error("string length must not be negative");
}
- str.reserve(len);
+ str.reserve(size_t(len));
size_t available = curs.length();
while (available < (size_t)len) {
available = curs.length();
}
- str.append(reinterpret_cast<const char*>(curs.data()), len);
- curs.skipAtMost(len);
+ str.append(reinterpret_cast<const char*>(curs.data()), size_t(len));
+ curs.skipAtMost(size_t(len));
return str;
}
bool error = gen::byLine("/proc/meminfo") |
[&] (StringPiece line) -> bool {
if (boost::regex_match(line.begin(), line.end(), match, regex)) {
- StringPiece numStr(line.begin() + match.position(1), match.length(1));
+ StringPiece numStr(
+ line.begin() + match.position(1), size_t(match.length(1)));
pageSize = to<size_t>(numStr) * 1024; // in KiB
return false; // stop
}
for (fs::directory_iterator it(path); it != fs::directory_iterator(); ++it) {
std::string filename(it->path().filename().string());
if (boost::regex_match(filename, match, regex)) {
- StringPiece numStr(filename.data() + match.position(1), match.length(1));
+ StringPiece numStr(
+ filename.data() + match.position(1), size_t(match.length(1)));
vec.emplace_back(to<size_t>(numStr) * 1024);
}
}
}
char c = '\0';
if (match.length(2) != 0) {
- c = char(tolower(value[match.position(2)]));
+ c = char(tolower(value[size_t(match.position(2))]));
}
- StringPiece numStr(value.data() + match.position(1), match.length(1));
+ StringPiece numStr(value.data() + match.position(1), size_t(match.length(1)));
size_t size = to<size_t>(numStr);
switch (c) {
case 't': size *= 1024;
// Use a read lock for reading.
SYNCHRONIZED_CONST(pages, protectedPages()) {
for (const auto& page : pages) {
- intptr_t pageEnd = page + pagesize();
+ intptr_t pageEnd = intptr_t(page + pagesize());
if (page <= addr && addr < pageEnd) {
return true;
}
std::vector<std::pair<unsigned char*, bool>> freeList_;
static size_t pagesize() {
- static const size_t pagesize = sysconf(_SC_PAGESIZE);
+ static const size_t pagesize = size_t(sysconf(_SC_PAGESIZE));
return pagesize;
}
type> inline collectAll(InputIterator first, InputIterator last) {
typedef typename std::result_of<
typename std::iterator_traits<InputIterator>::value_type()>::type Result;
- size_t n = std::distance(first, last);
+ size_t n = size_t(std::distance(first, last));
std::vector<Result> results;
std::vector<size_t> order(n);
results.reserve(n);
std::vector<Try<T>> results;
};
- auto ctx = std::make_shared<CollectAllContext>(std::distance(first, last));
+ auto ctx =
+ std::make_shared<CollectAllContext>(size_t(std::distance(first, last)));
mapSetCallback<T>(first, last, [ctx](size_t i, Try<T>&& t) {
ctx->results[i] = std::move(t);
});
};
auto ctx = std::make_shared<CollectAnyWithoutExceptionContext>();
- ctx->nTotal = std::distance(first, last);
+ ctx->nTotal = size_t(std::distance(first, last));
mapSetCallback<T>(first, last, [ctx](size_t i, Try<T>&& t) {
if (!t.hasException() && !ctx->done.exchange(true)) {
: source_(std::move(source)),
ops_(std::move(ops)),
threads_(
- threads ? threads
- : std::max<size_t>(1, sysconf(_SC_NPROCESSORS_CONF))) {}
+ threads
+ ? threads
+ : size_t(std::max<long>(1, sysconf(_SC_NPROCESSORS_CONF)))) {}
template <class Handler>
bool apply(Handler&& handler) const {
DCHECK_EQ((flags & freeFlags), freeFlags);
while (true) {
- uint16_t newFlags = (flags & ~freeFlags);
+ uint16_t newFlags = uint16_t(flags & ~freeFlags);
if (newFlags == 0) {
// The storage space is now unused. Free it.
storage->prefix.HeapPrefix::~HeapPrefix();
uint8_t* bufAddr = reinterpret_cast<uint8_t*>(&storage->align);
uint8_t* storageEnd = reinterpret_cast<uint8_t*>(storage) + mallocSize;
- size_t actualCapacity = storageEnd - bufAddr;
+ size_t actualCapacity = size_t(storageEnd - bufAddr);
unique_ptr<IOBuf> ret(new (&storage->hs.buf) IOBuf(
InternalConstructor(), packFlagsAndSharedInfo(0, &storage->shared),
bufAddr, actualCapacity, bufAddr, 0));
uint8_t* infoStart = (buf + mallocSize) - sizeof(SharedInfo);
SharedInfo* sharedInfo = new(infoStart) SharedInfo;
- *capacityReturn = infoStart - buf;
+ *capacityReturn = uint64_t(infoStart - buf);
*infoReturn = sharedInfo;
}
#endif
checkUnixError(bytes, "pwrite() failed");
- DCHECK_EQ(bytes, totalLength);
+ DCHECK_EQ(size_t(bytes), totalLength);
}
RecordIOReader::RecordIOReader(File file, uint32_t fileId)
range_.clear();
} else {
recordAndPos_.second = pos;
- range_.advance(pos);
+ range_.advance(size_t(pos));
advanceToValid();
}
}
namespace folly {
ShutdownSocketSet::ShutdownSocketSet(int maxFd)
- : maxFd_(maxFd),
- data_(static_cast<std::atomic<uint8_t>*>(
- folly::checkedCalloc(maxFd, sizeof(std::atomic<uint8_t>)))),
- nullFile_("/dev/null", O_RDWR) {
-}
+ : maxFd_(maxFd),
+ data_(static_cast<std::atomic<uint8_t>*>(
+ folly::checkedCalloc(size_t(maxFd), sizeof(std::atomic<uint8_t>)))),
+ nullFile_("/dev/null", O_RDWR) {}
void ShutdownSocketSet::add(int fd) {
// Silently ignore any fds >= maxFd_, very unlikely
return;
}
- auto& sref = data_[fd];
+ auto& sref = data_[size_t(fd)];
uint8_t prevState = FREE;
CHECK(sref.compare_exchange_strong(prevState,
IN_USE,
return;
}
- auto& sref = data_[fd];
+ auto& sref = data_[size_t(fd)];
uint8_t prevState = 0;
retry_load:
return folly::closeNoInt(fd);
}
- auto& sref = data_[fd];
+ auto& sref = data_[size_t(fd)];
uint8_t prevState = sref.load(std::memory_order_relaxed);
uint8_t newState = 0;
return;
}
- auto& sref = data_[fd];
+ auto& sref = data_[size_t(fd)];
uint8_t prevState = IN_USE;
if (!sref.compare_exchange_strong(prevState,
IN_SHUTDOWN,
void ShutdownSocketSet::shutdownAll(bool abortive) {
for (int i = 0; i < maxFd_; ++i) {
- auto& sref = data_[i];
+ auto& sref = data_[size_t(i)];
if (sref.load(std::memory_order_acquire) == IN_USE) {
shutdown(i, abortive);
}
if (bytesRead > 0) {
if (movable) {
- ioBuf->append(bytesRead);
+ ioBuf->append(uint64_t(bytesRead));
readCallback_->readBufferAvailable(std::move(ioBuf));
} else {
- readCallback_->readDataAvailable(bytesRead);
+ readCallback_->readDataAvailable(size_t(bytesRead));
}
// Fall through and continue around the loop if the read
// completely filled the available buffer.
registerHandler(EventHandler::WRITE);
return;
}
- curQueue.trimStart(rc);
+ curQueue.trimStart(size_t(rc));
if (curQueue.empty()) {
auto cb = front.second;
queue_.pop_front();
AsyncSSLSocket* tsslSock;
iov.iov_base = const_cast<char*>(in);
- iov.iov_len = inl;
+ iov.iov_len = size_t(inl);
memset(&msg, 0, sizeof(msg));
msg.msg_iov = &iov;
msg.msg_iovlen = 1;
assert(events & EventHandler::READ_WRITE);
assert(eventBase_->isInEventBaseThread());
- uint16_t relevantEvents = events & EventHandler::READ_WRITE;
+ uint16_t relevantEvents = uint16_t(events & EventHandler::READ_WRITE);
if (relevantEvents == EventHandler::READ) {
handleRead();
} else if (relevantEvents == EventHandler::WRITE) {
<< bytesRead << " bytes";
if (bytesRead > 0) {
if (!isBufferMovable_) {
- readCallback_->readDataAvailable(bytesRead);
+ readCallback_->readDataAvailable(size_t(bytesRead));
} else {
CHECK(kOpenSslModeMoveBufferOwnership);
VLOG(5) << "this=" << this << ", AsyncSocket::handleRead() got "
// Always register for persistent events, so we don't have to re-register
// after being called back.
- if (!ioHandler_.registerHandler(eventFlags_ | EventHandler::PERSIST)) {
+ if (!ioHandler_.registerHandler(
+ uint16_t(eventFlags_ | EventHandler::PERSIST))) {
eventFlags_ = EventHandler::NONE; // we're not registered after error
AsyncSocketException ex(AsyncSocketException::INTERNAL_ERROR,
withAddr("failed to update AsyncSocket event registration"));
struct sockaddr_storage addrStorage;
socklen_t addrLen = sizeof(addrStorage);
- memset(&addrStorage, 0, addrLen);
+ memset(&addrStorage, 0, size_t(addrLen));
struct sockaddr* rawAddr = reinterpret_cast<sockaddr*>(&addrStorage);
rawAddr->sa_family = localAddress_.getFamily();
bool truncated = false;
if ((size_t)bytesRead > len) {
truncated = true;
- bytesRead = len;
+ bytesRead = ssize_t(len);
}
- readCallback_->onDataAvailable(clientAddress_, bytesRead, truncated);
+ readCallback_->onDataAvailable(
+ clientAddress_, size_t(bytesRead), truncated);
}
} else {
if (errno == EAGAIN || errno == EWOULDBLOCK) {
flags |= READ;
}
- return registerHandler(flags | PERSIST);
+ return registerHandler(uint16_t(flags | PERSIST));
}
} // Namespace
clearCobTimeouts();
- DCHECK_EQ(0, runBeforeLoopCallbacks_.size());
+ DCHECK_EQ(0u, runBeforeLoopCallbacks_.size());
(void)runLoopCallbacks();
// Unfortunately, event_set() resets the event_base, so we have to remember
// it before hand, then pass it back into event_base_set() afterwards
struct event_base* evb = event_.ev_base;
- event_set(&event_, event_.ev_fd, events,
- &EventHandler::libeventCallback, this);
+ event_set(
+ &event_,
+ event_.ev_fd,
+ short(events),
+ &EventHandler::libeventCallback,
+ this);
event_base_set(evb, &event_);
// Set EVLIST_INTERNAL if this is an internal event
// this can't possibly fire if handler->eventBase_ is nullptr
handler->eventBase_->bumpHandlingTime();
- handler->handlerReady(events);
+ handler->handlerReady(uint16_t(events));
if (observer) {
observer->stopped(reinterpret_cast<uintptr_t>(handler));
<< ", but tls11AltCipherlist is of length "
<< tls11AltCipherlist.size();
} else {
- ciphers = &tls11AltCipherlist[index].first;
+ ciphers = &tls11AltCipherlist[size_t(index)].first;
}
}
size_t SSLContext::pickNextProtocols() {
CHECK(!advertisedNextProtocols_.empty()) << "Failed to pickNextProtocols";
auto rng = ThreadLocalPRNG();
- return nextProtocolDistribution_(rng);
+ return size_t(nextProtocolDistribution_(rng));
}
int SSLContext::advertisedNextProtocolCallback(SSL* ssl,
SSL_CTX_set_session_id_context(
ctx_,
reinterpret_cast<const unsigned char*>(context.data()),
- std::min(
- static_cast<int>(context.length()), SSL_MAX_SSL_SESSION_ID_LENGTH));
+ std::min<unsigned int>(
+ static_cast<unsigned int>(context.length()),
+ SSL_MAX_SSL_SESSION_ID_LENGTH));
}
/**
if (length > size) {
length = size;
}
- strncpy(password, userPassword.c_str(), length);
+ strncpy(password, userPassword.c_str(), size_t(length));
return length;
}
static void callbackLocking(int mode, int n, const char*, int) {
if (mode & CRYPTO_LOCK) {
- locks()[n].lock();
+ locks()[size_t(n)].lock();
} else {
- locks()[n].unlock();
+ locks()[size_t(n)].unlock();
}
}
SSL_load_error_strings();
ERR_load_crypto_strings();
// static locking
- locks().reset(new SSLLock[::CRYPTO_num_locks()]);
+ locks().reset(new SSLLock[size_t(::CRYPTO_num_locks())]);
for (auto it: lockTypes()) {
- locks()[it.first].lockType = it.second;
+ locks()[size_t(it.first)].lockType = it.second;
}
CRYPTO_set_id_callback(callbackThreadID);
CRYPTO_set_locking_callback(callbackLocking);
EXPECT_TRUE(sock_->isEorTrackingEnabled());
EXPECT_CALL(*(sock_.get()), getRawBytesWritten())
- // rawBytesWritten after writting initAppBytesWritten + 1500
- // + some random SSL overhead
- .WillOnce(Return(3600))
- // rawBytesWritten after writting last 6 bytes
- // + some random SSL overhead
- .WillOnce(Return(3728));
+      // rawBytesWritten after writing initAppBytesWritten + 1500
+ // + some random SSL overhead
+ .WillOnce(Return(3600u))
+      // rawBytesWritten after writing last 6 bytes
+ // + some random SSL overhead
+ .WillOnce(Return(3728u));
EXPECT_CALL(*(sock_.get()), sslWriteImpl(_, _, 1500))
.WillOnce(Invoke([=, &pos] (SSL *, const void *buf, int m) {
// the first 1500 does not have the EOR byte
wcursor.writeLE((uint64_t)1);
wcursor.write((uint8_t)1);
- EXPECT_EQ(1, rcursor.readLE<uint64_t>());
+ EXPECT_EQ(1u, rcursor.readLE<uint64_t>());
rcursor.skip(8);
- EXPECT_EQ(1, rcursor.readLE<uint32_t>());
+ EXPECT_EQ(1u, rcursor.readLE<uint32_t>());
rcursor.skip(0);
- EXPECT_EQ(0, rcursor.read<uint8_t>());
- EXPECT_EQ(0, rcursor.read<uint8_t>());
- EXPECT_EQ(0, rcursor.read<uint8_t>());
- EXPECT_EQ(0, rcursor.read<uint8_t>());
- EXPECT_EQ(1, rcursor.read<uint8_t>());
+ EXPECT_EQ(0u, rcursor.read<uint8_t>());
+ EXPECT_EQ(0u, rcursor.read<uint8_t>());
+ EXPECT_EQ(0u, rcursor.read<uint8_t>());
+ EXPECT_EQ(0u, rcursor.read<uint8_t>());
+ EXPECT_EQ(1u, rcursor.read<uint8_t>());
}
TEST(IOBuf, skip) {
"longer than our original allocation size,",
"and will therefore require a new allocation", 0x12345678);
// The tailroom should start with a nul byte now.
- EXPECT_GE(head.prev()->tailroom(), 1);
+ EXPECT_GE(head.prev()->tailroom(), 1u);
EXPECT_EQ(0, *head.prev()->tail());
EXPECT_EQ("test32this string is longer than our original "
}
// There must be a goodMallocSize between 100 and 1024...
- EXPECT_LT(1, queue.front()->countChainElements());
+ EXPECT_LT(1u, queue.front()->countChainElements());
const IOBuf* buf = queue.front();
do {
- EXPECT_LE(100, buf->capacity());
+ EXPECT_LE(100u, buf->capacity());
buf = buf->next();
} while (buf != queue.front());
in.error("expected 4 hex digits");
}
- uint16_t ret = hexVal(*in) * 4096;
+ uint16_t ret = uint16_t(hexVal(*in) * 4096);
++in;
ret += hexVal(*in) * 256;
++in;
// note that this if condition captures non readable chars
// with value < 32, so size = 1 byte (e.g control chars).
out.append("\\u00");
- out.push_back(hexDigit((*p & 0xf0) >> 4));
- out.push_back(hexDigit(*p & 0xf));
+ out.push_back(hexDigit(uint8_t((*p & 0xf0) >> 4)));
+ out.push_back(hexDigit(uint8_t(*p & 0xf)));
p++;
}
} else {
- out.push_back(*p++);
+ out.push_back(char(*p++));
}
}
std::is_integral<T>::value && !std::is_same<T, bool>::value &&
std::is_signed<T>::value>::type> {
static constexpr typename std::make_unsigned<T>::type go(T t) {
- return t < static_cast<T>(0) ? -t : t;
+ return typename std::make_unsigned<T>::type(t < static_cast<T>(0) ? -t : t);
}
};
} // namespace detail
if (h != INVALID_HANDLE_VALUE) {
DWORD flags;
if (GetHandleInformation(h, &flags)) {
- res = flags & HANDLE_FLAG_INHERIT;
+ res = int(flags & HANDLE_FLAG_INHERIT);
}
}
break;
}
const char* inet_ntop(int af, const void* src, char* dst, socklen_t size) {
- return ::inet_ntop(af, (char*)src, dst, size);
+ return ::inet_ntop(af, (char*)src, dst, size_t(size));
}
int listen(int s, int backlog) {
return -1;
}
} else {
- DWORD flags = 0
- | (operation & LOCK_NB ? LOCKFILE_FAIL_IMMEDIATELY : 0)
- | (operation & LOCK_EX ? LOCKFILE_EXCLUSIVE_LOCK : 0)
- ;
+ DWORD flags = DWORD(
+ (operation & LOCK_NB ? LOCKFILE_FAIL_IMMEDIATELY : 0) |
+ (operation & LOCK_EX ? LOCKFILE_EXCLUSIVE_LOCK : 0));
OVERLAPPED ov = {};
if (!LockFileEx(h, flags, 0, kMaxDWORD, kMaxDWORD, &ov)) {
return -1;
}
res->tv_sec = 0;
- res->tv_nsec = timeIncrement * 100;
+ res->tv_nsec = long(timeIncrement * 100);
return 0;
}
}
}
auto r = _write(fh, buf, unsigned int(count));
- if ((r > 0 && r != count) || (r == -1 && errno == ENOSPC)) {
+ if ((r > 0 && size_t(r) != count) || (r == -1 && errno == ENOSPC)) {
// Writing to a pipe with a full buffer doesn't generate
// any error type, unless it caused us to write exactly 0
// bytes, so we have to see if we have a pipe first. We
}
void hash_final(MutableByteRange out) {
const auto size = EVP_MD_size(md_);
- check_out_size(size, out);
+ check_out_size(size_t(size), out);
unsigned int len = 0;
check_libssl_result(1, EVP_DigestFinal_ex(&ctx_, out.data(), &len));
- check_libssl_result(size, len);
+ check_libssl_result(size, int(len));
md_ = nullptr;
}
private:
}
void hash_final(MutableByteRange out) {
const auto size = EVP_MD_size(md_);
- check_out_size(size, out);
+ check_out_size(size_t(size), out);
unsigned int len = 0;
check_libssl_result(1, HMAC_Final(&ctx_, out.data(), &len));
check_libssl_result(size, int(len));
auto len = i2d_SSL_SESSION(session_, nullptr);
if (len > 0) {
- std::unique_ptr<unsigned char[]> uptr(new unsigned char[len]);
+ std::unique_ptr<unsigned char[]> uptr(new unsigned char[size_t(len)]);
auto p = uptr.get();
auto written = i2d_SSL_SESSION(session_, &p);
if (written <= 0) {
auto combined = ByteRange(StringPiece("foobar"));
HMAC(
EVP_sha256(),
- key.data(), key.size(),
- combined.data(), combined.size(),
- expected.data(), nullptr);
+ key.data(),
+ int(key.size()),
+ combined.data(),
+ combined.size(),
+ expected.data(),
+ nullptr);
auto out = vector<uint8_t>(32);
OpenSSLHash::hmac_sha256(range(out), key, buf);
// There is no point in having more buckets than our timestamp
// granularity: otherwise we would have buckets that could never be used.
if (nBuckets > size_t(duration_.count())) {
- nBuckets = duration_.count();
+ nBuckets = size_t(duration_.count());
}
buckets_.resize(nBuckets, Bucket());
TimePoint bucketStart,
TimePoint nextBucketStart) -> bool {
sample_count += this->rangeAdjust(
- bucketStart, nextBucketStart, start, end, bucket.count);
+ bucketStart, nextBucketStart, start, end, ValueType(bucket.count));
return true;
});
total += this->rangeAdjust(
bucketStart, nextBucketStart, start, end, bucket.sum);
sample_count += this->rangeAdjust(
- bucketStart, nextBucketStart, start, end, bucket.count);
+ bucketStart, nextBucketStart, start, end, ValueType(bucket.count));
return true;
});
Duration timeMod = time.time_since_epoch() % duration_;
TimeInt numFullDurations = time.time_since_epoch() / duration_;
- TimeInt scaledTime = timeMod.count() * buckets_.size();
+ TimeInt scaledTime = timeMod.count() * TimeInt(buckets_.size());
// Keep these two lines together. The compiler should be able to compute
// both the division and modulus with a single operation.
- *bucketIdx = scaledTime / duration_.count();
+ *bucketIdx = size_t(scaledTime / duration_.count());
TimeInt scaledOffsetInBucket = scaledTime % duration_.count();
TimeInt scaledBucketStart = scaledTime - scaledOffsetInBucket;
Duration timeMod = latestTime_.time_since_epoch() % duration_;
TimeInt numFullDurations = latestTime_.time_since_epoch() / duration_;
TimePoint durationStart(numFullDurations * duration_);
- TimeInt scaledTime = timeMod.count() * buckets_.size();
- size_t latestBucketIdx = scaledTime / duration_.count();
+ TimeInt scaledTime = timeMod.count() * TimeInt(buckets_.size());
+ size_t latestBucketIdx = size_t(scaledTime / duration_.count());
TimeInt scaledOffsetInBucket = scaledTime % duration_.count();
TimeInt scaledBucketStart = scaledTime - scaledOffsetInBucket;
TimeInt scaledNextBucketStart = scaledBucketStart + duration_.count();
*/
template <typename ReturnType = double, typename Interval = Duration>
ReturnType rate() const {
- return rateHelper<ReturnType, Interval>(total_.sum, elapsed());
+ return rateHelper<ReturnType, Interval>(ReturnType(total_.sum), elapsed());
}
/*
*/
template <typename ReturnType = double, typename Interval = Duration>
ReturnType countRate() const {
- return rateHelper<ReturnType, Interval>(total_.count, elapsed());
+ return rateHelper<ReturnType, Interval>(
+ ReturnType(total_.count), elapsed());
}
/*
ReturnType countRate(TimePoint start, TimePoint end) const {
uint64_t intervalCount = count(start, end);
Duration interval = elapsed(start, end);
- return rateHelper<ReturnType, Interval>(intervalCount, interval);
+ return rateHelper<ReturnType, Interval>(
+ ReturnType(intervalCount), interval);
}
/*
return addValueAggregated(TimePoint(now), val, 1);
}
bool addValue(Duration now, const ValueType& val, int64_t times) {
- return addValueAggregated(TimePoint(now), val * times, times);
+ return addValueAggregated(TimePoint(now), val * ValueType(times), times);
}
bool
addValueAggregated(Duration now, const ValueType& total, int64_t nsamples) {
return max_;
}
- return min_ + ((idx - 1) * bucketSize_);
+ return ValueType(min_ + ((idx - 1) * bucketSize_));
}
/*
return std::numeric_limits<ValueType>::max();
}
- return min_ + (idx * bucketSize_);
+ return ValueType(min_ + (idx * bucketSize_));
}
/**
TimePoint now,
const ValueType& val,
uint64_t times) {
- addValueAggregated(now, val * times, times);
+ addValueAggregated(now, val * ValueType(times), times);
}
template <typename VT, typename CT>
break;
}
leak.emplace_back(std::move(ptr));
- EXPECT_LT(leak.size(), 10000);
+ EXPECT_LT(leak.size(), 10000u);
}
}
for (auto i = 0; i < count; ++i) {
Sched::wait(&allocSem);
uint32_t idx = pool.allocIndex();
- EXPECT_NE(idx, 0);
+ EXPECT_NE(idx, 0u);
EXPECT_LE(idx,
poolSize + (pool.NumLocalLists - 1) * pool.LocalListLimit);
pool[idx] = i;
Sched::wait(&readSem);
EXPECT_EQ(read(fd[0], &idx, sizeof(idx)), sizeof(idx));
EXPECT_NE(idx, 0);
- EXPECT_GE(idx, 1);
+ EXPECT_GE(idx, 1u);
EXPECT_LE(idx,
poolSize + (Pool::NumLocalLists - 1) * Pool::LocalListLimit);
EXPECT_EQ(pool[idx], i);
typedef IndexedMemPool<int,1,32> Pool;
Pool pool(10);
- EXPECT_EQ(pool.capacity(), 10);
- EXPECT_EQ(Pool::maxIndexForCapacity(10), 10);
+ EXPECT_EQ(pool.capacity(), 10u);
+ EXPECT_EQ(Pool::maxIndexForCapacity(10), 10u);
for (auto i = 0; i < 10; ++i) {
- EXPECT_NE(pool.allocIndex(), 0);
+ EXPECT_NE(pool.allocIndex(), 0u);
}
- EXPECT_EQ(pool.allocIndex(), 0);
+ EXPECT_EQ(pool.allocIndex(), 0u);
}
TEST(IndexedMemPool, mt_capacity) {
threads[i] = std::thread([&]() {
for (auto j = 0; j < 100; ++j) {
uint32_t idx = pool.allocIndex();
- EXPECT_NE(idx, 0);
+ EXPECT_NE(idx, 0u);
}
});
}
for (auto i = 0; i < 16 * 32; ++i) {
pool.allocIndex();
}
- EXPECT_EQ(pool.allocIndex(), 0);
+ EXPECT_EQ(pool.allocIndex(), 0u);
}
TEST(IndexedMemPool, locate_elem) {
std::vector<T> rv;
for (T i = 1; i < 24; ++i) {
rv.push_back(i);
- rv.push_back(std::numeric_limits<T>::max() / i);
- rv.push_back(std::numeric_limits<T>::max() - i);
- rv.push_back(std::numeric_limits<T>::max() / 2 - i);
+ rv.push_back(T(std::numeric_limits<T>::max() / i));
+ rv.push_back(T(std::numeric_limits<T>::max() - i));
+ rv.push_back(T(std::numeric_limits<T>::max() / T(2) - i));
if (std::is_signed<T>::value) {
rv.push_back(-i);
- rv.push_back(std::numeric_limits<T>::min() / i);
- rv.push_back(std::numeric_limits<T>::min() + i);
- rv.push_back(std::numeric_limits<T>::min() / 2 + i);
+ rv.push_back(T(std::numeric_limits<T>::min() / i));
+ rv.push_back(T(std::numeric_limits<T>::min() + i));
+ rv.push_back(T(std::numeric_limits<T>::min() / T(2) + i));
}
}
return rv;