Reorganize the stats directory
author	Christopher Dykes <cdykes@fb.com>
	Sat, 10 Jun 2017 02:48:51 +0000 (19:48 -0700)
committer	Facebook Github Bot <facebook-github-bot@users.noreply.github.com>
	Sat, 10 Jun 2017 02:51:11 +0000 (19:51 -0700)
Summary: The source and tests for the stats directory were spread across folly/detail and folly/test; move them into folly/stats directly instead.

Reviewed By: yfeldblum

Differential Revision: D5216810

fbshipit-source-id: 00a8bb95a4f7830d0bc46b3f914f256a37833b78
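
For code outside this diff, the main source-level change is an include-path move: the bucket helpers that lived in folly/detail/Stats.h now live in folly/stats/detail/Bucket.h (still in the internal folly::detail namespace). For example:

    // Before this diff:
    #include <folly/detail/Stats.h>
    // After this diff:
    #include <folly/stats/detail/Bucket.h>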

22 files changed:
folly/Makefile.am
folly/detail/Stats.h [deleted file]
folly/stats/BucketedTimeSeries.cpp [new file with mode: 0644]
folly/stats/BucketedTimeSeries.h
folly/stats/Histogram.cpp [new file with mode: 0644]
folly/stats/Histogram.h
folly/stats/Instantiations.cpp [deleted file]
folly/stats/MultiLevelTimeSeries.cpp [new file with mode: 0644]
folly/stats/TimeseriesHistogram.cpp [new file with mode: 0644]
folly/stats/detail/Bucket.h [new file with mode: 0644]
folly/stats/test/BucketedTimeSeriesBenchmark.cpp [new file with mode: 0644]
folly/stats/test/HistogramBenchmark.cpp [new file with mode: 0644]
folly/stats/test/HistogramTest.cpp [new file with mode: 0644]
folly/stats/test/Makefile.am [new file with mode: 0755]
folly/stats/test/TimeSeriesTest.cpp [new file with mode: 0644]
folly/stats/test/TimeseriesHistogramTest.cpp [new file with mode: 0644]
folly/test/HistogramBenchmark.cpp [deleted file]
folly/test/HistogramTest.cpp [deleted file]
folly/test/Makefile.am
folly/test/TimeseriesBenchmark.cpp [deleted file]
folly/test/TimeseriesHistogramTest.cpp [deleted file]
folly/test/TimeseriesTest.cpp [deleted file]

diff --git a/folly/Makefile.am b/folly/Makefile.am
index 0b1a10d71abf2d7adb577a749da365b63ab4359e..435b93a9e1895ec420f55966f6ca80fc17a6c7ac 100644
@@ -7,7 +7,7 @@ MAYBE_EXCEPTION_TRACER = experimental/exception_tracer
 endif
 
 SUBDIRS = . experimental $(MAYBE_INIT) test io/test experimental/io/test \
-         $(MAYBE_EXCEPTION_TRACER)
+         stats/test $(MAYBE_EXCEPTION_TRACER)
 
 ACLOCAL_AMFLAGS = -I m4
 
@@ -82,7 +82,6 @@ nobase_follyinclude_HEADERS = \
        detail/SlowFingerprint.h \
        detail/SocketFastOpen.h \
        detail/StaticSingletonManager.h \
-       detail/Stats.h \
        detail/ThreadLocalDetail.h \
        detail/TryDetail.h \
        detail/TurnSequencer.h \
@@ -367,6 +366,7 @@ nobase_follyinclude_HEADERS = \
        ssl/OpenSSLVersionFinder.h \
        ssl/SSLSession.h \
        ssl/detail/SSLSessionImpl.h \
+       stats/detail/Bucket.h \
        stats/BucketedTimeSeries-defs.h \
        stats/BucketedTimeSeries.h \
        stats/Histogram-defs.h \
@@ -534,7 +534,10 @@ libfolly_la_SOURCES = \
        ssl/OpenSSLCertUtils.cpp \
        ssl/OpenSSLHash.cpp \
        ssl/detail/SSLSessionImpl.cpp \
-       stats/Instantiations.cpp \
+       stats/BucketedTimeSeries.cpp \
+       stats/Histogram.cpp \
+       stats/MultiLevelTimeSeries.cpp \
+       stats/TimeseriesHistogram.cpp \
        Subprocess.cpp \
        ThreadCachedArena.cpp \
        ThreadName.cpp \
diff --git a/folly/detail/Stats.h b/folly/detail/Stats.h
deleted file mode 100644
index 691ad2d..0000000
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright 2017 Facebook, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <chrono>
-#include <cstdint>
-#include <type_traits>
-
-namespace folly { namespace detail {
-
-/*
- * Helper function to compute the average, given a specified input type and
- * return type.
- */
-
-// If the input is long double, divide using long double to avoid losing
-// precision.
-template <typename ReturnType>
-ReturnType avgHelper(long double sum, uint64_t count) {
-  if (count == 0) { return ReturnType(0); }
-  const long double countf = count;
-  return static_cast<ReturnType>(sum / countf);
-}
-
-// In all other cases divide using double precision.
-// This should be relatively fast, and accurate enough for most use cases.
-template <typename ReturnType, typename ValueType>
-typename std::enable_if<!std::is_same<typename std::remove_cv<ValueType>::type,
-                                      long double>::value,
-                        ReturnType>::type
-avgHelper(ValueType sum, uint64_t count) {
-  if (count == 0) { return ReturnType(0); }
-  const double sumf = double(sum);
-  const double countf = double(count);
-  return static_cast<ReturnType>(sumf / countf);
-}
-
-/*
- * Helper function to compute the rate per Interval,
- * given the specified count recorded over the elapsed time period.
- */
-template <
-    typename ReturnType = double,
-    typename Duration = std::chrono::seconds,
-    typename Interval = Duration>
-ReturnType rateHelper(ReturnType count, Duration elapsed) {
-  if (elapsed == Duration(0)) {
-    return 0;
-  }
-
-  // Use std::chrono::duration_cast to convert between the native
-  // duration and the desired interval.  However, convert the rates,
-  // rather than just converting the elapsed duration.  Converting the
-  // elapsed time first may collapse it down to 0 if the elapsed interval
-  // is less than the desired interval, which will incorrectly result in
-  // an infinite rate.
-  typedef std::chrono::duration<
-      ReturnType,
-      std::ratio<Duration::period::den, Duration::period::num>>
-      NativeRate;
-  typedef std::chrono::duration<
-      ReturnType, std::ratio<Interval::period::den,
-                             Interval::period::num>> DesiredRate;
-
-  NativeRate native(count / elapsed.count());
-  DesiredRate desired = std::chrono::duration_cast<DesiredRate>(native);
-  return desired.count();
-}
-
-
-template<typename T>
-struct Bucket {
- public:
-  typedef T ValueType;
-
-  Bucket()
-    : sum(ValueType()),
-      count(0) {}
-
-  void clear() {
-    sum = ValueType();
-    count = 0;
-  }
-
-  void add(const ValueType& s, uint64_t c) {
-    // TODO: It would be nice to handle overflow here.
-    sum += s;
-    count += c;
-  }
-
-  Bucket& operator+=(const Bucket& o) {
-    add(o.sum, o.count);
-    return *this;
-  }
-
-  Bucket& operator-=(const Bucket& o) {
-    // TODO: It would be nice to handle overflow here.
-    sum -= o.sum;
-    count -= o.count;
-    return *this;
-  }
-
-  template <typename ReturnType>
-  ReturnType avg() const {
-    return avgHelper<ReturnType>(sum, count);
-  }
-
-  ValueType sum;
-  uint64_t count;
-};
-
-}} // folly::detail
diff --git a/folly/stats/BucketedTimeSeries.cpp b/folly/stats/BucketedTimeSeries.cpp
new file mode 100644
index 0000000..7eaf46e
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2017 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <folly/stats/BucketedTimeSeries.h>
+#include <folly/stats/BucketedTimeSeries-defs.h>
+
+namespace folly {
+template class BucketedTimeSeries<int64_t>;
+}
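
This new .cpp exists purely for the explicit-instantiation pattern: the -defs.h header is included only here, and the common int64_t specialization is compiled once into the library. A minimal sketch of the user code this enables (hypothetical example, not part of this diff):

    #include <folly/stats/BucketedTimeSeries.h>  // no -defs.h include needed

    folly::BucketedTimeSeries<int64_t> ts(60, std::chrono::seconds(600));
    ts.addValue(std::chrono::seconds(1), 42);  // links against the instantiation above

Users of any other element type still need to include BucketedTimeSeries-defs.h themselves.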
diff --git a/folly/stats/BucketedTimeSeries.h b/folly/stats/BucketedTimeSeries.h
index a31904daf6652819edb949b1c562e9a91cba3b15..73ec9e70e88baad4f481fa4c7b21999d1b0884fa 100644
@@ -19,7 +19,7 @@
 #include <chrono>
 #include <vector>
 
-#include <folly/detail/Stats.h>
+#include <folly/stats/detail/Bucket.h>
 
 namespace folly {
 
diff --git a/folly/stats/Histogram.cpp b/folly/stats/Histogram.cpp
new file mode 100644
index 0000000..5a24f1e
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2017 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * This file contains explicit instantiations of stats template types.
+ *
+ * This allows most users to avoid having to include the template definition
+ * header files.
+ */
+
+#include <folly/stats/Histogram.h>
+#include <folly/stats/Histogram-defs.h>
+
+namespace folly {
+
+template class Histogram<int64_t>;
+template class detail::HistogramBuckets<int64_t, Histogram<int64_t>::Bucket>;
+
+// Histogram::getPercentileBucketIdx(), Histogram::getPercentileEstimate()
+// and Histogram::computeTotalCount()
+// are implemented using template methods.  Instantiate the default versions of
+// these methods too, so anyone using them won't also need to explicitly
+// include Histogram-defs.h
+template size_t detail::HistogramBuckets<int64_t, Histogram<int64_t>::Bucket>::
+    getPercentileBucketIdx<Histogram<int64_t>::CountFromBucket>(
+        double pct,
+        Histogram<int64_t>::CountFromBucket countFromBucket,
+        double* lowPct,
+        double* highPct) const;
+template int64_t detail::HistogramBuckets<int64_t, Histogram<int64_t>::Bucket>
+  ::getPercentileEstimate<Histogram<int64_t>::CountFromBucket,
+                          Histogram<int64_t>::AvgFromBucket>(
+    double pct,
+    Histogram<int64_t>::CountFromBucket countFromBucket,
+    Histogram<int64_t>::AvgFromBucket avgFromBucket) const;
+template uint64_t detail::HistogramBuckets<int64_t, Histogram<int64_t>::Bucket>
+  ::computeTotalCount<Histogram<int64_t>::CountFromBucket>(
+    Histogram<int64_t>::CountFromBucket countFromBucket) const;
+
+} // folly
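
Note that the percentile member functions are instantiated explicitly in addition to the class: an explicit class instantiation covers ordinary members but not member templates, so without these lines a caller of the default getPercentileEstimate() would still need Histogram-defs.h. A hedged sketch of what now works from the declaration header alone (hypothetical user code, not part of this diff):

    #include <folly/stats/Histogram.h>  // Histogram-defs.h not required for int64_t

    folly::Histogram<int64_t> h(1, 0, 100);  // bucketSize, min, max
    h.addValue(42);
    int64_t p50 = h.getPercentileEstimate(0.5);  // default CountFromBucket/AvgFromBucket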
diff --git a/folly/stats/Histogram.h b/folly/stats/Histogram.h
index ca45ba3bc4b4160a95fe0b8e23692376331ca79c..effeceaab80036bb75db4e6078b353d5668aac1a 100644
@@ -25,7 +25,7 @@
 #include <vector>
 
 #include <folly/CPortability.h>
-#include <folly/detail/Stats.h>
+#include <folly/stats/detail/Bucket.h>
 
 namespace folly {
 
diff --git a/folly/stats/Instantiations.cpp b/folly/stats/Instantiations.cpp
deleted file mode 100644
index 20b4c2f..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright 2017 Facebook, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- * This file contains explicit instantiations of stats template types.
- *
- * This allows most users to avoid having to include the template definition
- * header files.
- */
-
-#include <folly/stats/BucketedTimeSeries.h>
-#include <folly/stats/BucketedTimeSeries-defs.h>
-
-#include <folly/stats/Histogram.h>
-#include <folly/stats/Histogram-defs.h>
-
-#include <folly/stats/MultiLevelTimeSeries.h>
-#include <folly/stats/MultiLevelTimeSeries-defs.h>
-
-#include <folly/stats/TimeseriesHistogram.h>
-#include <folly/stats/TimeseriesHistogram-defs.h>
-
-namespace folly {
-
-template class BucketedTimeSeries<int64_t>;
-template class Histogram<int64_t>;
-template class detail::HistogramBuckets<int64_t, Histogram<int64_t>::Bucket>;
-template class MultiLevelTimeSeries<int64_t>;
-template class TimeseriesHistogram<int64_t>;
-
-// Histogram::getPercentileBucketIdx(), Histogram::getPercentileEstimate()
-// and Histogram::computeTotalCount()
-// are implemented using template methods.  Instantiate the default versions of
-// these methods too, so anyone using them won't also need to explicitly
-// include Histogram-defs.h
-template size_t detail::HistogramBuckets<int64_t, Histogram<int64_t>::Bucket>::
-    getPercentileBucketIdx<Histogram<int64_t>::CountFromBucket>(
-        double pct,
-        Histogram<int64_t>::CountFromBucket countFromBucket,
-        double* lowPct,
-        double* highPct) const;
-template int64_t detail::HistogramBuckets<int64_t, Histogram<int64_t>::Bucket>
-  ::getPercentileEstimate<Histogram<int64_t>::CountFromBucket,
-                          Histogram<int64_t>::AvgFromBucket>(
-    double pct,
-    Histogram<int64_t>::CountFromBucket countFromBucket,
-    Histogram<int64_t>::AvgFromBucket avgFromBucket) const;
-template uint64_t detail::HistogramBuckets<int64_t, Histogram<int64_t>::Bucket>
-  ::computeTotalCount<Histogram<int64_t>::CountFromBucket>(
-    Histogram<int64_t>::CountFromBucket countFromBucket) const;
-
-} // folly
diff --git a/folly/stats/MultiLevelTimeSeries.cpp b/folly/stats/MultiLevelTimeSeries.cpp
new file mode 100644
index 0000000..404a4d1
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2017 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <folly/stats/MultiLevelTimeSeries.h>
+#include <folly/stats/MultiLevelTimeSeries-defs.h>
+
+namespace folly {
+template class MultiLevelTimeSeries<int64_t>;
+}
diff --git a/folly/stats/TimeseriesHistogram.cpp b/folly/stats/TimeseriesHistogram.cpp
new file mode 100644
index 0000000..22c972f
--- /dev/null
@@ -0,0 +1,22 @@
+/*
+ * Copyright 2017 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <folly/stats/TimeseriesHistogram.h>
+#include <folly/stats/TimeseriesHistogram-defs.h>
+
+namespace folly {
+template class TimeseriesHistogram<int64_t>;
+}
diff --git a/folly/stats/detail/Bucket.h b/folly/stats/detail/Bucket.h
new file mode 100644
index 0000000..691ad2d
--- /dev/null
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2017 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <chrono>
+#include <cstdint>
+#include <type_traits>
+
+namespace folly { namespace detail {
+
+/*
+ * Helper function to compute the average, given a specified input type and
+ * return type.
+ */
+
+// If the input is long double, divide using long double to avoid losing
+// precision.
+template <typename ReturnType>
+ReturnType avgHelper(long double sum, uint64_t count) {
+  if (count == 0) { return ReturnType(0); }
+  const long double countf = count;
+  return static_cast<ReturnType>(sum / countf);
+}
+
+// In all other cases divide using double precision.
+// This should be relatively fast, and accurate enough for most use cases.
+template <typename ReturnType, typename ValueType>
+typename std::enable_if<!std::is_same<typename std::remove_cv<ValueType>::type,
+                                      long double>::value,
+                        ReturnType>::type
+avgHelper(ValueType sum, uint64_t count) {
+  if (count == 0) { return ReturnType(0); }
+  const double sumf = double(sum);
+  const double countf = double(count);
+  return static_cast<ReturnType>(sumf / countf);
+}
+
+/*
+ * Helper function to compute the rate per Interval,
+ * given the specified count recorded over the elapsed time period.
+ */
+template <
+    typename ReturnType = double,
+    typename Duration = std::chrono::seconds,
+    typename Interval = Duration>
+ReturnType rateHelper(ReturnType count, Duration elapsed) {
+  if (elapsed == Duration(0)) {
+    return 0;
+  }
+
+  // Use std::chrono::duration_cast to convert between the native
+  // duration and the desired interval.  However, convert the rates,
+  // rather than just converting the elapsed duration.  Converting the
+  // elapsed time first may collapse it down to 0 if the elapsed interval
+  // is less than the desired interval, which will incorrectly result in
+  // an infinite rate.
+  typedef std::chrono::duration<
+      ReturnType,
+      std::ratio<Duration::period::den, Duration::period::num>>
+      NativeRate;
+  typedef std::chrono::duration<
+      ReturnType, std::ratio<Interval::period::den,
+                             Interval::period::num>> DesiredRate;
+
+  NativeRate native(count / elapsed.count());
+  DesiredRate desired = std::chrono::duration_cast<DesiredRate>(native);
+  return desired.count();
+}
+
+
+template<typename T>
+struct Bucket {
+ public:
+  typedef T ValueType;
+
+  Bucket()
+    : sum(ValueType()),
+      count(0) {}
+
+  void clear() {
+    sum = ValueType();
+    count = 0;
+  }
+
+  void add(const ValueType& s, uint64_t c) {
+    // TODO: It would be nice to handle overflow here.
+    sum += s;
+    count += c;
+  }
+
+  Bucket& operator+=(const Bucket& o) {
+    add(o.sum, o.count);
+    return *this;
+  }
+
+  Bucket& operator-=(const Bucket& o) {
+    // TODO: It would be nice to handle overflow here.
+    sum -= o.sum;
+    count -= o.count;
+    return *this;
+  }
+
+  template <typename ReturnType>
+  ReturnType avg() const {
+    return avgHelper<ReturnType>(sum, count);
+  }
+
+  ValueType sum;
+  uint64_t count;
+};
+
+}} // folly::detail
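
The helpers themselves are unchanged by the move and remain internal API in folly::detail. As an illustration of the comment in rateHelper() about converting rates rather than durations, a sketch (assuming direct use of the detail header, which ordinary code should avoid):

    #include <folly/stats/detail/Bucket.h>

    folly::detail::Bucket<int64_t> b;
    b.add(30, 3);                  // sum = 30, count = 3
    double avg = b.avg<double>();  // 10.0

    // 5 events over 500ms: casting 500ms to whole seconds first would give 0
    // elapsed (an infinite rate); converting the rate instead yields 10/sec.
    double rate = folly::detail::rateHelper<double,
                                            std::chrono::milliseconds,
                                            std::chrono::seconds>(
        5.0, std::chrono::milliseconds(500));  // rate == 10.0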
diff --git a/folly/stats/test/BucketedTimeSeriesBenchmark.cpp b/folly/stats/test/BucketedTimeSeriesBenchmark.cpp
new file mode 100644
index 0000000..835eda9
--- /dev/null
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2017 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <folly/stats/BucketedTimeSeries.h>
+#include <folly/stats/BucketedTimeSeries-defs.h>
+
+#include <glog/logging.h>
+
+#include <folly/Benchmark.h>
+
+using std::chrono::seconds;
+using folly::BenchmarkSuspender;
+using folly::BucketedTimeSeries;
+
+void addValue(unsigned int iters,
+              seconds duration, size_t numBuckets,
+              size_t callsPerSecond) {
+  BenchmarkSuspender suspend;
+  BucketedTimeSeries<int64_t> ts(numBuckets, duration);
+  suspend.dismiss();
+
+  seconds currentTime(1342000000);
+  size_t timeCounter = 0;
+  for (unsigned int n = 0; n < iters; ++n, ++timeCounter) {
+    if (timeCounter >= callsPerSecond) {
+      timeCounter = 0;
+      ++currentTime;
+    }
+    ts.addValue(currentTime, n);
+  }
+}
+
+BENCHMARK_NAMED_PARAM(addValue, AllTime_1perSec, seconds(0), 60, 1);
+BENCHMARK_NAMED_PARAM(addValue, 3600x60_1perSec, seconds(3600), 60, 1);
+BENCHMARK_NAMED_PARAM(addValue, 600x60_1perSec, seconds(600), 60, 1);
+BENCHMARK_NAMED_PARAM(addValue, 60x60_1perSec, seconds(60), 60, 1);
+BENCHMARK_NAMED_PARAM(addValue, 100x10_1perSec, seconds(100), 10, 1);
+BENCHMARK_NAMED_PARAM(addValue, 71x5_1perSec, seconds(71), 5, 1);
+BENCHMARK_NAMED_PARAM(addValue, 1x1_1perSec, seconds(1), 1, 1);
+
+BENCHMARK_DRAW_LINE()
+
+BENCHMARK_NAMED_PARAM(addValue, AllTime_10perSec, seconds(0), 60, 10);
+BENCHMARK_NAMED_PARAM(addValue, 3600x60_10perSec, seconds(3600), 60, 10);
+BENCHMARK_NAMED_PARAM(addValue, 600x60_10perSec, seconds(600), 60, 10);
+BENCHMARK_NAMED_PARAM(addValue, 60x60_10perSec, seconds(60), 60, 10);
+BENCHMARK_NAMED_PARAM(addValue, 100x10_10perSec, seconds(100), 10, 10);
+BENCHMARK_NAMED_PARAM(addValue, 71x5_10perSec, seconds(71), 5, 10);
+BENCHMARK_NAMED_PARAM(addValue, 1x1_10perSec, seconds(1), 1, 10);
+
+BENCHMARK_DRAW_LINE()
+
+BENCHMARK_NAMED_PARAM(addValue, AllTime_100perSec, seconds(0), 60, 100);
+BENCHMARK_NAMED_PARAM(addValue, 3600x60_100perSec, seconds(3600), 60, 100);
+BENCHMARK_NAMED_PARAM(addValue, 600x60_100perSec, seconds(600), 60, 100);
+BENCHMARK_NAMED_PARAM(addValue, 60x60_100perSec, seconds(60), 60, 100);
+BENCHMARK_NAMED_PARAM(addValue, 100x10_100perSec, seconds(100), 10, 100);
+BENCHMARK_NAMED_PARAM(addValue, 71x5_100perSec, seconds(71), 5, 100);
+BENCHMARK_NAMED_PARAM(addValue, 1x1_100perSec, seconds(1), 1, 100);
+
+int main(int argc, char *argv[]) {
+  gflags::ParseCommandLineFlags(&argc, &argv, true);
+  folly::runBenchmarks();
+  return 0;
+}
diff --git a/folly/stats/test/HistogramBenchmark.cpp b/folly/stats/test/HistogramBenchmark.cpp
new file mode 100644
index 0000000..d708253
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2017 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <folly/stats/Histogram.h>
+
+#include <folly/Benchmark.h>
+#include <folly/Foreach.h>
+#include <folly/portability/GFlags.h>
+
+using folly::Histogram;
+
+void addValue(unsigned int n, int64_t bucketSize, int64_t min, int64_t max) {
+  Histogram<int64_t> hist(bucketSize, min, max);
+  int64_t num = min;
+  FOR_EACH_RANGE (i, 0, n) {
+    hist.addValue(num);
+    ++num;
+    if (num > max) { num = min; }
+  }
+}
+
+BENCHMARK_NAMED_PARAM(addValue, 0_to_100, 1, 0, 100);
+BENCHMARK_NAMED_PARAM(addValue, 0_to_1000, 10, 0, 1000);
+BENCHMARK_NAMED_PARAM(addValue, 5k_to_20k, 250, 5000, 20000);
+
+int main(int argc, char *argv[]) {
+  gflags::ParseCommandLineFlags(&argc, &argv, true);
+  folly::runBenchmarks();
+  return 0;
+}
diff --git a/folly/stats/test/HistogramTest.cpp b/folly/stats/test/HistogramTest.cpp
new file mode 100644
index 0000000..c2997a3
--- /dev/null
@@ -0,0 +1,223 @@
+/*
+ * Copyright 2017 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <folly/stats/Histogram.h>
+#include <folly/stats/Histogram-defs.h>
+
+#include <folly/portability/GTest.h>
+
+using folly::Histogram;
+
+// Insert 100 evenly distributed values into a histogram with 100 buckets
+TEST(Histogram, Test100) {
+  Histogram<int64_t> h(1, 0, 100);
+
+  for (unsigned int n = 0; n < 100; ++n) {
+    h.addValue(n);
+  }
+
+  // 100 buckets, plus 1 for below min, and 1 for above max
+  EXPECT_EQ(h.getNumBuckets(), 102);
+
+  double epsilon = 1e-6;
+  for (unsigned int n = 0; n <= 100; ++n) {
+    double pct = n / 100.0;
+
+    // Floating point arithmetic isn't 100% accurate, and if we just divide
+    // (n / 100) the value should be exactly on a bucket boundary.  Add epsilon
+    // to ensure we fall in the upper bucket.
+    if (n < 100) {
+      double lowPct = -1.0;
+      double highPct = -1.0;
+      unsigned int bucketIdx = h.getPercentileBucketIdx(pct + epsilon,
+                                                        &lowPct, &highPct);
+      EXPECT_EQ(n + 1, bucketIdx);
+      EXPECT_FLOAT_EQ(n / 100.0, lowPct);
+      EXPECT_FLOAT_EQ((n + 1) / 100.0, highPct);
+    }
+
+    // Also test n - epsilon, to test falling in the lower bucket.
+    if (n > 0) {
+      double lowPct = -1.0;
+      double highPct = -1.0;
+      unsigned int bucketIdx = h.getPercentileBucketIdx(pct - epsilon,
+                                                        &lowPct, &highPct);
+      EXPECT_EQ(n, bucketIdx);
+      EXPECT_FLOAT_EQ((n - 1) / 100.0, lowPct);
+      EXPECT_FLOAT_EQ(n / 100.0, highPct);
+    }
+
+    // Check getPercentileEstimate()
+    EXPECT_EQ(n, h.getPercentileEstimate(pct));
+  }
+}
+
+// Test calling getPercentileBucketIdx() and getPercentileEstimate() on an
+// empty histogram
+TEST(Histogram, TestEmpty) {
+  Histogram<int64_t> h(1, 0, 100);
+
+  for (unsigned int n = 0; n <= 100; ++n) {
+    double pct = n / 100.0;
+
+    double lowPct = -1.0;
+    double highPct = -1.0;
+    unsigned int bucketIdx = h.getPercentileBucketIdx(pct, &lowPct, &highPct);
+    EXPECT_EQ(1, bucketIdx);
+    EXPECT_FLOAT_EQ(0.0, lowPct);
+    EXPECT_FLOAT_EQ(0.0, highPct);
+
+    EXPECT_EQ(0, h.getPercentileEstimate(pct));
+  }
+}
+
+// Test calling getPercentileBucketIdx() and getPercentileEstimate() on a
+// histogram with just a single value.
+TEST(Histogram, Test1) {
+  Histogram<int64_t> h(1, 0, 100);
+  h.addValue(42);
+
+  for (unsigned int n = 0; n < 100; ++n) {
+    double pct = n / 100.0;
+
+    double lowPct = -1.0;
+    double highPct = -1.0;
+    unsigned int bucketIdx = h.getPercentileBucketIdx(pct, &lowPct, &highPct);
+    EXPECT_EQ(43, bucketIdx);
+    EXPECT_FLOAT_EQ(0.0, lowPct);
+    EXPECT_FLOAT_EQ(1.0, highPct);
+
+    EXPECT_EQ(42, h.getPercentileEstimate(pct));
+  }
+}
+
+// Test adding enough numbers to make the sum value overflow in the
+// "below min" bucket
+TEST(Histogram, TestOverflowMin) {
+  Histogram<int64_t> h(1, 0, 100);
+
+  for (unsigned int n = 0; n < 9; ++n) {
+    h.addValue(-0x0fffffffffffffff);
+  }
+
+  // Compute a percentile estimate.  We only added values to the "below min"
+  // bucket, so this should check that bucket.  We're mainly verifying that the
+  // code doesn't crash here when the bucket average is larger than the max
+  // value that is supposed to be in the bucket.
+  int64_t estimate = h.getPercentileEstimate(0.05);
+  // The code will return the smallest possible value when it detects an
+  // overflow beyond the minimum value.
+  EXPECT_EQ(std::numeric_limits<int64_t>::min(), estimate);
+}
+
+// Test adding enough numbers to make the sum value overflow in the
+// "above max" bucket
+TEST(Histogram, TestOverflowMax) {
+  Histogram<int64_t> h(1, 0, 100);
+
+  for (unsigned int n = 0; n < 9; ++n) {
+    h.addValue(0x0fffffffffffffff);
+  }
+
+  // The code will return the maximum possible value when it detects an
+  // overflow beyond the max value.
+  int64_t estimate = h.getPercentileEstimate(0.95);
+  EXPECT_EQ(std::numeric_limits<int64_t>::max(), estimate);
+}
+
+// Test adding enough numbers to make the sum value overflow in one of the
+// normal buckets
+TEST(Histogram, TestOverflowBucket) {
+  Histogram<int64_t> h(0x0100000000000000, 0, 0x1000000000000000);
+
+  for (unsigned int n = 0; n < 9; ++n) {
+    h.addValue(0x0fffffffffffffff);
+  }
+
+  // The histogram code should return the bucket midpoint
+  // when it detects overflow.
+  int64_t estimate = h.getPercentileEstimate(0.95);
+  EXPECT_EQ(0x0f80000000000000, estimate);
+}
+
+TEST(Histogram, TestDouble) {
+  // Insert 100 evenly spaced values into a histogram
+  Histogram<double> h(100.0, 0.0, 5000.0);
+  for (double n = 50; n < 5000; n += 100) {
+    h.addValue(n);
+  }
+  EXPECT_EQ(52, h.getNumBuckets());
+  EXPECT_EQ(2500.0, h.getPercentileEstimate(0.5));
+  EXPECT_EQ(4500.0, h.getPercentileEstimate(0.9));
+}
+
+// Test where the bucket width is not an even multiple of the histogram range
+TEST(Histogram, TestDoubleInexactWidth) {
+  Histogram<double> h(100.0, 0.0, 4970.0);
+  for (double n = 50; n < 5000; n += 100) {
+    h.addValue(n);
+  }
+  EXPECT_EQ(52, h.getNumBuckets());
+  EXPECT_EQ(2500.0, h.getPercentileEstimate(0.5));
+  EXPECT_EQ(4500.0, h.getPercentileEstimate(0.9));
+
+  EXPECT_EQ(0, h.getBucketByIndex(51).count);
+  h.addValue(4990);
+  h.addValue(5100);
+  EXPECT_EQ(2, h.getBucketByIndex(51).count);
+  EXPECT_EQ(2600.0, h.getPercentileEstimate(0.5));
+}
+
+// Test where the bucket width is larger than the histogram range
+// (There isn't really much point to defining a histogram this way,
+// but we want to ensure that it still works just in case.)
+TEST(Histogram, TestDoubleWidthTooBig) {
+  Histogram<double> h(100.0, 0.0, 7.0);
+  EXPECT_EQ(3, h.getNumBuckets());
+
+  for (double n = 0; n < 7; n += 1) {
+    h.addValue(n);
+  }
+  EXPECT_EQ(0, h.getBucketByIndex(0).count);
+  EXPECT_EQ(7, h.getBucketByIndex(1).count);
+  EXPECT_EQ(0, h.getBucketByIndex(2).count);
+  EXPECT_EQ(3.0, h.getPercentileEstimate(0.5));
+
+  h.addValue(-1.0);
+  EXPECT_EQ(1, h.getBucketByIndex(0).count);
+  h.addValue(7.5);
+  EXPECT_EQ(1, h.getBucketByIndex(2).count);
+  EXPECT_EQ(3.0, h.getPercentileEstimate(0.5));
+}
+
+// Test that we get counts right
+TEST(Histogram, Counts) {
+  Histogram<int32_t> h(1, 0, 10);
+  EXPECT_EQ(12, h.getNumBuckets());
+  EXPECT_EQ(0, h.computeTotalCount());
+
+  // Add one to each bucket, make sure the counts match
+  for (int32_t i = 0; i < 10; i++) {
+    h.addValue(i);
+    EXPECT_EQ(i+1, h.computeTotalCount());
+  }
+
+  // Add a lot to one bucket, make sure the counts still make sense
+  for (int32_t i = 0; i < 100; i++) {
+    h.addValue(0);
+  }
+  EXPECT_EQ(110, h.computeTotalCount());
+}
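
To make the epsilon probing in Test100 concrete: with bucket width 1 over [0, 100), index 0 is the below-min bucket and index 101 the above-max bucket, so value n sits in bucket n + 1, and percentile n/100 falls exactly on the boundary between buckets n and n + 1. One probe, spelled out (same setup as Test100):

    folly::Histogram<int64_t> h(1, 0, 100);
    for (int64_t n = 0; n < 100; ++n) {
      h.addValue(n);
    }
    double lowPct, highPct;
    // 0.50 + epsilon resolves to bucket 51 (value 50), covering [0.50, 0.51).
    unsigned int idx = h.getPercentileBucketIdx(0.50 + 1e-6, &lowPct, &highPct);
    // idx == 51, lowPct == 0.50, highPct == 0.51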
diff --git a/folly/stats/test/Makefile.am b/folly/stats/test/Makefile.am
new file mode 100755
index 0000000..f6f4715
--- /dev/null
@@ -0,0 +1,12 @@
+ACLOCAL_AMFLAGS = -I m4
+
+CPPFLAGS = -I$(top_srcdir)/test/gtest/googletest/include
+ldadd = $(top_builddir)/test/libfollytestmain.la
+
+check_PROGRAMS = \
+       histogram_test
+
+TESTS = $(check_PROGRAMS)
+
+histogram_test_SOURCES = HistogramTest.cpp
+histogram_test_LDADD = $(ldadd)
diff --git a/folly/stats/test/TimeSeriesTest.cpp b/folly/stats/test/TimeSeriesTest.cpp
new file mode 100644
index 0000000..64928fe
--- /dev/null
@@ -0,0 +1,1280 @@
+/*
+ * Copyright 2017 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <folly/stats/BucketedTimeSeries-defs.h>
+#include <folly/stats/BucketedTimeSeries.h>
+#include <folly/stats/MultiLevelTimeSeries-defs.h>
+#include <folly/stats/MultiLevelTimeSeries.h>
+#include <folly/stats/detail/Bucket.h>
+
+#include <array>
+
+#include <glog/logging.h>
+
+#include <folly/Foreach.h>
+#include <folly/portability/GTest.h>
+
+using std::chrono::seconds;
+using std::string;
+using std::vector;
+using folly::BucketedTimeSeries;
+
+using Bucket = folly::detail::Bucket<int64_t>;
+using StatsClock = folly::LegacyStatsClock<std::chrono::seconds>;
+using TimePoint = StatsClock::time_point;
+using Duration = StatsClock::duration;
+
+/*
+ * Helper functions to allow us to directly log time points and duration
+ */
+namespace std {
+std::ostream& operator<<(std::ostream& os, std::chrono::seconds s) {
+  os << s.count();
+  return os;
+}
+std::ostream& operator<<(std::ostream& os, TimePoint tp) {
+  os << tp.time_since_epoch().count();
+  return os;
+}
+}
+
+namespace {
+TimePoint mkTimePoint(int value) {
+  return TimePoint(StatsClock::duration(value));
+}
+
+struct TestData {
+  TestData(int d, int b, std::initializer_list<int> starts)
+      : duration(d), numBuckets(b) {
+    bucketStarts.reserve(starts.size());
+    for (int s : starts) {
+      bucketStarts.push_back(mkTimePoint(s));
+    }
+  }
+  seconds duration;
+  size_t numBuckets;
+  vector<TimePoint> bucketStarts;
+};
+vector<TestData> testData = {
+  // 71 seconds x 4 buckets
+  { 71, 4, {0, 18, 36, 54}},
+  // 100 seconds x 10 buckets
+  { 100, 10, {0, 10, 20, 30, 40, 50, 60, 70, 80, 90}},
+  // 10 seconds x 10 buckets
+  { 10, 10, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}},
+  // 10 seconds x 1 buckets
+  { 10, 1, {0}},
+  // 1 second x 1 buckets
+  { 1, 1, {0}},
+};
+}
+
+TEST(BucketedTimeSeries, getBucketInfo) {
+  for (const auto& data : testData) {
+    BucketedTimeSeries<int64_t> ts(data.numBuckets, data.duration);
+
+    for (uint32_t n = 0; n < 10000; n += 1234) {
+      seconds offset(n * data.duration);
+
+      for (uint32_t idx = 0; idx < data.numBuckets; ++idx) {
+        auto bucketStart = data.bucketStarts[idx];
+        TimePoint nextBucketStart;
+        if (idx + 1 < data.numBuckets) {
+          nextBucketStart = data.bucketStarts[idx + 1];
+        } else {
+          nextBucketStart = TimePoint(data.duration);
+        }
+
+        TimePoint expectedStart = offset + bucketStart;
+        TimePoint expectedNextStart = offset + nextBucketStart;
+        TimePoint midpoint =
+            expectedStart + (expectedNextStart - expectedStart) / 2;
+
+        vector<std::pair<string, TimePoint>> timePoints = {
+            {"expectedStart", expectedStart},
+            {"midpoint", midpoint},
+            {"expectedEnd", expectedNextStart - seconds(1)},
+        };
+
+        for (const auto& point : timePoints) {
+          // Check that getBucketIdx() returns the expected index
+          EXPECT_EQ(idx, ts.getBucketIdx(point.second))
+              << data.duration << "x" << data.numBuckets << ": " << point.first
+              << "=" << point.second;
+
+          // Check the data returned by getBucketInfo()
+          size_t returnedIdx;
+          TimePoint returnedStart;
+          TimePoint returnedNextStart;
+          ts.getBucketInfo(expectedStart, &returnedIdx,
+                           &returnedStart, &returnedNextStart);
+          EXPECT_EQ(idx, returnedIdx) << data.duration << "x" << data.numBuckets
+                                      << ": " << point.first << "="
+                                      << point.second;
+          EXPECT_EQ(expectedStart, returnedStart)
+              << data.duration << "x" << data.numBuckets << ": " << point.first
+              << "=" << point.second;
+          EXPECT_EQ(expectedNextStart, returnedNextStart)
+              << data.duration << "x" << data.numBuckets << ": " << point.first
+              << "=" << point.second;
+        }
+      }
+    }
+  }
+}
+
+void testUpdate100x10(size_t offset) {
+  // This test code only works when offset is a multiple of the bucket width
+  CHECK_EQ(0, offset % 10);
+
+  // Create a 100 second timeseries, with 10 buckets
+  BucketedTimeSeries<int64_t> ts(10, seconds(100));
+
+  auto setup = [&] {
+    ts.clear();
+    // Add 1 value to each bucket
+    for (int n = 5; n <= 95; n += 10) {
+      ts.addValue(seconds(n + offset), 6);
+    }
+
+    EXPECT_EQ(10, ts.count());
+    EXPECT_EQ(60, ts.sum());
+    EXPECT_EQ(6, ts.avg());
+  };
+
+  // Update 2 buckets forwards.  This should throw away 2 data points.
+  setup();
+  ts.update(seconds(110 + offset));
+  EXPECT_EQ(8, ts.count());
+  EXPECT_EQ(48, ts.sum());
+  EXPECT_EQ(6, ts.avg());
+
+  // The last time we added was 95.
+  // Try updating to 151.  This should throw away all but the last 4 values.
+  setup();
+  ts.update(seconds(151 + offset));
+  EXPECT_EQ(4, ts.count());
+  EXPECT_EQ(24, ts.sum());
+  EXPECT_EQ(6, ts.avg());
+
+  // The last time we added was 95.
+  // Try updating to 193: This is nearly one full loop around,
+  // back to the same bucket.  update() needs to clear everything
+  setup();
+  ts.update(seconds(193 + offset));
+  EXPECT_EQ(0, ts.count());
+  EXPECT_EQ(0, ts.sum());
+  EXPECT_EQ(0, ts.avg());
+
+  // The last time we added was 95.
+  // Try updating to 197: This is slightly over one full loop around,
+  // back to the same bucket.  update() needs to clear everything
+  setup();
+  ts.update(seconds(197 + offset));
+  EXPECT_EQ(0, ts.count());
+  EXPECT_EQ(0, ts.sum());
+  EXPECT_EQ(0, ts.avg());
+
+  // The last time we added was 95.
+  // Try updating to 230: This is well over one full loop around,
+  // and everything should be cleared.
+  setup();
+  ts.update(seconds(230 + offset));
+  EXPECT_EQ(0, ts.count());
+  EXPECT_EQ(0, ts.sum());
+  EXPECT_EQ(0, ts.avg());
+}
+
+TEST(BucketedTimeSeries, update100x10) {
+  // Run testUpdate100x10() multiple times, with various offsets.
+  // This makes sure the update code works regardless of which bucket it starts
+  // at in the modulo arithmetic.
+  testUpdate100x10(0);
+  testUpdate100x10(50);
+  testUpdate100x10(370);
+  testUpdate100x10(1937090);
+}
+
+TEST(BucketedTimeSeries, update71x5) {
+  // Create a 71 second timeseries, with 5 buckets
+  // This tests when the number of buckets does not divide evenly into the
+  // duration.
+  BucketedTimeSeries<int64_t> ts(5, seconds(71));
+
+  auto setup = [&] {
+    ts.clear();
+    // Add 1 value to each bucket
+    ts.addValue(seconds(11), 6);
+    ts.addValue(seconds(24), 6);
+    ts.addValue(seconds(42), 6);
+    ts.addValue(seconds(43), 6);
+    ts.addValue(seconds(66), 6);
+
+    EXPECT_EQ(5, ts.count());
+    EXPECT_EQ(30, ts.sum());
+    EXPECT_EQ(6, ts.avg());
+  };
+
+  // Update 2 buckets forwards.  This should throw away 2 data points.
+  setup();
+  ts.update(seconds(99));
+  EXPECT_EQ(3, ts.count());
+  EXPECT_EQ(18, ts.sum());
+  EXPECT_EQ(6, ts.avg());
+
+  // Update 3 buckets forwards.  This should throw away 3 data points.
+  setup();
+  ts.update(seconds(100));
+  EXPECT_EQ(2, ts.count());
+  EXPECT_EQ(12, ts.sum());
+  EXPECT_EQ(6, ts.avg());
+
+  // Update 4 buckets forwards, just under the wrap limit.
+  // This should throw everything but the last bucket away.
+  setup();
+  ts.update(seconds(127));
+  EXPECT_EQ(1, ts.count());
+  EXPECT_EQ(6, ts.sum());
+  EXPECT_EQ(6, ts.avg());
+
+  // Update 5 buckets forwards, exactly at the wrap limit.
+  // This should throw everything away.
+  setup();
+  ts.update(seconds(128));
+  EXPECT_EQ(0, ts.count());
+  EXPECT_EQ(0, ts.sum());
+  EXPECT_EQ(0, ts.avg());
+
+  // Update very far forwards, wrapping multiple times.
+  // This should throw everything away.
+  setup();
+  ts.update(seconds(1234));
+  EXPECT_EQ(0, ts.count());
+  EXPECT_EQ(0, ts.sum());
+  EXPECT_EQ(0, ts.avg());
+}
+
+TEST(BucketedTimeSeries, elapsed) {
+  BucketedTimeSeries<int64_t> ts(60, seconds(600));
+
+  // elapsed() is 0 when no data points have been added
+  EXPECT_EQ(0, ts.elapsed().count());
+
+  // With exactly 1 data point, elapsed() should report 1 second of data
+  seconds start(239218);
+  ts.addValue(start + seconds(0), 200);
+  EXPECT_EQ(1, ts.elapsed().count());
+  // Adding a data point 10 seconds later should result in an elapsed time of
+  // 11 seconds (the time range is [0, 10], inclusive).
+  ts.addValue(start + seconds(10), 200);
+  EXPECT_EQ(11, ts.elapsed().count());
+
+  // elapsed() returns to 0 after clear()
+  ts.clear();
+  EXPECT_EQ(0, ts.elapsed().count());
+
+  // Restart, with the starting point on an easier number to work with
+  ts.addValue(seconds(10), 200);
+  EXPECT_EQ(1, ts.elapsed().count());
+  ts.addValue(seconds(580), 200);
+  EXPECT_EQ(571, ts.elapsed().count());
+  ts.addValue(seconds(590), 200);
+  EXPECT_EQ(581, ts.elapsed().count());
+  ts.addValue(seconds(598), 200);
+  EXPECT_EQ(589, ts.elapsed().count());
+  ts.addValue(seconds(599), 200);
+  EXPECT_EQ(590, ts.elapsed().count());
+  ts.addValue(seconds(600), 200);
+  EXPECT_EQ(591, ts.elapsed().count());
+  ts.addValue(seconds(608), 200);
+  EXPECT_EQ(599, ts.elapsed().count());
+  ts.addValue(seconds(609), 200);
+  EXPECT_EQ(600, ts.elapsed().count());
+  // Once we reach 600 seconds worth of data, when we move on to the next
+  // second a full bucket will get thrown out.  Now we drop back down to 591
+  // seconds worth of data
+  ts.addValue(seconds(610), 200);
+  EXPECT_EQ(591, ts.elapsed().count());
+  ts.addValue(seconds(618), 200);
+  EXPECT_EQ(599, ts.elapsed().count());
+  ts.addValue(seconds(619), 200);
+  EXPECT_EQ(600, ts.elapsed().count());
+  ts.addValue(seconds(620), 200);
+  EXPECT_EQ(591, ts.elapsed().count());
+  ts.addValue(seconds(123419), 200);
+  EXPECT_EQ(600, ts.elapsed().count());
+  ts.addValue(seconds(123420), 200);
+  EXPECT_EQ(591, ts.elapsed().count());
+  ts.addValue(seconds(123425), 200);
+  EXPECT_EQ(596, ts.elapsed().count());
+
+  // Time never moves backwards.
+  // Calling update with an old timestamp will just be ignored.
+  ts.update(seconds(29));
+  EXPECT_EQ(596, ts.elapsed().count());
+}
+
+TEST(BucketedTimeSeries, rate) {
+  BucketedTimeSeries<int64_t> ts(60, seconds(600));
+
+  // Add 3 values every 2 seconds, until we fill up the buckets
+  for (size_t n = 0; n < 600; n += 2) {
+    ts.addValue(seconds(n), 200, 3);
+  }
+
+  EXPECT_EQ(900, ts.count());
+  EXPECT_EQ(180000, ts.sum());
+  EXPECT_EQ(200, ts.avg());
+
+  // Really we only entered 599 seconds worth of data: [0, 598] (inclusive)
+  EXPECT_EQ(599, ts.elapsed().count());
+  EXPECT_NEAR(300.5, ts.rate(), 0.005);
+  EXPECT_NEAR(1.5, ts.countRate(), 0.005);
+
+  // If we add 1 more second, now we will have 600 seconds worth of data
+  ts.update(seconds(599));
+  EXPECT_EQ(600, ts.elapsed().count());
+  EXPECT_NEAR(300, ts.rate(), 0.005);
+  EXPECT_EQ(300, ts.rate<int>());
+  EXPECT_NEAR(1.5, ts.countRate(), 0.005);
+
+  // However, 1 more second after that and we will have filled up all the
+  // buckets, and have to drop one.
+  ts.update(seconds(600));
+  EXPECT_EQ(591, ts.elapsed().count());
+  EXPECT_NEAR(299.5, ts.rate(), 0.01);
+  EXPECT_EQ(299, ts.rate<int>());
+  EXPECT_NEAR(1.5, ts.countRate(), 0.005);
+}
+
+TEST(BucketedTimeSeries, avgTypeConversion) {
+  // Make sure the computed average values are accurate regardless
+  // of the input type and return type.
+
+  {
+    // Simple sanity tests for small positive integer values
+    BucketedTimeSeries<int64_t> ts(60, seconds(600));
+    ts.addValue(seconds(0), 4, 100);
+    ts.addValue(seconds(0), 10, 200);
+    ts.addValue(seconds(0), 16, 100);
+
+    EXPECT_DOUBLE_EQ(10.0, ts.avg());
+    EXPECT_DOUBLE_EQ(10.0, ts.avg<float>());
+    EXPECT_EQ(10, ts.avg<uint64_t>());
+    EXPECT_EQ(10, ts.avg<int64_t>());
+    EXPECT_EQ(10, ts.avg<int32_t>());
+    EXPECT_EQ(10, ts.avg<int16_t>());
+    EXPECT_EQ(10, ts.avg<int8_t>());
+    EXPECT_EQ(10, ts.avg<uint8_t>());
+  }
+
+  {
+    // Test signed integer types with negative values
+    BucketedTimeSeries<int64_t> ts(60, seconds(600));
+    ts.addValue(seconds(0), -100);
+    ts.addValue(seconds(0), -200);
+    ts.addValue(seconds(0), -300);
+    ts.addValue(seconds(0), -200, 65535);
+
+    EXPECT_DOUBLE_EQ(-200.0, ts.avg());
+    EXPECT_DOUBLE_EQ(-200.0, ts.avg<float>());
+    EXPECT_EQ(-200, ts.avg<int64_t>());
+    EXPECT_EQ(-200, ts.avg<int32_t>());
+    EXPECT_EQ(-200, ts.avg<int16_t>());
+  }
+
+  {
+    // Test uint64_t values that would overflow int64_t
+    BucketedTimeSeries<uint64_t> ts(60, seconds(600));
+    ts.addValueAggregated(seconds(0),
+                          std::numeric_limits<uint64_t>::max(),
+                          std::numeric_limits<uint64_t>::max());
+
+    EXPECT_DOUBLE_EQ(1.0, ts.avg());
+    EXPECT_DOUBLE_EQ(1.0, ts.avg<float>());
+    EXPECT_EQ(1, ts.avg<uint64_t>());
+    EXPECT_EQ(1, ts.avg<int64_t>());
+    EXPECT_EQ(1, ts.avg<int8_t>());
+  }
+
+  {
+    // Test doubles with small-ish values that will fit in integer types
+    BucketedTimeSeries<double> ts(60, seconds(600));
+    ts.addValue(seconds(0), 4.0, 100);
+    ts.addValue(seconds(0), 10.0, 200);
+    ts.addValue(seconds(0), 16.0, 100);
+
+    EXPECT_DOUBLE_EQ(10.0, ts.avg());
+    EXPECT_DOUBLE_EQ(10.0, ts.avg<float>());
+    EXPECT_EQ(10, ts.avg<uint64_t>());
+    EXPECT_EQ(10, ts.avg<int64_t>());
+    EXPECT_EQ(10, ts.avg<int32_t>());
+    EXPECT_EQ(10, ts.avg<int16_t>());
+    EXPECT_EQ(10, ts.avg<int8_t>());
+    EXPECT_EQ(10, ts.avg<uint8_t>());
+  }
+
+  {
+    // Test doubles with huge values
+    BucketedTimeSeries<double> ts(60, seconds(600));
+    ts.addValue(seconds(0), 1e19, 100);
+    ts.addValue(seconds(0), 2e19, 200);
+    ts.addValue(seconds(0), 3e19, 100);
+
+    EXPECT_DOUBLE_EQ(ts.avg(), 2e19);
+    EXPECT_NEAR(ts.avg<float>(), 2e19, 1e11);
+  }
+
+  {
+    // Test doubles where the sum adds up larger than a uint64_t,
+    // but the average fits in an int64_t
+    BucketedTimeSeries<double> ts(60, seconds(600));
+    uint64_t value = 0x3fffffffffffffff;
+    FOR_EACH_RANGE(i, 0, 16) {
+      ts.addValue(seconds(0), value);
+    }
+
+    EXPECT_DOUBLE_EQ(value, ts.avg());
+    EXPECT_DOUBLE_EQ(value, ts.avg<float>());
+    // Some precision is lost here due to the huge sum, so the
+    // integer average returned is off by one.
+    EXPECT_NEAR(value, ts.avg<uint64_t>(), 1);
+    EXPECT_NEAR(value, ts.avg<int64_t>(), 1);
+  }
+
+  {
+    // Test BucketedTimeSeries with a smaller integer type
+    BucketedTimeSeries<int16_t> ts(60, seconds(600));
+    FOR_EACH_RANGE(i, 0, 101) {
+      ts.addValue(seconds(0), i);
+    }
+
+    EXPECT_DOUBLE_EQ(50.0, ts.avg());
+    EXPECT_DOUBLE_EQ(50.0, ts.avg<float>());
+    EXPECT_EQ(50, ts.avg<uint64_t>());
+    EXPECT_EQ(50, ts.avg<int64_t>());
+    EXPECT_EQ(50, ts.avg<int16_t>());
+    EXPECT_EQ(50, ts.avg<int8_t>());
+  }
+
+  {
+    // Test BucketedTimeSeries with long double input
+    BucketedTimeSeries<long double> ts(60, seconds(600));
+    ts.addValueAggregated(seconds(0), 1000.0L, 7);
+
+    long double expected = 1000.0L / 7.0L;
+    EXPECT_DOUBLE_EQ(static_cast<double>(expected), ts.avg());
+    EXPECT_DOUBLE_EQ(static_cast<float>(expected), ts.avg<float>());
+    EXPECT_DOUBLE_EQ(expected, ts.avg<long double>());
+    EXPECT_EQ(static_cast<uint64_t>(expected), ts.avg<uint64_t>());
+    EXPECT_EQ(static_cast<int64_t>(expected), ts.avg<int64_t>());
+  }
+
+  {
+    // Test BucketedTimeSeries with int64_t values,
+    // but using an average that requires a fair amount of precision.
+    BucketedTimeSeries<int64_t> ts(60, seconds(600));
+    ts.addValueAggregated(seconds(0), 1000, 7);
+
+    long double expected = 1000.0L / 7.0L;
+    EXPECT_DOUBLE_EQ(static_cast<double>(expected), ts.avg());
+    EXPECT_DOUBLE_EQ(static_cast<float>(expected), ts.avg<float>());
+    EXPECT_DOUBLE_EQ(expected, ts.avg<long double>());
+    EXPECT_EQ(static_cast<uint64_t>(expected), ts.avg<uint64_t>());
+    EXPECT_EQ(static_cast<int64_t>(expected), ts.avg<int64_t>());
+  }
+}
+
+TEST(BucketedTimeSeries, forEachBucket) {
+  typedef BucketedTimeSeries<int64_t>::Bucket Bucket;
+  struct BucketInfo {
+    BucketInfo(const Bucket* b, TimePoint s, TimePoint ns)
+        : bucket(b), start(s), nextStart(ns) {}
+
+    const Bucket* bucket;
+    TimePoint start;
+    TimePoint nextStart;
+  };
+
+  for (const auto& data : testData) {
+    BucketedTimeSeries<int64_t> ts(data.numBuckets, seconds(data.duration));
+
+    vector<BucketInfo> info;
+    auto fn = [&](
+        const Bucket& bucket,
+        TimePoint bucketStart,
+        TimePoint bucketEnd) -> bool {
+      info.emplace_back(&bucket, bucketStart, bucketEnd);
+      return true;
+    };
+
+    // If we haven't yet added any data, the current bucket will start at 0,
+    // and all previous buckets will have negative times.
+    ts.forEachBucket(fn);
+
+    CHECK_EQ(data.numBuckets, info.size());
+
+    // Check the data passed in to the function
+    size_t infoIdx = 0;
+    size_t bucketIdx = 1;
+    seconds offset = -data.duration;
+    for (size_t n = 0; n < data.numBuckets; ++n) {
+      if (bucketIdx >= data.numBuckets) {
+        bucketIdx = 0;
+        offset += data.duration;
+      }
+
+      EXPECT_EQ(data.bucketStarts[bucketIdx] + offset, info[infoIdx].start)
+          << data.duration << "x" << data.numBuckets
+          << ": bucketIdx=" << bucketIdx << ", infoIdx=" << infoIdx;
+
+      size_t nextBucketIdx = bucketIdx + 1;
+      seconds nextOffset = offset;
+      if (nextBucketIdx >= data.numBuckets) {
+        nextBucketIdx = 0;
+        nextOffset += data.duration;
+      }
+      EXPECT_EQ(
+          data.bucketStarts[nextBucketIdx] + nextOffset,
+          info[infoIdx].nextStart)
+          << data.duration << "x" << data.numBuckets
+          << ": bucketIdx=" << bucketIdx << ", infoIdx=" << infoIdx;
+
+      EXPECT_EQ(&ts.getBucketByIndex(bucketIdx), info[infoIdx].bucket);
+
+      ++bucketIdx;
+      ++infoIdx;
+    }
+  }
+}
+
+TEST(BucketedTimeSeries, queryByIntervalSimple) {
+  BucketedTimeSeries<int> a(3, seconds(12));
+  for (int i = 0; i < 8; i++) {
+    a.addValue(seconds(i), 1);
+  }
+  // We added 1 at each second from 0..7
+  // Query from the time period 0..2.
+  // This is entirely in the first bucket, which has a sum of 4.
+  // The code knows only part of the bucket is covered, and correctly
+  // estimates the desired sum as 2.
+  EXPECT_EQ(2, a.sum(mkTimePoint(0), mkTimePoint(2)));
+}
+
+TEST(BucketedTimeSeries, queryByInterval) {
+  // Set up a BucketedTimeSeries tracking 6 seconds in 3 buckets
+  const int kNumBuckets = 3;
+  const int kDuration = 6;
+  BucketedTimeSeries<double> b(kNumBuckets, seconds(kDuration));
+
+  for (unsigned int i = 0; i < kDuration; ++i) {
+    // add value 'i' at time 'i'
+    b.addValue(mkTimePoint(i), i);
+  }
+
+  // Current bucket state:
+  // 0: time=[0, 2): values=(0, 1), sum=1, count=2
+  // 1: time=[2, 4): values=(2, 3), sum=5, count=2
+  // 2: time=[4, 6): values=(4, 5), sum=9, count=2
+  double expectedSums1[kDuration + 1][kDuration + 1] = {
+    {0,  4.5,   9, 11.5,  14, 14.5,  15},
+    {0,  4.5,   7,  9.5,  10, 10.5,  -1},
+    {0,  2.5,   5,  5.5,   6,   -1,  -1},
+    {0,  2.5,   3,  3.5,  -1,   -1,  -1},
+    {0,  0.5,   1,   -1,  -1,   -1,  -1},
+    {0,  0.5,  -1,   -1,  -1,   -1,  -1},
+    {0,   -1,  -1,   -1,  -1,   -1,  -1}
+  };
+  int expectedCounts1[kDuration + 1][kDuration + 1] = {
+    {0,  1,  2,  3,  4,  5,  6},
+    {0,  1,  2,  3,  4,  5, -1},
+    {0,  1,  2,  3,  4, -1, -1},
+    {0,  1,  2,  3, -1, -1, -1},
+    {0,  1,  2, -1, -1, -1, -1},
+    {0,  1, -1, -1, -1, -1, -1},
+    {0, -1, -1, -1, -1, -1, -1}
+  };
+
+  TimePoint currentTime = b.getLatestTime() + seconds(1);
+  for (int i = 0; i <= kDuration + 1; i++) {
+    for (int j = 0; j <= kDuration - i; j++) {
+      TimePoint start = currentTime - seconds(i + j);
+      TimePoint end = currentTime - seconds(i);
+      double expectedSum = expectedSums1[i][j];
+      EXPECT_EQ(expectedSum, b.sum(start, end))
+          << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
+          << ")";
+
+      uint64_t expectedCount = expectedCounts1[i][j];
+      EXPECT_EQ(expectedCount, b.count(start, end))
+          << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
+          << ")";
+
+      double expectedAvg = expectedCount ? expectedSum / expectedCount : 0;
+      EXPECT_EQ(expectedAvg, b.avg(start, end))
+          << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
+          << ")";
+
+      double expectedRate = j ? expectedSum / j : 0;
+      EXPECT_EQ(expectedRate, b.rate(start, end))
+          << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
+          << ")";
+    }
+  }
+
+  // Add 3 more values.
+  // This will overwrite 1 full bucket, and put us halfway through the next.
+  for (unsigned int i = kDuration; i < kDuration + 3; ++i) {
+    b.addValue(mkTimePoint(i), i);
+  }
+  EXPECT_EQ(mkTimePoint(4), b.getEarliestTime());
+
+  // Current bucket state:
+  // 0: time=[6,  8): values=(6, 7), sum=13, count=2
+  // 1: time=[8, 10): values=(8),    sum=8, count=1
+  // 2: time=[4,  6): values=(4, 5), sum=9, count=2
+  double expectedSums2[kDuration + 1][kDuration + 1] = {
+    {0,    8, 14.5,   21, 25.5,  30,  30},
+    {0,  6.5,   13, 17.5,   22,  22,  -1},
+    {0,  6.5,   11, 15.5, 15.5,  -1,  -1},
+    {0,  4.5,    9,    9,   -1,  -1,  -1},
+    {0,  4.5,  4.5,   -1,   -1,  -1,  -1},
+    {0,    0,   -1,   -1,   -1,  -1,  -1},
+    {0,   -1,   -1,   -1,   -1,  -1,  -1}
+  };
+  int expectedCounts2[kDuration + 1][kDuration + 1] = {
+    {0,  1,  2,  3,  4,  5,  5},
+    {0,  1,  2,  3,  4,  4, -1},
+    {0,  1,  2,  3,  3, -1, -1},
+    {0,  1,  2,  2, -1, -1, -1},
+    {0,  1,  1, -1, -1, -1, -1},
+    {0,  0, -1, -1, -1, -1, -1},
+    {0, -1, -1, -1, -1, -1, -1}
+  };
+
+  currentTime = b.getLatestTime() + seconds(1);
+  for (int i = 0; i <= kDuration + 1; i++) {
+    for (int j = 0; j <= kDuration - i; j++) {
+      TimePoint start = currentTime - seconds(i + j);
+      TimePoint end = currentTime - seconds(i);
+      double expectedSum = expectedSums2[i][j];
+      EXPECT_EQ(expectedSum, b.sum(start, end))
+          << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
+          << ")";
+
+      uint64_t expectedCount = expectedCounts2[i][j];
+      EXPECT_EQ(expectedCount, b.count(start, end))
+          << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
+          << ")";
+
+      double expectedAvg = expectedCount ? expectedSum / expectedCount : 0;
+      EXPECT_EQ(expectedAvg, b.avg(start, end))
+          << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
+          << ")";
+
+      TimePoint dataStart = std::max(start, b.getEarliestTime());
+      TimePoint dataEnd = std::max(end, dataStart);
+      seconds expectedInterval = dataEnd - dataStart;
+      EXPECT_EQ(expectedInterval, b.elapsed(start, end))
+          << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
+          << ")";
+
+      double expectedRate = expectedInterval.count() ?
+        expectedSum / expectedInterval.count() : 0;
+      EXPECT_EQ(expectedRate, b.rate(start, end))
+          << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
+          << ")";
+    }
+  }
+}
+
+TEST(BucketedTimeSeries, rateByInterval) {
+  const int kNumBuckets = 5;
+  const seconds kDuration(10);
+  BucketedTimeSeries<double> b(kNumBuckets, kDuration);
+
+  // Add data points at a constant rate of 10 per second.
+  // Start adding data points at kDuration, and fill half of the buckets for
+  // now.
+  TimePoint start(kDuration);
+  TimePoint end(kDuration + (kDuration / 2));
+  const double kFixedRate = 10.0;
+  for (TimePoint i = start; i < end; i += seconds(1)) {
+    b.addValue(i, kFixedRate);
+  }
+
+  // Querying the rate should yield kFixedRate.
+  EXPECT_EQ(kFixedRate, b.rate());
+  EXPECT_EQ(kFixedRate, b.rate(start, end));
+  EXPECT_EQ(kFixedRate, b.rate(start, start + kDuration));
+  EXPECT_EQ(kFixedRate, b.rate(end - kDuration, end));
+  EXPECT_EQ(kFixedRate, b.rate(end - seconds(1), end));
+  // We have been adding 1 data point per second, so countRate()
+  // should be 1.
+  EXPECT_EQ(1.0, b.countRate());
+  EXPECT_EQ(1.0, b.countRate(start, end));
+  EXPECT_EQ(1.0, b.countRate(start, start + kDuration));
+  EXPECT_EQ(1.0, b.countRate(end - kDuration, end));
+  EXPECT_EQ(1.0, b.countRate(end - seconds(1), end));
+
+  // We haven't added anything before time kDuration.
+  // Querying data earlier than this should result in a rate of 0.
+  EXPECT_EQ(0.0, b.rate(mkTimePoint(0), mkTimePoint(1)));
+  EXPECT_EQ(0.0, b.countRate(mkTimePoint(0), mkTimePoint(1)));
+
+  // Fill the remainder of the timeseries from kDuration to kDuration*2
+  start = end;
+  end = TimePoint(kDuration * 2);
+  for (TimePoint i = start; i < end; i += seconds(1)) {
+    b.addValue(i, kFixedRate);
+  }
+
+  EXPECT_EQ(kFixedRate, b.rate());
+  EXPECT_EQ(kFixedRate, b.rate(TimePoint(kDuration), TimePoint(kDuration * 2)));
+  EXPECT_EQ(kFixedRate, b.rate(TimePoint(), TimePoint(kDuration * 2)));
+  EXPECT_EQ(kFixedRate, b.rate(TimePoint(), TimePoint(kDuration * 10)));
+  EXPECT_EQ(1.0, b.countRate());
+  EXPECT_EQ(1.0, b.countRate(TimePoint(kDuration), TimePoint(kDuration * 2)));
+  EXPECT_EQ(1.0, b.countRate(TimePoint(), TimePoint(kDuration * 2)));
+  EXPECT_EQ(1.0, b.countRate(TimePoint(), TimePoint(kDuration * 10)));
+}
+
+TEST(BucketedTimeSeries, addHistorical) {
+  const int kNumBuckets = 5;
+  const seconds kDuration(10);
+  BucketedTimeSeries<double> b(kNumBuckets, kDuration);
+
+  // Initially fill with a constant rate of data
+  for (TimePoint i = mkTimePoint(0); i < mkTimePoint(10); i += seconds(1)) {
+    b.addValue(i, 10.0);
+  }
+
+  EXPECT_EQ(10.0, b.rate());
+  EXPECT_EQ(10.0, b.avg());
+  EXPECT_EQ(10, b.count());
+
+  // Add some more data points to the middle bucket
+  b.addValue(mkTimePoint(4), 40.0);
+  b.addValue(mkTimePoint(5), 40.0);
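+  // Sum is now 100 + 80 = 180 over 12 points: avg 15, rate 180 / 10s = 18.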
+  EXPECT_EQ(15.0, b.avg());
+  EXPECT_EQ(18.0, b.rate());
+  EXPECT_EQ(12, b.count());
+
+  // Now start adding more current data points, until we are about to roll over
+  // the bucket where we added the extra historical data.
+  for (TimePoint i = mkTimePoint(10); i < mkTimePoint(14); i += seconds(1)) {
+    b.addValue(i, 10.0);
+  }
+  EXPECT_EQ(15.0, b.avg());
+  EXPECT_EQ(18.0, b.rate());
+  EXPECT_EQ(12, b.count());
+
+  // Now roll over the middle bucket
+  b.addValue(mkTimePoint(14), 10.0);
+  b.addValue(mkTimePoint(15), 10.0);
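+  // The bucket that held the extra historical values has been discarded,
+  // so the stats drop back to the original constant-rate values.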
+  EXPECT_EQ(10.0, b.avg());
+  EXPECT_EQ(10.0, b.rate());
+  EXPECT_EQ(10, b.count());
+
+  // Add more historical values past the bucket window.
+  // These should be ignored.
+  EXPECT_FALSE(b.addValue(mkTimePoint(4), 40.0));
+  EXPECT_FALSE(b.addValue(mkTimePoint(5), 40.0));
+  EXPECT_EQ(10.0, b.avg());
+  EXPECT_EQ(10.0, b.rate());
+  EXPECT_EQ(10, b.count());
+}
+
+TEST(BucketedTimeSeries, reConstructEmptyTimeSeries) {
+  auto verify = [](auto timeSeries) {
+    EXPECT_TRUE(timeSeries.empty());
+    EXPECT_EQ(0, timeSeries.sum());
+    EXPECT_EQ(0, timeSeries.count());
+  };
+
+  // Create a 100-second timeseries with 10 buckets
+  BucketedTimeSeries<int64_t> ts(10, seconds(100));
+
+  verify(ts);
+
+  auto firstTime = ts.firstTime();
+  auto latestTime = ts.latestTime();
+  auto duration = ts.duration();
+  auto buckets = ts.buckets();
+
+  // Reconstruct the timeseries
+  BucketedTimeSeries<int64_t> newTs(firstTime, latestTime, duration, buckets);
+
+  verify(newTs);
+}
+
+TEST(BucketedTimeSeries, reConstructWithValidData) {
+  // Create a 100-second timeseries with 10 buckets
+  BucketedTimeSeries<int64_t> ts(10, seconds(100));
+
+  auto setup = [&] {
+    ts.clear();
+    // Add 1 value to each bucket
+    for (int n = 5; n <= 95; n += 10) {
+      ts.addValue(seconds(n), 6);
+    }
+
+    EXPECT_EQ(10, ts.count());
+    EXPECT_EQ(60, ts.sum());
+    EXPECT_EQ(6, ts.avg());
+  };
+
+  setup();
+
+  auto firstTime = ts.firstTime();
+  auto latestTime = ts.latestTime();
+  auto duration = ts.duration();
+  auto buckets = ts.buckets();
+
+  // Reconstruct the timeseries
+  BucketedTimeSeries<int64_t> newTs(firstTime, latestTime, duration, buckets);
+
+  auto compare = [&] {
+    EXPECT_EQ(ts.firstTime(), newTs.firstTime());
+    EXPECT_EQ(ts.latestTime(), newTs.latestTime());
+    EXPECT_EQ(ts.duration(), newTs.duration());
+    EXPECT_EQ(ts.buckets().size(), newTs.buckets().size());
+    EXPECT_EQ(ts.sum(), newTs.sum());
+    EXPECT_EQ(ts.count(), newTs.count());
+
+    for (auto it1 = ts.buckets().begin(), it2 = newTs.buckets().begin();
+         it1 != ts.buckets().end();
+         it1++, it2++) {
+      EXPECT_EQ(it1->sum, it2->sum);
+      EXPECT_EQ(it1->count, it2->count);
+    }
+  };
+
+  compare();
+}
+
+TEST(BucketedTimeSeries, reConstructWithCorruptedData) {
+  // With firstTime > latestTime the series must be empty, so non-zero
+  // bucket totals indicate corrupted data and the constructor should throw.
+  EXPECT_THROW(
+      {
+        std::vector<Bucket> buckets(10);
+        buckets[0].sum = 1;
+        buckets[0].count = 1;
+
+        BucketedTimeSeries<int64_t> ts(
+            mkTimePoint(1), mkTimePoint(0), Duration(10), buckets);
+      },
+      std::invalid_argument);
+
+  // The duration should be no less than latestTime - firstTime
+  EXPECT_THROW(
+      BucketedTimeSeries<int64_t>(
+          mkTimePoint(1),
+          mkTimePoint(100),
+          Duration(10),
+          std::vector<Bucket>(10)),
+      std::invalid_argument);
+}
+
+namespace IntMHTS {
+  enum Levels {
+    MINUTE,
+    HOUR,
+    ALLTIME,
+    NUM_LEVELS,
+  };
+
+  const seconds kMinuteHourDurations[] = {
+    seconds(60), seconds(3600), seconds(0)
+  };
+};
+
+TEST(MinuteHourTimeSeries, Basic) {
+  folly::MultiLevelTimeSeries<int> mhts(60, IntMHTS::NUM_LEVELS,
+                                        IntMHTS::kMinuteHourDurations);
+  EXPECT_EQ(mhts.numLevels(), IntMHTS::NUM_LEVELS);
+  EXPECT_EQ(mhts.numLevels(), 3);
+
+  EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 0);
+  EXPECT_EQ(mhts.sum(IntMHTS::HOUR), 0);
+  EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME), 0);
+
+  EXPECT_EQ(mhts.avg(IntMHTS::MINUTE), 0);
+  EXPECT_EQ(mhts.avg(IntMHTS::HOUR), 0);
+  EXPECT_EQ(mhts.avg(IntMHTS::ALLTIME), 0);
+
+  EXPECT_EQ(mhts.rate(IntMHTS::MINUTE), 0);
+  EXPECT_EQ(mhts.rate(IntMHTS::HOUR), 0);
+  EXPECT_EQ(mhts.rate(IntMHTS::ALLTIME), 0);
+
+  EXPECT_EQ(mhts.getLevel(IntMHTS::MINUTE).elapsed().count(), 0);
+  EXPECT_EQ(mhts.getLevel(IntMHTS::HOUR).elapsed().count(), 0);
+  EXPECT_EQ(mhts.getLevel(IntMHTS::ALLTIME).elapsed().count(), 0);
+
+  seconds cur_time(0);
+
+  mhts.addValue(cur_time++, 10);
+  mhts.flush();
+
+  EXPECT_EQ(mhts.getLevel(IntMHTS::MINUTE).elapsed().count(), 1);
+  EXPECT_EQ(mhts.getLevel(IntMHTS::HOUR).elapsed().count(), 1);
+  EXPECT_EQ(mhts.getLevel(IntMHTS::ALLTIME).elapsed().count(), 1);
+
+  for (int i = 0; i < 299; ++i) {
+    mhts.addValue(cur_time++, 10);
+  }
+  mhts.flush();
+
+  EXPECT_EQ(mhts.getLevel(IntMHTS::MINUTE).elapsed().count(), 60);
+  EXPECT_EQ(mhts.getLevel(IntMHTS::HOUR).elapsed().count(), 300);
+  EXPECT_EQ(mhts.getLevel(IntMHTS::ALLTIME).elapsed().count(), 300);
+
+  EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 600);
+  EXPECT_EQ(mhts.sum(IntMHTS::HOUR), 300*10);
+  EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME), 300*10);
+
+  EXPECT_EQ(mhts.avg(IntMHTS::MINUTE), 10);
+  EXPECT_EQ(mhts.avg(IntMHTS::HOUR), 10);
+  EXPECT_EQ(mhts.avg(IntMHTS::ALLTIME), 10);
+
+  EXPECT_EQ(mhts.rate(IntMHTS::MINUTE), 10);
+  EXPECT_EQ(mhts.rate(IntMHTS::HOUR), 10);
+  EXPECT_EQ(mhts.rate(IntMHTS::ALLTIME), 10);
+
+  for (int i = 0; i < 3600*3 - 300; ++i) {
+    mhts.addValue(cur_time++, 10);
+  }
+  mhts.flush();
+
+  EXPECT_EQ(mhts.getLevel(IntMHTS::MINUTE).elapsed().count(), 60);
+  EXPECT_EQ(mhts.getLevel(IntMHTS::HOUR).elapsed().count(), 3600);
+  EXPECT_EQ(mhts.getLevel(IntMHTS::ALLTIME).elapsed().count(), 3600*3);
+
+  EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 600);
+  EXPECT_EQ(mhts.sum(IntMHTS::HOUR), 3600*10);
+  EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME), 3600*3*10);
+
+  EXPECT_EQ(mhts.avg(IntMHTS::MINUTE), 10);
+  EXPECT_EQ(mhts.avg(IntMHTS::HOUR), 10);
+  EXPECT_EQ(mhts.avg(IntMHTS::ALLTIME), 10);
+
+  EXPECT_EQ(mhts.rate(IntMHTS::MINUTE), 10);
+  EXPECT_EQ(mhts.rate(IntMHTS::HOUR), 10);
+  EXPECT_EQ(mhts.rate(IntMHTS::ALLTIME), 10);
+
+  for (int i = 0; i < 3600; ++i) {
+    mhts.addValue(cur_time++, 100);
+  }
+  mhts.flush();
+
+  EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 60*100);
+  EXPECT_EQ(mhts.sum(IntMHTS::HOUR), 3600*100);
+  EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME),
+            3600*3*10 + 3600*100);
+
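+  // All-time: 468000 total over 14400 one-per-second data points = 32.5.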
+  EXPECT_EQ(mhts.avg(IntMHTS::MINUTE), 100);
+  EXPECT_EQ(mhts.avg(IntMHTS::HOUR), 100);
+  EXPECT_EQ(mhts.avg(IntMHTS::ALLTIME), 32.5);
+  EXPECT_EQ(mhts.avg<int>(IntMHTS::ALLTIME), 32);
+
+  EXPECT_EQ(mhts.rate(IntMHTS::MINUTE), 100);
+  EXPECT_EQ(mhts.rate(IntMHTS::HOUR), 100);
+  EXPECT_EQ(mhts.rate(IntMHTS::ALLTIME), 32.5);
+  EXPECT_EQ(mhts.rate<int>(IntMHTS::ALLTIME), 32);
+
+  for (int i = 0; i < 1800; ++i) {
+    mhts.addValue(cur_time++, 120);
+  }
+  mhts.flush();
+
+  EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 60*120);
+  EXPECT_EQ(mhts.sum(IntMHTS::HOUR),
+            1800*100 + 1800*120);
+  EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME),
+            3600*3*10 + 3600*100 + 1800*120);
+
+  for (int i = 0; i < 60; ++i) {
+    mhts.addValue(cur_time++, 1000);
+  }
+  mhts.flush();
+
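+  // The hour window now spans the last 1740s of 100s, all 1800s of 120s,
+  // and the 60s of 1000s.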
+  EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 60*1000);
+  EXPECT_EQ(mhts.sum(IntMHTS::HOUR),
+            1740*100 + 1800*120 + 60*1000);
+  EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME),
+            3600*3*10 + 3600*100 + 1800*120 + 60*1000);
+
+  mhts.clear();
+  EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME), 0);
+}
+
+TEST(MinuteHourTimeSeries, QueryByInterval) {
+  folly::MultiLevelTimeSeries<int> mhts(60, IntMHTS::NUM_LEVELS,
+                                        IntMHTS::kMinuteHourDurations);
+
+  TimePoint curTime;
+  for (curTime = mkTimePoint(0); curTime < mkTimePoint(7200);
+       curTime += seconds(1)) {
+    mhts.addValue(curTime, 1);
+  }
+  for (curTime = mkTimePoint(7200); curTime < mkTimePoint(7200 + 3540);
+       curTime += seconds(1)) {
+    mhts.addValue(curTime, 10);
+  }
+  for (curTime = mkTimePoint(7200 + 3540); curTime < mkTimePoint(7200 + 3600);
+       curTime += seconds(1)) {
+    mhts.addValue(curTime, 100);
+  }
+  mhts.flush();
+
+  struct TimeInterval {
+    TimePoint start;
+    TimePoint end;
+  };
+  TimeInterval intervals[12] = {
+    { curTime - seconds(60), curTime },
+    { curTime - seconds(3600), curTime },
+    { curTime - seconds(7200), curTime },
+    { curTime - seconds(3600), curTime - seconds(60) },
+    { curTime - seconds(7200), curTime - seconds(60) },
+    { curTime - seconds(7200), curTime - seconds(3600) },
+    { curTime - seconds(50), curTime - seconds(20) },
+    { curTime - seconds(3020), curTime - seconds(20) },
+    { curTime - seconds(7200), curTime - seconds(20) },
+    { curTime - seconds(3000), curTime - seconds(1000) },
+    { curTime - seconds(7200), curTime - seconds(1000) },
+    { curTime - seconds(7200), curTime - seconds(3600) },
+  };
+
+  int expectedSums[12] = {
+    6000, 41400, 32400, 35400, 32130, 16200, 3000, 33600, 32310, 20000, 27900,
+    16200
+  };
+
+  int expectedCounts[12] = {
+    60, 3600, 7200, 3540, 7140, 3600, 30, 3000, 7180, 2000, 6200, 3600
+  };
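+
+  // Sanity check on the expectations: the last 60s are all value 100
+  // (sum 6000), and the last hour is 3540s of 10s plus 60s of 100s
+  // (sum 41400, count 3600).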
+
+  for (int i = 0; i < 12; ++i) {
+    TimeInterval interval = intervals[i];
+
+    int s = mhts.sum(interval.start, interval.end);
+    EXPECT_EQ(expectedSums[i], s);
+
+    int c = mhts.count(interval.start, interval.end);
+    EXPECT_EQ(expectedCounts[i], c);
+
+    int a = mhts.avg<int>(interval.start, interval.end);
+    EXPECT_EQ(expectedCounts[i] ?
+              (expectedSums[i] / expectedCounts[i]) : 0,
+              a);
+
+    int r = mhts.rate<int>(interval.start, interval.end);
+    int expectedRate =
+      expectedSums[i] / (interval.end - interval.start).count();
+    EXPECT_EQ(expectedRate, r);
+  }
+}
+
+TEST(MultiLevelTimeSeries, Basic) {
+  // Uses the constructor that takes an initializer_list of level durations.
+  folly::MultiLevelTimeSeries<int> mhts(
+      60, {seconds(60), seconds(3600), seconds(0)});
+  EXPECT_EQ(mhts.numLevels(), 3);
+
+  EXPECT_EQ(mhts.sum(seconds(60)), 0);
+  EXPECT_EQ(mhts.sum(seconds(3600)), 0);
+  EXPECT_EQ(mhts.sum(seconds(0)), 0);
+
+  EXPECT_EQ(mhts.avg(seconds(60)), 0);
+  EXPECT_EQ(mhts.avg(seconds(3600)), 0);
+  EXPECT_EQ(mhts.avg(seconds(0)), 0);
+
+  EXPECT_EQ(mhts.rate(seconds(60)), 0);
+  EXPECT_EQ(mhts.rate(seconds(3600)), 0);
+  EXPECT_EQ(mhts.rate(seconds(0)), 0);
+
+  EXPECT_EQ(mhts.getLevelByDuration(seconds(60)).elapsed().count(), 0);
+  EXPECT_EQ(mhts.getLevelByDuration(seconds(3600)).elapsed().count(), 0);
+  EXPECT_EQ(mhts.getLevelByDuration(seconds(0)).elapsed().count(), 0);
+
+  seconds cur_time(0);
+
+  mhts.addValue(cur_time++, 10);
+  mhts.flush();
+
+  EXPECT_EQ(mhts.getLevelByDuration(seconds(60)).elapsed().count(), 1);
+  EXPECT_EQ(mhts.getLevelByDuration(seconds(3600)).elapsed().count(), 1);
+  EXPECT_EQ(mhts.getLevelByDuration(seconds(0)).elapsed().count(), 1);
+
+  for (int i = 0; i < 299; ++i) {
+    mhts.addValue(cur_time++, 10);
+  }
+  mhts.flush();
+
+  EXPECT_EQ(mhts.getLevelByDuration(seconds(60)).elapsed().count(), 60);
+  EXPECT_EQ(mhts.getLevelByDuration(seconds(3600)).elapsed().count(), 300);
+  EXPECT_EQ(mhts.getLevelByDuration(seconds(0)).elapsed().count(), 300);
+
+  EXPECT_EQ(mhts.sum(seconds(60)), 600);
+  EXPECT_EQ(mhts.sum(seconds(3600)), 300 * 10);
+  EXPECT_EQ(mhts.sum(seconds(0)), 300 * 10);
+
+  EXPECT_EQ(mhts.avg(seconds(60)), 10);
+  EXPECT_EQ(mhts.avg(seconds(3600)), 10);
+  EXPECT_EQ(mhts.avg(seconds(0)), 10);
+
+  EXPECT_EQ(mhts.rate(seconds(60)), 10);
+  EXPECT_EQ(mhts.rate(seconds(3600)), 10);
+  EXPECT_EQ(mhts.rate(seconds(0)), 10);
+
+  for (int i = 0; i < 3600 * 3 - 300; ++i) {
+    mhts.addValue(cur_time++, 10);
+  }
+  mhts.flush();
+
+  EXPECT_EQ(mhts.getLevelByDuration(seconds(60)).elapsed().count(), 60);
+  EXPECT_EQ(mhts.getLevelByDuration(seconds(3600)).elapsed().count(), 3600);
+  EXPECT_EQ(mhts.getLevelByDuration(seconds(0)).elapsed().count(), 3600 * 3);
+
+  EXPECT_EQ(mhts.sum(seconds(60)), 600);
+  EXPECT_EQ(mhts.sum(seconds(3600)), 3600 * 10);
+  EXPECT_EQ(mhts.sum(seconds(0)), 3600 * 3 * 10);
+
+  EXPECT_EQ(mhts.avg(seconds(60)), 10);
+  EXPECT_EQ(mhts.avg(seconds(3600)), 10);
+  EXPECT_EQ(mhts.avg(seconds(0)), 10);
+
+  EXPECT_EQ(mhts.rate(seconds(60)), 10);
+  EXPECT_EQ(mhts.rate(seconds(3600)), 10);
+  EXPECT_EQ(mhts.rate(seconds(0)), 10);
+
+  for (int i = 0; i < 3600; ++i) {
+    mhts.addValue(cur_time++, 100);
+  }
+  mhts.flush();
+
+  EXPECT_EQ(mhts.sum(seconds(60)), 60 * 100);
+  EXPECT_EQ(mhts.sum(seconds(3600)), 3600 * 100);
+  EXPECT_EQ(mhts.sum(seconds(0)), 3600 * 3 * 10 + 3600 * 100);
+
+  EXPECT_EQ(mhts.avg(seconds(60)), 100);
+  EXPECT_EQ(mhts.avg(seconds(3600)), 100);
+  EXPECT_EQ(mhts.avg(seconds(0)), 32.5);
+  EXPECT_EQ(mhts.avg<int>(seconds(0)), 32);
+
+  EXPECT_EQ(mhts.rate(seconds(60)), 100);
+  EXPECT_EQ(mhts.rate(seconds(3600)), 100);
+  EXPECT_EQ(mhts.rate(seconds(0)), 32.5);
+  EXPECT_EQ(mhts.rate<int>(seconds(0)), 32);
+
+  for (int i = 0; i < 1800; ++i) {
+    mhts.addValue(cur_time++, 120);
+  }
+  mhts.flush();
+
+  EXPECT_EQ(mhts.sum(seconds(60)), 60 * 120);
+  EXPECT_EQ(mhts.sum(seconds(3600)), 1800 * 100 + 1800 * 120);
+  EXPECT_EQ(mhts.sum(seconds(0)), 3600 * 3 * 10 + 3600 * 100 + 1800 * 120);
+
+  for (int i = 0; i < 60; ++i) {
+    mhts.addValue(cur_time++, 1000);
+  }
+  mhts.flush();
+
+  EXPECT_EQ(mhts.sum(seconds(60)), 60 * 1000);
+  EXPECT_EQ(mhts.sum(seconds(3600)), 1740 * 100 + 1800 * 120 + 60 * 1000);
+  EXPECT_EQ(
+      mhts.sum(seconds(0)),
+      3600 * 3 * 10 + 3600 * 100 + 1800 * 120 + 60 * 1000);
+
+  mhts.clear();
+  EXPECT_EQ(mhts.sum(seconds(0)), 0);
+}
+
+TEST(MultiLevelTimeSeries, QueryByInterval) {
+  folly::MultiLevelTimeSeries<int> mhts(
+      60, {seconds(60), seconds(3600), seconds(0)});
+
+  TimePoint curTime;
+  for (curTime = mkTimePoint(0); curTime < mkTimePoint(7200);
+       curTime += seconds(1)) {
+    mhts.addValue(curTime, 1);
+  }
+  for (curTime = mkTimePoint(7200); curTime < mkTimePoint(7200 + 3540);
+       curTime += seconds(1)) {
+    mhts.addValue(curTime, 10);
+  }
+  for (curTime = mkTimePoint(7200 + 3540); curTime < mkTimePoint(7200 + 3600);
+       curTime += seconds(1)) {
+    mhts.addValue(curTime, 100);
+  }
+  mhts.flush();
+
+  struct TimeInterval {
+    TimePoint start;
+    TimePoint end;
+  };
+
+  std::array<TimeInterval, 12> intervals = {{
+      {curTime - seconds(60), curTime},
+      {curTime - seconds(3600), curTime},
+      {curTime - seconds(7200), curTime},
+      {curTime - seconds(3600), curTime - seconds(60)},
+      {curTime - seconds(7200), curTime - seconds(60)},
+      {curTime - seconds(7200), curTime - seconds(3600)},
+      {curTime - seconds(50), curTime - seconds(20)},
+      {curTime - seconds(3020), curTime - seconds(20)},
+      {curTime - seconds(7200), curTime - seconds(20)},
+      {curTime - seconds(3000), curTime - seconds(1000)},
+      {curTime - seconds(7200), curTime - seconds(1000)},
+      {curTime - seconds(7200), curTime - seconds(3600)},
+  }};
+
+  std::array<int, 12> expectedSums = {{6000,
+                                       41400,
+                                       32400,
+                                       35400,
+                                       32130,
+                                       16200,
+                                       3000,
+                                       33600,
+                                       32310,
+                                       20000,
+                                       27900,
+                                       16200}};
+
+  std::array<int, 12> expectedCounts = {
+      {60, 3600, 7200, 3540, 7140, 3600, 30, 3000, 7180, 2000, 6200, 3600}};
+
+  for (size_t i = 0; i < intervals.size(); ++i) {
+    TimeInterval interval = intervals[i];
+
+    int s = mhts.sum(interval.start, interval.end);
+    EXPECT_EQ(expectedSums[i], s);
+
+    int c = mhts.count(interval.start, interval.end);
+    EXPECT_EQ(expectedCounts[i], c);
+
+    int a = mhts.avg<int>(interval.start, interval.end);
+    EXPECT_EQ(expectedCounts[i] ? (expectedSums[i] / expectedCounts[i]) : 0, a);
+
+    int r = mhts.rate<int>(interval.start, interval.end);
+    int expectedRate =
+        expectedSums[i] / (interval.end - interval.start).count();
+    EXPECT_EQ(expectedRate, r);
+  }
+}
diff --git a/folly/stats/test/TimeseriesHistogramTest.cpp b/folly/stats/test/TimeseriesHistogramTest.cpp
new file mode 100644 (file)
index 0000000..4ee4a69
--- /dev/null
@@ -0,0 +1,540 @@
+/*
+ * Copyright 2017 Facebook, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <folly/stats/TimeseriesHistogram.h>
+#include <folly/stats/TimeseriesHistogram-defs.h>
+
+#include <random>
+
+#include <folly/portability/GTest.h>
+
+using namespace std;
+using namespace folly;
+using std::chrono::seconds;
+
+namespace {
+namespace IntMTMHTS {
+  enum Levels {
+    MINUTE,
+    TEN_MINUTE,
+    HOUR,
+    ALLTIME,
+    NUM_LEVELS,
+  };
+
+  const seconds kDurations[] = {
+    seconds(60), seconds(600), seconds(3600), seconds(0)
+  };
+};
+
+namespace IntMHTS {
+  enum Levels {
+    MINUTE,
+    HOUR,
+    ALLTIME,
+    NUM_LEVELS,
+  };
+
+  const seconds kDurations[] = {
+    seconds(60), seconds(3600), seconds(0)
+  };
+};
+
+typedef std::mt19937 RandomInt32;
+
+using StatsClock = folly::LegacyStatsClock<std::chrono::seconds>;
+StatsClock::time_point mkTimePoint(int value) {
+  return StatsClock::time_point(StatsClock::duration(value));
+}
+}
+
+TEST(TimeseriesHistogram, Percentile) {
+  RandomInt32 random(5);
+  // [10, 109], 12 buckets including above and below
+  {
+    TimeseriesHistogram<int> h(10, 10, 110,
+                               MultiLevelTimeSeries<int>(
+                                 60, IntMTMHTS::NUM_LEVELS,
+                                 IntMTMHTS::kDurations));
+
+    EXPECT_EQ(0, h.getPercentileEstimate(0, IntMTMHTS::ALLTIME));
+
+    EXPECT_EQ(12, h.getNumBuckets());
+    EXPECT_EQ(10, h.getBucketSize());
+    EXPECT_EQ(10, h.getMin());
+    EXPECT_EQ(110, h.getMax());
+
+    for (size_t i = 0; i < h.getNumBuckets(); ++i) {
+      EXPECT_EQ(4, h.getBucket(i).numLevels());
+    }
+
+    int maxVal = 120;
+    h.addValue(mkTimePoint(0), 0);
+    h.addValue(mkTimePoint(0), maxVal);
+    for (int i = 0; i < 98; i++) {
+      h.addValue(mkTimePoint(0), random() % maxVal);
+    }
+
+    h.update(mkTimePoint(1500000000));
+    // bucket 0 stores everything below min, so its minimum
+    // is the lowest possible number
+    EXPECT_EQ(std::numeric_limits<int>::min(),
+              h.getPercentileBucketMin(1, IntMTMHTS::ALLTIME));
+    EXPECT_EQ(110, h.getPercentileBucketMin(99, IntMTMHTS::ALLTIME));
+
+    EXPECT_EQ(-2, h.getPercentileEstimate(0, IntMTMHTS::ALLTIME));
+    EXPECT_EQ(-1, h.getPercentileEstimate(1, IntMTMHTS::ALLTIME));
+    EXPECT_EQ(119, h.getPercentileEstimate(99, IntMTMHTS::ALLTIME));
+    EXPECT_EQ(120, h.getPercentileEstimate(100, IntMTMHTS::ALLTIME));
+  }
+}
+
+TEST(TimeseriesHistogram, String) {
+  RandomInt32 random(5);
+  // [10, 109], 12 buckets including above and below
+  {
+    TimeseriesHistogram<int> hist(10, 10, 110,
+                                  MultiLevelTimeSeries<int>(
+                                    60, IntMTMHTS::NUM_LEVELS,
+                                    IntMTMHTS::kDurations));
+
+    int maxVal = 120;
+    hist.addValue(mkTimePoint(0), 0);
+    hist.addValue(mkTimePoint(0), maxVal);
+    for (int i = 0; i < 98; i++) {
+      hist.addValue(mkTimePoint(0), random() % maxVal);
+    }
+
+    hist.update(mkTimePoint(0));
+
+    const char* const kStringValues1[IntMTMHTS::NUM_LEVELS] =  {
+      "-2147483648:12:4,10:8:13,20:8:24,30:6:34,40:13:46,50:8:54,60:7:64,"
+        "70:7:74,80:8:84,90:10:94,100:3:103,110:10:115",
+      "-2147483648:12:4,10:8:13,20:8:24,30:6:34,40:13:46,50:8:54,60:7:64,"
+        "70:7:74,80:8:84,90:10:94,100:3:103,110:10:115",
+      "-2147483648:12:4,10:8:13,20:8:24,30:6:34,40:13:46,50:8:54,60:7:64,"
+        "70:7:74,80:8:84,90:10:94,100:3:103,110:10:115",
+      "-2147483648:12:4,10:8:13,20:8:24,30:6:34,40:13:46,50:8:54,60:7:64,"
+        "70:7:74,80:8:84,90:10:94,100:3:103,110:10:115",
+    };
+
+    CHECK_EQ(IntMTMHTS::NUM_LEVELS, hist.getNumLevels());
+
+    for (size_t level = 0; level < hist.getNumLevels(); ++level) {
+      EXPECT_EQ(kStringValues1[level], hist.getString(level));
+    }
+
+    const char* const kStringValues2[IntMTMHTS::NUM_LEVELS] =  {
+      "-2147483648:12:4,10:8:13,20:8:24,30:6:34,40:13:46,50:8:54,60:7:64,"
+        "70:7:74,80:8:84,90:10:94,100:3:103,110:10:115",
+      "-2147483648:12:4,10:8:13,20:8:24,30:6:34,40:13:46,50:8:54,60:7:64,"
+        "70:7:74,80:8:84,90:10:94,100:3:103,110:10:115",
+      "-2147483648:12:4,10:8:13,20:8:24,30:6:34,40:13:46,50:8:54,60:7:64,"
+        "70:7:74,80:8:84,90:10:94,100:3:103,110:10:115",
+      "-2147483648:12:4,10:8:13,20:8:24,30:6:34,40:13:46,50:8:54,60:7:64,"
+        "70:7:74,80:8:84,90:10:94,100:3:103,110:10:115",
+    };
+
+    CHECK_EQ(IntMTMHTS::NUM_LEVELS, hist.getNumLevels());
+
+    for (size_t level = 0; level < hist.getNumLevels(); ++level) {
+      EXPECT_EQ(kStringValues2[level], hist.getString(level));
+    }
+  }
+}
+
+TEST(TimeseriesHistogram, Clear) {
+  {
+    TimeseriesHistogram<int> hist(10, 0, 100,
+                                  MultiLevelTimeSeries<int>(
+                                    60, IntMTMHTS::NUM_LEVELS,
+                                    IntMTMHTS::kDurations));
+
+    for (int now = 0; now < 3600; now++) {
+      for (int i = 0; i < 100; i++) {
+        hist.addValue(mkTimePoint(now), i, 2); // adds each item 2 times
+      }
+    }
+
+    // check clearing
+    hist.clear();
+
+    for (size_t b = 0; b < hist.getNumBuckets(); ++b) {
+      EXPECT_EQ(0, hist.getBucket(b).count(IntMTMHTS::MINUTE));
+      EXPECT_EQ(0, hist.getBucket(b).count(IntMTMHTS::TEN_MINUTE));
+      EXPECT_EQ(0, hist.getBucket(b).count(IntMTMHTS::HOUR));
+      EXPECT_EQ(0, hist.getBucket(b).count(IntMTMHTS::ALLTIME));
+    }
+
+    for (int pct = 0; pct <= 100; pct++) {
+      EXPECT_EQ(0, hist.getPercentileBucketMin(pct, IntMTMHTS::MINUTE));
+      EXPECT_EQ(0, hist.getPercentileBucketMin(pct, IntMTMHTS::TEN_MINUTE));
+      EXPECT_EQ(0, hist.getPercentileBucketMin(pct, IntMTMHTS::HOUR));
+      EXPECT_EQ(0, hist.getPercentileBucketMin(pct, IntMTMHTS::ALLTIME));
+
+      EXPECT_EQ(0, hist.getPercentileEstimate(pct, IntMTMHTS::MINUTE));
+      EXPECT_EQ(0, hist.getPercentileEstimate(pct, IntMTMHTS::TEN_MINUTE));
+      EXPECT_EQ(0, hist.getPercentileEstimate(pct, IntMTMHTS::HOUR));
+      EXPECT_EQ(0, hist.getPercentileEstimate(pct, IntMTMHTS::ALLTIME));
+    }
+  }
+}
+
+TEST(TimeseriesHistogram, Basic) {
+  {
+    TimeseriesHistogram<int> hist(10, 0, 100,
+                                  MultiLevelTimeSeries<int>(
+                                    60, IntMTMHTS::NUM_LEVELS,
+                                    IntMTMHTS::kDurations));
+
+    for (int now = 0; now < 3600; now++) {
+      for (int i = 0; i < 100; i++) {
+        hist.addValue(mkTimePoint(now), i);
+      }
+    }
+
+    hist.update(mkTimePoint(3599));
+    for (int pct = 1; pct <= 100; pct++) {
+      int expected = (pct - 1) / 10 * 10;
+      EXPECT_EQ(expected, hist.getPercentileBucketMin(pct, IntMTMHTS::MINUTE));
+      EXPECT_EQ(expected, hist.getPercentileBucketMin(pct,
+                                                      IntMTMHTS::TEN_MINUTE));
+      EXPECT_EQ(expected, hist.getPercentileBucketMin(pct, IntMTMHTS::HOUR));
+      EXPECT_EQ(expected, hist.getPercentileBucketMin(pct, IntMTMHTS::ALLTIME));
+    }
+
+    for (size_t b = 1; (b + 1) < hist.getNumBuckets(); ++b) {
+      EXPECT_EQ(600, hist.getBucket(b).count(IntMTMHTS::MINUTE));
+      EXPECT_EQ(6000, hist.getBucket(b).count(IntMTMHTS::TEN_MINUTE));
+      EXPECT_EQ(36000, hist.getBucket(b).count(IntMTMHTS::HOUR));
+      EXPECT_EQ(36000, hist.getBucket(b).count(IntMTMHTS::ALLTIME));
+    }
+    EXPECT_EQ(0, hist.getBucket(0).count(IntMTMHTS::MINUTE));
+    EXPECT_EQ(0, hist.getBucket(hist.getNumBuckets() - 1).count(
+                IntMTMHTS::MINUTE));
+
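+    // 100 data points per second: 6000 in the last minute, 60000 in the
+    // last ten minutes, and 360000 over the hour (also the all-time total).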
+    EXPECT_EQ(6000, hist.count(IntMTMHTS::MINUTE));
+    EXPECT_EQ(60000, hist.count(IntMTMHTS::TEN_MINUTE));
+    EXPECT_EQ(360000, hist.count(IntMTMHTS::HOUR));
+    EXPECT_EQ(360000, hist.count(IntMTMHTS::ALLTIME));
+
+    // Each second we added 4950 total over 100 data points
+    EXPECT_EQ(297000, hist.sum(IntMTMHTS::MINUTE));
+    EXPECT_EQ(2970000, hist.sum(IntMTMHTS::TEN_MINUTE));
+    EXPECT_EQ(17820000, hist.sum(IntMTMHTS::HOUR));
+    EXPECT_EQ(17820000, hist.sum(IntMTMHTS::ALLTIME));
+
+    EXPECT_EQ(49, hist.avg<int>(IntMTMHTS::MINUTE));
+    EXPECT_EQ(49, hist.avg<int>(IntMTMHTS::TEN_MINUTE));
+    EXPECT_EQ(49, hist.avg<int>(IntMTMHTS::HOUR));
+    EXPECT_EQ(49, hist.avg<int>(IntMTMHTS::ALLTIME));
+    EXPECT_EQ(49.5, hist.avg<double>(IntMTMHTS::MINUTE));
+    EXPECT_EQ(49.5, hist.avg<double>(IntMTMHTS::TEN_MINUTE));
+    EXPECT_EQ(49.5, hist.avg<double>(IntMTMHTS::HOUR));
+    EXPECT_EQ(49.5, hist.avg<double>(IntMTMHTS::ALLTIME));
+
+    EXPECT_EQ(4950, hist.rate<int>(IntMTMHTS::MINUTE));
+    EXPECT_EQ(4950, hist.rate<int>(IntMTMHTS::TEN_MINUTE));
+    EXPECT_EQ(4950, hist.rate<int>(IntMTMHTS::HOUR));
+    EXPECT_EQ(4950, hist.rate<int>(IntMTMHTS::ALLTIME));
+    EXPECT_EQ(4950, hist.rate<double>(IntMTMHTS::MINUTE));
+    EXPECT_EQ(4950, hist.rate<double>(IntMTMHTS::TEN_MINUTE));
+    EXPECT_EQ(4950, hist.rate<double>(IntMTMHTS::HOUR));
+    EXPECT_EQ(4950, hist.rate<double>(IntMTMHTS::ALLTIME));
+
+    EXPECT_EQ(1000, hist.count(mkTimePoint(10), mkTimePoint(20)));
+    EXPECT_EQ(49500, hist.sum(mkTimePoint(10), mkTimePoint(20)));
+    EXPECT_EQ(4950, hist.rate(mkTimePoint(10), mkTimePoint(20)));
+    EXPECT_EQ(49.5, hist.avg<double>(mkTimePoint(10), mkTimePoint(20)));
+
+    EXPECT_EQ(200, hist.count(mkTimePoint(3550), mkTimePoint(3552)));
+    EXPECT_EQ(9900, hist.sum(mkTimePoint(3550), mkTimePoint(3552)));
+    EXPECT_EQ(4950, hist.rate(mkTimePoint(3550), mkTimePoint(3552)));
+    EXPECT_EQ(49.5, hist.avg<double>(mkTimePoint(3550), mkTimePoint(3552)));
+
+    EXPECT_EQ(0, hist.count(mkTimePoint(4550), mkTimePoint(4552)));
+    EXPECT_EQ(0, hist.sum(mkTimePoint(4550), mkTimePoint(4552)));
+    EXPECT_EQ(0, hist.rate(mkTimePoint(4550), mkTimePoint(4552)));
+    EXPECT_EQ(0, hist.avg<double>(mkTimePoint(4550), mkTimePoint(4552)));
+  }
+
+  // -----------------
+
+  {
+    TimeseriesHistogram<int> hist(10, 0, 100,
+                                  MultiLevelTimeSeries<int>(
+                                    60, IntMTMHTS::NUM_LEVELS,
+                                    IntMTMHTS::kDurations));
+
+    for (int now = 0; now < 3600; now++) {
+      for (int i = 0; i < 100; i++) {
+        hist.addValue(mkTimePoint(now), i, 2); // adds each item 2 times
+      }
+    }
+
+    hist.update(mkTimePoint(3599));
+    for (int pct = 1; pct <= 100; pct++) {
+      int expected = (pct - 1) / 10 * 10;
+      EXPECT_EQ(expected, hist.getPercentileBucketMin(pct, IntMTMHTS::MINUTE));
+      EXPECT_EQ(expected, hist.getPercentileBucketMin(pct,
+                                                      IntMTMHTS::TEN_MINUTE));
+      EXPECT_EQ(expected, hist.getPercentileBucketMin(pct, IntMTMHTS::HOUR));
+      EXPECT_EQ(expected, hist.getPercentileBucketMin(pct, IntMTMHTS::ALLTIME));
+    }
+
+    for (size_t b = 1; (b + 1) < hist.getNumBuckets(); ++b) {
+      EXPECT_EQ(600 * 2, hist.getBucket(b).count(IntMTMHTS::MINUTE));
+      EXPECT_EQ(6000 * 2, hist.getBucket(b).count(IntMTMHTS::TEN_MINUTE));
+      EXPECT_EQ(36000 * 2, hist.getBucket(b).count(IntMTMHTS::HOUR));
+      EXPECT_EQ(36000 * 2, hist.getBucket(b).count(IntMTMHTS::ALLTIME));
+    }
+    EXPECT_EQ(0, hist.getBucket(0).count(IntMTMHTS::MINUTE));
+    EXPECT_EQ(0, hist.getBucket(hist.getNumBuckets() - 1).count(
+                IntMTMHTS::MINUTE));
+  }
+
+  // -----------------
+
+  {
+    TimeseriesHistogram<int> hist(10, 0, 100,
+                                  MultiLevelTimeSeries<int>(
+                                    60, IntMTMHTS::NUM_LEVELS,
+                                    IntMTMHTS::kDurations));
+
+    for (int now = 0; now < 3600; now++) {
+      for (int i = 0; i < 50; i++) {
+        hist.addValue(mkTimePoint(now), i * 2, 2); // adds each item 2 times
+      }
+    }
+
+    hist.update(mkTimePoint(3599));
+    for (int pct = 1; pct <= 100; pct++) {
+      int expected = (pct - 1) / 10 * 10;
+      EXPECT_EQ(expected, hist.getPercentileBucketMin(pct, IntMTMHTS::MINUTE));
+      EXPECT_EQ(expected, hist.getPercentileBucketMin(pct,
+                                                      IntMTMHTS::TEN_MINUTE));
+      EXPECT_EQ(expected, hist.getPercentileBucketMin(pct, IntMTMHTS::HOUR));
+      EXPECT_EQ(expected, hist.getPercentileBucketMin(pct, IntMTMHTS::ALLTIME));
+    }
+
+    EXPECT_EQ(0, hist.getBucket(0).count(IntMTMHTS::MINUTE));
+    EXPECT_EQ(0, hist.getBucket(0).count(IntMTMHTS::TEN_MINUTE));
+    EXPECT_EQ(0, hist.getBucket(0).count(IntMTMHTS::HOUR));
+    EXPECT_EQ(0, hist.getBucket(0).count(IntMTMHTS::ALLTIME));
+    EXPECT_EQ(0, hist.getBucket(hist.getNumBuckets() - 1).count(
+                IntMTMHTS::MINUTE));
+    EXPECT_EQ(0,
+              hist.getBucket(hist.getNumBuckets() - 1).
+                count(IntMTMHTS::TEN_MINUTE));
+    EXPECT_EQ(0, hist.getBucket(hist.getNumBuckets() - 1).count(
+                IntMTMHTS::HOUR));
+    EXPECT_EQ(0,
+              hist.getBucket(hist.getNumBuckets() - 1).count(
+                IntMTMHTS::ALLTIME));
+
+    for (size_t b = 1; (b + 1) < hist.getNumBuckets(); ++b) {
+      EXPECT_EQ(600, hist.getBucket(b).count(IntMTMHTS::MINUTE));
+      EXPECT_EQ(6000, hist.getBucket(b).count(IntMTMHTS::TEN_MINUTE));
+      EXPECT_EQ(36000, hist.getBucket(b).count(IntMTMHTS::HOUR));
+      EXPECT_EQ(36000, hist.getBucket(b).count(IntMTMHTS::ALLTIME));
+    }
+
+    for (int i = 0; i < 100; ++i) {
+      hist.addValue(mkTimePoint(3599), 200 + i);
+    }
+    hist.update(mkTimePoint(3599));
+    EXPECT_EQ(100,
+              hist.getBucket(hist.getNumBuckets() - 1).count(
+                IntMTMHTS::ALLTIME));
+  }
+}
+
+TEST(TimeseriesHistogram, QueryByInterval) {
+  TimeseriesHistogram<int> mhts(8, 8, 120,
+                                MultiLevelTimeSeries<int>(
+                                  60, IntMHTS::NUM_LEVELS,
+                                  IntMHTS::kDurations));
+
+  mhts.update(mkTimePoint(0));
+
+  int curTime;
+  for (curTime = 0; curTime < 7200; curTime++) {
+    mhts.addValue(mkTimePoint(curTime), 1);
+  }
+  for (curTime = 7200; curTime < 7200 + 3540; curTime++) {
+    mhts.addValue(mkTimePoint(curTime), 10);
+  }
+  for (curTime = 7200 + 3540; curTime < 7200 + 3600; curTime++) {
+    mhts.addValue(mkTimePoint(curTime), 100);
+  }
+
+  mhts.update(mkTimePoint(7200 + 3600 - 1));
+
+  struct TimeInterval {
+    TimeInterval(int s, int e) : start(mkTimePoint(s)), end(mkTimePoint(e)) {}
+
+    StatsClock::time_point start;
+    StatsClock::time_point end;
+  };
+  TimeInterval intervals[12] = {
+    { curTime - 60, curTime },
+    { curTime - 3600, curTime },
+    { curTime - 7200, curTime },
+    { curTime - 3600, curTime - 60 },
+    { curTime - 7200, curTime - 60 },
+    { curTime - 7200, curTime - 3600 },
+    { curTime - 50, curTime - 20 },
+    { curTime - 3020, curTime - 20 },
+    { curTime - 7200, curTime - 20 },
+    { curTime - 3000, curTime - 1000 },
+    { curTime - 7200, curTime - 1000 },
+    { curTime - 7200, curTime - 3600 },
+  };
+
+  int expectedSums[12] = {
+    6000, 41400, 32400, 35400, 32129, 16200, 3000, 33600, 32308, 20000, 27899,
+    16200
+  };
+
+  int expectedCounts[12] = {
+    60, 3600, 7200, 3540, 7139, 3600, 30, 3000, 7178, 2000, 6199, 3600
+  };
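+
+  // Some of these sums and counts are estimates that can be off by one or
+  // two from the exact values; the tolerance check at the end of this test
+  // accounts for that.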
+
+  // The first 7200 values added all fell below the histogram minimum,
+  // and went into the bucket that tracks all of the too-small values.
+  // This bucket reports a minimum value of the smallest possible integer.
+  int belowMinBucket = std::numeric_limits<int>::min();
+
+  int expectedValues[12][3] = {
+    {96, 96, 96},
+    { 8,  8, 96},
+    { belowMinBucket,  belowMinBucket,  8}, // alltime
+    { 8,  8,  8},
+    { belowMinBucket,  belowMinBucket,  8}, // alltime
+    { belowMinBucket,  belowMinBucket,  8}, // alltime
+    {96, 96, 96},
+    { 8,  8, 96},
+    { belowMinBucket,  belowMinBucket,  8}, // alltime
+    { 8,  8,  8},
+    { belowMinBucket,  belowMinBucket,  8}, // alltime
+    { belowMinBucket,  belowMinBucket,  8}  // alltime
+  };
+
+  for (int i = 0; i < 12; i++) {
+    const auto& itv = intervals[i];
+    int s = mhts.sum(itv.start, itv.end);
+    EXPECT_EQ(expectedSums[i], s);
+
+    int c = mhts.count(itv.start, itv.end);
+    EXPECT_EQ(expectedCounts[i], c);
+  }
+
+  // 3 levels
+  for (int i = 1; i <= 100; i++) {
+    EXPECT_EQ(96, mhts.getPercentileBucketMin(i, 0));
+    EXPECT_EQ(
+        96,
+        mhts.getPercentileBucketMin(
+            i, mkTimePoint(curTime - 60), mkTimePoint(curTime)));
+    EXPECT_EQ(
+        8,
+        mhts.getPercentileBucketMin(
+            i, mkTimePoint(curTime - 3540), mkTimePoint(curTime - 60)));
+  }
+
+  EXPECT_EQ(8, mhts.getPercentileBucketMin(1, 1));
+  EXPECT_EQ(8, mhts.getPercentileBucketMin(98, 1));
+  EXPECT_EQ(96, mhts.getPercentileBucketMin(99, 1));
+  EXPECT_EQ(96, mhts.getPercentileBucketMin(100, 1));
+
+  EXPECT_EQ(belowMinBucket, mhts.getPercentileBucketMin(1, 2));
+  EXPECT_EQ(belowMinBucket, mhts.getPercentileBucketMin(66, 2));
+  EXPECT_EQ(8, mhts.getPercentileBucketMin(67, 2));
+  EXPECT_EQ(8, mhts.getPercentileBucketMin(99, 2));
+  EXPECT_EQ(96, mhts.getPercentileBucketMin(100, 2));
+
+  // Percentiles that fall entirely within bucket 0 (below min) report
+  // belowMinBucket as their bucket minimum.
+  for (int i = 0; i < 12; i++) {
+    const auto& itv = intervals[i];
+    int v = mhts.getPercentileBucketMin(1, itv.start, itv.end);
+    EXPECT_EQ(expectedValues[i][0], v);
+
+    v = mhts.getPercentileBucketMin(50, itv.start, itv.end);
+    EXPECT_EQ(expectedValues[i][1], v);
+
+    v = mhts.getPercentileBucketMin(99, itv.start, itv.end);
+    EXPECT_EQ(expectedValues[i][2], v);
+  }
+
+  for (int i = 0; i < 12; i++) {
+    const auto& itv = intervals[i];
+    // Some of the older intervals that fall in the alltime bucket
+    // are off by 1 or 2 in their estimated counts.
+    size_t tolerance = 0;
+    if (itv.start <= mkTimePoint(curTime - 7200)) {
+      tolerance = 2;
+    } else if (itv.start <= mkTimePoint(curTime - 3000)) {
+      tolerance = 1;
+    }
+    size_t actualCount = (itv.end - itv.start).count();
+    size_t estimatedCount = mhts.count(itv.start, itv.end);
+    EXPECT_GE(actualCount, estimatedCount);
+    EXPECT_LE(actualCount - tolerance, estimatedCount);
+  }
+}
+
+TEST(TimeseriesHistogram, SingleUniqueValue) {
+  int values[] = {-1, 0, 500, 1000, 1500};
+  for (int ii = 0; ii < 5; ++ii) {
+    int value = values[ii];
+    TimeseriesHistogram<int> h(10, 0, 1000,
+                               MultiLevelTimeSeries<int>(
+                                 60, IntMTMHTS::NUM_LEVELS,
+                                 IntMTMHTS::kDurations));
+
+    const int kNumIters = 1000;
+    for (int jj = 0; jj < kNumIters; ++jj) {
+      h.addValue(mkTimePoint(1), value);
+    }
+    h.update(mkTimePoint(1));
+    // since we've only added one unique value, all percentiles should
+    // be that value
+    EXPECT_EQ(h.getPercentileEstimate(10, 0), value);
+    EXPECT_EQ(h.getPercentileEstimate(50, 0), value);
+    EXPECT_EQ(h.getPercentileEstimate(99, 0), value);
+
+    // Things get trickier if there are multiple unique values.
+    const int kNewValue = 750;
+    for (int kk = 0; kk < 2*kNumIters; ++kk) {
+      h.addValue(mkTimePoint(1), kNewValue);
+    }
+    h.update(mkTimePoint(1));
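+    // Estimates are only bucket-accurate: kNewValue + 5 +/- 5 spans the
+    // 10-wide bucket containing kNewValue.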
+    EXPECT_NEAR(h.getPercentileEstimate(50, 0), kNewValue+5, 5);
+    if (value >= 0 && value <= 1000) {
+      // only do further testing if value is within our bucket range,
+      // else estimates can be wildly off
+      if (kNewValue > value) {
+        EXPECT_NEAR(h.getPercentileEstimate(10, 0), value+5, 5);
+        EXPECT_NEAR(h.getPercentileEstimate(99, 0), kNewValue+5, 5);
+      } else {
+        EXPECT_NEAR(h.getPercentileEstimate(10, 0), kNewValue+5, 5);
+        EXPECT_NEAR(h.getPercentileEstimate(99, 0), value+5, 5);
+      }
+    }
+  }
+}
diff --git a/folly/test/HistogramBenchmark.cpp b/folly/test/HistogramBenchmark.cpp
deleted file mode 100644 (file)
index d708253..0000000
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright 2017 Facebook, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include <folly/stats/Histogram.h>
-
-#include <folly/Benchmark.h>
-#include <folly/Foreach.h>
-#include <folly/portability/GFlags.h>
-
-using folly::Histogram;
-
-void addValue(unsigned int n, int64_t bucketSize, int64_t min, int64_t max) {
-  Histogram<int64_t> hist(bucketSize, min, max);
-  int64_t num = min;
-  FOR_EACH_RANGE (i, 0, n) {
-    hist.addValue(num);
-    ++num;
-    if (num > max) { num = min; }
-  }
-}
-
-BENCHMARK_NAMED_PARAM(addValue, 0_to_100, 1, 0, 100);
-BENCHMARK_NAMED_PARAM(addValue, 0_to_1000, 10, 0, 1000);
-BENCHMARK_NAMED_PARAM(addValue, 5k_to_20k, 250, 5000, 20000);
-
-int main(int argc, char *argv[]) {
-  gflags::ParseCommandLineFlags(&argc, &argv, true);
-  folly::runBenchmarks();
-  return 0;
-}
diff --git a/folly/test/HistogramTest.cpp b/folly/test/HistogramTest.cpp
deleted file mode 100644 (file)
index c2997a3..0000000
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * Copyright 2017 Facebook, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <folly/stats/Histogram.h>
-#include <folly/stats/Histogram-defs.h>
-
-#include <folly/portability/GTest.h>
-
-using folly::Histogram;
-
-// Insert 100 evenly distributed values into a histogram with 100 buckets
-TEST(Histogram, Test100) {
-  Histogram<int64_t> h(1, 0, 100);
-
-  for (unsigned int n = 0; n < 100; ++n) {
-    h.addValue(n);
-  }
-
-  // 100 buckets, plus 1 for below min, and 1 for above max
-  EXPECT_EQ(h.getNumBuckets(), 102);
-
-  double epsilon = 1e-6;
-  for (unsigned int n = 0; n <= 100; ++n) {
-    double pct = n / 100.0;
-
-    // Floating point arithmetic isn't 100% accurate, and if we just divide
-    // (n / 100) the value should be exactly on a bucket boundary.  Add espilon
-    // to ensure we fall in the upper bucket.
-    if (n < 100) {
-      double lowPct = -1.0;
-      double highPct = -1.0;
-      unsigned int bucketIdx = h.getPercentileBucketIdx(pct + epsilon,
-                                                        &lowPct, &highPct);
-      EXPECT_EQ(n + 1, bucketIdx);
-      EXPECT_FLOAT_EQ(n / 100.0, lowPct);
-      EXPECT_FLOAT_EQ((n + 1) / 100.0, highPct);
-    }
-
-    // Also test n - epsilon, to test falling in the lower bucket.
-    if (n > 0) {
-      double lowPct = -1.0;
-      double highPct = -1.0;
-      unsigned int bucketIdx = h.getPercentileBucketIdx(pct - epsilon,
-                                                        &lowPct, &highPct);
-      EXPECT_EQ(n, bucketIdx);
-      EXPECT_FLOAT_EQ((n - 1) / 100.0, lowPct);
-      EXPECT_FLOAT_EQ(n / 100.0, highPct);
-    }
-
-    // Check getPercentileEstimate()
-    EXPECT_EQ(n, h.getPercentileEstimate(pct));
-  }
-}
-
-// Test calling getPercentileBucketIdx() and getPercentileEstimate() on an
-// empty histogram
-TEST(Histogram, TestEmpty) {
-  Histogram<int64_t> h(1, 0, 100);
-
-  for (unsigned int n = 0; n <= 100; ++n) {
-    double pct = n / 100.0;
-
-    double lowPct = -1.0;
-    double highPct = -1.0;
-    unsigned int bucketIdx = h.getPercentileBucketIdx(pct, &lowPct, &highPct);
-    EXPECT_EQ(1, bucketIdx);
-    EXPECT_FLOAT_EQ(0.0, lowPct);
-    EXPECT_FLOAT_EQ(0.0, highPct);
-
-    EXPECT_EQ(0, h.getPercentileEstimate(pct));
-  }
-}
-
-// Test calling getPercentileBucketIdx() and getPercentileEstimate() on a
-// histogram with just a single value.
-TEST(Histogram, Test1) {
-  Histogram<int64_t> h(1, 0, 100);
-  h.addValue(42);
-
-  for (unsigned int n = 0; n < 100; ++n) {
-    double pct = n / 100.0;
-
-    double lowPct = -1.0;
-    double highPct = -1.0;
-    unsigned int bucketIdx = h.getPercentileBucketIdx(pct, &lowPct, &highPct);
-    EXPECT_EQ(43, bucketIdx);
-    EXPECT_FLOAT_EQ(0.0, lowPct);
-    EXPECT_FLOAT_EQ(1.0, highPct);
-
-    EXPECT_EQ(42, h.getPercentileEstimate(pct));
-  }
-}
-
-// Test adding enough numbers to make the sum value overflow in the
-// "below min" bucket
-TEST(Histogram, TestOverflowMin) {
-  Histogram<int64_t> h(1, 0, 100);
-
-  for (unsigned int n = 0; n < 9; ++n) {
-    h.addValue(-0x0fffffffffffffff);
-  }
-
-  // Compute a percentile estimate.  We only added values to the "below min"
-  // bucket, so this should check that bucket.  We're mainly verifying that the
-  // code doesn't crash here when the bucket average is larger than the max
-  // value that is supposed to be in the bucket.
-  int64_t estimate = h.getPercentileEstimate(0.05);
-  // The code will return the smallest possible value when it detects an
-  // overflow beyond the minimum value.
-  EXPECT_EQ(std::numeric_limits<int64_t>::min(), estimate);
-}
-
-// Test adding enough numbers to make the sum value overflow in the
-// "above max" bucket
-TEST(Histogram, TestOverflowMax) {
-  Histogram<int64_t> h(1, 0, 100);
-
-  for (unsigned int n = 0; n < 9; ++n) {
-    h.addValue(0x0fffffffffffffff);
-  }
-
-  // The code will return the maximum possible value when it detects an
-  // overflow beyond the max value.
-  int64_t estimate = h.getPercentileEstimate(0.95);
-  EXPECT_EQ(std::numeric_limits<int64_t>::max(), estimate);
-}
-
-// Test adding enough numbers to make the sum value overflow in one of the
-// normal buckets
-TEST(Histogram, TestOverflowBucket) {
-  Histogram<int64_t> h(0x0100000000000000, 0, 0x1000000000000000);
-
-  for (unsigned int n = 0; n < 9; ++n) {
-    h.addValue(0x0fffffffffffffff);
-  }
-
-  // The histogram code should return the bucket midpoint
-  // when it detects overflow.
-  int64_t estimate = h.getPercentileEstimate(0.95);
-  EXPECT_EQ(0x0f80000000000000, estimate);
-}
-
-TEST(Histogram, TestDouble) {
-  // Insert 100 evenly spaced values into a histogram
-  Histogram<double> h(100.0, 0.0, 5000.0);
-  for (double n = 50; n < 5000; n += 100) {
-    h.addValue(n);
-  }
-  EXPECT_EQ(52, h.getNumBuckets());
-  EXPECT_EQ(2500.0, h.getPercentileEstimate(0.5));
-  EXPECT_EQ(4500.0, h.getPercentileEstimate(0.9));
-}
-
-// Test where the bucket width is not an even multiple of the histogram range
-TEST(Histogram, TestDoubleInexactWidth) {
-  Histogram<double> h(100.0, 0.0, 4970.0);
-  for (double n = 50; n < 5000; n += 100) {
-    h.addValue(n);
-  }
-  EXPECT_EQ(52, h.getNumBuckets());
-  EXPECT_EQ(2500.0, h.getPercentileEstimate(0.5));
-  EXPECT_EQ(4500.0, h.getPercentileEstimate(0.9));
-
-  EXPECT_EQ(0, h.getBucketByIndex(51).count);
-  h.addValue(4990);
-  h.addValue(5100);
-  EXPECT_EQ(2, h.getBucketByIndex(51).count);
-  EXPECT_EQ(2600.0, h.getPercentileEstimate(0.5));
-}
-
-// Test where the bucket width is larger than the histogram range
-// (There isn't really much point to defining a histogram this way,
-// but we want to ensure that it still works just in case.)
-TEST(Histogram, TestDoubleWidthTooBig) {
-  Histogram<double> h(100.0, 0.0, 7.0);
-  EXPECT_EQ(3, h.getNumBuckets());
-
-  for (double n = 0; n < 7; n += 1) {
-    h.addValue(n);
-  }
-  EXPECT_EQ(0, h.getBucketByIndex(0).count);
-  EXPECT_EQ(7, h.getBucketByIndex(1).count);
-  EXPECT_EQ(0, h.getBucketByIndex(2).count);
-  EXPECT_EQ(3.0, h.getPercentileEstimate(0.5));
-
-  h.addValue(-1.0);
-  EXPECT_EQ(1, h.getBucketByIndex(0).count);
-  h.addValue(7.5);
-  EXPECT_EQ(1, h.getBucketByIndex(2).count);
-  EXPECT_EQ(3.0, h.getPercentileEstimate(0.5));
-}
-
-// Test that we get counts right
-TEST(Histogram, Counts) {
-  Histogram<int32_t> h(1, 0, 10);
-  EXPECT_EQ(12, h.getNumBuckets());
-  EXPECT_EQ(0, h.computeTotalCount());
-
-  // Add one to each bucket, make sure the counts match
-  for (int32_t i = 0; i < 10; i++) {
-    h.addValue(i);
-    EXPECT_EQ(i+1, h.computeTotalCount());
-  }
-
-  // Add a lot to one bucket, make sure the counts still make sense
-  for (int32_t i = 0; i < 100; i++) {
-    h.addValue(0);
-  }
-  EXPECT_EQ(110, h.computeTotalCount());
-}
diff --git a/folly/test/Makefile.am b/folly/test/Makefile.am
index 1399315f2aa7fd2a8a86e431b50665414de9b104..7494920df1f82850e04b19cd4d49eea5c30b409f 100644 (file)
@@ -176,10 +176,6 @@ conv_benchmark_SOURCES = ConvBenchmark.cpp
 conv_benchmark_LDADD = libfollytestmain.la $(top_builddir)/libfollybenchmark.la
 check_PROGRAMS += conv_benchmark
 
-histogram_test_SOURCES = HistogramTest.cpp
-histogram_test_LDADD = libfollytestmain.la
-TESTS += histogram_test
-
 group_varint_test_SOURCES = GroupVarintTest.cpp
 group_varint_test_LDADD = libfollytestmain.la
 TESTS += group_varint_test
diff --git a/folly/test/TimeseriesBenchmark.cpp b/folly/test/TimeseriesBenchmark.cpp
deleted file mode 100644 (file)
index 835eda9..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Copyright 2017 Facebook, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#include <folly/stats/BucketedTimeSeries.h>
-#include <folly/stats/BucketedTimeSeries-defs.h>
-
-#include <glog/logging.h>
-
-#include <folly/Benchmark.h>
-
-using std::chrono::seconds;
-using folly::BenchmarkSuspender;
-using folly::BucketedTimeSeries;
-
-void addValue(unsigned int iters,
-              seconds duration, size_t numBuckets,
-              size_t callsPerSecond) {
-  BenchmarkSuspender suspend;
-  BucketedTimeSeries<int64_t> ts(numBuckets, duration);
-  suspend.dismiss();
-
-  seconds currentTime(1342000000);
-  size_t timeCounter = 0;
-  for (unsigned int n = 0; n < iters; ++n, ++timeCounter) {
-    if (timeCounter >= callsPerSecond) {
-      timeCounter = 0;
-      ++currentTime;
-    }
-    ts.addValue(currentTime, n);
-  }
-}
-
-BENCHMARK_NAMED_PARAM(addValue, AllTime_1perSec, seconds(0), 60, 1);
-BENCHMARK_NAMED_PARAM(addValue, 3600x60_1perSec, seconds(3600), 60, 1);
-BENCHMARK_NAMED_PARAM(addValue, 600x60_1perSec, seconds(600), 60, 1);
-BENCHMARK_NAMED_PARAM(addValue, 60x60_1perSec, seconds(60), 60, 1);
-BENCHMARK_NAMED_PARAM(addValue, 100x10_1perSec, seconds(100), 10, 1);
-BENCHMARK_NAMED_PARAM(addValue, 71x5_1perSec, seconds(71), 5, 1);
-BENCHMARK_NAMED_PARAM(addValue, 1x1_1perSec, seconds(1), 1, 1);
-
-BENCHMARK_DRAW_LINE()
-
-BENCHMARK_NAMED_PARAM(addValue, AllTime_10perSec, seconds(0), 60, 10);
-BENCHMARK_NAMED_PARAM(addValue, 3600x60_10perSec, seconds(3600), 60, 10);
-BENCHMARK_NAMED_PARAM(addValue, 600x60_10perSec, seconds(600), 60, 10);
-BENCHMARK_NAMED_PARAM(addValue, 60x60_10perSec, seconds(60), 60, 10);
-BENCHMARK_NAMED_PARAM(addValue, 100x10_10perSec, seconds(100), 10, 10);
-BENCHMARK_NAMED_PARAM(addValue, 71x5_10perSec, seconds(71), 5, 10);
-BENCHMARK_NAMED_PARAM(addValue, 1x1_10perSec, seconds(1), 1, 10);
-
-BENCHMARK_DRAW_LINE()
-
-BENCHMARK_NAMED_PARAM(addValue, AllTime_100perSec, seconds(0), 60, 100);
-BENCHMARK_NAMED_PARAM(addValue, 3600x60_100perSec, seconds(3600), 60, 100);
-BENCHMARK_NAMED_PARAM(addValue, 600x60_100perSec, seconds(600), 60, 100);
-BENCHMARK_NAMED_PARAM(addValue, 60x60_100perSec, seconds(60), 60, 100);
-BENCHMARK_NAMED_PARAM(addValue, 100x10_100perSec, seconds(100), 10, 100);
-BENCHMARK_NAMED_PARAM(addValue, 71x5_100perSec, seconds(71), 5, 100);
-BENCHMARK_NAMED_PARAM(addValue, 1x1_100perSec, seconds(1), 1, 100);
-
-int main(int argc, char *argv[]) {
-  gflags::ParseCommandLineFlags(&argc, &argv, true);
-  folly::runBenchmarks();
-  return 0;
-}
diff --git a/folly/test/TimeseriesHistogramTest.cpp b/folly/test/TimeseriesHistogramTest.cpp
deleted file mode 100644 (file)
index 4ee4a69..0000000
+++ /dev/null
@@ -1,540 +0,0 @@
-/*
- * Copyright 2017 Facebook, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <folly/stats/TimeseriesHistogram.h>
-#include <folly/stats/TimeseriesHistogram-defs.h>
-
-#include <random>
-
-#include <folly/portability/GTest.h>
-
-using namespace std;
-using namespace folly;
-using std::chrono::seconds;
-
-namespace {
-namespace IntMTMHTS {
-  enum Levels {
-    MINUTE,
-    TEN_MINUTE,
-    HOUR,
-    ALLTIME,
-    NUM_LEVELS,
-  };
-
-  const seconds kDurations[] = {
-    seconds(60), seconds(600), seconds(3600), seconds(0)
-  };
-};
-
-namespace IntMHTS {
-  enum Levels {
-    MINUTE,
-    HOUR,
-    ALLTIME,
-    NUM_LEVELS,
-  };
-
-  const seconds kDurations[] = {
-    seconds(60), seconds(3600), seconds(0)
-  };
-};
-
-typedef std::mt19937 RandomInt32;
-
-using StatsClock = folly::LegacyStatsClock<std::chrono::seconds>;
-StatsClock::time_point mkTimePoint(int value) {
-  return StatsClock::time_point(StatsClock::duration(value));
-}
-}
-
-TEST(TimeseriesHistogram, Percentile) {
-  RandomInt32 random(5);
-  // [10, 109], 12 buckets including above and below
-  {
-    TimeseriesHistogram<int> h(10, 10, 110,
-                               MultiLevelTimeSeries<int>(
-                                 60, IntMTMHTS::NUM_LEVELS,
-                                 IntMTMHTS::kDurations));
-
-    EXPECT_EQ(0, h.getPercentileEstimate(0, IntMTMHTS::ALLTIME));
-
-    EXPECT_EQ(12, h.getNumBuckets());
-    EXPECT_EQ(10, h.getBucketSize());
-    EXPECT_EQ(10, h.getMin());
-    EXPECT_EQ(110, h.getMax());
-
-    for (size_t i = 0; i < h.getNumBuckets(); ++i) {
-      EXPECT_EQ(4, h.getBucket(i).numLevels());
-    }
-
-    int maxVal = 120;
-    h.addValue(mkTimePoint(0), 0);
-    h.addValue(mkTimePoint(0), maxVal);
-    for (int i = 0; i < 98; i++) {
-      h.addValue(mkTimePoint(0), random() % maxVal);
-    }
-
-    h.update(mkTimePoint(1500000000));
-    // bucket 0 stores everything below min, so its minimum
-    // is the lowest possible number
-    EXPECT_EQ(std::numeric_limits<int>::min(),
-              h.getPercentileBucketMin(1, IntMTMHTS::ALLTIME));
-    EXPECT_EQ(110, h.getPercentileBucketMin(99, IntMTMHTS::ALLTIME));
-
-    EXPECT_EQ(-2, h.getPercentileEstimate(0, IntMTMHTS::ALLTIME));
-    EXPECT_EQ(-1, h.getPercentileEstimate(1, IntMTMHTS::ALLTIME));
-    EXPECT_EQ(119, h.getPercentileEstimate(99, IntMTMHTS::ALLTIME));
-    EXPECT_EQ(120, h.getPercentileEstimate(100, IntMTMHTS::ALLTIME));
-  }
-}
-
-TEST(TimeseriesHistogram, String) {
-  RandomInt32 random(5);
-  // [10, 109], 12 buckets including above and below
-  {
-    TimeseriesHistogram<int> hist(10, 10, 110,
-                                  MultiLevelTimeSeries<int>(
-                                    60, IntMTMHTS::NUM_LEVELS,
-                                    IntMTMHTS::kDurations));
-
-    int maxVal = 120;
-    hist.addValue(mkTimePoint(0), 0);
-    hist.addValue(mkTimePoint(0), maxVal);
-    for (int i = 0; i < 98; i++) {
-      hist.addValue(mkTimePoint(0), random() % maxVal);
-    }
-
-    hist.update(mkTimePoint(0));
-
-    const char* const kStringValues1[IntMTMHTS::NUM_LEVELS] =  {
-      "-2147483648:12:4,10:8:13,20:8:24,30:6:34,40:13:46,50:8:54,60:7:64,"
-        "70:7:74,80:8:84,90:10:94,100:3:103,110:10:115",
-      "-2147483648:12:4,10:8:13,20:8:24,30:6:34,40:13:46,50:8:54,60:7:64,"
-        "70:7:74,80:8:84,90:10:94,100:3:103,110:10:115",
-      "-2147483648:12:4,10:8:13,20:8:24,30:6:34,40:13:46,50:8:54,60:7:64,"
-        "70:7:74,80:8:84,90:10:94,100:3:103,110:10:115",
-      "-2147483648:12:4,10:8:13,20:8:24,30:6:34,40:13:46,50:8:54,60:7:64,"
-        "70:7:74,80:8:84,90:10:94,100:3:103,110:10:115",
-    };
-
-    CHECK_EQ(IntMTMHTS::NUM_LEVELS, hist.getNumLevels());
-
-    for (size_t level = 0; level < hist.getNumLevels(); ++level) {
-      EXPECT_EQ(kStringValues1[level], hist.getString(level));
-    }
-
-    const char* const kStringValues2[IntMTMHTS::NUM_LEVELS] =  {
-      "-2147483648:12:4,10:8:13,20:8:24,30:6:34,40:13:46,50:8:54,60:7:64,"
-        "70:7:74,80:8:84,90:10:94,100:3:103,110:10:115",
-      "-2147483648:12:4,10:8:13,20:8:24,30:6:34,40:13:46,50:8:54,60:7:64,"
-        "70:7:74,80:8:84,90:10:94,100:3:103,110:10:115",
-      "-2147483648:12:4,10:8:13,20:8:24,30:6:34,40:13:46,50:8:54,60:7:64,"
-        "70:7:74,80:8:84,90:10:94,100:3:103,110:10:115",
-      "-2147483648:12:4,10:8:13,20:8:24,30:6:34,40:13:46,50:8:54,60:7:64,"
-        "70:7:74,80:8:84,90:10:94,100:3:103,110:10:115",
-    };
-
-    CHECK_EQ(IntMTMHTS::NUM_LEVELS, hist.getNumLevels());
-
-    for (size_t level = 0; level < hist.getNumLevels(); ++level) {
-      EXPECT_EQ(kStringValues2[level], hist.getString(level));
-    }
-  }
-}
-
-TEST(TimeseriesHistogram, Clear) {
-  {
-    TimeseriesHistogram<int> hist(10, 0, 100,
-                                  MultiLevelTimeSeries<int>(
-                                    60, IntMTMHTS::NUM_LEVELS,
-                                    IntMTMHTS::kDurations));
-
-    for (int now = 0; now < 3600; now++) {
-      for (int i = 0; i < 100; i++) {
-        hist.addValue(mkTimePoint(now), i, 2); // adds each item 2 times
-      }
-    }
-
-    // check clearing
-    hist.clear();
-
-    for (size_t b = 0; b < hist.getNumBuckets(); ++b) {
-      EXPECT_EQ(0, hist.getBucket(b).count(IntMTMHTS::MINUTE));
-      EXPECT_EQ(0, hist.getBucket(b).count(IntMTMHTS::TEN_MINUTE));
-      EXPECT_EQ(0, hist.getBucket(b).count(IntMTMHTS::HOUR));
-      EXPECT_EQ(0, hist.getBucket(b).count(IntMTMHTS::ALLTIME));
-    }
-
-    for (int pct = 0; pct <= 100; pct++) {
-      EXPECT_EQ(0, hist.getPercentileBucketMin(pct, IntMTMHTS::MINUTE));
-      EXPECT_EQ(0, hist.getPercentileBucketMin(pct, IntMTMHTS::TEN_MINUTE));
-      EXPECT_EQ(0, hist.getPercentileBucketMin(pct, IntMTMHTS::HOUR));
-      EXPECT_EQ(0, hist.getPercentileBucketMin(pct, IntMTMHTS::ALLTIME));
-
-      EXPECT_EQ(0, hist.getPercentileEstimate(pct, IntMTMHTS::MINUTE));
-      EXPECT_EQ(0, hist.getPercentileEstimate(pct, IntMTMHTS::TEN_MINUTE));
-      EXPECT_EQ(0, hist.getPercentileEstimate(pct, IntMTMHTS::HOUR));
-      EXPECT_EQ(0, hist.getPercentileEstimate(pct, IntMTMHTS::ALLTIME));
-    }
-  }
-}
-
-
-TEST(TimeseriesHistogram, Basic) {
-  {
-    TimeseriesHistogram<int> hist(10, 0, 100,
-                                  MultiLevelTimeSeries<int>(
-                                    60, IntMTMHTS::NUM_LEVELS,
-                                    IntMTMHTS::kDurations));
-
-    for (int now = 0; now < 3600; now++) {
-      for (int i = 0; i < 100; i++) {
-        hist.addValue(mkTimePoint(now), i);
-      }
-    }
-
-    hist.update(mkTimePoint(3599));
-    for (int pct = 1; pct <= 100; pct++) {
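-      // Values 0..99 are uniform, so each 10-wide bucket holds exactly 10%
-      // of the data: percentile p falls in the bucket starting at
-      // ((p - 1) / 10) * 10.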
-      int expected = (pct - 1) / 10 * 10;
-      EXPECT_EQ(expected, hist.getPercentileBucketMin(pct, IntMTMHTS::MINUTE));
-      EXPECT_EQ(expected, hist.getPercentileBucketMin(pct,
-                                                      IntMTMHTS::TEN_MINUTE));
-      EXPECT_EQ(expected, hist.getPercentileBucketMin(pct, IntMTMHTS::HOUR));
-      EXPECT_EQ(expected, hist.getPercentileBucketMin(pct, IntMTMHTS::ALLTIME));
-    }
-
-    for (size_t b = 1; (b + 1) < hist.getNumBuckets(); ++b) {
-      EXPECT_EQ(600, hist.getBucket(b).count(IntMTMHTS::MINUTE));
-      EXPECT_EQ(6000, hist.getBucket(b).count(IntMTMHTS::TEN_MINUTE));
-      EXPECT_EQ(36000, hist.getBucket(b).count(IntMTMHTS::HOUR));
-      EXPECT_EQ(36000, hist.getBucket(b).count(IntMTMHTS::ALLTIME));
-    }
-    EXPECT_EQ(0, hist.getBucket(0).count(IntMTMHTS::MINUTE));
-    EXPECT_EQ(0, hist.getBucket(hist.getNumBuckets() - 1).count(
-                IntMTMHTS::MINUTE));
-
-    EXPECT_EQ(6000, hist.count(IntMTMHTS::MINUTE));
-    EXPECT_EQ(60000, hist.count(IntMTMHTS::TEN_MINUTE));
-    EXPECT_EQ(360000, hist.count(IntMTMHTS::HOUR));
-    EXPECT_EQ(360000, hist.count(IntMTMHTS::ALLTIME));
-
-    // Each second we added 4950 total over 100 data points
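-    // (0 + 1 + ... + 99 = 4950; 60s -> 297000, 600s -> 2970000,
-    // 3600s -> 17820000)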
-    EXPECT_EQ(297000, hist.sum(IntMTMHTS::MINUTE));
-    EXPECT_EQ(2970000, hist.sum(IntMTMHTS::TEN_MINUTE));
-    EXPECT_EQ(17820000, hist.sum(IntMTMHTS::HOUR));
-    EXPECT_EQ(17820000, hist.sum(IntMTMHTS::ALLTIME));
-
-    EXPECT_EQ(49, hist.avg<int>(IntMTMHTS::MINUTE));
-    EXPECT_EQ(49, hist.avg<int>(IntMTMHTS::TEN_MINUTE));
-    EXPECT_EQ(49, hist.avg<int>(IntMTMHTS::HOUR));
-    EXPECT_EQ(49, hist.avg<int>(IntMTMHTS::ALLTIME));
-    EXPECT_EQ(49.5, hist.avg<double>(IntMTMHTS::MINUTE));
-    EXPECT_EQ(49.5, hist.avg<double>(IntMTMHTS::TEN_MINUTE));
-    EXPECT_EQ(49.5, hist.avg<double>(IntMTMHTS::HOUR));
-    EXPECT_EQ(49.5, hist.avg<double>(IntMTMHTS::ALLTIME));
-
-    EXPECT_EQ(4950, hist.rate<int>(IntMTMHTS::MINUTE));
-    EXPECT_EQ(4950, hist.rate<int>(IntMTMHTS::TEN_MINUTE));
-    EXPECT_EQ(4950, hist.rate<int>(IntMTMHTS::HOUR));
-    EXPECT_EQ(4950, hist.rate<int>(IntMTMHTS::ALLTIME));
-    EXPECT_EQ(4950, hist.rate<double>(IntMTMHTS::MINUTE));
-    EXPECT_EQ(4950, hist.rate<double>(IntMTMHTS::TEN_MINUTE));
-    EXPECT_EQ(4950, hist.rate<double>(IntMTMHTS::HOUR));
-    EXPECT_EQ(4950, hist.rate<double>(IntMTMHTS::ALLTIME));
-
-    EXPECT_EQ(1000, hist.count(mkTimePoint(10), mkTimePoint(20)));
-    EXPECT_EQ(49500, hist.sum(mkTimePoint(10), mkTimePoint(20)));
-    EXPECT_EQ(4950, hist.rate(mkTimePoint(10), mkTimePoint(20)));
-    EXPECT_EQ(49.5, hist.avg<double>(mkTimePoint(10), mkTimePoint(20)));
-
-    EXPECT_EQ(200, hist.count(mkTimePoint(3550), mkTimePoint(3552)));
-    EXPECT_EQ(9900, hist.sum(mkTimePoint(3550), mkTimePoint(3552)));
-    EXPECT_EQ(4950, hist.rate(mkTimePoint(3550), mkTimePoint(3552)));
-    EXPECT_EQ(49.5, hist.avg<double>(mkTimePoint(3550), mkTimePoint(3552)));
-
-    EXPECT_EQ(0, hist.count(mkTimePoint(4550), mkTimePoint(4552)));
-    EXPECT_EQ(0, hist.sum(mkTimePoint(4550), mkTimePoint(4552)));
-    EXPECT_EQ(0, hist.rate(mkTimePoint(4550), mkTimePoint(4552)));
-    EXPECT_EQ(0, hist.avg<double>(mkTimePoint(4550), mkTimePoint(4552)));
-  }
-
-  // -----------------
-
-  {
-    TimeseriesHistogram<int> hist(10, 0, 100,
-                                  MultiLevelTimeSeries<int>(
-                                    60, IntMTMHTS::NUM_LEVELS,
-                                    IntMTMHTS::kDurations));
-
-    for (int now = 0; now < 3600; now++) {
-      for (int i = 0; i < 100; i++) {
-        hist.addValue(mkTimePoint(now), i, 2); // adds each item 2 times
-      }
-    }
-
-    hist.update(mkTimePoint(3599));
-    for (int pct = 1; pct <= 100; pct++) {
-      int expected = (pct - 1) / 10 * 10;
-      EXPECT_EQ(expected, hist.getPercentileBucketMin(pct, IntMTMHTS::MINUTE));
-      EXPECT_EQ(expected, hist.getPercentileBucketMin(pct,
-                                                      IntMTMHTS::TEN_MINUTE));
-      EXPECT_EQ(expected, hist.getPercentileBucketMin(pct, IntMTMHTS::HOUR));
-      EXPECT_EQ(expected, hist.getPercentileBucketMin(pct, IntMTMHTS::ALLTIME));
-    }
-
-    for (size_t b = 1; (b + 1) < hist.getNumBuckets(); ++b) {
-      EXPECT_EQ(600 * 2, hist.getBucket(b).count(IntMTMHTS::MINUTE));
-      EXPECT_EQ(6000 * 2, hist.getBucket(b).count(IntMTMHTS::TEN_MINUTE));
-      EXPECT_EQ(36000 * 2, hist.getBucket(b).count(IntMTMHTS::HOUR));
-      EXPECT_EQ(36000 * 2, hist.getBucket(b).count(IntMTMHTS::ALLTIME));
-    }
-    EXPECT_EQ(0, hist.getBucket(0).count(IntMTMHTS::MINUTE));
-    EXPECT_EQ(0, hist.getBucket(hist.getNumBuckets() - 1).count(
-                IntMTMHTS::MINUTE));
-  }
-
-  // -----------------
-
-  {
-    TimeseriesHistogram<int> hist(10, 0, 100,
-                                  MultiLevelTimeSeries<int>(
-                                    60, IntMTMHTS::NUM_LEVELS,
-                                    IntMTMHTS::kDurations));
-
-    for (int now = 0; now < 3600; now++) {
-      for (int i = 0; i < 50; i++) {
-        hist.addValue(mkTimePoint(now), i * 2, 2); // adds each item 2 times
-      }
-    }
-
-    hist.update(mkTimePoint(3599));
-    for (int pct = 1; pct <= 100; pct++) {
-      int expected = (pct - 1) / 10 * 10;
-      EXPECT_EQ(expected, hist.getPercentileBucketMin(pct, IntMTMHTS::MINUTE));
-      EXPECT_EQ(expected, hist.getPercentileBucketMin(pct,
-                                                      IntMTMHTS::TEN_MINUTE));
-      EXPECT_EQ(expected, hist.getPercentileBucketMin(pct, IntMTMHTS::HOUR));
-      EXPECT_EQ(expected, hist.getPercentileBucketMin(pct, IntMTMHTS::ALLTIME));
-    }
-
-    EXPECT_EQ(0, hist.getBucket(0).count(IntMTMHTS::MINUTE));
-    EXPECT_EQ(0, hist.getBucket(0).count(IntMTMHTS::TEN_MINUTE));
-    EXPECT_EQ(0, hist.getBucket(0).count(IntMTMHTS::HOUR));
-    EXPECT_EQ(0, hist.getBucket(0).count(IntMTMHTS::ALLTIME));
-    EXPECT_EQ(0, hist.getBucket(hist.getNumBuckets() - 1).count(
-                IntMTMHTS::MINUTE));
-    EXPECT_EQ(0,
-              hist.getBucket(hist.getNumBuckets() - 1).
-                count(IntMTMHTS::TEN_MINUTE));
-    EXPECT_EQ(0, hist.getBucket(hist.getNumBuckets() - 1).count(
-                IntMTMHTS::HOUR));
-    EXPECT_EQ(0,
-              hist.getBucket(hist.getNumBuckets() - 1).count(
-                IntMTMHTS::ALLTIME));
-
-    for (size_t b = 1; (b + 1) < hist.getNumBuckets(); ++b) {
-      EXPECT_EQ(600, hist.getBucket(b).count(IntMTMHTS::MINUTE));
-      EXPECT_EQ(6000, hist.getBucket(b).count(IntMTMHTS::TEN_MINUTE));
-      EXPECT_EQ(36000, hist.getBucket(b).count(IntMTMHTS::HOUR));
-      EXPECT_EQ(36000, hist.getBucket(b).count(IntMTMHTS::ALLTIME));
-    }
-
-    for (int i = 0; i < 100; ++i) {
-      hist.addValue(mkTimePoint(3599), 200 + i);
-    }
-    hist.update(mkTimePoint(3599));
-    EXPECT_EQ(100,
-              hist.getBucket(hist.getNumBuckets() - 1).count(
-                IntMTMHTS::ALLTIME));
-
-  }
-}
-
-TEST(TimeseriesHistogram, QueryByInterval) {
-  TimeseriesHistogram<int> mhts(8, 8, 120,
-                                MultiLevelTimeSeries<int>(
-                                  60, IntMHTS::NUM_LEVELS,
-                                  IntMHTS::kDurations));
-
-  mhts.update(mkTimePoint(0));
-
-  int curTime;
-  for (curTime = 0; curTime < 7200; curTime++) {
-    mhts.addValue(mkTimePoint(curTime), 1);
-  }
-  for (curTime = 7200; curTime < 7200 + 3540; curTime++) {
-    mhts.addValue(mkTimePoint(curTime), 10);
-  }
-  for (curTime = 7200 + 3540; curTime < 7200 + 3600; curTime++) {
-    mhts.addValue(mkTimePoint(curTime), 100);
-  }
-
-  mhts.update(mkTimePoint(7200 + 3600 - 1));
-
-  struct TimeInterval {
-    TimeInterval(int s, int e) : start(mkTimePoint(s)), end(mkTimePoint(e)) {}
-
-    StatsClock::time_point start;
-    StatsClock::time_point end;
-  };
-  TimeInterval intervals[12] = {
-    { curTime - 60, curTime },
-    { curTime - 3600, curTime },
-    { curTime - 7200, curTime },
-    { curTime - 3600, curTime - 60 },
-    { curTime - 7200, curTime - 60 },
-    { curTime - 7200, curTime - 3600 },
-    { curTime - 50, curTime - 20 },
-    { curTime - 3020, curTime - 20 },
-    { curTime - 7200, curTime - 20 },
-    { curTime - 3000, curTime - 1000 },
-    { curTime - 7200, curTime - 1000 },
-    { curTime - 7200, curTime - 3600 },
-  };
-
-  int expectedSums[12] = {
-    6000, 41400, 32400, 35400, 32129, 16200, 3000, 33600, 32308, 20000, 27899,
-    16200
-  };
-
-  int expectedCounts[12] = {
-    60, 3600, 7200, 3540, 7139, 3600, 30, 3000, 7178, 2000, 6199, 3600
-  };
-
-  // The first 7200 values added all fell below the histogram minimum,
-  // and went into the bucket that tracks all of the too-small values.
-  // This bucket reports a minimum value of the smallest possible integer.
-  int belowMinBucket = std::numeric_limits<int>::min();
-
-  int expectedValues[12][3] = {
-    {96, 96, 96},
-    { 8,  8, 96},
-    { belowMinBucket,  belowMinBucket,  8}, // alltime
-    { 8,  8,  8},
-    { belowMinBucket,  belowMinBucket,  8}, // alltime
-    { belowMinBucket,  belowMinBucket,  8}, // alltime
-    {96, 96, 96},
-    { 8,  8, 96},
-    { belowMinBucket,  belowMinBucket,  8}, // alltime
-    { 8,  8,  8},
-    { belowMinBucket,  belowMinBucket,  8}, // alltime
-    { belowMinBucket,  belowMinBucket,  8}  // alltime
-  };
-
-  for (int i = 0; i < 12; i++) {
-    const auto& itv = intervals[i];
-    int s = mhts.sum(itv.start, itv.end);
-    EXPECT_EQ(expectedSums[i], s);
-
-    int c = mhts.count(itv.start, itv.end);
-    EXPECT_EQ(expectedCounts[i], c);
-  }
-
-  // 3 levels
-  for (int i = 1; i <= 100; i++) {
-    EXPECT_EQ(96, mhts.getPercentileBucketMin(i, 0));
-    EXPECT_EQ(
-        96,
-        mhts.getPercentileBucketMin(
-            i, mkTimePoint(curTime - 60), mkTimePoint(curTime)));
-    EXPECT_EQ(
-        8,
-        mhts.getPercentileBucketMin(
-            i, mkTimePoint(curTime - 3540), mkTimePoint(curTime - 60)));
-  }
-
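-  // Level 1 (hour) holds 3540 values of 10 and 60 values of 100, so about
-  // 98.3% fall in the [8, 16) bucket (min 8) and the rest in [96, 104).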
-  EXPECT_EQ(8, mhts.getPercentileBucketMin(1, 1));
-  EXPECT_EQ(8, mhts.getPercentileBucketMin(98, 1));
-  EXPECT_EQ(96, mhts.getPercentileBucketMin(99, 1));
-  EXPECT_EQ(96, mhts.getPercentileBucketMin(100, 1));
-
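-  // At the all-time level, 7200 of the 10800 values (66.7%) fell below the
-  // histogram min, so percentiles up to 66 report the below-min bucket.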
-  EXPECT_EQ(belowMinBucket, mhts.getPercentileBucketMin(1, 2));
-  EXPECT_EQ(belowMinBucket, mhts.getPercentileBucketMin(66, 2));
-  EXPECT_EQ(8, mhts.getPercentileBucketMin(67, 2));
-  EXPECT_EQ(8, mhts.getPercentileBucketMin(99, 2));
-  EXPECT_EQ(96, mhts.getPercentileBucketMin(100, 2));
-
-  // Bucket 0 (below min) reports belowMinBucket as its minimum value.
-  for (int i = 0; i < 12; i++) {
-    const auto& itv = intervals[i];
-    int v = mhts.getPercentileBucketMin(1, itv.start, itv.end);
-    EXPECT_EQ(expectedValues[i][0], v);
-
-    v = mhts.getPercentileBucketMin(50, itv.start, itv.end);
-    EXPECT_EQ(expectedValues[i][1], v);
-
-    v = mhts.getPercentileBucketMin(99, itv.start, itv.end);
-    EXPECT_EQ(expectedValues[i][2], v);
-  }
-
-  for (int i = 0; i < 12; i++) {
-    const auto& itv = intervals[i];
-    // Some of the older intervals that fall in the alltime bucket
-    // are off by 1 or 2 in their estimated counts.
-    size_t tolerance = 0;
-    if (itv.start <= mkTimePoint(curTime - 7200)) {
-      tolerance = 2;
-    } else if (itv.start <= mkTimePoint(curTime - 3000)) {
-      tolerance = 1;
-    }
-    size_t actualCount = (itv.end - itv.start).count();
-    size_t estimatedCount = mhts.count(itv.start, itv.end);
-    EXPECT_GE(actualCount, estimatedCount);
-    EXPECT_LE(actualCount - tolerance, estimatedCount);
-  }
-}
-
-TEST(TimeseriesHistogram, SingleUniqueValue) {
-  int values[] = {-1, 0, 500, 1000, 1500};
-  for (int ii = 0; ii < 5; ++ii) {
-    int value = values[ii];
-    TimeseriesHistogram<int> h(10, 0, 1000,
-                               MultiLevelTimeSeries<int>(
-                                 60, IntMTMHTS::NUM_LEVELS,
-                                 IntMTMHTS::kDurations));
-
-    const int kNumIters = 1000;
-    for (int jj = 0; jj < kNumIters; ++jj) {
-      h.addValue(mkTimePoint(1), value);
-    }
-    h.update(mkTimePoint(1));
-    // since we've only added one unique value, all percentiles should
-    // be that value
-    EXPECT_EQ(h.getPercentileEstimate(10, 0), value);
-    EXPECT_EQ(h.getPercentileEstimate(50, 0), value);
-    EXPECT_EQ(h.getPercentileEstimate(99, 0), value);
-
-    // Things get trickier if there are multiple unique values.
-    const int kNewValue = 750;
-    for (int kk = 0; kk < 2*kNumIters; ++kk) {
-      h.addValue(mkTimePoint(1), kNewValue);
-    }
-    h.update(mkTimePoint(1));
-    EXPECT_NEAR(h.getPercentileEstimate(50, 0), kNewValue+5, 5);
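-    // (kNewValue+5 with a tolerance of 5 just says the estimate lands
-    // somewhere within kNewValue's 10-wide bucket)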
-    if (value >= 0 && value <= 1000) {
-      // only do further testing if value is within our bucket range,
-      // else estimates can be wildly off
-      if (kNewValue > value) {
-        EXPECT_NEAR(h.getPercentileEstimate(10, 0), value+5, 5);
-        EXPECT_NEAR(h.getPercentileEstimate(99, 0), kNewValue+5, 5);
-      } else {
-        EXPECT_NEAR(h.getPercentileEstimate(10, 0), kNewValue+5, 5);
-        EXPECT_NEAR(h.getPercentileEstimate(99, 0), value+5, 5);
-      }
-    }
-  }
-}
diff --git a/folly/test/TimeseriesTest.cpp b/folly/test/TimeseriesTest.cpp
deleted file mode 100644 (file)
index 02e8659..0000000
+++ /dev/null
@@ -1,1280 +0,0 @@
-/*
- * Copyright 2017 Facebook, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *   http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <folly/detail/Stats.h>
-#include <folly/stats/BucketedTimeSeries-defs.h>
-#include <folly/stats/BucketedTimeSeries.h>
-#include <folly/stats/MultiLevelTimeSeries-defs.h>
-#include <folly/stats/MultiLevelTimeSeries.h>
-
-#include <array>
-
-#include <glog/logging.h>
-
-#include <folly/Foreach.h>
-#include <folly/portability/GTest.h>
-
-using std::chrono::seconds;
-using std::string;
-using std::vector;
-using folly::BucketedTimeSeries;
-
-using Bucket = folly::detail::Bucket<int64_t>;
-using StatsClock = folly::LegacyStatsClock<std::chrono::seconds>;
-using TimePoint = StatsClock::time_point;
-using Duration = StatsClock::duration;
-
-/*
- * Helper functions to allow us to directly log time points and duration
- */
-namespace std {
-std::ostream& operator<<(std::ostream& os, std::chrono::seconds s) {
-  os << s.count();
-  return os;
-}
-std::ostream& operator<<(std::ostream& os, TimePoint tp) {
-  os << tp.time_since_epoch().count();
-  return os;
-}
-}
-
-namespace {
-TimePoint mkTimePoint(int value) {
-  return TimePoint(StatsClock::duration(value));
-}
-
-struct TestData {
-  TestData(int d, int b, std::initializer_list<int> starts)
-      : duration(d), numBuckets(b) {
-    bucketStarts.reserve(starts.size());
-    for (int s : starts) {
-      bucketStarts.push_back(mkTimePoint(s));
-    }
-  }
-  seconds duration;
-  size_t numBuckets;
-  vector<TimePoint> bucketStarts;
-};
-vector<TestData> testData = {
-  // 71 seconds x 4 buckets
-  { 71, 4, {0, 18, 36, 54}},
-  // 100 seconds x 10 buckets
-  { 100, 10, {0, 10, 20, 30, 40, 50, 60, 70, 80, 90}},
-  // 10 seconds x 10 buckets
-  { 10, 10, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}},
-  // 10 seconds x 1 bucket
-  { 10, 1, {0}},
-  // 1 second x 1 bucket
-  { 1, 1, {0}},
-};
-}
-
-TEST(BucketedTimeSeries, getBucketInfo) {
-  for (const auto& data : testData) {
-    BucketedTimeSeries<int64_t> ts(data.numBuckets, data.duration);
-
-    for (uint32_t n = 0; n < 10000; n += 1234) {
-      seconds offset(n * data.duration);
-
-      for (uint32_t idx = 0; idx < data.numBuckets; ++idx) {
-        auto bucketStart = data.bucketStarts[idx];
-        TimePoint nextBucketStart;
-        if (idx + 1 < data.numBuckets) {
-          nextBucketStart = data.bucketStarts[idx + 1];
-        } else {
-          nextBucketStart = TimePoint(data.duration);
-        }
-
-        TimePoint expectedStart = offset + bucketStart;
-        TimePoint expectedNextStart = offset + nextBucketStart;
-        TimePoint midpoint =
-            expectedStart + (expectedNextStart - expectedStart) / 2;
-
-        vector<std::pair<string, TimePoint>> timePoints = {
-            {"expectedStart", expectedStart},
-            {"midpoint", midpoint},
-            {"expectedEnd", expectedNextStart - seconds(1)},
-        };
-
-        for (const auto& point : timePoints) {
-          // Check that getBucketIdx() returns the expected index
-          EXPECT_EQ(idx, ts.getBucketIdx(point.second))
-              << data.duration << "x" << data.numBuckets << ": " << point.first
-              << "=" << point.second;
-
-          // Check the data returned by getBucketInfo()
-          size_t returnedIdx;
-          TimePoint returnedStart;
-          TimePoint returnedNextStart;
-          ts.getBucketInfo(expectedStart, &returnedIdx,
-                           &returnedStart, &returnedNextStart);
-          EXPECT_EQ(idx, returnedIdx) << data.duration << "x" << data.numBuckets
-                                      << ": " << point.first << "="
-                                      << point.second;
-          EXPECT_EQ(expectedStart, returnedStart)
-              << data.duration << "x" << data.numBuckets << ": " << point.first
-              << "=" << point.second;
-          EXPECT_EQ(expectedNextStart, returnedNextStart)
-              << data.duration << "x" << data.numBuckets << ": " << point.first
-              << "=" << point.second;
-        }
-      }
-    }
-  }
-}
-
-void testUpdate100x10(size_t offset) {
-  // This test code only works when offset is a multiple of the bucket width
-  CHECK_EQ(0, offset % 10);
-
-  // Create a 100 second timeseries, with 10 buckets
-  BucketedTimeSeries<int64_t> ts(10, seconds(100));
-
-  auto setup = [&] {
-    ts.clear();
-    // Add 1 value to each bucket
-    for (int n = 5; n <= 95; n += 10) {
-      ts.addValue(seconds(n + offset), 6);
-    }
-
-    EXPECT_EQ(10, ts.count());
-    EXPECT_EQ(60, ts.sum());
-    EXPECT_EQ(6, ts.avg());
-  };
-
-  // Update 2 buckets forwards.  This should throw away 2 data points.
-  setup();
-  ts.update(seconds(110 + offset));
-  EXPECT_EQ(8, ts.count());
-  EXPECT_EQ(48, ts.sum());
-  EXPECT_EQ(6, ts.avg());
-
-  // The last time we added was 95.
-  // Try updating to 151.  This should throw away all but the last 4 data points.
-  setup();
-  ts.update(seconds(151 + offset));
-  EXPECT_EQ(4, ts.count());
-  EXPECT_EQ(24, ts.sum());
-  EXPECT_EQ(6, ts.avg());
-
-  // The last time we added was 95.
-  // Try updating to 193: This is nearly one full loop around,
-  // back to the same bucket.  update() needs to clear everything
-  setup();
-  ts.update(seconds(193 + offset));
-  EXPECT_EQ(0, ts.count());
-  EXPECT_EQ(0, ts.sum());
-  EXPECT_EQ(0, ts.avg());
-
-  // The last time we added was 95.
-  // Try updating to 197: This is slightly over one full loop around,
-  // back to the same bucket.  update() needs to clear everything
-  setup();
-  ts.update(seconds(197 + offset));
-  EXPECT_EQ(0, ts.count());
-  EXPECT_EQ(0, ts.sum());
-  EXPECT_EQ(0, ts.avg());
-
-  // The last time we added was 95.
-  // Try updating to 230: This is well over one full loop around,
-  // and everything should be cleared.
-  setup();
-  ts.update(seconds(230 + offset));
-  EXPECT_EQ(0, ts.count());
-  EXPECT_EQ(0, ts.sum());
-  EXPECT_EQ(0, ts.avg());
-}
-
-TEST(BucketedTimeSeries, update100x10) {
-  // Run testUpdate100x10() multiple times, with various offsets.
-  // This makes sure the update code works regardless of which bucket it starts
-  // at in the modulo arithmetic.
-  testUpdate100x10(0);
-  testUpdate100x10(50);
-  testUpdate100x10(370);
-  testUpdate100x10(1937090);
-}
-
-TEST(BucketedTimeSeries, update71x5) {
-  // Create a 71 second timeseries, with 5 buckets
-  // This tests when the number of buckets does not divide evenly into the
-  // duration.
-  BucketedTimeSeries<int64_t> ts(5, seconds(71));
-
-  auto setup = [&] {
-    ts.clear();
-    // Add 1 value to each bucket
-    ts.addValue(seconds(11), 6);
-    ts.addValue(seconds(24), 6);
-    ts.addValue(seconds(42), 6);
-    ts.addValue(seconds(43), 6);
-    ts.addValue(seconds(66), 6);
-
-    EXPECT_EQ(5, ts.count());
-    EXPECT_EQ(30, ts.sum());
-    EXPECT_EQ(6, ts.avg());
-  };
-
-  // Update 2 buckets forwards.  This should throw away 2 data points.
-  setup();
-  ts.update(seconds(99));
-  EXPECT_EQ(3, ts.count());
-  EXPECT_EQ(18, ts.sum());
-  EXPECT_EQ(6, ts.avg());
-
-  // Update 3 buckets forwards.  This should throw away 3 data points.
-  setup();
-  ts.update(seconds(100));
-  EXPECT_EQ(2, ts.count());
-  EXPECT_EQ(12, ts.sum());
-  EXPECT_EQ(6, ts.avg());
-
-  // Update 4 buckets forwards, just under the wrap limit.
-  // This should throw everything but the last bucket away.
-  setup();
-  ts.update(seconds(127));
-  EXPECT_EQ(1, ts.count());
-  EXPECT_EQ(6, ts.sum());
-  EXPECT_EQ(6, ts.avg());
-
-  // Update 5 buckets forwards, exactly at the wrap limit.
-  // This should throw everything away.
-  setup();
-  ts.update(seconds(128));
-  EXPECT_EQ(0, ts.count());
-  EXPECT_EQ(0, ts.sum());
-  EXPECT_EQ(0, ts.avg());
-
-  // Update very far forwards, wrapping multiple times.
-  // This should throw everything away.
-  setup();
-  ts.update(seconds(1234));
-  EXPECT_EQ(0, ts.count());
-  EXPECT_EQ(0, ts.sum());
-  EXPECT_EQ(0, ts.avg());
-}
-
-TEST(BucketedTimeSeries, elapsed) {
-  BucketedTimeSeries<int64_t> ts(60, seconds(600));
-
-  // elapsed() is 0 when no data points have been added
-  EXPECT_EQ(0, ts.elapsed().count());
-
-  // With exactly 1 data point, elapsed() should report 1 second of data
-  seconds start(239218);
-  ts.addValue(start + seconds(0), 200);
-  EXPECT_EQ(1, ts.elapsed().count());
-  // Adding a data point 10 seconds later should result in an elapsed time of
-  // 11 seconds (the time range is [0, 10], inclusive).
-  ts.addValue(start + seconds(10), 200);
-  EXPECT_EQ(11, ts.elapsed().count());
-
-  // elapsed() returns to 0 after clear()
-  ts.clear();
-  EXPECT_EQ(0, ts.elapsed().count());
-
-  // Restart, with the starting point on an easier number to work with
-  ts.addValue(seconds(10), 200);
-  EXPECT_EQ(1, ts.elapsed().count());
-  ts.addValue(seconds(580), 200);
-  EXPECT_EQ(571, ts.elapsed().count());
-  ts.addValue(seconds(590), 200);
-  EXPECT_EQ(581, ts.elapsed().count());
-  ts.addValue(seconds(598), 200);
-  EXPECT_EQ(589, ts.elapsed().count());
-  ts.addValue(seconds(599), 200);
-  EXPECT_EQ(590, ts.elapsed().count());
-  ts.addValue(seconds(600), 200);
-  EXPECT_EQ(591, ts.elapsed().count());
-  ts.addValue(seconds(608), 200);
-  EXPECT_EQ(599, ts.elapsed().count());
-  ts.addValue(seconds(609), 200);
-  EXPECT_EQ(600, ts.elapsed().count());
-  // Once we reach 600 seconds worth of data, when we move on to the next
-  // second a full bucket will get thrown out.  Now we drop back down to 591
-  // seconds worth of data
-  ts.addValue(seconds(610), 200);
-  EXPECT_EQ(591, ts.elapsed().count());
-  ts.addValue(seconds(618), 200);
-  EXPECT_EQ(599, ts.elapsed().count());
-  ts.addValue(seconds(619), 200);
-  EXPECT_EQ(600, ts.elapsed().count());
-  ts.addValue(seconds(620), 200);
-  EXPECT_EQ(591, ts.elapsed().count());
-  ts.addValue(seconds(123419), 200);
-  EXPECT_EQ(600, ts.elapsed().count());
-  ts.addValue(seconds(123420), 200);
-  EXPECT_EQ(591, ts.elapsed().count());
-  ts.addValue(seconds(123425), 200);
-  EXPECT_EQ(596, ts.elapsed().count());
-
-  // Time never moves backwards.
-  // Calling update with an old timestamp will just be ignored.
-  ts.update(seconds(29));
-  EXPECT_EQ(596, ts.elapsed().count());
-}
-
-TEST(BucketedTimeSeries, rate) {
-  BucketedTimeSeries<int64_t> ts(60, seconds(600));
-
-  // Add 3 values every 2 seconds, until the buckets fill up
-  for (size_t n = 0; n < 600; n += 2) {
-    ts.addValue(seconds(n), 200, 3);
-  }
-
-  EXPECT_EQ(900, ts.count());
-  EXPECT_EQ(180000, ts.sum());
-  EXPECT_EQ(200, ts.avg());
-
-  // Really we only entered 599 seconds worth of data: [0, 598] (inclusive)
-  EXPECT_EQ(599, ts.elapsed().count());
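-  // rate = sum / elapsed = 180000 / 599 ~= 300.5;
-  // countRate = count / elapsed = 900 / 599 ~= 1.5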
-  EXPECT_NEAR(300.5, ts.rate(), 0.005);
-  EXPECT_NEAR(1.5, ts.countRate(), 0.005);
-
-  // If we add 1 more second, now we will have 600 seconds worth of data
-  ts.update(seconds(599));
-  EXPECT_EQ(600, ts.elapsed().count());
-  EXPECT_NEAR(300, ts.rate(), 0.005);
-  EXPECT_EQ(300, ts.rate<int>());
-  EXPECT_NEAR(1.5, ts.countRate(), 0.005);
-
-  // However, 1 more second after that and we will have filled up all the
-  // buckets, and have to drop one.
-  ts.update(seconds(600));
-  EXPECT_EQ(591, ts.elapsed().count());
-  EXPECT_NEAR(299.5, ts.rate(), 0.01);
-  EXPECT_EQ(299, ts.rate<int>());
-  EXPECT_NEAR(1.5, ts.countRate(), 0.005);
-}
-
-TEST(BucketedTimeSeries, avgTypeConversion) {
-  // Make sure the computed average values are accurate regardless
-  // of the input type and return type.
-
-  {
-    // Simple sanity tests for small positive integer values
-    BucketedTimeSeries<int64_t> ts(60, seconds(600));
-    ts.addValue(seconds(0), 4, 100);
-    ts.addValue(seconds(0), 10, 200);
-    ts.addValue(seconds(0), 16, 100);
-
-    EXPECT_DOUBLE_EQ(10.0, ts.avg());
-    EXPECT_DOUBLE_EQ(10.0, ts.avg<float>());
-    EXPECT_EQ(10, ts.avg<uint64_t>());
-    EXPECT_EQ(10, ts.avg<int64_t>());
-    EXPECT_EQ(10, ts.avg<int32_t>());
-    EXPECT_EQ(10, ts.avg<int16_t>());
-    EXPECT_EQ(10, ts.avg<int8_t>());
-    EXPECT_EQ(10, ts.avg<uint8_t>());
-  }
-
-  {
-    // Test signed integer types with negative values
-    BucketedTimeSeries<int64_t> ts(60, seconds(600));
-    ts.addValue(seconds(0), -100);
-    ts.addValue(seconds(0), -200);
-    ts.addValue(seconds(0), -300);
-    ts.addValue(seconds(0), -200, 65535);
-
-    EXPECT_DOUBLE_EQ(-200.0, ts.avg());
-    EXPECT_DOUBLE_EQ(-200.0, ts.avg<float>());
-    EXPECT_EQ(-200, ts.avg<int64_t>());
-    EXPECT_EQ(-200, ts.avg<int32_t>());
-    EXPECT_EQ(-200, ts.avg<int16_t>());
-  }
-
-  {
-    // Test uint64_t values that would overflow int64_t
-    BucketedTimeSeries<uint64_t> ts(60, seconds(600));
-    ts.addValueAggregated(seconds(0),
-                          std::numeric_limits<uint64_t>::max(),
-                          std::numeric_limits<uint64_t>::max());
-
-    EXPECT_DOUBLE_EQ(1.0, ts.avg());
-    EXPECT_DOUBLE_EQ(1.0, ts.avg<float>());
-    EXPECT_EQ(1, ts.avg<uint64_t>());
-    EXPECT_EQ(1, ts.avg<int64_t>());
-    EXPECT_EQ(1, ts.avg<int8_t>());
-  }
-
-  {
-    // Test doubles with small-ish values that will fit in integer types
-    BucketedTimeSeries<double> ts(60, seconds(600));
-    ts.addValue(seconds(0), 4.0, 100);
-    ts.addValue(seconds(0), 10.0, 200);
-    ts.addValue(seconds(0), 16.0, 100);
-
-    EXPECT_DOUBLE_EQ(10.0, ts.avg());
-    EXPECT_DOUBLE_EQ(10.0, ts.avg<float>());
-    EXPECT_EQ(10, ts.avg<uint64_t>());
-    EXPECT_EQ(10, ts.avg<int64_t>());
-    EXPECT_EQ(10, ts.avg<int32_t>());
-    EXPECT_EQ(10, ts.avg<int16_t>());
-    EXPECT_EQ(10, ts.avg<int8_t>());
-    EXPECT_EQ(10, ts.avg<uint8_t>());
-  }
-
-  {
-    // Test doubles with huge values
-    BucketedTimeSeries<double> ts(60, seconds(600));
-    ts.addValue(seconds(0), 1e19, 100);
-    ts.addValue(seconds(0), 2e19, 200);
-    ts.addValue(seconds(0), 3e19, 100);
-
-    EXPECT_DOUBLE_EQ(ts.avg(), 2e19);
-    EXPECT_NEAR(ts.avg<float>(), 2e19, 1e11);
-  }
-
-  {
-    // Test doubles where the sum adds up larger than a uint64_t,
-    // but the average fits in an int64_t
-    BucketedTimeSeries<double> ts(60, seconds(600));
-    uint64_t value = 0x3fffffffffffffff;
-    FOR_EACH_RANGE(i, 0, 16) {
-      ts.addValue(seconds(0), value);
-    }
-
-    EXPECT_DOUBLE_EQ(value, ts.avg());
-    EXPECT_DOUBLE_EQ(value, ts.avg<float>());
-    // Some precision is lost here due to the huge sum, so the
-    // integer average returned is off by one.
-    EXPECT_NEAR(value, ts.avg<uint64_t>(), 1);
-    EXPECT_NEAR(value, ts.avg<int64_t>(), 1);
-  }
-
-  {
-    // Test BucketedTimeSeries with a smaller integer type
-    BucketedTimeSeries<int16_t> ts(60, seconds(600));
-    FOR_EACH_RANGE(i, 0, 101) {
-      ts.addValue(seconds(0), i);
-    }
-
-    EXPECT_DOUBLE_EQ(50.0, ts.avg());
-    EXPECT_DOUBLE_EQ(50.0, ts.avg<float>());
-    EXPECT_EQ(50, ts.avg<uint64_t>());
-    EXPECT_EQ(50, ts.avg<int64_t>());
-    EXPECT_EQ(50, ts.avg<int16_t>());
-    EXPECT_EQ(50, ts.avg<int8_t>());
-  }
-
-  {
-    // Test BucketedTimeSeries with long double input
-    BucketedTimeSeries<long double> ts(60, seconds(600));
-    ts.addValueAggregated(seconds(0), 1000.0L, 7);
-
-    long double expected = 1000.0L / 7.0L;
-    EXPECT_DOUBLE_EQ(static_cast<double>(expected), ts.avg());
-    EXPECT_DOUBLE_EQ(static_cast<float>(expected), ts.avg<float>());
-    EXPECT_DOUBLE_EQ(expected, ts.avg<long double>());
-    EXPECT_EQ(static_cast<uint64_t>(expected), ts.avg<uint64_t>());
-    EXPECT_EQ(static_cast<int64_t>(expected), ts.avg<int64_t>());
-  }
-
-  {
-    // Test BucketedTimeSeries with int64_t values,
-    // but using an average that requires a fair amount of precision.
-    BucketedTimeSeries<int64_t> ts(60, seconds(600));
-    ts.addValueAggregated(seconds(0), 1000, 7);
-
-    long double expected = 1000.0L / 7.0L;
-    EXPECT_DOUBLE_EQ(static_cast<double>(expected), ts.avg());
-    EXPECT_DOUBLE_EQ(static_cast<float>(expected), ts.avg<float>());
-    EXPECT_DOUBLE_EQ(expected, ts.avg<long double>());
-    EXPECT_EQ(static_cast<uint64_t>(expected), ts.avg<uint64_t>());
-    EXPECT_EQ(static_cast<int64_t>(expected), ts.avg<int64_t>());
-  }
-}
-
-TEST(BucketedTimeSeries, forEachBucket) {
-  typedef BucketedTimeSeries<int64_t>::Bucket Bucket;
-  struct BucketInfo {
-    BucketInfo(const Bucket* b, TimePoint s, TimePoint ns)
-        : bucket(b), start(s), nextStart(ns) {}
-
-    const Bucket* bucket;
-    TimePoint start;
-    TimePoint nextStart;
-  };
-
-  for (const auto& data : testData) {
-    BucketedTimeSeries<int64_t> ts(data.numBuckets, seconds(data.duration));
-
-    vector<BucketInfo> info;
-    auto fn = [&](
-        const Bucket& bucket,
-        TimePoint bucketStart,
-        TimePoint bucketEnd) -> bool {
-      info.emplace_back(&bucket, bucketStart, bucketEnd);
-      return true;
-    };
-
-    // If we haven't yet added any data, the current bucket will start at 0,
-    // and all previous buckets will have negative start times.
-    ts.forEachBucket(fn);
-
-    CHECK_EQ(data.numBuckets, info.size());
-
-    // Check the data passed in to the function
-    size_t infoIdx = 0;
-    size_t bucketIdx = 1;
-    seconds offset = -data.duration;
-    for (size_t n = 0; n < data.numBuckets; ++n) {
-      if (bucketIdx >= data.numBuckets) {
-        bucketIdx = 0;
-        offset += data.duration;
-      }
-
-      EXPECT_EQ(data.bucketStarts[bucketIdx] + offset, info[infoIdx].start)
-          << data.duration << "x" << data.numBuckets
-          << ": bucketIdx=" << bucketIdx << ", infoIdx=" << infoIdx;
-
-      size_t nextBucketIdx = bucketIdx + 1;
-      seconds nextOffset = offset;
-      if (nextBucketIdx >= data.numBuckets) {
-        nextBucketIdx = 0;
-        nextOffset += data.duration;
-      }
-      EXPECT_EQ(
-          data.bucketStarts[nextBucketIdx] + nextOffset,
-          info[infoIdx].nextStart)
-          << data.duration << "x" << data.numBuckets
-          << ": bucketIdx=" << bucketIdx << ", infoIdx=" << infoIdx;
-
-      EXPECT_EQ(&ts.getBucketByIndex(bucketIdx), info[infoIdx].bucket);
-
-      ++bucketIdx;
-      ++infoIdx;
-    }
-  }
-}
-
-TEST(BucketedTimeSeries, queryByIntervalSimple) {
-  BucketedTimeSeries<int> a(3, seconds(12));
-  for (int i = 0; i < 8; i++) {
-    a.addValue(seconds(i), 1);
-  }
-  // We added 1 at each second from 0..7
-  // Query from the time period 0..2.
-  // This is entirely in the first bucket, which has a sum of 4.
-  // The code knows only half of the bucket is covered, and correctly
-  // estimates the desired sum as 4 * (2 / 4) = 2.
-  EXPECT_EQ(2, a.sum(mkTimePoint(0), mkTimePoint(2)));
-}
-
-TEST(BucketedTimeSeries, queryByInterval) {
-  // Set up a BucketedTimeSeries tracking 6 seconds in 3 buckets
-  const int kNumBuckets = 3;
-  const int kDuration = 6;
-  BucketedTimeSeries<double> b(kNumBuckets, seconds(kDuration));
-
-  for (unsigned int i = 0; i < kDuration; ++i) {
-    // add value 'i' at time 'i'
-    b.addValue(mkTimePoint(i), i);
-  }
-
-  // Current bucket state:
-  // 0: time=[0, 2): values=(0, 1), sum=1, count=2
-  // 1: time=[2, 4): values=(2, 3), sum=5, count=2
-  // 2: time=[4, 6): values=(4, 5), sum=9, count=2
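-  // In the tables below, entry [i][j] is the expected result for the query
-  // interval [currentTime - (i + j), currentTime - i); -1 marks entries the
-  // loop never reaches.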
-  double expectedSums1[kDuration + 1][kDuration + 1] = {
-    {0,  4.5,   9, 11.5,  14, 14.5,  15},
-    {0,  4.5,   7,  9.5,  10, 10.5,  -1},
-    {0,  2.5,   5,  5.5,   6,   -1,  -1},
-    {0,  2.5,   3,  3.5,  -1,   -1,  -1},
-    {0,  0.5,   1,   -1,  -1,   -1,  -1},
-    {0,  0.5,  -1,   -1,  -1,   -1,  -1},
-    {0,   -1,  -1,   -1,  -1,   -1,  -1}
-  };
-  int expectedCounts1[kDuration + 1][kDuration + 1] = {
-    {0,  1,  2,  3,  4,  5,  6},
-    {0,  1,  2,  3,  4,  5, -1},
-    {0,  1,  2,  3,  4, -1, -1},
-    {0,  1,  2,  3, -1, -1, -1},
-    {0,  1,  2, -1, -1, -1, -1},
-    {0,  1, -1, -1, -1, -1, -1},
-    {0, -1, -1, -1, -1, -1, -1}
-  };
-
-  TimePoint currentTime = b.getLatestTime() + seconds(1);
-  for (int i = 0; i <= kDuration + 1; i++) {
-    for (int j = 0; j <= kDuration - i; j++) {
-      TimePoint start = currentTime - seconds(i + j);
-      TimePoint end = currentTime - seconds(i);
-      double expectedSum = expectedSums1[i][j];
-      EXPECT_EQ(expectedSum, b.sum(start, end))
-          << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
-          << ")";
-
-      uint64_t expectedCount = expectedCounts1[i][j];
-      EXPECT_EQ(expectedCount, b.count(start, end))
-          << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
-          << ")";
-
-      double expectedAvg = expectedCount ? expectedSum / expectedCount : 0;
-      EXPECT_EQ(expectedAvg, b.avg(start, end))
-          << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
-          << ")";
-
-      double expectedRate = j ? expectedSum / j : 0;
-      EXPECT_EQ(expectedRate, b.rate(start, end))
-          << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
-          << ")";
-    }
-  }
-
-  // Add 3 more values.
-  // This will overwrite 1 full bucket, and put us halfway through the next.
-  for (unsigned int i = kDuration; i < kDuration + 3; ++i) {
-    b.addValue(mkTimePoint(i), i);
-  }
-  EXPECT_EQ(mkTimePoint(4), b.getEarliestTime());
-
-  // Current bucket state:
-  // 0: time=[6,  8): values=(6, 7), sum=13, count=2
-  // 1: time=[8, 10): values=(8),    sum=8, count=1
-  // 2: time=[4,  6): values=(4, 5), sum=9, count=2
-  double expectedSums2[kDuration + 1][kDuration + 1] = {
-    {0,    8, 14.5,   21, 25.5,  30,  30},
-    {0,  6.5,   13, 17.5,   22,  22,  -1},
-    {0,  6.5,   11, 15.5, 15.5,  -1,  -1},
-    {0,  4.5,    9,    9,   -1,  -1,  -1},
-    {0,  4.5,  4.5,   -1,   -1,  -1,  -1},
-    {0,    0,   -1,   -1,   -1,  -1,  -1},
-    {0,   -1,   -1,   -1,   -1,  -1,  -1}
-  };
-  int expectedCounts2[kDuration + 1][kDuration + 1] = {
-    {0,  1,  2,  3,  4,  5,  5},
-    {0,  1,  2,  3,  4,  4, -1},
-    {0,  1,  2,  3,  3, -1, -1},
-    {0,  1,  2,  2, -1, -1, -1},
-    {0,  1,  1, -1, -1, -1, -1},
-    {0,  0, -1, -1, -1, -1, -1},
-    {0, -1, -1, -1, -1, -1, -1}
-  };
-
-  currentTime = b.getLatestTime() + seconds(1);
-  for (int i = 0; i <= kDuration + 1; i++) {
-    for (int j = 0; j <= kDuration - i; j++) {
-      TimePoint start = currentTime - seconds(i + j);
-      TimePoint end = currentTime - seconds(i);
-      double expectedSum = expectedSums2[i][j];
-      EXPECT_EQ(expectedSum, b.sum(start, end))
-          << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
-          << ")";
-
-      uint64_t expectedCount = expectedCounts2[i][j];
-      EXPECT_EQ(expectedCount, b.count(start, end))
-          << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
-          << ")";
-
-      double expectedAvg = expectedCount ? expectedSum / expectedCount : 0;
-      EXPECT_EQ(expectedAvg, b.avg(start, end))
-          << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
-          << ")";
-
-      TimePoint dataStart = std::max(start, b.getEarliestTime());
-      TimePoint dataEnd = std::max(end, dataStart);
-      seconds expectedInterval = dataEnd - dataStart;
-      EXPECT_EQ(expectedInterval, b.elapsed(start, end))
-          << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
-          << ")";
-
-      double expectedRate = expectedInterval.count() ?
-        expectedSum / expectedInterval.count() : 0;
-      EXPECT_EQ(expectedRate, b.rate(start, end))
-          << "i=" << i << ", j=" << j << ", interval=[" << start << ", " << end
-          << ")";
-    }
-  }
-}
-
-TEST(BucketedTimeSeries, rateByInterval) {
-  const int kNumBuckets = 5;
-  const seconds kDuration(10);
-  BucketedTimeSeries<double> b(kNumBuckets, kDuration);
-
-  // Add data points at a constant rate of 10 per second.
-  // Start adding data points at kDuration, and fill half of the buckets for
-  // now.
-  TimePoint start(kDuration);
-  TimePoint end(kDuration + (kDuration / 2));
-  const double kFixedRate = 10.0;
-  for (TimePoint i = start; i < end; i += seconds(1)) {
-    b.addValue(i, kFixedRate);
-  }
-
-  // Querying the rate should yield kFixedRate.
-  EXPECT_EQ(kFixedRate, b.rate());
-  EXPECT_EQ(kFixedRate, b.rate(start, end));
-  EXPECT_EQ(kFixedRate, b.rate(start, start + kDuration));
-  EXPECT_EQ(kFixedRate, b.rate(end - kDuration, end));
-  EXPECT_EQ(kFixedRate, b.rate(end - seconds(1), end));
-  // We have been adding 1 data point per second, so countRate()
-  // should be 1.
-  EXPECT_EQ(1.0, b.countRate());
-  EXPECT_EQ(1.0, b.countRate(start, end));
-  EXPECT_EQ(1.0, b.countRate(start, start + kDuration));
-  EXPECT_EQ(1.0, b.countRate(end - kDuration, end));
-  EXPECT_EQ(1.0, b.countRate(end - seconds(1), end));
-
-  // We haven't added anything before time kDuration.
-  // Querying data earlier than this should result in a rate of 0.
-  EXPECT_EQ(0.0, b.rate(mkTimePoint(0), mkTimePoint(1)));
-  EXPECT_EQ(0.0, b.countRate(mkTimePoint(0), mkTimePoint(1)));
-
-  // Fill the remainder of the timeseries from kDuration to kDuration*2
-  start = end;
-  end = TimePoint(kDuration * 2);
-  for (TimePoint i = start; i < end; i += seconds(1)) {
-    b.addValue(i, kFixedRate);
-  }
-
-  EXPECT_EQ(kFixedRate, b.rate());
-  EXPECT_EQ(kFixedRate, b.rate(TimePoint(kDuration), TimePoint(kDuration * 2)));
-  EXPECT_EQ(kFixedRate, b.rate(TimePoint(), TimePoint(kDuration * 2)));
-  EXPECT_EQ(kFixedRate, b.rate(TimePoint(), TimePoint(kDuration * 10)));
-  EXPECT_EQ(1.0, b.countRate());
-  EXPECT_EQ(1.0, b.countRate(TimePoint(kDuration), TimePoint(kDuration * 2)));
-  EXPECT_EQ(1.0, b.countRate(TimePoint(), TimePoint(kDuration * 2)));
-  EXPECT_EQ(1.0, b.countRate(TimePoint(), TimePoint(kDuration * 10)));
-}
-
-TEST(BucketedTimeSeries, addHistorical) {
-  const int kNumBuckets = 5;
-  const seconds kDuration(10);
-  BucketedTimeSeries<double> b(kNumBuckets, kDuration);
-
-  // Initially fill with a constant rate of data
-  for (TimePoint i = mkTimePoint(0); i < mkTimePoint(10); i += seconds(1)) {
-    b.addValue(i, 10.0);
-  }
-
-  EXPECT_EQ(10.0, b.rate());
-  EXPECT_EQ(10.0, b.avg());
-  EXPECT_EQ(10, b.count());
-
-  // Add some more data points to the middle bucket
-  b.addValue(mkTimePoint(4), 40.0);
-  b.addValue(mkTimePoint(5), 40.0);
-  EXPECT_EQ(15.0, b.avg());
-  EXPECT_EQ(18.0, b.rate());
-  EXPECT_EQ(12, b.count());
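-  // sum is now 100 + 80 = 180: avg = 180 / 12 = 15, rate = 180 / 10s = 18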
-
-  // Now start adding more current data points, until we are about to roll over
-  // the bucket where we added the extra historical data.
-  for (TimePoint i = mkTimePoint(10); i < mkTimePoint(14); i += seconds(1)) {
-    b.addValue(i, 10.0);
-  }
-  EXPECT_EQ(15.0, b.avg());
-  EXPECT_EQ(18.0, b.rate());
-  EXPECT_EQ(12, b.count());
-
-  // Now roll over the middle bucket
-  b.addValue(mkTimePoint(14), 10.0);
-  b.addValue(mkTimePoint(15), 10.0);
-  EXPECT_EQ(10.0, b.avg());
-  EXPECT_EQ(10.0, b.rate());
-  EXPECT_EQ(10, b.count());
-
-  // Add more historical values past the bucket window.
-  // These should be ignored.
-  EXPECT_FALSE(b.addValue(mkTimePoint(4), 40.0));
-  EXPECT_FALSE(b.addValue(mkTimePoint(5), 40.0));
-  EXPECT_EQ(10.0, b.avg());
-  EXPECT_EQ(10.0, b.rate());
-  EXPECT_EQ(10, b.count());
-}
-
-TEST(BucketedTimeSeries, reConstructEmptyTimeSeries) {
-  auto verify = [](auto timeSeries) {
-    EXPECT_TRUE(timeSeries.empty());
-    EXPECT_EQ(0, timeSeries.sum());
-    EXPECT_EQ(0, timeSeries.count());
-  };
-
-  // Create a 100 second timeseries with 10 buckets
-  BucketedTimeSeries<int64_t> ts(10, seconds(100));
-
-  verify(ts);
-
-  auto firstTime = ts.firstTime();
-  auto latestTime = ts.latestTime();
-  auto duration = ts.duration();
-  auto buckets = ts.buckets();
-
-  // Reconstruct the timeseries
-  BucketedTimeSeries<int64_t> newTs(firstTime, latestTime, duration, buckets);
-
-  verify(newTs);
-}
-
-TEST(BucketedTimeSeries, reConstructWithValidData) {
-  // Create a 100 second timeseries with 10 buckets
-  BucketedTimeSeries<int64_t> ts(10, seconds(100));
-
-  auto setup = [&] {
-    ts.clear();
-    // Add 1 value to each bucket
-    for (int n = 5; n <= 95; n += 10) {
-      ts.addValue(seconds(n), 6);
-    }
-
-    EXPECT_EQ(10, ts.count());
-    EXPECT_EQ(60, ts.sum());
-    EXPECT_EQ(6, ts.avg());
-  };
-
-  setup();
-
-  auto firstTime = ts.firstTime();
-  auto latestTime = ts.latestTime();
-  auto duration = ts.duration();
-  auto buckets = ts.buckets();
-
-  // Reconstruct the timeseries
-  BucketedTimeSeries<int64_t> newTs(firstTime, latestTime, duration, buckets);
-
-  auto compare = [&] {
-    EXPECT_EQ(ts.firstTime(), newTs.firstTime());
-    EXPECT_EQ(ts.latestTime(), newTs.latestTime());
-    EXPECT_EQ(ts.duration(), newTs.duration());
-    EXPECT_EQ(ts.buckets().size(), newTs.buckets().size());
-    EXPECT_EQ(ts.sum(), newTs.sum());
-    EXPECT_EQ(ts.count(), newTs.count());
-
-    for (auto it1 = ts.buckets().begin(), it2 = newTs.buckets().begin();
-         it1 != ts.buckets().end();
-         it1++, it2++) {
-      EXPECT_EQ(it1->sum, it2->sum);
-      EXPECT_EQ(it1->count, it2->count);
-    }
-  };
-
-  compare();
-}
-
-TEST(BucketedTimeSeries, reConstructWithCorruptedData) {
-  // The total should have been 0 as firstTime > latestTime
-  EXPECT_THROW(
-      {
-        std::vector<Bucket> buckets(10);
-        buckets[0].sum = 1;
-        buckets[0].count = 1;
-
-        BucketedTimeSeries<int64_t> ts(
-            mkTimePoint(1), mkTimePoint(0), Duration(10), buckets);
-      },
-      std::invalid_argument);
-
-  // The duration should be no less than latestTime - firstTime
-  EXPECT_THROW(
-      BucketedTimeSeries<int64_t>(
-          mkTimePoint(1),
-          mkTimePoint(100),
-          Duration(10),
-          std::vector<Bucket>(10)),
-      std::invalid_argument);
-}
-
-namespace IntMHTS {
-  enum Levels {
-    MINUTE,
-    HOUR,
-    ALLTIME,
-    NUM_LEVELS,
-  };
-
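-  // A duration of 0 denotes the all-time level.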
-  const seconds kMinuteHourDurations[] = {
-    seconds(60), seconds(3600), seconds(0)
-  };
-} // namespace IntMHTS
-
-TEST(MinuteHourTimeSeries, Basic) {
-  folly::MultiLevelTimeSeries<int> mhts(60, IntMHTS::NUM_LEVELS,
-                                        IntMHTS::kMinuteHourDurations);
-  EXPECT_EQ(mhts.numLevels(), IntMHTS::NUM_LEVELS);
-  EXPECT_EQ(mhts.numLevels(), 3);
-
-  EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 0);
-  EXPECT_EQ(mhts.sum(IntMHTS::HOUR), 0);
-  EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME), 0);
-
-  EXPECT_EQ(mhts.avg(IntMHTS::MINUTE), 0);
-  EXPECT_EQ(mhts.avg(IntMHTS::HOUR), 0);
-  EXPECT_EQ(mhts.avg(IntMHTS::ALLTIME), 0);
-
-  EXPECT_EQ(mhts.rate(IntMHTS::MINUTE), 0);
-  EXPECT_EQ(mhts.rate(IntMHTS::HOUR), 0);
-  EXPECT_EQ(mhts.rate(IntMHTS::ALLTIME), 0);
-
-  EXPECT_EQ(mhts.getLevel(IntMHTS::MINUTE).elapsed().count(), 0);
-  EXPECT_EQ(mhts.getLevel(IntMHTS::HOUR).elapsed().count(), 0);
-  EXPECT_EQ(mhts.getLevel(IntMHTS::ALLTIME).elapsed().count(), 0);
-
-  seconds cur_time(0);
-
-  mhts.addValue(cur_time++, 10);
-  mhts.flush();
-
-  EXPECT_EQ(mhts.getLevel(IntMHTS::MINUTE).elapsed().count(), 1);
-  EXPECT_EQ(mhts.getLevel(IntMHTS::HOUR).elapsed().count(), 1);
-  EXPECT_EQ(mhts.getLevel(IntMHTS::ALLTIME).elapsed().count(), 1);
-
-  for (int i = 0; i < 299; ++i) {
-    mhts.addValue(cur_time++, 10);
-  }
-  mhts.flush();
-
-  EXPECT_EQ(mhts.getLevel(IntMHTS::MINUTE).elapsed().count(), 60);
-  EXPECT_EQ(mhts.getLevel(IntMHTS::HOUR).elapsed().count(), 300);
-  EXPECT_EQ(mhts.getLevel(IntMHTS::ALLTIME).elapsed().count(), 300);
-
-  EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 600);
-  EXPECT_EQ(mhts.sum(IntMHTS::HOUR), 300*10);
-  EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME), 300*10);
-
-  EXPECT_EQ(mhts.avg(IntMHTS::MINUTE), 10);
-  EXPECT_EQ(mhts.avg(IntMHTS::HOUR), 10);
-  EXPECT_EQ(mhts.avg(IntMHTS::ALLTIME), 10);
-
-  EXPECT_EQ(mhts.rate(IntMHTS::MINUTE), 10);
-  EXPECT_EQ(mhts.rate(IntMHTS::HOUR), 10);
-  EXPECT_EQ(mhts.rate(IntMHTS::ALLTIME), 10);
-
-  for (int i = 0; i < 3600*3 - 300; ++i) {
-    mhts.addValue(cur_time++, 10);
-  }
-  mhts.flush();
-
-  EXPECT_EQ(mhts.getLevel(IntMHTS::MINUTE).elapsed().count(), 60);
-  EXPECT_EQ(mhts.getLevel(IntMHTS::HOUR).elapsed().count(), 3600);
-  EXPECT_EQ(mhts.getLevel(IntMHTS::ALLTIME).elapsed().count(), 3600*3);
-
-  EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 600);
-  EXPECT_EQ(mhts.sum(IntMHTS::HOUR), 3600*10);
-  EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME), 3600*3*10);
-
-  EXPECT_EQ(mhts.avg(IntMHTS::MINUTE), 10);
-  EXPECT_EQ(mhts.avg(IntMHTS::HOUR), 10);
-  EXPECT_EQ(mhts.avg(IntMHTS::ALLTIME), 10);
-
-  EXPECT_EQ(mhts.rate(IntMHTS::MINUTE), 10);
-  EXPECT_EQ(mhts.rate(IntMHTS::HOUR), 10);
-  EXPECT_EQ(mhts.rate(IntMHTS::ALLTIME), 10);
-
-  for (int i = 0; i < 3600; ++i) {
-    mhts.addValue(cur_time++, 100);
-  }
-  mhts.flush();
-
-  EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 60*100);
-  EXPECT_EQ(mhts.sum(IntMHTS::HOUR), 3600*100);
-  EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME),
-            3600*3*10 + 3600*100);
-
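-  // ALLTIME: sum = 468000 over 4 * 3600 = 14400 elapsed seconds,
-  // so avg = 468000 / 14400 = 32.5.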
-  EXPECT_EQ(mhts.avg(IntMHTS::MINUTE), 100);
-  EXPECT_EQ(mhts.avg(IntMHTS::HOUR), 100);
-  EXPECT_EQ(mhts.avg(IntMHTS::ALLTIME), 32.5);
-  EXPECT_EQ(mhts.avg<int>(IntMHTS::ALLTIME), 32);
-
-  EXPECT_EQ(mhts.rate(IntMHTS::MINUTE), 100);
-  EXPECT_EQ(mhts.rate(IntMHTS::HOUR), 100);
-  EXPECT_EQ(mhts.rate(IntMHTS::ALLTIME), 32.5);
-  EXPECT_EQ(mhts.rate<int>(IntMHTS::ALLTIME), 32);
-
-  for (int i = 0; i < 1800; ++i) {
-    mhts.addValue(cur_time++, 120);
-  }
-  mhts.flush();
-
-  EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 60*120);
-  EXPECT_EQ(mhts.sum(IntMHTS::HOUR),
-            1800*100 + 1800*120);
-  EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME),
-            3600*3*10 + 3600*100 + 1800*120);
-
-  for (int i = 0; i < 60; ++i) {
-    mhts.addValue(cur_time++, 1000);
-  }
-  mhts.flush();
-
-  EXPECT_EQ(mhts.sum(IntMHTS::MINUTE), 60*1000);
-  EXPECT_EQ(mhts.sum(IntMHTS::HOUR),
-            1740*100 + 1800*120 + 60*1000);
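-  // (the last 60 additions pushed 60s of the 100-value data out of the
-  // hour window, leaving 1740s of it)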
-  EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME),
-            3600*3*10 + 3600*100 + 1800*120 + 60*1000);
-
-  mhts.clear();
-  EXPECT_EQ(mhts.sum(IntMHTS::ALLTIME), 0);
-}
-
-TEST(MinuteHourTimeSeries, QueryByInterval) {
-  folly::MultiLevelTimeSeries<int> mhts(60, IntMHTS::NUM_LEVELS,
-                                        IntMHTS::kMinuteHourDurations);
-
-  TimePoint curTime;
-  for (curTime = mkTimePoint(0); curTime < mkTimePoint(7200);
-       curTime += seconds(1)) {
-    mhts.addValue(curTime, 1);
-  }
-  for (curTime = mkTimePoint(7200); curTime < mkTimePoint(7200 + 3540);
-       curTime += seconds(1)) {
-    mhts.addValue(curTime, 10);
-  }
-  for (curTime = mkTimePoint(7200 + 3540); curTime < mkTimePoint(7200 + 3600);
-       curTime += seconds(1)) {
-    mhts.addValue(curTime, 100);
-  }
-  mhts.flush();
-
-  struct TimeInterval {
-    TimePoint start;
-    TimePoint end;
-  };
-  TimeInterval intervals[12] = {
-    { curTime - seconds(60), curTime },
-    { curTime - seconds(3600), curTime },
-    { curTime - seconds(7200), curTime },
-    { curTime - seconds(3600), curTime - seconds(60) },
-    { curTime - seconds(7200), curTime - seconds(60) },
-    { curTime - seconds(7200), curTime - seconds(3600) },
-    { curTime - seconds(50), curTime - seconds(20) },
-    { curTime - seconds(3020), curTime - seconds(20) },
-    { curTime - seconds(7200), curTime - seconds(20) },
-    { curTime - seconds(3000), curTime - seconds(1000) },
-    { curTime - seconds(7200), curTime - seconds(1000) },
-    { curTime - seconds(7200), curTime - seconds(3600) },
-  };
-
-  int expectedSums[12] = {
-    6000, 41400, 32400, 35400, 32130, 16200, 3000, 33600, 32310, 20000, 27900,
-    16200
-  };
-
-  int expectedCounts[12] = {
-    60, 3600, 7200, 3540, 7140, 3600, 30, 3000, 7180, 2000, 6200, 3600
-  };
-
-  for (int i = 0; i < 12; ++i) {
-    TimeInterval interval = intervals[i];
-
-    int s = mhts.sum(interval.start, interval.end);
-    EXPECT_EQ(expectedSums[i], s);
-
-    int c = mhts.count(interval.start, interval.end);
-    EXPECT_EQ(expectedCounts[i], c);
-
-    int a = mhts.avg<int>(interval.start, interval.end);
-    EXPECT_EQ(expectedCounts[i] ?
-              (expectedSums[i] / expectedCounts[i]) : 0,
-              a);
-
-    int r = mhts.rate<int>(interval.start, interval.end);
-    int expectedRate =
-      expectedSums[i] / (interval.end - interval.start).count();
-    EXPECT_EQ(expectedRate, r);
-  }
-}
-
-TEST(MultiLevelTimeSeries, Basic) {
-  // using constructor with initializer_list parameter
-  folly::MultiLevelTimeSeries<int> mhts(
-      60, {seconds(60), seconds(3600), seconds(0)});
-  EXPECT_EQ(mhts.numLevels(), 3);
-
-  EXPECT_EQ(mhts.sum(seconds(60)), 0);
-  EXPECT_EQ(mhts.sum(seconds(3600)), 0);
-  EXPECT_EQ(mhts.sum(seconds(0)), 0);
-
-  EXPECT_EQ(mhts.avg(seconds(60)), 0);
-  EXPECT_EQ(mhts.avg(seconds(3600)), 0);
-  EXPECT_EQ(mhts.avg(seconds(0)), 0);
-
-  EXPECT_EQ(mhts.rate(seconds(60)), 0);
-  EXPECT_EQ(mhts.rate(seconds(3600)), 0);
-  EXPECT_EQ(mhts.rate(seconds(0)), 0);
-
-  EXPECT_EQ(mhts.getLevelByDuration(seconds(60)).elapsed().count(), 0);
-  EXPECT_EQ(mhts.getLevelByDuration(seconds(3600)).elapsed().count(), 0);
-  EXPECT_EQ(mhts.getLevelByDuration(seconds(0)).elapsed().count(), 0);
-
-  seconds cur_time(0);
-
-  mhts.addValue(cur_time++, 10);
-  mhts.flush();
-
-  EXPECT_EQ(mhts.getLevelByDuration(seconds(60)).elapsed().count(), 1);
-  EXPECT_EQ(mhts.getLevelByDuration(seconds(3600)).elapsed().count(), 1);
-  EXPECT_EQ(mhts.getLevelByDuration(seconds(0)).elapsed().count(), 1);
-
-  for (int i = 0; i < 299; ++i) {
-    mhts.addValue(cur_time++, 10);
-  }
-  mhts.flush();
-
-  EXPECT_EQ(mhts.getLevelByDuration(seconds(60)).elapsed().count(), 60);
-  EXPECT_EQ(mhts.getLevelByDuration(seconds(3600)).elapsed().count(), 300);
-  EXPECT_EQ(mhts.getLevelByDuration(seconds(0)).elapsed().count(), 300);
-
-  EXPECT_EQ(mhts.sum(seconds(60)), 600);
-  EXPECT_EQ(mhts.sum(seconds(3600)), 300 * 10);
-  EXPECT_EQ(mhts.sum(seconds(0)), 300 * 10);
-
-  EXPECT_EQ(mhts.avg(seconds(60)), 10);
-  EXPECT_EQ(mhts.avg(seconds(3600)), 10);
-  EXPECT_EQ(mhts.avg(seconds(0)), 10);
-
-  EXPECT_EQ(mhts.rate(seconds(60)), 10);
-  EXPECT_EQ(mhts.rate(seconds(3600)), 10);
-  EXPECT_EQ(mhts.rate(seconds(0)), 10);
-
-  for (int i = 0; i < 3600 * 3 - 300; ++i) {
-    mhts.addValue(cur_time++, 10);
-  }
-  mhts.flush();
-
-  EXPECT_EQ(mhts.getLevelByDuration(seconds(60)).elapsed().count(), 60);
-  EXPECT_EQ(mhts.getLevelByDuration(seconds(3600)).elapsed().count(), 3600);
-  EXPECT_EQ(mhts.getLevelByDuration(seconds(0)).elapsed().count(), 3600 * 3);
-
-  EXPECT_EQ(mhts.sum(seconds(60)), 600);
-  EXPECT_EQ(mhts.sum(seconds(3600)), 3600 * 10);
-  EXPECT_EQ(mhts.sum(seconds(0)), 3600 * 3 * 10);
-
-  EXPECT_EQ(mhts.avg(seconds(60)), 10);
-  EXPECT_EQ(mhts.avg(seconds(3600)), 10);
-  EXPECT_EQ(mhts.avg(seconds(0)), 10);
-
-  EXPECT_EQ(mhts.rate(seconds(60)), 10);
-  EXPECT_EQ(mhts.rate(seconds(3600)), 10);
-  EXPECT_EQ(mhts.rate(seconds(0)), 10);
-
-  for (int i = 0; i < 3600; ++i) {
-    mhts.addValue(cur_time++, 100);
-  }
-  mhts.flush();
-
-  EXPECT_EQ(mhts.sum(seconds(60)), 60 * 100);
-  EXPECT_EQ(mhts.sum(seconds(3600)), 3600 * 100);
-  EXPECT_EQ(mhts.sum(seconds(0)), 3600 * 3 * 10 + 3600 * 100);
-
-  EXPECT_EQ(mhts.avg(seconds(60)), 100);
-  EXPECT_EQ(mhts.avg(seconds(3600)), 100);
-  EXPECT_EQ(mhts.avg(seconds(0)), 32.5);
-  EXPECT_EQ(mhts.avg<int>(seconds(0)), 32);
-
-  EXPECT_EQ(mhts.rate(seconds(60)), 100);
-  EXPECT_EQ(mhts.rate(seconds(3600)), 100);
-  EXPECT_EQ(mhts.rate(seconds(0)), 32.5);
-  EXPECT_EQ(mhts.rate<int>(seconds(0)), 32);
-
-  for (int i = 0; i < 1800; ++i) {
-    mhts.addValue(cur_time++, 120);
-  }
-  mhts.flush();
-
-  EXPECT_EQ(mhts.sum(seconds(60)), 60 * 120);
-  EXPECT_EQ(mhts.sum(seconds(3600)), 1800 * 100 + 1800 * 120);
-  EXPECT_EQ(mhts.sum(seconds(0)), 3600 * 3 * 10 + 3600 * 100 + 1800 * 120);
-
-  for (int i = 0; i < 60; ++i) {
-    mhts.addValue(cur_time++, 1000);
-  }
-  mhts.flush();
-
-  EXPECT_EQ(mhts.sum(seconds(60)), 60 * 1000);
-  EXPECT_EQ(mhts.sum(seconds(3600)), 1740 * 100 + 1800 * 120 + 60 * 1000);
-  EXPECT_EQ(
-      mhts.sum(seconds(0)),
-      3600 * 3 * 10 + 3600 * 100 + 1800 * 120 + 60 * 1000);
-
-  mhts.clear();
-  EXPECT_EQ(mhts.sum(seconds(0)), 0);
-}
-
-TEST(MultiLevelTimeSeries, QueryByInterval) {
-  folly::MultiLevelTimeSeries<int> mhts(
-      60, {seconds(60), seconds(3600), seconds(0)});
-
-  TimePoint curTime;
-  for (curTime = mkTimePoint(0); curTime < mkTimePoint(7200);
-       curTime += seconds(1)) {
-    mhts.addValue(curTime, 1);
-  }
-  for (curTime = mkTimePoint(7200); curTime < mkTimePoint(7200 + 3540);
-       curTime += seconds(1)) {
-    mhts.addValue(curTime, 10);
-  }
-  for (curTime = mkTimePoint(7200 + 3540); curTime < mkTimePoint(7200 + 3600);
-       curTime += seconds(1)) {
-    mhts.addValue(curTime, 100);
-  }
-  mhts.flush();
-
-  struct TimeInterval {
-    TimePoint start;
-    TimePoint end;
-  };
-
-  std::array<TimeInterval, 12> intervals = {{
-      {curTime - seconds(60), curTime},
-      {curTime - seconds(3600), curTime},
-      {curTime - seconds(7200), curTime},
-      {curTime - seconds(3600), curTime - seconds(60)},
-      {curTime - seconds(7200), curTime - seconds(60)},
-      {curTime - seconds(7200), curTime - seconds(3600)},
-      {curTime - seconds(50), curTime - seconds(20)},
-      {curTime - seconds(3020), curTime - seconds(20)},
-      {curTime - seconds(7200), curTime - seconds(20)},
-      {curTime - seconds(3000), curTime - seconds(1000)},
-      {curTime - seconds(7200), curTime - seconds(1000)},
-      {curTime - seconds(7200), curTime - seconds(3600)},
-  }};
-
-  std::array<int, 12> expectedSums = {{6000,
-                                       41400,
-                                       32400,
-                                       35400,
-                                       32130,
-                                       16200,
-                                       3000,
-                                       33600,
-                                       32310,
-                                       20000,
-                                       27900,
-                                       16200}};
-
-  std::array<int, 12> expectedCounts = {
-      {60, 3600, 7200, 3540, 7140, 3600, 30, 3000, 7180, 2000, 6200, 3600}};
-
-  for (size_t i = 0; i < intervals.size(); ++i) {
-    TimeInterval interval = intervals[i];
-
-    int s = mhts.sum(interval.start, interval.end);
-    EXPECT_EQ(expectedSums[i], s);
-
-    int c = mhts.count(interval.start, interval.end);
-    EXPECT_EQ(expectedCounts[i], c);
-
-    int a = mhts.avg<int>(interval.start, interval.end);
-    EXPECT_EQ(expectedCounts[i] ? (expectedSums[i] / expectedCounts[i]) : 0, a);
-
-    int r = mhts.rate<int>(interval.start, interval.end);
-    int expectedRate =
-        expectedSums[i] / (interval.end - interval.start).count();
-    EXPECT_EQ(expectedRate, r);
-  }
-}