/*
 * Copyright 2012 Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "folly/stats/BucketedTimeSeries.h"
#include "folly/stats/BucketedTimeSeries-defs.h"

#include <chrono>
#include <limits>

#include <glog/logging.h>
#include <gtest/gtest.h>

#include "folly/Foreach.h"

using std::chrono::seconds;
using std::string;
using std::vector;

using folly::BucketedTimeSeries;
32 vector<ssize_t> bucketStarts;
34 vector<TestData> testData = {
35 // 71 seconds x 4 buckets
36 { 71, 4, {0, 18, 36, 54}},
37 // 100 seconds x 10 buckets
38 { 100, 10, {0, 10, 20, 30, 40, 50, 60, 70, 80, 90}},
39 // 10 seconds x 10 buckets
40 { 10, 10, {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}},
41 // 10 seconds x 1 buckets
43 // 1 second x 1 buckets
47 TEST(BucketedTimeSeries, getBucketInfo) {
48 for (const auto& data : testData) {
49 BucketedTimeSeries<int64_t> ts(data.numBuckets, seconds(data.duration));
51 for (uint32_t n = 0; n < 10000; n += 1234) {
52 seconds offset(n * data.duration);
54 for (uint32_t idx = 0; idx < data.numBuckets; ++idx) {
55 seconds bucketStart(data.bucketStarts[idx]);
56 seconds nextBucketStart;
57 if (idx + 1 < data.numBuckets) {
58 nextBucketStart = seconds(data.bucketStarts[idx + 1]);
60 nextBucketStart = seconds(data.duration);
63 seconds expectedStart = offset + bucketStart;
64 seconds expectedNextStart = offset + nextBucketStart;
65 seconds midpoint = (expectedStart + expectedNextStart) / 2;
67 vector<std::pair<string, seconds>> timePoints = {
68 {"expectedStart", expectedStart},
69 {"midpoint", midpoint},
70 {"expectedEnd", expectedNextStart - seconds(1)},
73 for (const auto& point : timePoints) {
74 // Check that getBucketIdx() returns the expected index
75 EXPECT_EQ(idx, ts.getBucketIdx(point.second)) <<
76 data.duration << "x" << data.numBuckets << ": " <<
77 point.first << "=" << point.second.count();
79 // Check the data returned by getBucketInfo()
81 seconds returnedStart;
82 seconds returnedNextStart;
83 ts.getBucketInfo(expectedStart, &returnedIdx,
84 &returnedStart, &returnedNextStart);
85 EXPECT_EQ(idx, returnedIdx) <<
86 data.duration << "x" << data.numBuckets << ": " <<
87 point.first << "=" << point.second.count();
88 EXPECT_EQ(expectedStart.count(), returnedStart.count()) <<
89 data.duration << "x" << data.numBuckets << ": " <<
90 point.first << "=" << point.second.count();
91 EXPECT_EQ(expectedNextStart.count(), returnedNextStart.count()) <<
92 data.duration << "x" << data.numBuckets << ": " <<
93 point.first << "=" << point.second.count();
100 void testUpdate100x10(size_t offset) {
101 // This test code only works when offset is a multiple of the bucket width
102 CHECK_EQ(0, offset % 10);
104 // Create a 100 second timeseries, with 10 buckets
105 BucketedTimeSeries<int64_t> ts(10, seconds(100));
109 // Add 1 value to each bucket
110 for (int n = 5; n <= 95; n += 10) {
111 ts.addValue(seconds(n + offset), 6);
114 EXPECT_EQ(10, ts.count());
115 EXPECT_EQ(60, ts.sum());
116 EXPECT_EQ(6, ts.avg());
119 // Update 2 buckets forwards. This should throw away 2 data points.
121 ts.update(seconds(110 + offset));
122 EXPECT_EQ(8, ts.count());
123 EXPECT_EQ(48, ts.sum());
124 EXPECT_EQ(6, ts.avg());
126 // The last time we added was 95.
127 // Try updating to 189. This should clear everything but the last bucket.
129 ts.update(seconds(151 + offset));
130 EXPECT_EQ(4, ts.count());
131 //EXPECT_EQ(6, ts.sum());
132 EXPECT_EQ(6, ts.avg());
134 // The last time we added was 95.
135 // Try updating to 193: This is nearly one full loop around,
136 // back to the same bucket. update() needs to clear everything
138 ts.update(seconds(193 + offset));
139 EXPECT_EQ(0, ts.count());
140 EXPECT_EQ(0, ts.sum());
141 EXPECT_EQ(0, ts.avg());
143 // The last time we added was 95.
144 // Try updating to 197: This is slightly over one full loop around,
145 // back to the same bucket. update() needs to clear everything
147 ts.update(seconds(197 + offset));
148 EXPECT_EQ(0, ts.count());
149 EXPECT_EQ(0, ts.sum());
150 EXPECT_EQ(0, ts.avg());
152 // The last time we added was 95.
153 // Try updating to 230: This is well over one full loop around,
154 // and everything should be cleared.
156 ts.update(seconds(230 + offset));
157 EXPECT_EQ(0, ts.count());
158 EXPECT_EQ(0, ts.sum());
159 EXPECT_EQ(0, ts.avg());
162 TEST(BucketedTimeSeries, update100x10) {
163 // Run testUpdate100x10() multiple times, with various offsets.
164 // This makes sure the update code works regardless of which bucket it starts
165 // at in the modulo arithmetic.
167 testUpdate100x10(50);
168 testUpdate100x10(370);
169 testUpdate100x10(1937090);
172 TEST(BucketedTimeSeries, update71x5) {
173 // Create a 71 second timeseries, with 5 buckets
174 // This tests when the number of buckets does not divide evenly into the
176 BucketedTimeSeries<int64_t> ts(5, seconds(71));
180 // Add 1 value to each bucket
181 ts.addValue(seconds(11), 6);
182 ts.addValue(seconds(24), 6);
183 ts.addValue(seconds(42), 6);
184 ts.addValue(seconds(43), 6);
185 ts.addValue(seconds(66), 6);
187 EXPECT_EQ(5, ts.count());
188 EXPECT_EQ(30, ts.sum());
189 EXPECT_EQ(6, ts.avg());
192 // Update 2 buckets forwards. This should throw away 2 data points.
194 ts.update(seconds(99));
195 EXPECT_EQ(3, ts.count());
196 EXPECT_EQ(18, ts.sum());
197 EXPECT_EQ(6, ts.avg());
199 // Update 3 buckets forwards. This should throw away 3 data points.
201 ts.update(seconds(100));
202 EXPECT_EQ(2, ts.count());
203 EXPECT_EQ(12, ts.sum());
204 EXPECT_EQ(6, ts.avg());
206 // Update 4 buckets forwards, just under the wrap limit.
207 // This should throw everything but the last bucket away.
209 ts.update(seconds(127));
210 EXPECT_EQ(1, ts.count());
211 EXPECT_EQ(6, ts.sum());
212 EXPECT_EQ(6, ts.avg());
214 // Update 5 buckets forwards, exactly at the wrap limit.
215 // This should throw everything away.
217 ts.update(seconds(128));
218 EXPECT_EQ(0, ts.count());
219 EXPECT_EQ(0, ts.sum());
220 EXPECT_EQ(0, ts.avg());
222 // Update very far forwards, wrapping multiple times.
223 // This should throw everything away.
225 ts.update(seconds(1234));
226 EXPECT_EQ(0, ts.count());
227 EXPECT_EQ(0, ts.sum());
228 EXPECT_EQ(0, ts.avg());
231 TEST(BucketedTimeSeries, elapsed) {
232 BucketedTimeSeries<int64_t> ts(60, seconds(600));
234 // elapsed() is 0 when no data points have been added
235 EXPECT_EQ(0, ts.elapsed().count());
237 // With exactly 1 data point, elapsed() should report 1 second of data
238 seconds start(239218);
239 ts.addValue(start + seconds(0), 200);
240 EXPECT_EQ(1, ts.elapsed().count());
241 // Adding a data point 10 seconds later should result in an elapsed time of
242 // 11 seconds (the time range is [0, 10], inclusive).
243 ts.addValue(start + seconds(10), 200);
244 EXPECT_EQ(11, ts.elapsed().count());
246 // elapsed() returns to 0 after clear()
248 EXPECT_EQ(0, ts.elapsed().count());
250 // Restart, with the starting point on an easier number to work with
251 ts.addValue(seconds(10), 200);
252 EXPECT_EQ(1, ts.elapsed().count());
253 ts.addValue(seconds(580), 200);
254 EXPECT_EQ(571, ts.elapsed().count());
255 ts.addValue(seconds(590), 200);
256 EXPECT_EQ(581, ts.elapsed().count());
257 ts.addValue(seconds(598), 200);
258 EXPECT_EQ(589, ts.elapsed().count());
259 ts.addValue(seconds(599), 200);
260 EXPECT_EQ(590, ts.elapsed().count());
261 ts.addValue(seconds(600), 200);
262 EXPECT_EQ(591, ts.elapsed().count());
263 ts.addValue(seconds(608), 200);
264 EXPECT_EQ(599, ts.elapsed().count());
265 ts.addValue(seconds(609), 200);
266 EXPECT_EQ(600, ts.elapsed().count());
267 // Once we reach 600 seconds worth of data, when we move on to the next
268 // second a full bucket will get thrown out. Now we drop back down to 591
269 // seconds worth of data
270 ts.addValue(seconds(610), 200);
271 EXPECT_EQ(591, ts.elapsed().count());
272 ts.addValue(seconds(618), 200);
273 EXPECT_EQ(599, ts.elapsed().count());
274 ts.addValue(seconds(619), 200);
275 EXPECT_EQ(600, ts.elapsed().count());
276 ts.addValue(seconds(620), 200);
277 EXPECT_EQ(591, ts.elapsed().count());
278 ts.addValue(seconds(123419), 200);
279 EXPECT_EQ(600, ts.elapsed().count());
280 ts.addValue(seconds(123420), 200);
281 EXPECT_EQ(591, ts.elapsed().count());
282 ts.addValue(seconds(123425), 200);
283 EXPECT_EQ(596, ts.elapsed().count());
285 // Time never moves backwards.
286 // Calling update with an old timestamp will just be ignored.
287 ts.update(seconds(29));
288 EXPECT_EQ(596, ts.elapsed().count());
291 TEST(BucketedTimeSeries, rate) {
292 BucketedTimeSeries<int64_t> ts(60, seconds(600));
294 // Add 3 values every 2 seconds, until fill up the buckets
295 for (size_t n = 0; n < 600; n += 2) {
296 ts.addValue(seconds(n), 200, 3);
299 EXPECT_EQ(900, ts.count());
300 EXPECT_EQ(180000, ts.sum());
301 EXPECT_EQ(200, ts.avg());
303 // Really we only entered 599 seconds worth of data: [0, 598] (inclusive)
304 EXPECT_EQ(599, ts.elapsed().count());
305 EXPECT_NEAR(300.5, ts.rate(), 0.005);
306 EXPECT_NEAR(1.5, ts.countRate(), 0.005);
308 // If we add 1 more second, now we will have 600 seconds worth of data
309 ts.update(seconds(599));
310 EXPECT_EQ(600, ts.elapsed().count());
311 EXPECT_NEAR(300, ts.rate(), 0.005);
312 EXPECT_EQ(300, ts.rate<int>());
313 EXPECT_NEAR(1.5, ts.countRate(), 0.005);
315 // However, 1 more second after that and we will have filled up all the
316 // buckets, and have to drop one.
317 ts.update(seconds(600));
318 EXPECT_EQ(591, ts.elapsed().count());
319 EXPECT_NEAR(299.5, ts.rate(), 0.01);
320 EXPECT_EQ(299, ts.rate<int>());
321 EXPECT_NEAR(1.5, ts.countRate(), 0.005);
324 TEST(BucketedTimeSeries, avgTypeConversion) {
325 // Make sure the computed average values are accurate regardless
326 // of the input type and return type.
329 // Simple sanity tests for small positive integer values
330 BucketedTimeSeries<int64_t> ts(60, seconds(600));
331 ts.addValue(seconds(0), 4, 100);
332 ts.addValue(seconds(0), 10, 200);
333 ts.addValue(seconds(0), 16, 100);
335 EXPECT_DOUBLE_EQ(10.0, ts.avg());
336 EXPECT_DOUBLE_EQ(10.0, ts.avg<float>());
337 EXPECT_EQ(10, ts.avg<uint64_t>());
338 EXPECT_EQ(10, ts.avg<int64_t>());
339 EXPECT_EQ(10, ts.avg<int32_t>());
340 EXPECT_EQ(10, ts.avg<int16_t>());
341 EXPECT_EQ(10, ts.avg<int8_t>());
342 EXPECT_EQ(10, ts.avg<uint8_t>());
346 // Test signed integer types with negative values
347 BucketedTimeSeries<int64_t> ts(60, seconds(600));
348 ts.addValue(seconds(0), -100);
349 ts.addValue(seconds(0), -200);
350 ts.addValue(seconds(0), -300);
351 ts.addValue(seconds(0), -200, 65535);
353 EXPECT_DOUBLE_EQ(-200.0, ts.avg());
354 EXPECT_DOUBLE_EQ(-200.0, ts.avg<float>());
355 EXPECT_EQ(-200, ts.avg<int64_t>());
356 EXPECT_EQ(-200, ts.avg<int32_t>());
357 EXPECT_EQ(-200, ts.avg<int16_t>());
361 // Test uint64_t values that would overflow int64_t
362 BucketedTimeSeries<uint64_t> ts(60, seconds(600));
363 ts.addValueAggregated(seconds(0),
364 std::numeric_limits<uint64_t>::max(),
365 std::numeric_limits<uint64_t>::max());
367 EXPECT_DOUBLE_EQ(1.0, ts.avg());
368 EXPECT_DOUBLE_EQ(1.0, ts.avg<float>());
369 EXPECT_EQ(1, ts.avg<uint64_t>());
370 EXPECT_EQ(1, ts.avg<int64_t>());
371 EXPECT_EQ(1, ts.avg<int8_t>());
375 // Test doubles with small-ish values that will fit in integer types
376 BucketedTimeSeries<double> ts(60, seconds(600));
377 ts.addValue(seconds(0), 4.0, 100);
378 ts.addValue(seconds(0), 10.0, 200);
379 ts.addValue(seconds(0), 16.0, 100);
381 EXPECT_DOUBLE_EQ(10.0, ts.avg());
382 EXPECT_DOUBLE_EQ(10.0, ts.avg<float>());
383 EXPECT_EQ(10, ts.avg<uint64_t>());
384 EXPECT_EQ(10, ts.avg<int64_t>());
385 EXPECT_EQ(10, ts.avg<int32_t>());
386 EXPECT_EQ(10, ts.avg<int16_t>());
387 EXPECT_EQ(10, ts.avg<int8_t>());
388 EXPECT_EQ(10, ts.avg<uint8_t>());
392 // Test doubles with huge values
393 BucketedTimeSeries<double> ts(60, seconds(600));
394 ts.addValue(seconds(0), 1e19, 100);
395 ts.addValue(seconds(0), 2e19, 200);
396 ts.addValue(seconds(0), 3e19, 100);
398 EXPECT_DOUBLE_EQ(ts.avg(), 2e19);
399 EXPECT_NEAR(ts.avg<float>(), 2e19, 1e11);
403 // Test doubles where the sum adds up larger than a uint64_t,
404 // but the average fits in an int64_t
405 BucketedTimeSeries<double> ts(60, seconds(600));
406 uint64_t value = 0x3fffffffffffffff;
407 FOR_EACH_RANGE(i, 0, 16) {
408 ts.addValue(seconds(0), value);
411 EXPECT_DOUBLE_EQ(value, ts.avg());
412 EXPECT_DOUBLE_EQ(value, ts.avg<float>());
413 // Some precision is lost here due to the huge sum, so the
414 // integer average returned is off by one.
415 EXPECT_NEAR(value, ts.avg<uint64_t>(), 1);
416 EXPECT_NEAR(value, ts.avg<int64_t>(), 1);
420 // Test BucketedTimeSeries with a smaller integer type
421 BucketedTimeSeries<int16_t> ts(60, seconds(600));
422 FOR_EACH_RANGE(i, 0, 101) {
423 ts.addValue(seconds(0), i);
426 EXPECT_DOUBLE_EQ(50.0, ts.avg());
427 EXPECT_DOUBLE_EQ(50.0, ts.avg<float>());
428 EXPECT_EQ(50, ts.avg<uint64_t>());
429 EXPECT_EQ(50, ts.avg<int64_t>());
430 EXPECT_EQ(50, ts.avg<int16_t>());
431 EXPECT_EQ(50, ts.avg<int8_t>());
435 // Test BucketedTimeSeries with long double input
436 BucketedTimeSeries<long double> ts(60, seconds(600));
437 ts.addValueAggregated(seconds(0), 1000.0L, 7);
439 long double expected = 1000.0L / 7.0L;
440 EXPECT_DOUBLE_EQ(static_cast<double>(expected), ts.avg());
441 EXPECT_DOUBLE_EQ(static_cast<float>(expected), ts.avg<float>());
442 EXPECT_DOUBLE_EQ(expected, ts.avg<long double>());
443 EXPECT_EQ(static_cast<uint64_t>(expected), ts.avg<uint64_t>());
444 EXPECT_EQ(static_cast<int64_t>(expected), ts.avg<int64_t>());
448 // Test BucketedTimeSeries with int64_t values,
449 // but using an average that requires a fair amount of precision.
450 BucketedTimeSeries<int64_t> ts(60, seconds(600));
451 ts.addValueAggregated(seconds(0), 1000, 7);
453 long double expected = 1000.0L / 7.0L;
454 EXPECT_DOUBLE_EQ(static_cast<double>(expected), ts.avg());
455 EXPECT_DOUBLE_EQ(static_cast<float>(expected), ts.avg<float>());
456 EXPECT_DOUBLE_EQ(expected, ts.avg<long double>());
457 EXPECT_EQ(static_cast<uint64_t>(expected), ts.avg<uint64_t>());
458 EXPECT_EQ(static_cast<int64_t>(expected), ts.avg<int64_t>());
462 TEST(BucketedTimeSeries, forEachBucket) {
463 typedef BucketedTimeSeries<int64_t>::Bucket Bucket;
465 BucketInfo(const Bucket* b, seconds s, seconds ns)
466 : bucket(b), start(s), nextStart(ns) {}
468 const Bucket* bucket;
473 for (const auto& data : testData) {
474 BucketedTimeSeries<int64_t> ts(data.numBuckets, seconds(data.duration));
476 vector<BucketInfo> info;
477 auto fn = [&](const Bucket& bucket, seconds bucketStart,
479 info.emplace_back(&bucket, bucketStart, bucketEnd);
483 // If we haven't yet added any data, the current bucket will start at 0,
484 // and all data previous buckets will have negative times.
485 ts.forEachBucket(fn);
487 CHECK_EQ(data.numBuckets, info.size());
489 // Check the data passed in to the function
491 size_t bucketIdx = 1;
492 ssize_t offset = -data.duration;
493 for (size_t n = 0; n < data.numBuckets; ++n) {
494 if (bucketIdx >= data.numBuckets) {
496 offset += data.duration;
499 EXPECT_EQ(data.bucketStarts[bucketIdx] + offset,
500 info[infoIdx].start.count()) <<
501 data.duration << "x" << data.numBuckets << ": bucketIdx=" <<
502 bucketIdx << ", infoIdx=" << infoIdx;
504 size_t nextBucketIdx = bucketIdx + 1;
505 ssize_t nextOffset = offset;
506 if (nextBucketIdx >= data.numBuckets) {
508 nextOffset += data.duration;
510 EXPECT_EQ(data.bucketStarts[nextBucketIdx] + nextOffset,
511 info[infoIdx].nextStart.count()) <<
512 data.duration << "x" << data.numBuckets << ": bucketIdx=" <<
513 bucketIdx << ", infoIdx=" << infoIdx;
515 EXPECT_EQ(&ts.getBucketByIndex(bucketIdx), info[infoIdx].bucket);
523 TEST(BucketedTimeSeries, queryByIntervalSimple) {
524 BucketedTimeSeries<int> a(3, seconds(12));
525 for (int i = 0; i < 8; i++) {
526 a.addValue(seconds(i), 1);
528 // We added 1 at each second from 0..7
529 // Query from the time period 0..2.
530 // This is entirely in the first bucket, which has a sum of 4.
531 // The code knows only part of the bucket is covered, and correctly
532 // estimates the desired sum as 3.
533 EXPECT_EQ(2, a.sum(seconds(0), seconds(2)));
536 TEST(BucketedTimeSeries, queryByInterval) {
537 // Set up a BucketedTimeSeries tracking 6 seconds in 3 buckets
538 const int kNumBuckets = 3;
539 const int kDuration = 6;
540 BucketedTimeSeries<double> b(kNumBuckets, seconds(kDuration));
542 for (unsigned int i = 0; i < kDuration; ++i) {
543 // add value 'i' at time 'i'
544 b.addValue(seconds(i), i);
547 // Current bucket state:
548 // 0: time=[0, 2): values=(0, 1), sum=1, count=2
549 // 1: time=[2, 4): values=(2, 3), sum=5, count=1
550 // 2: time=[4, 6): values=(4, 5), sum=9, count=2
551 double expectedSums1[kDuration + 1][kDuration + 1] = {
552 {0, 4.5, 9, 11.5, 14, 14.5, 15},
553 {0, 4.5, 7, 9.5, 10, 10.5, -1},
554 {0, 2.5, 5, 5.5, 6, -1, -1},
555 {0, 2.5, 3, 3.5, -1, -1, -1},
556 {0, 0.5, 1, -1, -1, -1, -1},
557 {0, 0.5, -1, -1, -1, -1, -1},
558 {0, -1, -1, -1, -1, -1, -1}
560 int expectedCounts1[kDuration + 1][kDuration + 1] = {
561 {0, 1, 2, 3, 4, 5, 6},
562 {0, 1, 2, 3, 4, 5, -1},
563 {0, 1, 2, 3, 4, -1, -1},
564 {0, 1, 2, 3, -1, -1, -1},
565 {0, 1, 2, -1, -1, -1, -1},
566 {0, 1, -1, -1, -1, -1, -1},
567 {0, -1, -1, -1, -1, -1, -1}
570 seconds currentTime = b.getLatestTime() + seconds(1);
571 for (int i = 0; i <= kDuration + 1; i++) {
572 for (int j = 0; j <= kDuration - i; j++) {
573 seconds start = currentTime - seconds(i + j);
574 seconds end = currentTime - seconds(i);
575 double expectedSum = expectedSums1[i][j];
576 EXPECT_EQ(expectedSum, b.sum(start, end)) <<
577 "i=" << i << ", j=" << j <<
578 ", interval=[" << start.count() << ", " << end.count() << ")";
580 uint64_t expectedCount = expectedCounts1[i][j];
581 EXPECT_EQ(expectedCount, b.count(start, end)) <<
582 "i=" << i << ", j=" << j <<
583 ", interval=[" << start.count() << ", " << end.count() << ")";
585 double expectedAvg = expectedCount ? expectedSum / expectedCount : 0;
586 EXPECT_EQ(expectedAvg, b.avg(start, end)) <<
587 "i=" << i << ", j=" << j <<
588 ", interval=[" << start.count() << ", " << end.count() << ")";
590 double expectedRate = j ? expectedSum / j : 0;
591 EXPECT_EQ(expectedRate, b.rate(start, end)) <<
592 "i=" << i << ", j=" << j <<
593 ", interval=[" << start.count() << ", " << end.count() << ")";
597 // Add 3 more values.
598 // This will overwrite 1 full bucket, and put us halfway through the next.
599 for (unsigned int i = kDuration; i < kDuration + 3; ++i) {
600 b.addValue(seconds(i), i);
603 // Current bucket state:
604 // 0: time=[6, 8): values=(6, 7), sum=13, count=2
605 // 1: time=[8, 10): values=(8), sum=8, count=1
606 // 2: time=[4, 6): values=(4, 5), sum=9, count=2
607 double expectedSums2[kDuration + 1][kDuration + 1] = {
608 {0, 8, 14.5, 21, 25.5, 30, 30},
609 {0, 6.5, 13, 17.5, 22, 22, -1},
610 {0, 6.5, 11, 15.5, 15.5, -1, -1},
611 {0, 4.5, 9, 9, -1, -1, -1},
612 {0, 4.5, 4.5, -1, -1, -1, -1},
613 {0, 0, -1, -1, -1, -1, -1},
614 {0, -1, -1, -1, -1, -1, -1}
616 int expectedCounts2[kDuration + 1][kDuration + 1] = {
617 {0, 1, 2, 3, 4, 5, 5},
618 {0, 1, 2, 3, 4, 4, -1},
619 {0, 1, 2, 3, 3, -1, -1},
620 {0, 1, 2, 2, -1, -1, -1},
621 {0, 1, 1, -1, -1, -1, -1},
622 {0, 0, -1, -1, -1, -1, -1},
623 {0, -1, -1, -1, -1, -1, -1}
626 currentTime = b.getLatestTime() + seconds(1);
627 for (int i = 0; i <= kDuration + 1; i++) {
628 for (int j = 0; j <= kDuration - i; j++) {
629 seconds start = currentTime - seconds(i + j);
630 seconds end = currentTime - seconds(i);
631 double expectedSum = expectedSums2[i][j];
632 EXPECT_EQ(expectedSum, b.sum(start, end)) <<
633 "i=" << i << ", j=" << j <<
634 ", interval=[" << start.count() << ", " << end.count() << ")";
636 uint64_t expectedCount = expectedCounts2[i][j];
637 EXPECT_EQ(expectedCount, b.count(start, end)) <<
638 "i=" << i << ", j=" << j <<
639 ", interval=[" << start.count() << ", " << end.count() << ")";
641 double expectedAvg = expectedCount ? expectedSum / expectedCount : 0;
642 EXPECT_EQ(expectedAvg, b.avg(start, end)) <<
643 "i=" << i << ", j=" << j <<
644 ", interval=[" << start.count() << ", " << end.count() << ")";
646 double expectedRate = j ? expectedSum / j : 0;
647 EXPECT_EQ(expectedRate, b.rate(start, end)) <<
648 "i=" << i << ", j=" << j <<
649 ", interval=[" << start.count() << ", " << end.count() << ")";