2 * Licensed to the Apache Software Foundation (ASF) under one
3 * or more contributor license agreements. See the NOTICE file
4 * distributed with this work for additional information
5 * regarding copyright ownership. The ASF licenses this file
6 * to you under the Apache License, Version 2.0 (the
7 * "License"); you may not use this file except in compliance
8 * with the License. You may obtain a copy of the License at
10 * http://www.apache.org/licenses/LICENSE-2.0
12 * Unless required by applicable law or agreed to in writing,
13 * software distributed under the License is distributed on an
14 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 * KIND, either express or implied. See the License for the
16 * specific language governing permissions and limitations
21 #include <folly/io/async/AsyncTimeout.h>
22 #include <folly/io/async/DelayedDestruction.h>
24 #include <boost/intrusive/list.hpp>
34 * Hashed Hierarchical Wheel Timer
37 * folly::AsyncTimeout - a single timeout.
38 * HHWheelTimer - a set of efficient timeouts with different intervals,
39 * but timeouts are not exact.
41 * All of the above are O(1) in insertion, tick update and cancel
43 * This implementation ticks once every 10ms.
44 * We model timers as the number of ticks until the next
45 * due event. We allow 32-bits of space to track this
46 * due interval, and break that into 4 regions of 8 bits.
47 * Each region indexes into a bucket of 256 lists.
49 * Bucket 0 represents those events that are due the soonest.
50 * Each tick causes us to look at the next list in a bucket.
51 * The 0th list in a bucket is special; it means that it is time to
52 * flush the timers from the next higher bucket and schedule them
53 * into a different bucket.
55 * This technique results in a very cheap mechanism for
56 * maintaining time and timers, provided that we can maintain
57 * a consistent rate of ticks.
// NOTE(review): this region is a lossy extraction — each line is prefixed with
// its original file line number, and many original lines (doc-block
// delimiters, closing braces, some method bodies) are missing.  Code below is
// left byte-identical; only comments are added.  Do not treat this excerpt as
// compilable.
59 class HHWheelTimer : protected folly::AsyncTimeout,
60 public folly::DelayedDestruction {
// DelayedDestruction lifetime: heap-allocate and release via destroy(), not
// delete — hence the UniquePtr alias bound to the custom Destructor.
62 typedef std::unique_ptr<HHWheelTimer, Destructor> UniquePtr;
// ---- nested Callback interface (its `class Callback` line is missing from
// this excerpt) -------------------------------------------------------------
65 * A callback to be notified when a timeout has expired.
76 * timeoutExpired() is invoked when the timeout has expired.
78 virtual void timeoutExpired() noexcept = 0;
81 * Cancel the timeout, if it is running.
83 * If the timeout is not scheduled, cancelTimeout() does nothing.
85 void cancelTimeout() {
// wheel_ == nullptr is the "not scheduled" state (see isScheduled() below);
// the else branch (presumably calling cancelTimeoutImpl()) is missing from
// this excerpt — TODO confirm against the full header.
86 if (wheel_ == nullptr) {
87 // We're not scheduled, so there's nothing to do.
94 * Return true if this timeout is currently scheduled, and false otherwise.
96 bool isScheduled() const {
97 return wheel_ != nullptr;
101 // Get the time remaining until this timeout expires
// Clamps to zero for already-expired timeouts; `now` and expiration_ share
// the same (caller-supplied) clock origin.
102 std::chrono::milliseconds getTimeRemaining(
103 std::chrono::milliseconds now) const {
104 if (now >= expiration_) {
105 return std::chrono::milliseconds(0);
107 return expiration_ - now;
// Private hooks used by the owning wheel (friend below) to mark this
// callback scheduled/unscheduled.
110 void setScheduled(HHWheelTimer* wheel,
111 std::chrono::milliseconds);
112 void cancelTimeoutImpl();
// Owning wheel while scheduled; nullptr otherwise.
114 HHWheelTimer* wheel_;
// Absolute expiration time in the wheel's millisecond timeline.
115 std::chrono::milliseconds expiration_;
// auto_unlink hook: destroying/unlinking a Callback removes it from its
// bucket list without needing the list object (requires
// constant_time_size<false> below).
117 typedef boost::intrusive::list_member_hook<
118 boost::intrusive::link_mode<boost::intrusive::auto_unlink> > ListHook;
122 typedef boost::intrusive::list<
124 boost::intrusive::member_hook<Callback, ListHook, &Callback::hook_>,
125 boost::intrusive::constant_time_size<false> > List;
// Captured request context — presumably restored around timeoutExpired();
// verify against the .cpp.
127 std::shared_ptr<RequestContext> context_;
129 // Give HHWheelTimer direct access to our members so it can take care
130 // of scheduling/cancelling.
131 friend class HHWheelTimer;
// ---- HHWheelTimer proper ---------------------------------------------------
135 * Create a new HHWheelTimer with the specified interval.
// Tick interval default, in milliseconds (file comment says 10ms).
137 static int DEFAULT_TICK_INTERVAL;
138 explicit HHWheelTimer(folly::EventBase* eventBase,
139 std::chrono::milliseconds intervalMS =
140 std::chrono::milliseconds(DEFAULT_TICK_INTERVAL));
143 * Destroy the HHWheelTimer.
145 * A HHWheelTimer should only be destroyed when there are no more
146 * callbacks pending in the set.
148 virtual void destroy();
151 * Get the tick interval for this HHWheelTimer.
153 * Returns the tick interval in milliseconds.
// Body (presumably `return interval_;`) is missing from this excerpt.
155 std::chrono::milliseconds getTickInterval() const {
160 * Schedule the specified Callback to be invoked after the
161 * specified timeout interval.
163 * If the callback is already scheduled, this cancels the existing timeout
164 * before scheduling the new timeout.
165 * (Wheel scheduling is O(1) per the file-level comment.)
166 void scheduleTimeout(Callback* callback,
167 std::chrono::milliseconds timeout);
168 void scheduleTimeoutImpl(Callback* callback,
169 std::chrono::milliseconds timeout);
172 * Return the number of currently pending timeouts
174 uint64_t count() const {
179 * This turns on more exact timing. By default the wheel timer
180 * increments its cached time only once everyN (default) ticks.
182 * With catchupEveryN at 1, timeouts will only be delayed until the
183 * next tick, at which point all overdue timeouts are called. The
184 * wheel timer is approximately 2x slower with this set to 1.
186 * Load testing in opt mode showed skew was about 1% with no catchup.
188 void setCatchupEveryN(uint32_t everyN) {
189 catchupEveryN_ = everyN;
// Re-export base-class event-base management as part of the public API.
192 using folly::AsyncTimeout::attachEventBase;
193 using folly::AsyncTimeout::detachEventBase;
194 using folly::AsyncTimeout::getTimeoutManager;
198 * Protected destructor.
200 * Use destroy() instead. See the comments in DelayedDestruction for more
203 virtual ~HHWheelTimer();
206 // Forbidden copy constructor and assignment operator
207 HHWheelTimer(HHWheelTimer const &) = delete;
208 HHWheelTimer& operator=(HHWheelTimer const &) = delete;
210 // Methods inherited from folly::AsyncTimeout
211 virtual void timeoutExpired() noexcept;
// Tick interval in milliseconds (set from the constructor's intervalMS).
213 std::chrono::milliseconds interval_;
// Wheel geometry: 4 levels x 256 slots covers the 32-bit tick range
// (4 * 8 bits), matching the file-level comment.
215 static constexpr int WHEEL_BUCKETS = 4;
216 static constexpr int WHEEL_BITS = 8;
217 static constexpr unsigned int WHEEL_SIZE = (1 << WHEEL_BITS);
218 static constexpr unsigned int WHEEL_MASK = (WHEEL_SIZE - 1);
219 static constexpr uint32_t LARGEST_SLOT = 0xffffffffUL;
221 typedef Callback::List CallbackList;
222 CallbackList buckets_[WHEEL_BUCKETS][WHEEL_SIZE];
// Millisecond duration -> whole ticks (integer division truncates toward
// zero, so sub-tick remainders are dropped).
224 uint32_t timeToWheelTicks(std::chrono::milliseconds t) {
225 return t.count() / interval_.count();
// Moves entries from a higher-level bucket down one level when its slot 0
// comes due (per the file-level comment); return meaning not visible here.
228 bool cascadeTimers(int bucket, int tick);
// Cached current time; only refreshed every catchupEveryN_ ticks (see
// setCatchupEveryN above).
231 std::chrono::milliseconds now_;
233 static constexpr uint32_t DEFAULT_CATCHUP_EVERY_N = 100;
235 uint32_t catchupEveryN_;
236 uint32_t expirationsSinceCatchup_;