#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX         UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
        BLKG_RWSTAT_READ,
        BLKG_RWSTAT_WRITE,
        BLKG_RWSTAT_SYNC,
        BLKG_RWSTAT_ASYNC,

        BLKG_RWSTAT_NR,
        BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
        struct cgroup_subsys_state      css;
        spinlock_t                      lock;

        struct radix_tree_root          blkg_tree;
        struct blkcg_gq                 *blkg_hint;
        struct hlist_head               blkg_list;

        struct blkcg_policy_data        *cpd[BLKCG_MAX_POLS];

        struct list_head                all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
        struct list_head                cgwb_list;
#endif
};

/*
 * blkg_[rw]stat->aux_cnt is excluded from local stats but included in
 * recursive ones.  It is used to carry the stats of dead children.
 */
struct blkg_stat {
        struct u64_stats_sync           syncp;
        uint64_t                        cnt;
        atomic64_t                      aux_cnt;
};

struct blkg_rwstat {
        struct u64_stats_sync           syncp;
        uint64_t                        cnt[BLKG_RWSTAT_NR];
        atomic64_t                      aux_cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
        /* the blkg and policy id this per-policy data belongs to */
        struct blkcg_gq                 *blkg;
        int                             plid;
};

/*
 * Policies that need to keep per-blkcg data which is independent of any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
        /* the blkcg and policy id this per-policy data belongs to */
        struct blkcg                    *blkcg;
        int                             plid;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
        /* Pointer to the associated request_queue */
        struct request_queue            *q;
        struct list_head                q_node;
        struct hlist_node               blkcg_node;
        struct blkcg                    *blkcg;

        /*
         * Each blkg gets congested separately and the congestion state is
         * propagated to the matching bdi_writeback_congested.
         */
        struct bdi_writeback_congested  *wb_congested;

        /* all non-root blkcg_gq's are guaranteed to have access to parent */
        struct blkcg_gq                 *parent;

        /* request allocation list for this blkcg-q pair */
        struct request_list             rl;

        /* reference count */
        atomic_t                        refcnt;

        /* is this blkg online? protected by both blkcg and q locks */
        bool                            online;

        struct blkg_policy_data         *pd[BLKCG_MAX_POLS];

        struct rcu_head                 rcu_head;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);

struct blkcg_policy {
        int                             plid;
        /* cgroup files for the policy */
        struct cftype                   *cftypes;

        /* operations */
        blkcg_pol_alloc_cpd_fn          *cpd_alloc_fn;
        blkcg_pol_init_cpd_fn           *cpd_init_fn;
        blkcg_pol_free_cpd_fn           *cpd_free_fn;

        blkcg_pol_alloc_pd_fn           *pd_alloc_fn;
        blkcg_pol_init_pd_fn            *pd_init_fn;
        blkcg_pol_online_pd_fn          *pd_online_fn;
        blkcg_pol_offline_pd_fn         *pd_offline_fn;
        blkcg_pol_free_pd_fn            *pd_free_fn;
        blkcg_pol_reset_pd_stats_fn     *pd_reset_stats_fn;
};

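/*
 * Example (editor's sketch, not part of the original header): a minimal
 * policy embeds blkg_policy_data at the start of its own per-blkg struct
 * and wires up the alloc/free methods.  All "foo" names are hypothetical.
 *
 *      struct foo_pd {
 *              struct blkg_policy_data pd;     // must be the first member
 *              u64 nr_ios;
 *      };
 *
 *      static struct blkg_policy_data *foo_pd_alloc(gfp_t gfp, int node)
 *      {
 *              struct foo_pd *fpd = kzalloc_node(sizeof(*fpd), gfp, node);
 *
 *              return fpd ? &fpd->pd : NULL;
 *      }
 *
 *      static void foo_pd_free(struct blkg_policy_data *pd)
 *      {
 *              kfree(container_of(pd, struct foo_pd, pd));
 *      }
 *
 *      static struct blkcg_policy blkcg_policy_foo = {
 *              .pd_alloc_fn    = foo_pd_alloc,
 *              .pd_free_fn     = foo_pd_free,
 *      };
 */
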
extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
                                      struct request_queue *q, bool update_hint);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                                    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
                             const struct blkcg_policy *pol);

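/*
 * Example (sketch, reusing the hypothetical blkcg_policy_foo above): a
 * policy registers once at module init and is then activated per queue:
 *
 *      static int __init foo_init(void)
 *      {
 *              return blkcg_policy_register(&blkcg_policy_foo);
 *      }
 *
 *      // later, while setting up a specific request_queue:
 *      ret = blkcg_activate_policy(q, &blkcg_policy_foo);
 *      if (ret)
 *              return ret;
 */
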
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
                       u64 (*prfill)(struct seq_file *,
                                     struct blkg_policy_data *, int),
                       const struct blkcg_policy *pol, int data,
                       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                         const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                       int off);

u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
                                             int off);

struct blkg_conf_ctx {
        struct gendisk                  *disk;
        struct blkcg_gq                 *blkg;
        u64                             v;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                   const char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);

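/*
 * Example (sketch): a policy's cftype write handler typically brackets its
 * update with blkg_conf_prep()/blkg_conf_finish(); blkcg_policy_foo is the
 * hypothetical policy from above:
 *
 *      struct blkg_conf_ctx ctx;
 *      int ret;
 *
 *      ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *      if (ret)
 *              return ret;
 *      // ctx.blkg is pinned and ctx.v holds the parsed value; apply it
 *      blkg_conf_finish(&ctx);
 */
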
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
        return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
        return css_to_blkcg(task_css(tsk, blkio_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
        if (bio && bio->bi_css)
                return css_to_blkcg(bio->bi_css);
        return task_blkcg(current);
}

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
        return task_get_css(task, blkio_cgrp_id);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
        return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and the lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
                                             struct request_queue *q,
                                             bool update_hint)
{
        struct blkcg_gq *blkg;

        if (blkcg == &blkcg_root)
                return q->root_blkg;

        blkg = rcu_dereference(blkcg->blkg_hint);
        if (blkg && blkg->q == q)
                return blkg;

        return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
                                           struct request_queue *q)
{
        WARN_ON_ONCE(!rcu_read_lock_held());

        if (unlikely(blk_queue_bypass(q)))
                return NULL;
        return __blkg_lookup(blkcg, q, false);
}

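/*
 * Example (sketch): lookups must happen inside an RCU read-side section:
 *
 *      struct blkcg_gq *blkg;
 *
 *      rcu_read_lock();
 *      blkg = blkg_lookup(bio_blkcg(bio), q);
 *      if (blkg)
 *              ;       // use blkg; blkg_get() it to use it past unlock
 *      rcu_read_unlock();
 */
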
/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return a pointer to the private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
                                                  struct blkcg_policy *pol)
{
        return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
                                                     struct blkcg_policy *pol)
{
        return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
        return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
        return cpd ? cpd->blkcg : NULL;
}

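/*
 * Example (sketch, using the hypothetical struct foo_pd above): because
 * blkg_policy_data sits at the beginning of the policy's own structure,
 * converting back is a container_of():
 *
 *      struct blkg_policy_data *pd = blkg_to_pd(blkg, &blkcg_policy_foo);
 *      struct foo_pd *fpd = pd ? container_of(pd, struct foo_pd, pd) : NULL;
 */
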
/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
        char *p;

        p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
        if (!p) {
                strncpy(buf, "<unavailable>", buflen);
                return -ENAMETOOLONG;
        }

        /* cgroup_path() fills in from the end; move the path to the front */
        memmove(buf, p, buf + buflen - p);
        return 0;
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
        WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
        atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
        WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
        if (atomic_dec_and_test(&blkg->refcnt))
                call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

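/*
 * Example (sketch): pinning a blkg beyond the RCU section that found it,
 * mirroring what blk_get_rl() below does:
 *
 *      rcu_read_lock();
 *      blkg = blkg_lookup(blkcg, q);
 *      if (blkg)
 *              blkg_get(blkg);
 *      rcu_read_unlock();
 *      // ... use blkg ...
 *      if (blkg)
 *              blkg_put(blkg);
 */
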
/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip a subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)          \
        css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)  \
                if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),   \
                                              (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)         \
        css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \
                if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),   \
                                              (p_blkg)->q, false)))

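/*
 * Example (sketch): summing a per-blkg value over a subtree under RCU;
 * foo_read_counter() is a hypothetical per-policy helper:
 *
 *      struct cgroup_subsys_state *pos_css;
 *      struct blkcg_gq *d_blkg;
 *      u64 sum = 0;
 *
 *      rcu_read_lock();
 *      blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)
 *              sum += foo_read_counter(d_blkg);
 *      rcu_read_unlock();
 */
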
/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return a non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
                                              struct bio *bio)
{
        struct blkcg *blkcg;
        struct blkcg_gq *blkg;

        rcu_read_lock();
        blkcg = bio_blkcg(bio);

        /* bypass blkg lookup and use @q->root_rl directly for root */
        if (blkcg == &blkcg_root)
                goto root_rl;

        /*
         * Try to use blkg->rl.  blkg lookup may fail under memory pressure
         * or if either the blkcg or queue is going away.  Fall back to
         * root_rl in such cases.
         */
        blkg = blkg_lookup(blkcg, q);
        if (unlikely(!blkg))
                goto root_rl;

        blkg_get(blkg);
        rcu_read_unlock();
        return &blkg->rl;

root_rl:
        rcu_read_unlock();
        return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
        if (rl->blkg->blkcg != &blkcg_root)
                blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
        rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
        return rq->rl;
}

struct request_list *__blk_queue_next_rl(struct request_list *rl,
                                         struct request_queue *q);

/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)    \
        for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

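/*
 * Example (sketch, modeled on queue draining): waking every request_list's
 * waiters while holding queue_lock:
 *
 *      struct request_list *rl;
 *      int i;
 *
 *      blk_queue_for_each_rl(rl, q)
 *              for (i = 0; i < ARRAY_SIZE(rl->wait); i++)
 *                      wake_up_all(&rl->wait[i]);
 */
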
static inline void blkg_stat_init(struct blkg_stat *stat)
{
        u64_stats_init(&stat->syncp);
        atomic64_set(&stat->aux_cnt, 0);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller is responsible for synchronizing calls to
 * this function.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
        u64_stats_update_begin(&stat->syncp);
        stat->cnt += val;
        u64_stats_update_end(&stat->syncp);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 *
 * Read the current value of @stat.  The returned value doesn't include the
 * aux count.  This function can be called without synchronization and takes
 * care of u64 atomicity.
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
        unsigned int start;
        uint64_t v;

        do {
                start = u64_stats_fetch_begin_irq(&stat->syncp);
                v = stat->cnt;
        } while (u64_stats_fetch_retry_irq(&stat->syncp, start));

        return v;
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
        stat->cnt = 0;
        atomic64_set(&stat->aux_cnt, 0);
}

/**
 * blkg_stat_add_aux - add a blkg_stat into another's aux count
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_stat_add_aux(struct blkg_stat *to,
                                     struct blkg_stat *from)
{
        atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
                     &to->aux_cnt);
}

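/*
 * Example (sketch): the usual blkg_stat lifecycle inside a policy:
 *
 *      struct blkg_stat st;
 *
 *      blkg_stat_init(&st);
 *      blkg_stat_add(&st, 1);          // caller provides synchronization
 *      pr_info("count=%llu\n", (unsigned long long)blkg_stat_read(&st));
 *      blkg_stat_reset(&st);
 */
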
static inline void blkg_rwstat_init(struct blkg_rwstat *rwstat)
{
        int i;

        u64_stats_init(&rwstat->syncp);

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                atomic64_set(&rwstat->aux_cnt[i], 0);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @rw: mask of REQ_{WRITE|SYNC}
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @rw.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
                                   int rw, uint64_t val)
{
        u64_stats_update_begin(&rwstat->syncp);

        if (rw & REQ_WRITE)
                rwstat->cnt[BLKG_RWSTAT_WRITE] += val;
        else
                rwstat->cnt[BLKG_RWSTAT_READ] += val;
        if (rw & REQ_SYNC)
                rwstat->cnt[BLKG_RWSTAT_SYNC] += val;
        else
                rwstat->cnt[BLKG_RWSTAT_ASYNC] += val;

        u64_stats_update_end(&rwstat->syncp);
}

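/*
 * Example (sketch): accounting a bio's size by direction; the foo_pd()
 * accessor and its "bytes" rwstat are hypothetical policy-private state:
 *
 *      blkg_rwstat_add(&foo_pd(blkg)->bytes, bio->bi_rw,
 *                      bio->bi_iter.bi_size);
 */
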
/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it as the return value.
 * This function can be called without synchronization and takes care of
 * u64 atomicity.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
        unsigned int start;
        struct blkg_rwstat tmp;

        do {
                start = u64_stats_fetch_begin_irq(&rwstat->syncp);
                tmp = *rwstat;
        } while (u64_stats_fetch_retry_irq(&rwstat->syncp, start));

        return tmp;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
        struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

        return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
        int i;

        memset(rwstat->cnt, 0, sizeof(rwstat->cnt));

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                atomic64_set(&rwstat->aux_cnt[i], 0);
}

/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
                                       struct blkg_rwstat *from)
{
        struct blkg_rwstat v = blkg_rwstat_read(from);
        int i;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                atomic64_add(v.cnt[i] + atomic64_read(&from->aux_cnt[i]),
                             &to->aux_cnt[i]);
}

#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
                           struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
                                  struct bio *bio) { return false; }
#endif

static inline bool blkcg_bio_issue_check(struct request_queue *q,
                                         struct bio *bio)
{
        struct blkcg *blkcg;
        struct blkcg_gq *blkg;
        bool throtl = false;

        rcu_read_lock();
        blkcg = bio_blkcg(bio);

        blkg = blkg_lookup(blkcg, q);
        if (unlikely(!blkg)) {
                spin_lock_irq(q->queue_lock);
                blkg = blkg_lookup_create(blkcg, q);
                if (IS_ERR(blkg))
                        blkg = NULL;
                spin_unlock_irq(q->queue_lock);
        }

        throtl = blk_throtl_bio(q, blkg, bio);

        rcu_read_unlock();
        return !throtl;
}

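/*
 * Example (sketch): a bio submission path gates the bio on this check; a
 * %false return means the throttler has taken ownership of the bio:
 *
 *      if (!blkcg_bio_issue_check(q, bio))
 *              return;
 */
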
#else   /* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css  ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
        return NULL;
}

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
                                        const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
                                           const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
                                                  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
                                              struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

static inline bool blkcg_bio_issue_check(struct request_queue *q,
                                         struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q) \
        for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif  /* CONFIG_BLOCK */
#endif  /* CONFIG_BLK_CGROUP */
#endif  /* _BLK_CGROUP_H */