[firefly-linux-kernel-4.4.55.git] block/blk-sysfs.c
index d935bd859c87bc1c9a0e39eb61438583f91690f9..5efc5a647183b688fe0e4312c7ad5bcb3dad403b 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -9,6 +9,7 @@
 #include <linux/blktrace_api.h>
 
 #include "blk.h"
+#include "blk-cgroup.h"
 
 struct queue_sysfs_entry {
        struct attribute attr;
@@ -25,9 +26,15 @@ queue_var_show(unsigned long var, char *page)
 static ssize_t
 queue_var_store(unsigned long *var, const char *page, size_t count)
 {
-       char *p = (char *) page;
+       int err;
+       unsigned long v;
+
+       err = strict_strtoul(page, 10, &v);
+       if (err || v > UINT_MAX)
+               return -EINVAL;
+
+       *var = v;
 
-       *var = simple_strtoul(p, &p, 10);
        return count;
 }
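
The parser change above is the heart of the validation fixes: simple_strtoul() accepted any leading digits and silently ignored trailing garbage, so writing "12abc" or "-1" to a queue attribute went through. strict_strtoul() (by this point in the kernel's history a thin wrapper around kstrtoul()) fails unless the entire string, modulo one trailing newline, is a valid number, and the explicit UINT_MAX check rejects values that would be truncated by the 32-bit consumers of these attributes. A minimal userspace sketch of the same checks, with parse_queue_var() as a hypothetical stand-in for queue_var_store():

    #include <errno.h>
    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static int parse_queue_var(const char *page, unsigned long *var)
    {
            char *end;
            unsigned long v;

            errno = 0;
            v = strtoul(page, &end, 10);
            /* the whole string must parse (one trailing '\n' allowed),
             * no overflow, and the value must fit the 32-bit users */
            if (end == page || (*end != '\0' && strcmp(end, "\n") != 0) ||
                errno == ERANGE || v > UINT_MAX)
                    return -EINVAL;
            *var = v;
            return 0;
    }

    int main(void)
    {
            unsigned long v;

            printf("%d\n", parse_queue_var("128\n", &v)); /* 0, v == 128 */
            printf("%d\n", parse_queue_var("12abc", &v)); /* -EINVAL */
            printf("%d\n", parse_queue_var("-1", &v));    /* -EINVAL on LP64 */
            return 0;
    }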
 
@@ -39,7 +46,7 @@ static ssize_t queue_requests_show(struct request_queue *q, char *page)
 static ssize_t
 queue_requests_store(struct request_queue *q, const char *page, size_t count)
 {
-       struct request_list *rl = &q->rq;
+       struct request_list *rl;
        unsigned long nr;
        int ret;
 
@@ -47,6 +54,9 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
                return -EINVAL;
 
        ret = queue_var_store(&nr, page, count);
+       if (ret < 0)
+               return ret;
+
        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;
 
@@ -54,6 +64,9 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
        q->nr_requests = nr;
        blk_queue_congestion_threshold(q);
 
+       /* congestion isn't cgroup aware and follows root blkcg for now */
+       rl = &q->root_rl;
+
        if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
                blk_set_queue_congested(q, BLK_RW_SYNC);
        else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
@@ -64,19 +77,22 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
        else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
                blk_clear_queue_congested(q, BLK_RW_ASYNC);
 
-       if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
-               blk_set_queue_full(q, BLK_RW_SYNC);
-       } else {
-               blk_clear_queue_full(q, BLK_RW_SYNC);
-               wake_up(&rl->wait[BLK_RW_SYNC]);
+       blk_queue_for_each_rl(rl, q) {
+               if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
+                       blk_set_rl_full(rl, BLK_RW_SYNC);
+               } else {
+                       blk_clear_rl_full(rl, BLK_RW_SYNC);
+                       wake_up(&rl->wait[BLK_RW_SYNC]);
+               }
+
+               if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
+                       blk_set_rl_full(rl, BLK_RW_ASYNC);
+               } else {
+                       blk_clear_rl_full(rl, BLK_RW_ASYNC);
+                       wake_up(&rl->wait[BLK_RW_ASYNC]);
+               }
        }
 
-       if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
-               blk_set_queue_full(q, BLK_RW_ASYNC);
-       } else {
-               blk_clear_queue_full(q, BLK_RW_ASYNC);
-               wake_up(&rl->wait[BLK_RW_ASYNC]);
-       }
        spin_unlock_irq(q->queue_lock);
        return ret;
 }
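
Two related changes land in this hunk. First, queue_var_store() failures now propagate instead of being ignored. Second, with request lists allocated per blkcg, the old queue-wide "full" flags become per-list state: the congestion decision is still driven off q->root_rl alone (as the comment notes, congestion accounting is not cgroup aware), but the full/clear transitions and the waiter wakeups are applied to every request_list on the queue. The iterator comes from block/blk.h and, at the time, read roughly as follows (reconstructed from the contemporary source; treat as a sketch):

    /*
     * Walk every request_list of @q, starting from the root list;
     * __blk_queue_next_rl() advances through the per-blkg lists and
     * returns NULL when it runs out.  Callers hold the queue lock.
     */
    #define blk_queue_for_each_rl(rl, q)	\
            for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
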
@@ -95,6 +111,9 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
        unsigned long ra_kb;
        ssize_t ret = queue_var_store(&ra_kb, page, count);
 
+       if (ret < 0)
+               return ret;
+
        q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
 
        return ret;
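
The conversion itself is untouched: ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10) turns kilobytes into page-cache pages. With 4 KiB pages, PAGE_CACHE_SHIFT is 12, the shift is 2, and writing 128 to read_ahead_kb yields 128 >> 2 = 32 pages of readahead. The only behavioural change here is the early -EINVAL return for unparsable input.
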
@@ -161,6 +180,13 @@ static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *pag
        return queue_var_show(queue_discard_zeroes_data(q), page);
 }
 
+static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
+{
+       return sprintf(page, "%llu\n",
+               (unsigned long long)q->limits.max_write_same_sectors << 9);
+}
+
+
 static ssize_t
 queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 {
@@ -169,6 +195,9 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
                        page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
        ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
 
+       if (ret < 0)
+               return ret;
+
        if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
                return -EINVAL;
 
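The new write_same_max_bytes attribute is read-only and reports the device limit in bytes: max_write_same_sectors counts 512-byte sectors, so the << 9 converts units. The max_sectors_kb store gains the same early return, so garbage input now fails with EINVAL instead of feeding an undefined value into the range check below it. A trivial check of the unit conversion, using an example limit value:

    #include <stdio.h>

    int main(void)
    {
            /* example limit: 0xffff sectors of 512 bytes each */
            unsigned long long max_write_same_sectors = 0xffff;

            printf("%llu\n", max_write_same_sectors << 9); /* 33553920 */
            return 0;
    }
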
@@ -200,6 +229,8 @@ queue_store_##name(struct request_queue *q, const char *page, size_t count) \
        unsigned long val;                                              \
        ssize_t ret;                                                    \
        ret = queue_var_store(&val, page, count);                       \
+       if (ret < 0)                                                    \
+               return ret;                                             \
        if (neg)                                                        \
                val = !val;                                             \
                                                                        \
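
QUEUE_SYSFS_BIT_FNS stamps out the show/store pair for the simple boolean attributes generated this way (rotational, add_random, iostats), so this two-line change fixes all of them at once. Expanded for (nonrot, NONROT, 1), the store side after this patch looks roughly like the following (body reconstructed from the contemporary macro; illustrative, not verbatim):

    static ssize_t
    queue_store_nonrot(struct request_queue *q, const char *page, size_t count)
    {
            unsigned long val;
            ssize_t ret;

            ret = queue_var_store(&val, page, count);
            if (ret < 0)
                    return ret;
            /* neg == 1: the sysfs file is "rotational", the inverse of NONROT */
            val = !val;

            spin_lock_irq(q->queue_lock);
            if (val)
                    queue_flag_set(QUEUE_FLAG_NONROT, q);
            else
                    queue_flag_clear(QUEUE_FLAG_NONROT, q);
            spin_unlock_irq(q->queue_lock);
            return ret;
    }
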
@@ -229,6 +260,9 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
        unsigned long nm;
        ssize_t ret = queue_var_store(&nm, page, count);
 
+       if (ret < 0)
+               return ret;
+
        spin_lock_irq(q->queue_lock);
        queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
        queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
@@ -244,8 +278,9 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
 static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
 {
        bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
+       bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);
 
-       return queue_var_show(set, page);
+       return queue_var_show(set << force, page);
 }
 
 static ssize_t
@@ -256,11 +291,20 @@ queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
        unsigned long val;
 
        ret = queue_var_store(&val, page, count);
+       if (ret < 0)
+               return ret;
+
        spin_lock_irq(q->queue_lock);
-       if (val)
+       if (val == 2) {
                queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
-       else
-               queue_flag_clear(QUEUE_FLAG_SAME_COMP,  q);
+               queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
+       } else if (val == 1) {
+               queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
+               queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
+       } else if (val == 0) {
+               queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
+               queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
+       }
        spin_unlock_irq(q->queue_lock);
 #endif
        return ret;
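
rq_affinity is now a three-valued attribute. The show side packs both flags into one number: set << force prints 0 when completions may run anywhere, 1 when SAME_COMP steers completions to the submitting CPU's cache-sharing group, and 1 << 1 = 2 when SAME_FORCE pins them to the exact submitting CPU (SAME_FORCE is only ever set together with SAME_COMP, so the encoding is unambiguous). The store side accepts the same values back, e.g. "echo 2 > /sys/block/<dev>/queue/rq_affinity". Note two details: values other than 0, 1 and 2 fall through the if/else chain and are silently ignored, and with the added early return, unparsable input no longer parses as 0 and silently clears SAME_COMP; it fails with EINVAL.
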
@@ -350,6 +394,11 @@ static struct queue_sysfs_entry queue_discard_zeroes_data_entry = {
        .show = queue_discard_zeroes_data_show,
 };
 
+static struct queue_sysfs_entry queue_write_same_max_entry = {
+       .attr = {.name = "write_same_max_bytes", .mode = S_IRUGO },
+       .show = queue_write_same_max_show,
+};
+
 static struct queue_sysfs_entry queue_nonrot_entry = {
        .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
        .show = queue_show_nonrot,
@@ -397,6 +446,7 @@ static struct attribute *default_attrs[] = {
        &queue_discard_granularity_entry.attr,
        &queue_discard_max_entry.attr,
        &queue_discard_zeroes_data_entry.attr,
+       &queue_write_same_max_entry.attr,
        &queue_nonrot_entry.attr,
        &queue_nomerges_entry.attr,
        &queue_rq_affinity_entry.attr,
@@ -418,7 +468,7 @@ queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
        if (!entry->show)
                return -EIO;
        mutex_lock(&q->sysfs_lock);
-       if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+       if (blk_queue_dying(q)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
@@ -440,7 +490,7 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 
        q = container_of(kobj, struct request_queue, kobj);
        mutex_lock(&q->sysfs_lock);
-       if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+       if (blk_queue_dying(q)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
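
Both sysfs entry points now ask blk_queue_dying() instead of open-coding a flag test; in the same series the flag itself was renamed from QUEUE_FLAG_DEAD to QUEUE_FLAG_DYING to reflect that the queue is being torn down but not yet freed. The helper lives in include/linux/blkdev.h and is, give or take, a one-liner (sketch of the contemporary definition):

    #define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
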
@@ -449,12 +499,19 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
        return res;
 }
 
+static void blk_free_queue_rcu(struct rcu_head *rcu_head)
+{
+       struct request_queue *q = container_of(rcu_head, struct request_queue,
+                                              rcu_head);
+       kmem_cache_free(blk_requestq_cachep, q);
+}
+
 /**
- * blk_cleanup_queue: - release a &struct request_queue when it is no longer needed
- * @kobj:    the kobj belonging of the request queue to be released
+ * blk_release_queue: - release a &struct request_queue when it is no longer needed
+ * @kobj:    the kobj belonging to the request queue to be released
  *
  * Description:
- *     blk_cleanup_queue is the pair to blk_init_queue() or
+ *     blk_release_queue is the pair to blk_init_queue() or
  *     blk_queue_make_request().  It should be called when a request queue is
  *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task is to free all the &struct request
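
blk_free_queue_rcu() is the deferred half of queue teardown: the final kmem_cache_free() is pushed past an RCU grace period so that lockless readers that reached the queue under rcu_read_lock() observe either a live queue or still-valid (if dying) memory, never a recycled allocation. The callback recovers the queue from the embedded rcu_head with container_of(); a minimal userspace rendition of that pointer arithmetic, with the demo structures invented for illustration:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rcu_head_demo { void (*func)(struct rcu_head_demo *); };

    struct request_queue_demo {
            int id;
            struct rcu_head_demo rcu_head;	/* embedded, as in the kernel */
    };

    int main(void)
    {
            struct request_queue_demo q = { .id = 7 };
            struct rcu_head_demo *head = &q.rcu_head;

            /* step back from the member to the enclosing structure */
            printf("%d\n", container_of(head, struct request_queue_demo,
                                        rcu_head)->id);	/* prints 7 */
            return 0;
    }
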
@@ -468,12 +525,19 @@ static void blk_release_queue(struct kobject *kobj)
 {
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);
-       struct request_list *rl = &q->rq;
 
        blk_sync_queue(q);
 
-       if (rl->rq_pool)
-               mempool_destroy(rl->rq_pool);
+       blkcg_exit_queue(q);
+
+       if (q->elevator) {
+               spin_lock_irq(q->queue_lock);
+               ioc_clear_queue(q);
+               spin_unlock_irq(q->queue_lock);
+               elevator_exit(q->elevator);
+       }
+
+       blk_exit_rl(&q->root_rl);
 
        if (q->queue_tags)
                __blk_queue_free_tags(q);
@@ -481,7 +545,9 @@ static void blk_release_queue(struct kobject *kobj)
        blk_trace_shutdown(q);
 
        bdi_destroy(&q->backing_dev_info);
-       kmem_cache_free(blk_requestq_cachep, q);
+
+       ida_simple_remove(&blk_queue_ida, q->id);
+       call_rcu(&q->rcu_head, blk_free_queue_rcu);
 }
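
blk_release_queue() also absorbs teardown work that previously lived elsewhere, and the ordering matters: blkcg state goes first, then the elevator (after clearing io contexts under the queue lock), then the root request_list's mempool via blk_exit_rl(), then tags, tracing and the backing_dev_info; only after all of that is the queue id returned to blk_queue_ida and the structure handed to call_rcu() for the deferred free shown above.
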
 
 static const struct sysfs_ops queue_sysfs_ops = {
@@ -504,6 +570,12 @@ int blk_register_queue(struct gendisk *disk)
        if (WARN_ON(!q))
                return -ENXIO;
 
+       /*
+        * Initialization must be complete by now.  Finish the initial
+        * bypass from queue allocation.
+        */
+       blk_queue_bypass_end(q);
+
        ret = blk_trace_init_sysfs(dev);
        if (ret)
                return ret;
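
The bypass call pairs with queue allocation: queues now start life in bypass mode so that blkcg and elevator setup can proceed without requests flowing through a half-initialized queue, and registration, the first point at which the disk becomes visible, is where that initial bypass is lifted with blk_queue_bypass_end().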