Merge tag 'v4.4.3'
[firefly-linux-kernel-4.4.55.git] / net / core / sysctl_net_core.c
index 0a327b66a344faf18991c972187a61a28ca1b6ea..95b6139d710c46825d1e43f825188d81fcb70f60 100644 (file)
 #include <net/ip.h>
 #include <net/sock.h>
 #include <net/net_ratelimit.h>
+#include <net/busy_poll.h>
+#include <net/pkt_sched.h>
 
 static int zero = 0;
 static int one = 1;
-static int ushort_max = USHRT_MAX;
 static int min_sndbuf = SOCK_MIN_SNDBUF;
 static int min_rcvbuf = SOCK_MIN_RCVBUF;
 
+static int net_msg_warn;       /* Unused, but still a sysctl */
+
 #ifdef CONFIG_RPS
-static int rps_sock_flow_sysctl(ctl_table *table, int write,
+static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
                                void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        unsigned int orig_size, size;
        int ret, i;
-       ctl_table tmp = {
+       struct ctl_table tmp = {
                .data = &size,
                .maxlen = sizeof(size),
                .mode = table->mode
@@ -50,7 +53,7 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
 
        if (write) {
                if (size) {
-                       if (size > 1<<30) {
+                       if (size > 1<<29) {
                                /* Enforce limit to prevent overflow */
                                mutex_unlock(&sock_flow_mutex);
                                return -EINVAL;
@@ -63,7 +66,7 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
                                        mutex_unlock(&sock_flow_mutex);
                                        return -ENOMEM;
                                }
-
+                               rps_cpu_mask = roundup_pow_of_two(nr_cpu_ids) - 1;
                                sock_table->mask = size - 1;
                        } else
                                sock_table = orig_sock_table;
@@ -91,6 +94,142 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
 }
 #endif /* CONFIG_RPS */
 
+#ifdef CONFIG_NET_FLOW_LIMIT
+static DEFINE_MUTEX(flow_limit_update_mutex);
+
+/*
+ * flow_limit_cpu_sysctl - handler for the flow_limit_cpu_bitmap sysctl.
+ *
+ * Write: parse a cpumask from userspace, then walk all possible CPUs,
+ * attaching a freshly kzalloc'd sd->flow_limit table to CPUs newly in the
+ * mask and detaching/freeing the table of CPUs removed from it, all under
+ * flow_limit_update_mutex.
+ *
+ * Read: report which CPUs currently have a flow_limit table, printed as a
+ * cpumask bitmap string.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int flow_limit_cpu_sysctl(struct ctl_table *table, int write,
+                                void __user *buffer, size_t *lenp,
+                                loff_t *ppos)
+{
+       struct sd_flow_limit *cur;
+       struct softnet_data *sd;
+       cpumask_var_t mask;
+       int i, len, ret = 0;
+
+       if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+               return -ENOMEM;
+
+       if (write) {
+               ret = cpumask_parse_user(buffer, *lenp, mask);
+               if (ret)
+                       goto done;
+
+               mutex_lock(&flow_limit_update_mutex);
+               /* Table size: header plus one bucket per table-len entry. */
+               len = sizeof(*cur) + netdev_flow_limit_table_len;
+               for_each_possible_cpu(i) {
+                       sd = &per_cpu(softnet_data, i);
+                       cur = rcu_dereference_protected(sd->flow_limit,
+                                    lockdep_is_held(&flow_limit_update_mutex));
+                       if (cur && !cpumask_test_cpu(i, mask)) {
+                               /*
+                                * CPU dropped from the mask: unpublish its
+                                * table and wait for readers before freeing.
+                                */
+                               RCU_INIT_POINTER(sd->flow_limit, NULL);
+                               synchronize_rcu();
+                               kfree(cur);
+                       } else if (!cur && cpumask_test_cpu(i, mask)) {
+                               /*
+                                * CPU newly in the mask: allocate its table
+                                * on that CPU's NUMA node.
+                                */
+                               cur = kzalloc_node(len, GFP_KERNEL,
+                                                  cpu_to_node(i));
+                               if (!cur) {
+                                       /* not unwinding previous changes */
+                                       ret = -ENOMEM;
+                                       goto write_unlock;
+                               }
+                               cur->num_buckets = netdev_flow_limit_table_len;
+                               rcu_assign_pointer(sd->flow_limit, cur);
+                       }
+               }
+write_unlock:
+               mutex_unlock(&flow_limit_update_mutex);
+       } else {
+               /*
+                * NOTE(review): fixed 128-byte buffer for the printed mask;
+                * confirm this is large enough for the configured NR_CPUS.
+                */
+               char kbuf[128];
+
+               /* Only a single read from offset 0 is supported. */
+               if (*ppos || !*lenp) {
+                       *lenp = 0;
+                       goto done;
+               }
+
+               /* Mark each CPU that currently has a flow_limit table. */
+               cpumask_clear(mask);
+               rcu_read_lock();
+               for_each_possible_cpu(i) {
+                       sd = &per_cpu(softnet_data, i);
+                       if (rcu_dereference(sd->flow_limit))
+                               cpumask_set_cpu(i, mask);
+               }
+               rcu_read_unlock();
+
+               len = min(sizeof(kbuf) - 1, *lenp);
+               len = scnprintf(kbuf, len, "%*pb", cpumask_pr_args(mask));
+               if (!len) {
+                       *lenp = 0;
+                       goto done;
+               }
+               if (len < *lenp)
+                       kbuf[len++] = '\n';
+               if (copy_to_user(buffer, kbuf, len)) {
+                       ret = -EFAULT;
+                       goto done;
+               }
+               *lenp = len;
+               *ppos += len;
+       }
+
+done:
+       free_cpumask_var(mask);
+       return ret;
+}
+
+/*
+ * flow_limit_table_len_sysctl - handler for the flow_limit_table_len sysctl.
+ *
+ * Wraps proc_dointvec() under flow_limit_update_mutex (serializing against
+ * concurrent table attach/detach) and rejects any written value that is not
+ * a power of two, restoring the previous value in that case.
+ *
+ * Returns 0 on success, -EINVAL for a non-power-of-two write, or whatever
+ * error proc_dointvec() reports.
+ */
+static int flow_limit_table_len_sysctl(struct ctl_table *table, int write,
+                                      void __user *buffer, size_t *lenp,
+                                      loff_t *ppos)
+{
+       unsigned int old, *ptr;
+       int ret;
+
+       mutex_lock(&flow_limit_update_mutex);
+
+       ptr = table->data;
+       old = *ptr;
+       ret = proc_dointvec(table, write, buffer, lenp, ppos);
+       if (!ret && write && !is_power_of_2(*ptr)) {
+               /* Invalid new length: roll back to the prior value. */
+               *ptr = old;
+               ret = -EINVAL;
+       }
+
+       mutex_unlock(&flow_limit_update_mutex);
+       return ret;
+}
+#endif /* CONFIG_NET_FLOW_LIMIT */
+
+#ifdef CONFIG_NET_SCHED
+/*
+ * set_default_qdisc - handler for the default_qdisc sysctl.
+ *
+ * Copies the current default qdisc name into a local buffer, lets
+ * proc_dostring() perform the user-space copy in either direction via a
+ * temporary ctl_table, and on a successful write installs the new name
+ * with qdisc_set_default().
+ *
+ * Returns 0 on success or a negative errno from proc_dostring() /
+ * qdisc_set_default().
+ */
+static int set_default_qdisc(struct ctl_table *table, int write,
+                            void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       char id[IFNAMSIZ];
+       struct ctl_table tbl = {
+               .data = id,
+               .maxlen = IFNAMSIZ,
+       };
+       int ret;
+
+       qdisc_get_default(id, IFNAMSIZ);
+
+       ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
+       if (write && ret == 0)
+               ret = qdisc_set_default(id);
+       return ret;
+}
+#endif
+
+/*
+ * proc_do_rss_key - read handler exposing netdev_rss_key as a hex string.
+ *
+ * Formats the RSS key with the "%*phC" printk extension (hex bytes with
+ * ':' separators; hence 3 chars of buffer per key byte) and hands the
+ * string to proc_dostring() through a stack-local ctl_table.
+ *
+ * NOTE(review): fake_table has only .data and .maxlen set; the remaining
+ * fields are uninitialized stack garbage -- confirm proc_dostring() never
+ * reads them on this path.
+ */
+static int proc_do_rss_key(struct ctl_table *table, int write,
+                          void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+       struct ctl_table fake_table;
+       char buf[NETDEV_RSS_KEY_LEN * 3];
+
+       snprintf(buf, sizeof(buf), "%*phC", NETDEV_RSS_KEY_LEN, netdev_rss_key);
+       fake_table.data = buf;
+       fake_table.maxlen = sizeof(buf);
+       return proc_dostring(&fake_table, write, buffer, lenp, ppos);
+}
+
 static struct ctl_table net_core_table[] = {
 #ifdef CONFIG_NET
        {
@@ -139,6 +278,13 @@ static struct ctl_table net_core_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+       {
+               .procname       = "netdev_rss_key",
+               .data           = &netdev_rss_key,
+               .maxlen         = sizeof(int),
+               .mode           = 0444,
+               .proc_handler   = proc_do_rss_key,
+       },
 #ifdef CONFIG_BPF_JIT
        {
                .procname       = "bpf_jit_enable",
@@ -176,6 +322,15 @@ static struct ctl_table net_core_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+       {
+               .procname       = "tstamp_allow_data",
+               .data           = &sysctl_tstamp_allow_data,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
+               .extra2         = &one
+       },
 #ifdef CONFIG_RPS
        {
                .procname       = "rps_sock_flow_entries",
@@ -184,6 +339,44 @@ static struct ctl_table net_core_table[] = {
                .proc_handler   = rps_sock_flow_sysctl
        },
 #endif
+#ifdef CONFIG_NET_FLOW_LIMIT
+       {
+               .procname       = "flow_limit_cpu_bitmap",
+               .mode           = 0644,
+               .proc_handler   = flow_limit_cpu_sysctl
+       },
+       {
+               .procname       = "flow_limit_table_len",
+               .data           = &netdev_flow_limit_table_len,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = flow_limit_table_len_sysctl
+       },
+#endif /* CONFIG_NET_FLOW_LIMIT */
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       {
+               .procname       = "busy_poll",
+               .data           = &sysctl_net_busy_poll,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
+       {
+               .procname       = "busy_read",
+               .data           = &sysctl_net_busy_read,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
+#endif
+#ifdef CONFIG_NET_SCHED
+       {
+               .procname       = "default_qdisc",
+               .mode           = 0644,
+               .maxlen         = IFNAMSIZ,
+               .proc_handler   = set_default_qdisc
+       },
+#endif
 #endif /* CONFIG_NET */
        {
                .procname       = "netdev_budget",
@@ -209,7 +402,6 @@ static struct ctl_table netns_core_table[] = {
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .extra1         = &zero,
-               .extra2         = &ushort_max,
                .proc_handler   = proc_dointvec_minmax
        },
        { }