net: add recursion limit to GRO
Author:     Sabrina Dubroca <sd@queasysnail.net>
AuthorDate: Thu, 20 Oct 2016 13:58:02 +0000 (15:58 +0200)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Tue, 15 Nov 2016 06:46:38 +0000 (07:46 +0100)
[ Upstream commit fcd91dd449867c6bfe56a81cabba76b829fd05cd ]

Currently, GRO can do unlimited recursion through the gro_receive
handlers.  This was fixed for tunneling protocols by limiting tunnel GRO
to one level with encap_mark, but both VLAN and TEB still have this
problem.  Thus, the kernel is vulnerable to a stack overflow if we
receive a packet composed entirely of VLAN headers.
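
To illustrate (a sketched call chain, not a captured trace): each
vlan_gro_receive() pulls one 4-byte 802.1Q header and calls back into
the inner protocol's gro_receive handler, so a 1500-byte frame of
stacked VLAN tags can nest roughly 1500 / 4 ≈ 375 calls deep, one
stack frame per tag:

    dev_gro_receive(skb)
      -> vlan_gro_receive()        /* outermost 802.1Q tag */
         -> vlan_gro_receive()     /* next tag */
            -> vlan_gro_receive()  /* ... until the stack overflows */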

This patch adds a recursion counter to the GRO layer to prevent stack
overflow.  When a gro_receive function hits the recursion limit, GRO is
aborted for this skb and it is processed normally.  This recursion
counter is put in the GRO CB, but could be turned into a percpu counter
if we run out of space in the CB.

Thanks to Vladimír Beneš <vbenes@redhat.com> for the initial bug report.

Fixes: CVE-2016-7039
Fixes: 9b174d88c257 ("net: Add Transparent Ethernet Bridging GRO support.")
Fixes: 66e5133f19e9 ("vlan: Add GRO support for non hardware accelerated vlan")
Signed-off-by: Sabrina Dubroca <sd@queasysnail.net>
Reviewed-by: Jiri Benc <jbenc@redhat.com>
Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org>
Acked-by: Tom Herbert <tom@herbertland.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/net/geneve.c
drivers/net/vxlan.c
include/linux/netdevice.h
net/8021q/vlan.c
net/core/dev.c
net/ethernet/eth.c
net/ipv4/af_inet.c
net/ipv4/fou.c
net/ipv4/gre_offload.c
net/ipv4/udp_offload.c
net/ipv6/ip6_offload.c

diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 69e31e2a68fcc5782f8553eabbfe588158765d17..4827c6987ac3f87234e7b8acb64bb9d6bbd03c94 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -440,7 +440,7 @@ static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
 
        skb_gro_pull(skb, gh_len);
        skb_gro_postpull_rcsum(skb, gh, gh_len);
-       pp = ptype->callbacks.gro_receive(head, skb);
+       pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
        rcu_read_unlock();
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 003780901628760e733b07b0fc7cf43e87e373f2..6fa8e165878e1d9968e2665e0e7bcd2ca0406e25 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -593,7 +593,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
                }
        }
 
-       pp = eth_gro_receive(head, skb);
+       pp = call_gro_receive(eth_gro_receive, head, skb);
 
 out:
        skb_gro_remcsum_cleanup(skb, &grc);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 12b4d54a8ffaccb2616d848830c83fd025b01a2e..9d6025703f736746b11890ab9bbb9571ecc09831 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2003,7 +2003,10 @@ struct napi_gro_cb {
        /* Used in foo-over-udp, set in udp[46]_gro_receive */
        u8      is_ipv6:1;
 
-       /* 7 bit hole */
+       /* Number of gro_receive callbacks this packet already went through */
+       u8 recursion_counter:4;
+
+       /* 3 bit hole */
 
        /* used to support CHECKSUM_COMPLETE for tunneling protocols */
        __wsum  csum;
@@ -2014,6 +2017,25 @@ struct napi_gro_cb {
 
 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
 
+#define GRO_RECURSION_LIMIT 15
+static inline int gro_recursion_inc_test(struct sk_buff *skb)
+{
+       return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
+}
+
+typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
+static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
+                                               struct sk_buff **head,
+                                               struct sk_buff *skb)
+{
+       if (unlikely(gro_recursion_inc_test(skb))) {
+               NAPI_GRO_CB(skb)->flush |= 1;
+               return NULL;
+       }
+
+       return cb(head, skb);
+}
+
 struct packet_type {
        __be16                  type;   /* This is really htons(ether_type). */
        struct net_device       *dev;   /* NULL is wildcarded here           */
@@ -2059,6 +2081,22 @@ struct udp_offload {
        struct udp_offload_callbacks callbacks;
 };
 
+typedef struct sk_buff **(*gro_receive_udp_t)(struct sk_buff **,
+                                             struct sk_buff *,
+                                             struct udp_offload *);
+static inline struct sk_buff **call_gro_receive_udp(gro_receive_udp_t cb,
+                                                   struct sk_buff **head,
+                                                   struct sk_buff *skb,
+                                                   struct udp_offload *uoff)
+{
+       if (unlikely(gro_recursion_inc_test(skb))) {
+               NAPI_GRO_CB(skb)->flush |= 1;
+               return NULL;
+       }
+
+       return cb(head, skb, uoff);
+}
+
 /* often modified stats are per cpu, other are shared (netdev->stats) */
 struct pcpu_sw_netstats {
        u64     rx_packets;
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index d2cd9de4b7241dcc8c1df5fd7832b3b317799923..ad8d6e6b87cab9c2c03c9e317513880d1b755b95 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -659,7 +659,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
 
        skb_gro_pull(skb, sizeof(*vhdr));
        skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
-       pp = ptype->callbacks.gro_receive(head, skb);
+       pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
        rcu_read_unlock();
diff --git a/net/core/dev.c b/net/core/dev.c
index 5d9ec04589981f8c5fb5f85b254a5e9049c0fa37..d200a7ccbde69304160543ddcd8300df723ab601 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4240,6 +4240,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
                NAPI_GRO_CB(skb)->flush = 0;
                NAPI_GRO_CB(skb)->free = 0;
                NAPI_GRO_CB(skb)->encap_mark = 0;
+               NAPI_GRO_CB(skb)->recursion_counter = 0;
                NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
 
                /* Setup for GRO checksum validation */
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 9e63f252a89ead08fda157e716607938452c0b49..de85d4e1cf43c10016e159248a94e7fd544e4c04 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -436,7 +436,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head,
 
        skb_gro_pull(skb, sizeof(*eh));
        skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
-       pp = ptype->callbacks.gro_receive(head, skb);
+       pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
        rcu_read_unlock();
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 1a5c1ca3ad3c861b8bf1bbdb706ecde7cb562c28..afc18e9ca94adeb33d6344481839701971815cbc 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1372,7 +1372,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
        skb_gro_pull(skb, sizeof(*iph));
        skb_set_transport_header(skb, skb_gro_offset(skb));
 
-       pp = ops->callbacks.gro_receive(head, skb);
+       pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
        rcu_read_unlock();
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 08d7de55e57edf23a8b1425e3dda1b6d51fb671b..08d8ee12453801653860354220a60fe043252d4d 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -201,7 +201,7 @@ static struct sk_buff **fou_gro_receive(struct sk_buff **head,
        if (!ops || !ops->callbacks.gro_receive)
                goto out_unlock;
 
-       pp = ops->callbacks.gro_receive(head, skb);
+       pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
        rcu_read_unlock();
@@ -360,7 +360,7 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
        if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
                goto out_unlock;
 
-       pp = ops->callbacks.gro_receive(head, skb);
+       pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
        rcu_read_unlock();
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index e603004c1af8293f0d8b919a5c1f2023bc4ceebd..79ae0d7becbf522250f31e84674a515b5ed5a7d4 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -219,7 +219,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
        /* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull()*/
        skb_gro_postpull_rcsum(skb, greh, grehlen);
 
-       pp = ptype->callbacks.gro_receive(head, skb);
+       pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
        rcu_read_unlock();
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 0e36e56dfd225ad3757e14445f9364ecff33ff9b..6396f1c80ae9ef36469acf69c4ba496ffd85dec8 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -339,8 +339,8 @@ unflush:
        skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
        skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
        NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
-       pp = uo_priv->offload->callbacks.gro_receive(head, skb,
-                                                    uo_priv->offload);
+       pp = call_gro_receive_udp(uo_priv->offload->callbacks.gro_receive,
+                                 head, skb, uo_priv->offload);
 
 out_unlock:
        rcu_read_unlock();
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 82e9f30760283aca2f3d9468573aea607bd5bf6b..efe6268b8bc3e279def9b5322c8fee2c3c5ec92f 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -247,7 +247,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 
        skb_gro_postpull_rcsum(skb, iph, nlen);
 
-       pp = ops->callbacks.gro_receive(head, skb);
+       pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
        rcu_read_unlock();