tunnels: Don't apply GRO to multiple layers of encapsulation.
author Jesse Gross <jesse@kernel.org>
Sat, 19 Mar 2016 16:32:01 +0000 (09:32 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 31 Oct 2016 10:13:59 +0000 (04:13 -0600)
commit fac8e0f579695a3ecbc4d3cac369139d7f819971 upstream.

When drivers express support for TSO of encapsulated packets, they
only mean that they can do it for one layer of encapsulation.
Supporting additional levels would mean updating, at a minimum,
additional IP length fields, and the drivers are not aware of this
requirement.
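
As an illustration only (the helper below is hypothetical, not part
of the kernel or of this patch): with two stacked GRE-in-IPv4 layers,
every TSO segment would need three IPv4 total-length fields
recomputed, one per header, while hardware that advertises
encapsulated TSO only accounts for a single outer/inner pair:

  /* Hypothetical sketch, assuming option-less 20-byte IPv4 and TCP
   * headers and 4-byte base GRE headers. */
  #include <stdint.h>

  struct seg_lens {
          uint16_t inner;  /* IP(inner) tot_len */
          uint16_t mid;    /* IP(mid)   tot_len */
          uint16_t outer;  /* IP(outer) tot_len */
  };

  static struct seg_lens nested_tot_lens(uint16_t tcp_payload)
  {
          const uint16_t iph = 20, tcph = 20, greh = 4;
          struct seg_lens l;

          l.inner = iph + tcph + tcp_payload;  /* first length fixup  */
          l.mid   = iph + greh + l.inner;      /* second length fixup */
          l.outer = iph + greh + l.mid;        /* third length fixup  */
          return l;
  }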

No encapsulation device expresses support for handling offloaded
encapsulated packets, so we won't generate these types of frames
in the transmit path. However, GRO doesn't have a check for
multiple levels of encapsulation and will attempt to build them.

UDP tunnel GRO actually does prevent this situation but it only
handles multiple UDP tunnels stacked on top of each other. This
generalizes that solution to prevent any kind of tunnel stacking
that would cause problems.
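
In practice the fix turns the old udp_mark bit into a generic
per-packet flag, encap_mark. A condensed sketch of the resulting
receive flow (the identifiers match the hunks below; the comments are
mine, and the GRE and UDP handlers reach the same flush through their
existing out: labels rather than returning NULL directly):

  /* dev_gro_receive(): every packet starts with the mark cleared. */
  NAPI_GRO_CB(skb)->encap_mark = 0;

  /* First tunnel layer seen for this packet (gre_gro_receive(),
   * ipip_gro_receive(), sit_gro_receive() or udp_gro_receive()):
   * claim the single supported level of encapsulation. */
  if (NAPI_GRO_CB(skb)->encap_mark) {
          NAPI_GRO_CB(skb)->flush = 1;  /* nested tunnel: don't aggregate */
          return NULL;
  }
  NAPI_GRO_CB(skb)->encap_mark = 1;

Any second tunnel header on the same packet hits the check above, so
GRO never merges frames that carry more than one level of offloaded
encapsulation.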

Fixes: bf5a755f ("net-gre-gro: Add GRE support to the GRO stack")
Signed-off-by: Jesse Gross <jesse@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Juerg Haefliger <juerg.haefliger@hpe.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
include/linux/netdevice.h
net/core/dev.c
net/ipv4/af_inet.c
net/ipv4/gre_offload.c
net/ipv4/udp_offload.c
net/ipv6/ip6_offload.c

include/linux/netdevice.h
index 4e9c75226f07066dc8ae6e85a870351e1f241c13..12b4d54a8ffaccb2616d848830c83fd025b01a2e 100644
@@ -1986,8 +1986,8 @@ struct napi_gro_cb {
        /* This is non-zero if the packet may be of the same flow. */
        u8      same_flow:1;
 
-       /* Used in udp_gro_receive */
-       u8      udp_mark:1;
+       /* Used in tunnel GRO receive */
+       u8      encap_mark:1;
 
        /* GRO checksum is valid */
        u8      csum_valid:1;

net/core/dev.c
index de4ed2b5a221c73f698e74b071ca8a87b504413f..0989fea88c4480ed88086c628dd7d7cf832e37eb 100644
@@ -4239,7 +4239,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
                NAPI_GRO_CB(skb)->same_flow = 0;
                NAPI_GRO_CB(skb)->flush = 0;
                NAPI_GRO_CB(skb)->free = 0;
-               NAPI_GRO_CB(skb)->udp_mark = 0;
+               NAPI_GRO_CB(skb)->encap_mark = 0;
                NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
 
                /* Setup for GRO checksum validation */

net/ipv4/af_inet.c
index 71be86e965e2c5ab98b1bf181ca9f6e3e7dbeb27..1a5c1ca3ad3c861b8bf1bbdb706ecde7cb562c28 100644
@@ -1383,6 +1383,19 @@ out:
        return pp;
 }
 
+static struct sk_buff **ipip_gro_receive(struct sk_buff **head,
+                                        struct sk_buff *skb)
+{
+       if (NAPI_GRO_CB(skb)->encap_mark) {
+               NAPI_GRO_CB(skb)->flush = 1;
+               return NULL;
+       }
+
+       NAPI_GRO_CB(skb)->encap_mark = 1;
+
+       return inet_gro_receive(head, skb);
+}
+
 int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
 {
        if (sk->sk_family == AF_INET)
@@ -1659,7 +1672,7 @@ static struct packet_offload ip_packet_offload __read_mostly = {
 static const struct net_offload ipip_offload = {
        .callbacks = {
                .gso_segment    = inet_gso_segment,
-               .gro_receive    = inet_gro_receive,
+               .gro_receive    = ipip_gro_receive,
                .gro_complete   = ipip_gro_complete,
        },
 };

net/ipv4/gre_offload.c
index 5a8ee3282550880a7749b8d6a9086dc413661519..e603004c1af8293f0d8b919a5c1f2023bc4ceebd 100644
@@ -128,6 +128,11 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
        struct packet_offload *ptype;
        __be16 type;
 
+       if (NAPI_GRO_CB(skb)->encap_mark)
+               goto out;
+
+       NAPI_GRO_CB(skb)->encap_mark = 1;
+
        off = skb_gro_offset(skb);
        hlen = off + sizeof(*greh);
        greh = skb_gro_header_fast(skb, off);

net/ipv4/udp_offload.c
index f9386160cbee0288e294ea2cd8ba3b5be65cdbf6..0e36e56dfd225ad3757e14445f9364ecff33ff9b 100644
@@ -299,14 +299,14 @@ struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
        unsigned int off = skb_gro_offset(skb);
        int flush = 1;
 
-       if (NAPI_GRO_CB(skb)->udp_mark ||
+       if (NAPI_GRO_CB(skb)->encap_mark ||
            (skb->ip_summed != CHECKSUM_PARTIAL &&
             NAPI_GRO_CB(skb)->csum_cnt == 0 &&
             !NAPI_GRO_CB(skb)->csum_valid))
                goto out;
 
-       /* mark that this skb passed once through the udp gro layer */
-       NAPI_GRO_CB(skb)->udp_mark = 1;
+       /* mark that this skb passed once through the tunnel gro layer */
+       NAPI_GRO_CB(skb)->encap_mark = 1;
 
        rcu_read_lock();
        uo_priv = rcu_dereference(udp_offload_base);

net/ipv6/ip6_offload.c
index eeca943f12dc083e195dde804c764c8732d11b9e..82e9f30760283aca2f3d9468573aea607bd5bf6b 100644
@@ -258,6 +258,19 @@ out:
        return pp;
 }
 
+static struct sk_buff **sit_gro_receive(struct sk_buff **head,
+                                       struct sk_buff *skb)
+{
+       if (NAPI_GRO_CB(skb)->encap_mark) {
+               NAPI_GRO_CB(skb)->flush = 1;
+               return NULL;
+       }
+
+       NAPI_GRO_CB(skb)->encap_mark = 1;
+
+       return ipv6_gro_receive(head, skb);
+}
+
 static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
 {
        const struct net_offload *ops;
@@ -302,7 +315,7 @@ static struct packet_offload ipv6_packet_offload __read_mostly = {
 static const struct net_offload sit_offload = {
        .callbacks = {
                .gso_segment    = ipv6_gso_segment,
-               .gro_receive    = ipv6_gro_receive,
+               .gro_receive    = sit_gro_receive,
                .gro_complete   = sit_gro_complete,
        },
 };