tunnels: Remove encapsulation offloads on decap.
author Jesse Gross <jesse@kernel.org>
Sat, 19 Mar 2016 16:32:02 +0000 (09:32 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 31 Oct 2016 10:13:59 +0000 (04:13 -0600)
commit a09a4c8dd1ec7f830e1fb9e59eb72bddc965d168 upstream.

If a packet is either locally encapsulated or processed through GRO
it is marked with the offloads that it requires. However, when it is
decapsulated these tunnel offload indications are not removed. This
means that if we receive an encapsulated TCP packet, aggregate it with
GRO, decapsulate, and retransmit the resulting frame on a NIC that does
not support encapsulation, we won't be able to take advantage of hardware
offloads even though it is just a simple TCP packet at this point.

This fixes the problem by stripping off encapsulation offload indications
when packets are decapsulated.
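
Concretely, the fix adds a helper, iptunnel_pull_offloads(), that clears the
tunnel GSO-type bits (NETIF_F_GSO_ENCAP_ALL shifted into SKB_GSO_* space) and
the skb->encapsulation flag; the helper itself is in the include/net/ip_tunnels.h
hunk below. As a rough illustration of where it slots into a decapsulation path
(modelled loosely on fou_recv_pull() from this patch; example_decap() is
illustrative only and not part of the patch):

	#include <linux/skbuff.h>
	#include <net/ip_tunnels.h>	/* iptunnel_pull_offloads(), after this patch */

	/*
	 * Illustrative sketch: after the outer tunnel header is pulled,
	 * drop the tunnel offload markings so later transmit paths see a
	 * plain (possibly GSO) inner packet.
	 */
	static int example_decap(struct sk_buff *skb, unsigned int outer_len)
	{
		__skb_pull(skb, outer_len);	/* remove the encapsulation header */
		skb_reset_transport_header(skb);

		/*
		 * Clears the tunnel bits in skb_shinfo(skb)->gso_type and
		 * skb->encapsulation; it can fail only when skb_unclone()
		 * cannot allocate, in which case callers drop the skb.
		 */
		return iptunnel_pull_offloads(skb);
	}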

The performance impact of this bug is significant. In a test where a
Geneve-encapsulated TCP stream is sent to a hypervisor, GRO'ed, decapsulated,
and bridged to a VM, performance improves by 60% (5 Gbps -> 8 Gbps) as a
result of avoiding unnecessary segmentation at the VM tap interface.

Reported-by: Ramu Ramamurthy <sramamur@linux.vnet.ibm.com>
Fixes: 68c33163 ("v4 GRE: Add TCP segmentation offload for GRE")
Signed-off-by: Jesse Gross <jesse@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
(backported from commit a09a4c8dd1ec7f830e1fb9e59eb72bddc965d168)
[adapt iptunnel_pull_header arguments, avoid 7f290c9]
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Juerg Haefliger <juerg.haefliger@hpe.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
include/net/ip_tunnels.h
net/ipv4/fou.c
net/ipv4/ip_tunnel_core.c
net/ipv6/sit.c

diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index af40bc586a1b6ae03ef751ce61276e58934e7406..86a7bdd61d1a90234dc84be0f48cf9fc680a4230 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -283,6 +283,22 @@ struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
 struct sk_buff *iptunnel_handle_offloads(struct sk_buff *skb, bool gre_csum,
                                         int gso_type_mask);
 
+static inline int iptunnel_pull_offloads(struct sk_buff *skb)
+{
+       if (skb_is_gso(skb)) {
+               int err;
+
+               err = skb_unclone(skb, GFP_ATOMIC);
+               if (unlikely(err))
+                       return err;
+               skb_shinfo(skb)->gso_type &= ~(NETIF_F_GSO_ENCAP_ALL >>
+                                              NETIF_F_GSO_SHIFT);
+       }
+
+       skb->encapsulation = 0;
+       return 0;
+}
+
 static inline void iptunnel_xmit_stats(int err,
                                       struct net_device_stats *err_stats,
                                       struct pcpu_sw_netstats __percpu *stats)
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index bd903fe0f7508d9d7a94c4ce1e0a825f3e35c398..08d7de55e57edf23a8b1425e3dda1b6d51fb671b 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -48,7 +48,7 @@ static inline struct fou *fou_from_sock(struct sock *sk)
        return sk->sk_user_data;
 }
 
-static void fou_recv_pull(struct sk_buff *skb, size_t len)
+static int fou_recv_pull(struct sk_buff *skb, size_t len)
 {
        struct iphdr *iph = ip_hdr(skb);
 
@@ -59,6 +59,7 @@ static void fou_recv_pull(struct sk_buff *skb, size_t len)
        __skb_pull(skb, len);
        skb_postpull_rcsum(skb, udp_hdr(skb), len);
        skb_reset_transport_header(skb);
+       return iptunnel_pull_offloads(skb);
 }
 
 static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
@@ -68,9 +69,14 @@ static int fou_udp_recv(struct sock *sk, struct sk_buff *skb)
        if (!fou)
                return 1;
 
-       fou_recv_pull(skb, sizeof(struct udphdr));
+       if (fou_recv_pull(skb, sizeof(struct udphdr)))
+               goto drop;
 
        return -fou->protocol;
+
+drop:
+       kfree_skb(skb);
+       return 0;
 }
 
 static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr,
@@ -170,6 +176,9 @@ static int gue_udp_recv(struct sock *sk, struct sk_buff *skb)
        __skb_pull(skb, sizeof(struct udphdr) + hdrlen);
        skb_reset_transport_header(skb);
 
+       if (iptunnel_pull_offloads(skb))
+               goto drop;
+
        return -guehdr->proto_ctype;
 
 drop:
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 6cb9009c3d96785e566a3cb2cd065860e05980e1..dbda0565781cfe8ae1486994b3fe0795d736f71e 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -116,7 +116,8 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
        skb->vlan_tci = 0;
        skb_set_queue_mapping(skb, 0);
        skb->pkt_type = PACKET_HOST;
-       return 0;
+
+       return iptunnel_pull_offloads(skb);
 }
 EXPORT_SYMBOL_GPL(iptunnel_pull_header);
 
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index ba3d2f3d66d2b6c1f4668e49b71cc42d67121654..3da2b16356eb64a9ff33fdc95809a768a81917f6 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -681,14 +681,15 @@ static int ipip6_rcv(struct sk_buff *skb)
                skb->mac_header = skb->network_header;
                skb_reset_network_header(skb);
                IPCB(skb)->flags = 0;
-               skb->protocol = htons(ETH_P_IPV6);
+               skb->dev = tunnel->dev;
 
                if (packet_is_spoofed(skb, iph, tunnel)) {
                        tunnel->dev->stats.rx_errors++;
                        goto out;
                }
 
-               __skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
+               if (iptunnel_pull_header(skb, 0, htons(ETH_P_IPV6)))
+                       goto out;
 
                err = IP_ECN_decapsulate(iph, skb);
                if (unlikely(err)) {