[firefly-linux-kernel-4.4.55.git] / include/linux/netdevice.h
index b97d6823ef3c7062026c62a011f04a7ca3882d8b..9d6025703f736746b11890ab9bbb9571ecc09831 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1986,8 +1986,8 @@ struct napi_gro_cb {
        /* This is non-zero if the packet may be of the same flow. */
        u8      same_flow:1;
 
-       /* Used in udp_gro_receive */
-       u8      udp_mark:1;
+       /* Used in tunnel GRO receive */
+       u8      encap_mark:1;
 
        /* GRO checksum is valid */
        u8      csum_valid:1;
@@ -2003,7 +2003,10 @@ struct napi_gro_cb {
        /* Used in foo-over-udp, set in udp[46]_gro_receive */
        u8      is_ipv6:1;
 
-       /* 7 bit hole */
+       /* Number of gro_receive callbacks this packet already went through */
+       u8 recursion_counter:4;
+
+       /* 3 bit hole */
 
        /* used to support CHECKSUM_COMPLETE for tunneling protocols */
        __wsum  csum;
@@ -2014,6 +2017,25 @@ struct napi_gro_cb {
 
 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
 
+#define GRO_RECURSION_LIMIT 15
+static inline int gro_recursion_inc_test(struct sk_buff *skb)
+{
+       return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
+}
+
+typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
+static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
+                                               struct sk_buff **head,
+                                               struct sk_buff *skb)
+{
+       if (unlikely(gro_recursion_inc_test(skb))) {
+               NAPI_GRO_CB(skb)->flush |= 1;
+               return NULL;
+       }
+
+       return cb(head, skb);
+}
+
 struct packet_type {
        __be16                  type;   /* This is really htons(ether_type). */
        struct net_device       *dev;   /* NULL is wildcarded here           */
@@ -2059,6 +2081,22 @@ struct udp_offload {
        struct udp_offload_callbacks callbacks;
 };
 
+typedef struct sk_buff **(*gro_receive_udp_t)(struct sk_buff **,
+                                             struct sk_buff *,
+                                             struct udp_offload *);
+static inline struct sk_buff **call_gro_receive_udp(gro_receive_udp_t cb,
+                                                   struct sk_buff **head,
+                                                   struct sk_buff *skb,
+                                                   struct udp_offload *uoff)
+{
+       if (unlikely(gro_recursion_inc_test(skb))) {
+               NAPI_GRO_CB(skb)->flush |= 1;
+               return NULL;
+       }
+
+       return cb(head, skb, uoff);
+}
+
 /* often modified stats are per cpu, other are shared (netdev->stats) */
 struct pcpu_sw_netstats {
        u64     rx_packets;
@@ -3036,6 +3074,7 @@ static inline void napi_free_frags(struct napi_struct *napi)
        napi->skb = NULL;
 }
 
+bool netdev_is_rx_handler_busy(struct net_device *dev);
 int netdev_rx_handler_register(struct net_device *dev,
                               rx_handler_func_t *rx_handler,
                               void *rx_handler_data);