net/openvswitch/datapath.c (firefly-linux-kernel-4.4.55)
1 /*
2  * Copyright (c) 2007-2014 Nicira, Inc.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of version 2 of the GNU General Public
6  * License as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful, but
9  * WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public License
14  * along with this program; if not, write to the Free Software
15  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
16  * 02110-1301, USA
17  */
18
19 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/if_arp.h>
24 #include <linux/if_vlan.h>
25 #include <linux/in.h>
26 #include <linux/ip.h>
27 #include <linux/jhash.h>
28 #include <linux/delay.h>
29 #include <linux/time.h>
30 #include <linux/etherdevice.h>
31 #include <linux/genetlink.h>
32 #include <linux/kernel.h>
33 #include <linux/kthread.h>
34 #include <linux/mutex.h>
35 #include <linux/percpu.h>
36 #include <linux/rcupdate.h>
37 #include <linux/tcp.h>
38 #include <linux/udp.h>
39 #include <linux/ethtool.h>
40 #include <linux/wait.h>
41 #include <asm/div64.h>
42 #include <linux/highmem.h>
43 #include <linux/netfilter_bridge.h>
44 #include <linux/netfilter_ipv4.h>
45 #include <linux/inetdevice.h>
46 #include <linux/list.h>
47 #include <linux/openvswitch.h>
48 #include <linux/rculist.h>
49 #include <linux/dmi.h>
50 #include <net/genetlink.h>
51 #include <net/net_namespace.h>
52 #include <net/netns/generic.h>
53
54 #include "datapath.h"
55 #include "flow.h"
56 #include "flow_table.h"
57 #include "flow_netlink.h"
58 #include "vport-internal_dev.h"
59 #include "vport-netdev.h"
60
61 int ovs_net_id __read_mostly;
62
63 static struct genl_family dp_packet_genl_family;
64 static struct genl_family dp_flow_genl_family;
65 static struct genl_family dp_datapath_genl_family;
66
67 static const struct genl_multicast_group ovs_dp_flow_multicast_group = {
68         .name = OVS_FLOW_MCGROUP,
69 };
70
71 static const struct genl_multicast_group ovs_dp_datapath_multicast_group = {
72         .name = OVS_DATAPATH_MCGROUP,
73 };
74
75 static const struct genl_multicast_group ovs_dp_vport_multicast_group = {
76         .name = OVS_VPORT_MCGROUP,
77 };
78
79 /* Check if we need to build a reply message.
80  * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */
81 static bool ovs_must_notify(struct genl_info *info,
82                             const struct genl_multicast_group *grp)
83 {
84         return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
85                 netlink_has_listeners(genl_info_net(info)->genl_sock, 0);
86 }
87
88 static void ovs_notify(struct genl_family *family,
89                        struct sk_buff *skb, struct genl_info *info)
90 {
91         genl_notify(family, skb, genl_info_net(info), info->snd_portid,
92                     0, info->nlhdr, GFP_KERNEL);
93 }
94
95 /**
96  * DOC: Locking:
97  *
98  * Writes to device state (add/remove datapath, port, set operations on
99  * vports, etc.) and writes to other state (flow table modifications, set
100  * miscellaneous datapath parameters, etc.) are protected by ovs_mutex,
101  * taken via ovs_lock().
102  *
103  * Reads are protected by RCU.
104  *
105  * There are a few special cases (mostly stats) that have their own
106  * synchronization but they nest under all of above and don't interact with
107  * each other.
108  *
109  * The RTNL lock nests inside ovs_mutex.
110  */
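/*
 * Illustrative sketch (not part of the original file): a writer modifying
 * datapath state takes ovs_lock(), while a reader walking RCU-protected
 * structures only needs rcu_read_lock():
 *
 *	ovs_lock();
 *	// e.g. new_vport(), ovs_flow_tbl_insert(), ovs_dp_detach_port()
 *	ovs_unlock();
 *
 *	rcu_read_lock();
 *	// e.g. ovs_vport_rcu(), ovs_flow_tbl_lookup_stats()
 *	rcu_read_unlock();
 */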
111
112 static DEFINE_MUTEX(ovs_mutex);
113
114 void ovs_lock(void)
115 {
116         mutex_lock(&ovs_mutex);
117 }
118
119 void ovs_unlock(void)
120 {
121         mutex_unlock(&ovs_mutex);
122 }
123
124 #ifdef CONFIG_LOCKDEP
125 int lockdep_ovsl_is_held(void)
126 {
127         if (debug_locks)
128                 return lockdep_is_held(&ovs_mutex);
129         else
130                 return 1;
131 }
132 #endif
133
134 static struct vport *new_vport(const struct vport_parms *);
135 static int queue_gso_packets(struct datapath *dp, struct sk_buff *,
136                              const struct dp_upcall_info *);
137 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *,
138                                   const struct dp_upcall_info *);
139
140 /* Must be called with rcu_read_lock or ovs_mutex. */
141 static struct datapath *get_dp(struct net *net, int dp_ifindex)
142 {
143         struct datapath *dp = NULL;
144         struct net_device *dev;
145
146         rcu_read_lock();
147         dev = dev_get_by_index_rcu(net, dp_ifindex);
148         if (dev) {
149                 struct vport *vport = ovs_internal_dev_get_vport(dev);
150                 if (vport)
151                         dp = vport->dp;
152         }
153         rcu_read_unlock();
154
155         return dp;
156 }
157
158 /* Must be called with rcu_read_lock or ovs_mutex. */
159 static const char *ovs_dp_name(const struct datapath *dp)
160 {
161         struct vport *vport = ovs_vport_ovsl_rcu(dp, OVSP_LOCAL);
162         return vport->ops->get_name(vport);
163 }
164
165 static int get_dpifindex(struct datapath *dp)
166 {
167         struct vport *local;
168         int ifindex;
169
170         rcu_read_lock();
171
172         local = ovs_vport_rcu(dp, OVSP_LOCAL);
173         if (local)
174                 ifindex = netdev_vport_priv(local)->dev->ifindex;
175         else
176                 ifindex = 0;
177
178         rcu_read_unlock();
179
180         return ifindex;
181 }
182
183 static void destroy_dp_rcu(struct rcu_head *rcu)
184 {
185         struct datapath *dp = container_of(rcu, struct datapath, rcu);
186
187         free_percpu(dp->stats_percpu);
188         release_net(ovs_dp_get_net(dp));
189         kfree(dp->ports);
190         kfree(dp);
191 }
192
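/* DP_VPORT_HASH_BUCKETS is a power of two, so masking with
 * (DP_VPORT_HASH_BUCKETS - 1) reduces the port number to a bucket index
 * without a modulo operation. */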
193 static struct hlist_head *vport_hash_bucket(const struct datapath *dp,
194                                             u16 port_no)
195 {
196         return &dp->ports[port_no & (DP_VPORT_HASH_BUCKETS - 1)];
197 }
198
199 /* Called with ovs_mutex or RCU read lock. */
200 struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
201 {
202         struct vport *vport;
203         struct hlist_head *head;
204
205         head = vport_hash_bucket(dp, port_no);
206         hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
207                 if (vport->port_no == port_no)
208                         return vport;
209         }
210         return NULL;
211 }
212
213 /* Called with ovs_mutex. */
214 static struct vport *new_vport(const struct vport_parms *parms)
215 {
216         struct vport *vport;
217
218         vport = ovs_vport_add(parms);
219         if (!IS_ERR(vport)) {
220                 struct datapath *dp = parms->dp;
221                 struct hlist_head *head = vport_hash_bucket(dp, vport->port_no);
222
223                 hlist_add_head_rcu(&vport->dp_hash_node, head);
224         }
225         return vport;
226 }
227
228 void ovs_dp_detach_port(struct vport *p)
229 {
230         ASSERT_OVSL();
231
232         /* First drop references to device. */
233         hlist_del_rcu(&p->dp_hash_node);
234
235         /* Then destroy it. */
236         ovs_vport_del(p);
237 }
238
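/* Per-packet receive path: extract the flow key from 'skb', look it up in
 * the datapath's flow table, and either execute the matching flow's actions
 * or send an OVS_PACKET_CMD_MISS upcall to userspace. */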
239 /* Must be called with rcu_read_lock. */
240 void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
241 {
242         struct datapath *dp = p->dp;
243         struct sw_flow *flow;
244         struct dp_stats_percpu *stats;
245         struct sw_flow_key key;
246         u64 *stats_counter;
247         u32 n_mask_hit;
248         int error;
249
250         stats = this_cpu_ptr(dp->stats_percpu);
251
252         /* Extract flow from 'skb' into 'key'. */
253         error = ovs_flow_extract(skb, p->port_no, &key);
254         if (unlikely(error)) {
255                 kfree_skb(skb);
256                 return;
257         }
258
259         /* Look up flow. */
260         flow = ovs_flow_tbl_lookup_stats(&dp->table, &key, &n_mask_hit);
261         if (unlikely(!flow)) {
262                 struct dp_upcall_info upcall;
263
264                 upcall.cmd = OVS_PACKET_CMD_MISS;
265                 upcall.key = &key;
266                 upcall.userdata = NULL;
267                 upcall.portid = ovs_vport_find_upcall_portid(p, skb);
268                 ovs_dp_upcall(dp, skb, &upcall);
269                 consume_skb(skb);
270                 stats_counter = &stats->n_missed;
271                 goto out;
272         }
273
274         OVS_CB(skb)->flow = flow;
275         OVS_CB(skb)->pkt_key = &key;
276
277         ovs_flow_stats_update(OVS_CB(skb)->flow, key.tp.flags, skb);
278         ovs_execute_actions(dp, skb);
279         stats_counter = &stats->n_hit;
280
281 out:
282         /* Update datapath statistics. */
283         u64_stats_update_begin(&stats->syncp);
284         (*stats_counter)++;
285         stats->n_mask_hit += n_mask_hit;
286         u64_stats_update_end(&stats->syncp);
287 }
288
289 int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
290                   const struct dp_upcall_info *upcall_info)
291 {
292         struct dp_stats_percpu *stats;
293         int err;
294
295         if (upcall_info->portid == 0) {
296                 err = -ENOTCONN;
297                 goto err;
298         }
299
300         if (!skb_is_gso(skb))
301                 err = queue_userspace_packet(dp, skb, upcall_info);
302         else
303                 err = queue_gso_packets(dp, skb, upcall_info);
304         if (err)
305                 goto err;
306
307         return 0;
308
309 err:
310         stats = this_cpu_ptr(dp->stats_percpu);
311
312         u64_stats_update_begin(&stats->syncp);
313         stats->n_lost++;
314         u64_stats_update_end(&stats->syncp);
315
316         return err;
317 }
318
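/* Segment a GSO skb and queue each resulting segment to userspace as a
 * separate upcall; for UDP fragmentation, later segments are marked as
 * later fragments in the upcall key. */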
319 static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
320                              const struct dp_upcall_info *upcall_info)
321 {
322         unsigned short gso_type = skb_shinfo(skb)->gso_type;
323         struct dp_upcall_info later_info;
324         struct sw_flow_key later_key;
325         struct sk_buff *segs, *nskb;
326         int err;
327
328         segs = __skb_gso_segment(skb, NETIF_F_SG, false);
329         if (IS_ERR(segs))
330                 return PTR_ERR(segs);
331
332         /* Queue all of the segments. */
333         skb = segs;
334         do {
335                 err = queue_userspace_packet(dp, skb, upcall_info);
336                 if (err)
337                         break;
338
339                 if (skb == segs && gso_type & SKB_GSO_UDP) {
340                         /* The initial flow key extracted by ovs_flow_extract()
341                          * in this case is for a first fragment, so we need to
342                          * properly mark later fragments.
343                          */
344                         later_key = *upcall_info->key;
345                         later_key.ip.frag = OVS_FRAG_TYPE_LATER;
346
347                         later_info = *upcall_info;
348                         later_info.key = &later_key;
349                         upcall_info = &later_info;
350                 }
351         } while ((skb = skb->next));
352
353         /* Free all of the segments. */
354         skb = segs;
355         do {
356                 nskb = skb->next;
357                 if (err)
358                         kfree_skb(skb);
359                 else
360                         consume_skb(skb);
361         } while ((skb = nskb));
362         return err;
363 }
364
365 static size_t key_attr_size(void)
366 {
367         return    nla_total_size(4)   /* OVS_KEY_ATTR_PRIORITY */
368                 + nla_total_size(0)   /* OVS_KEY_ATTR_TUNNEL */
369                   + nla_total_size(8)   /* OVS_TUNNEL_KEY_ATTR_ID */
370                   + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_SRC */
371                   + nla_total_size(4)   /* OVS_TUNNEL_KEY_ATTR_IPV4_DST */
372                   + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TOS */
373                   + nla_total_size(1)   /* OVS_TUNNEL_KEY_ATTR_TTL */
374                   + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT */
375                   + nla_total_size(0)   /* OVS_TUNNEL_KEY_ATTR_CSUM */
376                 + nla_total_size(4)   /* OVS_KEY_ATTR_IN_PORT */
377                 + nla_total_size(4)   /* OVS_KEY_ATTR_SKB_MARK */
378                 + nla_total_size(12)  /* OVS_KEY_ATTR_ETHERNET */
379                 + nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
380                 + nla_total_size(4)   /* OVS_KEY_ATTR_8021Q */
381                 + nla_total_size(0)   /* OVS_KEY_ATTR_ENCAP */
382                 + nla_total_size(2)   /* OVS_KEY_ATTR_ETHERTYPE */
383                 + nla_total_size(40)  /* OVS_KEY_ATTR_IPV6 */
384                 + nla_total_size(2)   /* OVS_KEY_ATTR_ICMPV6 */
385                 + nla_total_size(28); /* OVS_KEY_ATTR_ND */
386 }
387
388 static size_t upcall_msg_size(const struct nlattr *userdata,
389                               unsigned int hdrlen)
390 {
391         size_t size = NLMSG_ALIGN(sizeof(struct ovs_header))
392                 + nla_total_size(hdrlen) /* OVS_PACKET_ATTR_PACKET */
393                 + nla_total_size(key_attr_size()); /* OVS_PACKET_ATTR_KEY */
394
395         /* OVS_PACKET_ATTR_USERDATA */
396         if (userdata)
397                 size += NLA_ALIGN(userdata->nla_len);
398
399         return size;
400 }
401
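/* Build an upcall Netlink message for 'skb' (flow key, optional userdata and
 * the packet contents) and unicast it to the configured upcall portid. */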
402 static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
403                                   const struct dp_upcall_info *upcall_info)
404 {
405         struct ovs_header *upcall;
406         struct sk_buff *nskb = NULL;
407         struct sk_buff *user_skb; /* to be queued to userspace */
408         struct nlattr *nla;
409         struct genl_info info = {
410                 .dst_sk = ovs_dp_get_net(dp)->genl_sock,
411                 .snd_portid = upcall_info->portid,
412         };
413         size_t len;
414         unsigned int hlen;
415         int err, dp_ifindex;
416
417         dp_ifindex = get_dpifindex(dp);
418         if (!dp_ifindex)
419                 return -ENODEV;
420
421         if (vlan_tx_tag_present(skb)) {
422                 nskb = skb_clone(skb, GFP_ATOMIC);
423                 if (!nskb)
424                         return -ENOMEM;
425
426                 nskb = __vlan_put_tag(nskb, nskb->vlan_proto, vlan_tx_tag_get(nskb));
427                 if (!nskb)
428                         return -ENOMEM;
429
430                 nskb->vlan_tci = 0;
431                 skb = nskb;
432         }
433
434         if (nla_attr_size(skb->len) > USHRT_MAX) {
435                 err = -EFBIG;
436                 goto out;
437         }
438
439         /* Complete checksum if needed */
440         if (skb->ip_summed == CHECKSUM_PARTIAL &&
441             (err = skb_checksum_help(skb)))
442                 goto out;
443
444         /* Older versions of OVS user space enforce alignment of the last
445          * Netlink attribute to NLA_ALIGNTO which would require extensive
446          * padding logic. Only perform zerocopy if padding is not required.
447          */
448         if (dp->user_features & OVS_DP_F_UNALIGNED)
449                 hlen = skb_zerocopy_headlen(skb);
450         else
451                 hlen = skb->len;
452
453         len = upcall_msg_size(upcall_info->userdata, hlen);
454         user_skb = genlmsg_new_unicast(len, &info, GFP_ATOMIC);
455         if (!user_skb) {
456                 err = -ENOMEM;
457                 goto out;
458         }
459
460         upcall = genlmsg_put(user_skb, 0, 0, &dp_packet_genl_family,
461                              0, upcall_info->cmd);
462         upcall->dp_ifindex = dp_ifindex;
463
464         nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
465         err = ovs_nla_put_flow(upcall_info->key, upcall_info->key, user_skb);
466         BUG_ON(err);
467         nla_nest_end(user_skb, nla);
468
469         if (upcall_info->userdata)
470                 __nla_put(user_skb, OVS_PACKET_ATTR_USERDATA,
471                           nla_len(upcall_info->userdata),
472                           nla_data(upcall_info->userdata));
473
474         /* Only reserve room for attribute header, packet data is added
475          * in skb_zerocopy() */
476         if (!(nla = nla_reserve(user_skb, OVS_PACKET_ATTR_PACKET, 0))) {
477                 err = -ENOBUFS;
478                 goto out;
479         }
480         nla->nla_len = nla_attr_size(skb->len);
481
482         err = skb_zerocopy(user_skb, skb, skb->len, hlen);
483         if (err)
484                 goto out;
485
486         /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
487         if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
488                 size_t plen = NLA_ALIGN(user_skb->len) - user_skb->len;
489
490                 if (plen > 0)
491                         memset(skb_put(user_skb, plen), 0, plen);
492         }
493
494         ((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
495
496         err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
497 out:
498         if (err)
499                 skb_tx_error(skb);
500         kfree_skb(nskb);
501         return err;
502 }
503
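/* OVS_PACKET_CMD_EXECUTE handler: rebuild the packet supplied by userspace,
 * derive its flow key and metadata, validate the supplied actions and
 * execute them on the packet in the given datapath. */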
504 static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
505 {
506         struct ovs_header *ovs_header = info->userhdr;
507         struct nlattr **a = info->attrs;
508         struct sw_flow_actions *acts;
509         struct sk_buff *packet;
510         struct sw_flow *flow;
511         struct datapath *dp;
512         struct ethhdr *eth;
513         int len;
514         int err;
515
516         err = -EINVAL;
517         if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
518             !a[OVS_PACKET_ATTR_ACTIONS])
519                 goto err;
520
521         len = nla_len(a[OVS_PACKET_ATTR_PACKET]);
522         packet = __dev_alloc_skb(NET_IP_ALIGN + len, GFP_KERNEL);
523         err = -ENOMEM;
524         if (!packet)
525                 goto err;
526         skb_reserve(packet, NET_IP_ALIGN);
527
528         nla_memcpy(__skb_put(packet, len), a[OVS_PACKET_ATTR_PACKET], len);
529
530         skb_reset_mac_header(packet);
531         eth = eth_hdr(packet);
532
533         /* Normally, setting the skb 'protocol' field would be handled by a
534          * call to eth_type_trans(), but it assumes there's a sending
535          * device, which we may not have. */
536         if (ntohs(eth->h_proto) >= ETH_P_802_3_MIN)
537                 packet->protocol = eth->h_proto;
538         else
539                 packet->protocol = htons(ETH_P_802_2);
540
541         /* Build an sw_flow for sending this packet. */
542         flow = ovs_flow_alloc();
543         err = PTR_ERR(flow);
544         if (IS_ERR(flow))
545                 goto err_kfree_skb;
546
547         err = ovs_flow_extract(packet, -1, &flow->key);
548         if (err)
549                 goto err_flow_free;
550
551         err = ovs_nla_get_flow_metadata(flow, a[OVS_PACKET_ATTR_KEY]);
552         if (err)
553                 goto err_flow_free;
554         acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_PACKET_ATTR_ACTIONS]));
555         err = PTR_ERR(acts);
556         if (IS_ERR(acts))
557                 goto err_flow_free;
558
559         err = ovs_nla_copy_actions(a[OVS_PACKET_ATTR_ACTIONS],
560                                    &flow->key, 0, &acts);
561         rcu_assign_pointer(flow->sf_acts, acts);
562         if (err)
563                 goto err_flow_free;
564
565         OVS_CB(packet)->flow = flow;
566         OVS_CB(packet)->pkt_key = &flow->key;
567         packet->priority = flow->key.phy.priority;
568         packet->mark = flow->key.phy.skb_mark;
569
570         rcu_read_lock();
571         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
572         err = -ENODEV;
573         if (!dp)
574                 goto err_unlock;
575
576         local_bh_disable();
577         err = ovs_execute_actions(dp, packet);
578         local_bh_enable();
579         rcu_read_unlock();
580
581         ovs_flow_free(flow, false);
582         return err;
583
584 err_unlock:
585         rcu_read_unlock();
586 err_flow_free:
587         ovs_flow_free(flow, false);
588 err_kfree_skb:
589         kfree_skb(packet);
590 err:
591         return err;
592 }
593
594 static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
595         [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
596         [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
597         [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
598 };
599
600 static const struct genl_ops dp_packet_genl_ops[] = {
601         { .cmd = OVS_PACKET_CMD_EXECUTE,
602           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
603           .policy = packet_policy,
604           .doit = ovs_packet_cmd_execute
605         }
606 };
607
608 static struct genl_family dp_packet_genl_family = {
609         .id = GENL_ID_GENERATE,
610         .hdrsize = sizeof(struct ovs_header),
611         .name = OVS_PACKET_FAMILY,
612         .version = OVS_PACKET_VERSION,
613         .maxattr = OVS_PACKET_ATTR_MAX,
614         .netnsok = true,
615         .parallel_ops = true,
616         .ops = dp_packet_genl_ops,
617         .n_ops = ARRAY_SIZE(dp_packet_genl_ops),
618 };
619
620 static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
621                          struct ovs_dp_megaflow_stats *mega_stats)
622 {
623         int i;
624
625         memset(mega_stats, 0, sizeof(*mega_stats));
626
627         stats->n_flows = ovs_flow_tbl_count(&dp->table);
628         mega_stats->n_masks = ovs_flow_tbl_num_masks(&dp->table);
629
630         stats->n_hit = stats->n_missed = stats->n_lost = 0;
631
632         for_each_possible_cpu(i) {
633                 const struct dp_stats_percpu *percpu_stats;
634                 struct dp_stats_percpu local_stats;
635                 unsigned int start;
636
637                 percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
638
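                /* Snapshot the per-CPU counters under the u64_stats seqcount;
                 * retry if a writer updated them while we were reading. */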
639                 do {
640                         start = u64_stats_fetch_begin_irq(&percpu_stats->syncp);
641                         local_stats = *percpu_stats;
642                 } while (u64_stats_fetch_retry_irq(&percpu_stats->syncp, start));
643
644                 stats->n_hit += local_stats.n_hit;
645                 stats->n_missed += local_stats.n_missed;
646                 stats->n_lost += local_stats.n_lost;
647                 mega_stats->n_mask_hit += local_stats.n_mask_hit;
648         }
649 }
650
651 static size_t ovs_flow_cmd_msg_size(const struct sw_flow_actions *acts)
652 {
653         return NLMSG_ALIGN(sizeof(struct ovs_header))
654                 + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_KEY */
655                 + nla_total_size(key_attr_size()) /* OVS_FLOW_ATTR_MASK */
656                 + nla_total_size(sizeof(struct ovs_flow_stats)) /* OVS_FLOW_ATTR_STATS */
657                 + nla_total_size(1) /* OVS_FLOW_ATTR_TCP_FLAGS */
658                 + nla_total_size(8) /* OVS_FLOW_ATTR_USED */
659                 + nla_total_size(acts->actions_len); /* OVS_FLOW_ATTR_ACTIONS */
660 }
661
662 /* Called with ovs_mutex or RCU read lock. */
663 static int ovs_flow_cmd_fill_info(const struct sw_flow *flow, int dp_ifindex,
664                                   struct sk_buff *skb, u32 portid,
665                                   u32 seq, u32 flags, u8 cmd)
666 {
667         const int skb_orig_len = skb->len;
668         struct nlattr *start;
669         struct ovs_flow_stats stats;
670         __be16 tcp_flags;
671         unsigned long used;
672         struct ovs_header *ovs_header;
673         struct nlattr *nla;
674         int err;
675
676         ovs_header = genlmsg_put(skb, portid, seq, &dp_flow_genl_family, flags, cmd);
677         if (!ovs_header)
678                 return -EMSGSIZE;
679
680         ovs_header->dp_ifindex = dp_ifindex;
681
682         /* Fill flow key. */
683         nla = nla_nest_start(skb, OVS_FLOW_ATTR_KEY);
684         if (!nla)
685                 goto nla_put_failure;
686
687         err = ovs_nla_put_flow(&flow->unmasked_key, &flow->unmasked_key, skb);
688         if (err)
689                 goto error;
690         nla_nest_end(skb, nla);
691
692         nla = nla_nest_start(skb, OVS_FLOW_ATTR_MASK);
693         if (!nla)
694                 goto nla_put_failure;
695
696         err = ovs_nla_put_flow(&flow->key, &flow->mask->key, skb);
697         if (err)
698                 goto error;
699
700         nla_nest_end(skb, nla);
701
702         ovs_flow_stats_get(flow, &stats, &used, &tcp_flags);
703
704         if (used &&
705             nla_put_u64(skb, OVS_FLOW_ATTR_USED, ovs_flow_used_time(used)))
706                 goto nla_put_failure;
707
708         if (stats.n_packets &&
709             nla_put(skb, OVS_FLOW_ATTR_STATS, sizeof(struct ovs_flow_stats), &stats))
710                 goto nla_put_failure;
711
712         if ((u8)ntohs(tcp_flags) &&
713              nla_put_u8(skb, OVS_FLOW_ATTR_TCP_FLAGS, (u8)ntohs(tcp_flags)))
714                 goto nla_put_failure;
715
716         /* If OVS_FLOW_ATTR_ACTIONS doesn't fit, skip dumping the actions if
717          * this is the first flow to be dumped into 'skb'.  This is unusual for
718          * Netlink but individual action lists can be longer than
719          * NLMSG_GOODSIZE and thus entirely undumpable if we didn't do this.
720          * The userspace caller can always fetch the actions separately if it
721          * really wants them.  (Most userspace callers in fact don't care.)
722          *
723          * This can only fail for dump operations because the skb is always
724          * properly sized for single flows.
725          */
726         start = nla_nest_start(skb, OVS_FLOW_ATTR_ACTIONS);
727         if (start) {
728                 const struct sw_flow_actions *sf_acts;
729
730                 sf_acts = rcu_dereference_ovsl(flow->sf_acts);
731                 err = ovs_nla_put_actions(sf_acts->actions,
732                                           sf_acts->actions_len, skb);
733
734                 if (!err)
735                         nla_nest_end(skb, start);
736                 else {
737                         if (skb_orig_len)
738                                 goto error;
739
740                         nla_nest_cancel(skb, start);
741                 }
742         } else if (skb_orig_len)
743                 goto nla_put_failure;
744
745         return genlmsg_end(skb, ovs_header);
746
747 nla_put_failure:
748         err = -EMSGSIZE;
749 error:
750         genlmsg_cancel(skb, ovs_header);
751         return err;
752 }
753
754 /* May not be called with RCU read lock. */
755 static struct sk_buff *ovs_flow_cmd_alloc_info(const struct sw_flow_actions *acts,
756                                                struct genl_info *info,
757                                                bool always)
758 {
759         struct sk_buff *skb;
760
761         if (!always && !ovs_must_notify(info, &ovs_dp_flow_multicast_group))
762                 return NULL;
763
764         skb = genlmsg_new_unicast(ovs_flow_cmd_msg_size(acts), info, GFP_KERNEL);
765         if (!skb)
766                 return ERR_PTR(-ENOMEM);
767
768         return skb;
769 }
770
771 /* Called with ovs_mutex. */
772 static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
773                                                int dp_ifindex,
774                                                struct genl_info *info, u8 cmd,
775                                                bool always)
776 {
777         struct sk_buff *skb;
778         int retval;
779
780         skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts), info,
781                                       always);
782         if (IS_ERR_OR_NULL(skb))
783                 return skb;
784
785         retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
786                                         info->snd_portid, info->snd_seq, 0,
787                                         cmd);
788         BUG_ON(retval < 0);
789         return skb;
790 }
791
792 static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
793 {
794         struct nlattr **a = info->attrs;
795         struct ovs_header *ovs_header = info->userhdr;
796         struct sw_flow *flow, *new_flow;
797         struct sw_flow_mask mask;
798         struct sk_buff *reply;
799         struct datapath *dp;
800         struct sw_flow_actions *acts;
801         struct sw_flow_match match;
802         int error;
803
804         /* Must have key and actions. */
805         error = -EINVAL;
806         if (!a[OVS_FLOW_ATTR_KEY])
807                 goto error;
808         if (!a[OVS_FLOW_ATTR_ACTIONS])
809                 goto error;
810
811         /* Most of the time we need to allocate a new flow, so do it before
812          * locking.
813          */
814         new_flow = ovs_flow_alloc();
815         if (IS_ERR(new_flow)) {
816                 error = PTR_ERR(new_flow);
817                 goto error;
818         }
819
820         /* Extract key. */
821         ovs_match_init(&match, &new_flow->unmasked_key, &mask);
822         error = ovs_nla_get_match(&match,
823                                   a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
824         if (error)
825                 goto err_kfree_flow;
826
827         ovs_flow_mask_key(&new_flow->key, &new_flow->unmasked_key, &mask);
828
829         /* Validate actions. */
830         acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
831         error = PTR_ERR(acts);
832         if (IS_ERR(acts))
833                 goto err_kfree_flow;
834
835         error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS], &new_flow->key,
836                                      0, &acts);
837         if (error) {
838                 OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
839                 goto err_kfree_acts;
840         }
841
842         reply = ovs_flow_cmd_alloc_info(acts, info, false);
843         if (IS_ERR(reply)) {
844                 error = PTR_ERR(reply);
845                 goto err_kfree_acts;
846         }
847
848         ovs_lock();
849         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
850         if (unlikely(!dp)) {
851                 error = -ENODEV;
852                 goto err_unlock_ovs;
853         }
854         /* Check if this is a duplicate flow */
855         flow = ovs_flow_tbl_lookup(&dp->table, &new_flow->unmasked_key);
856         if (likely(!flow)) {
857                 rcu_assign_pointer(new_flow->sf_acts, acts);
858
859                 /* Put flow in bucket. */
860                 error = ovs_flow_tbl_insert(&dp->table, new_flow, &mask);
861                 if (unlikely(error)) {
862                         acts = NULL;
863                         goto err_unlock_ovs;
864                 }
865
866                 if (unlikely(reply)) {
867                         error = ovs_flow_cmd_fill_info(new_flow,
868                                                        ovs_header->dp_ifindex,
869                                                        reply, info->snd_portid,
870                                                        info->snd_seq, 0,
871                                                        OVS_FLOW_CMD_NEW);
872                         BUG_ON(error < 0);
873                 }
874                 ovs_unlock();
875         } else {
876                 struct sw_flow_actions *old_acts;
877
878                 /* Bail out if we're not allowed to modify an existing flow.
879                  * We accept NLM_F_CREATE in place of the intended NLM_F_EXCL
880                  * because Generic Netlink treats the latter as a dump
881                  * request.  We also accept NLM_F_EXCL in case that bug ever
882                  * gets fixed.
883                  */
884                 if (unlikely(info->nlhdr->nlmsg_flags & (NLM_F_CREATE
885                                                          | NLM_F_EXCL))) {
886                         error = -EEXIST;
887                         goto err_unlock_ovs;
888                 }
889                 /* The unmasked key has to be the same for flow updates. */
890                 if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
891                         flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
892                         if (!flow) {
893                                 error = -ENOENT;
894                                 goto err_unlock_ovs;
895                         }
896                 }
897                 /* Update actions. */
898                 old_acts = ovsl_dereference(flow->sf_acts);
899                 rcu_assign_pointer(flow->sf_acts, acts);
900
901                 if (unlikely(reply)) {
902                         error = ovs_flow_cmd_fill_info(flow,
903                                                        ovs_header->dp_ifindex,
904                                                        reply, info->snd_portid,
905                                                        info->snd_seq, 0,
906                                                        OVS_FLOW_CMD_NEW);
907                         BUG_ON(error < 0);
908                 }
909                 ovs_unlock();
910
911                 ovs_nla_free_flow_actions(old_acts);
912                 ovs_flow_free(new_flow, false);
913         }
914
915         if (reply)
916                 ovs_notify(&dp_flow_genl_family, reply, info);
917         return 0;
918
919 err_unlock_ovs:
920         ovs_unlock();
921         kfree_skb(reply);
922 err_kfree_acts:
923         kfree(acts);
924 err_kfree_flow:
925         ovs_flow_free(new_flow, false);
926 error:
927         return error;
928 }
929
930 static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
931 {
932         struct nlattr **a = info->attrs;
933         struct ovs_header *ovs_header = info->userhdr;
934         struct sw_flow_key key, masked_key;
935         struct sw_flow *flow;
936         struct sw_flow_mask mask;
937         struct sk_buff *reply = NULL;
938         struct datapath *dp;
939         struct sw_flow_actions *old_acts = NULL, *acts = NULL;
940         struct sw_flow_match match;
941         int error;
942
943         /* Extract key. */
944         error = -EINVAL;
945         if (!a[OVS_FLOW_ATTR_KEY])
946                 goto error;
947
948         ovs_match_init(&match, &key, &mask);
949         error = ovs_nla_get_match(&match,
950                                   a[OVS_FLOW_ATTR_KEY], a[OVS_FLOW_ATTR_MASK]);
951         if (error)
952                 goto error;
953
954         /* Validate actions. */
955         if (a[OVS_FLOW_ATTR_ACTIONS]) {
956                 acts = ovs_nla_alloc_flow_actions(nla_len(a[OVS_FLOW_ATTR_ACTIONS]));
957                 error = PTR_ERR(acts);
958                 if (IS_ERR(acts))
959                         goto error;
960
961                 ovs_flow_mask_key(&masked_key, &key, &mask);
962                 error = ovs_nla_copy_actions(a[OVS_FLOW_ATTR_ACTIONS],
963                                              &masked_key, 0, &acts);
964                 if (error) {
965                         OVS_NLERR("Flow actions may not be safe on all matching packets.\n");
966                         goto err_kfree_acts;
967                 }
968         }
969
970         /* Can allocate the reply before locking if we have acts. */
971         if (acts) {
972                 reply = ovs_flow_cmd_alloc_info(acts, info, false);
973                 if (IS_ERR(reply)) {
974                         error = PTR_ERR(reply);
975                         goto err_kfree_acts;
976                 }
977         }
978
979         ovs_lock();
980         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
981         if (unlikely(!dp)) {
982                 error = -ENODEV;
983                 goto err_unlock_ovs;
984         }
985         /* Check that the flow exists. */
986         flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
987         if (unlikely(!flow)) {
988                 error = -ENOENT;
989                 goto err_unlock_ovs;
990         }
991
992         /* Update actions, if present. */
993         if (likely(acts)) {
994                 old_acts = ovsl_dereference(flow->sf_acts);
995                 rcu_assign_pointer(flow->sf_acts, acts);
996
997                 if (unlikely(reply)) {
998                         error = ovs_flow_cmd_fill_info(flow,
999                                                        ovs_header->dp_ifindex,
1000                                                        reply, info->snd_portid,
1001                                                        info->snd_seq, 0,
1002                                                        OVS_FLOW_CMD_NEW);
1003                         BUG_ON(error < 0);
1004                 }
1005         } else {
1006                 /* Could not alloc without acts before locking. */
1007                 reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex,
1008                                                 info, OVS_FLOW_CMD_NEW, false);
1009                 if (unlikely(IS_ERR(reply))) {
1010                         error = PTR_ERR(reply);
1011                         goto err_unlock_ovs;
1012                 }
1013         }
1014
1015         /* Clear stats. */
1016         if (a[OVS_FLOW_ATTR_CLEAR])
1017                 ovs_flow_stats_clear(flow);
1018         ovs_unlock();
1019
1020         if (reply)
1021                 ovs_notify(&dp_flow_genl_family, reply, info);
1022         if (old_acts)
1023                 ovs_nla_free_flow_actions(old_acts);
1024
1025         return 0;
1026
1027 err_unlock_ovs:
1028         ovs_unlock();
1029         kfree_skb(reply);
1030 err_kfree_acts:
1031         kfree(acts);
1032 error:
1033         return error;
1034 }
1035
1036 static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1037 {
1038         struct nlattr **a = info->attrs;
1039         struct ovs_header *ovs_header = info->userhdr;
1040         struct sw_flow_key key;
1041         struct sk_buff *reply;
1042         struct sw_flow *flow;
1043         struct datapath *dp;
1044         struct sw_flow_match match;
1045         int err;
1046
1047         if (!a[OVS_FLOW_ATTR_KEY]) {
1048                 OVS_NLERR("Flow get message rejected, key attribute missing.\n");
1049                 return -EINVAL;
1050         }
1051
1052         ovs_match_init(&match, &key, NULL);
1053         err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
1054         if (err)
1055                 return err;
1056
1057         ovs_lock();
1058         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1059         if (!dp) {
1060                 err = -ENODEV;
1061                 goto unlock;
1062         }
1063
1064         flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1065         if (!flow) {
1066                 err = -ENOENT;
1067                 goto unlock;
1068         }
1069
1070         reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info,
1071                                         OVS_FLOW_CMD_NEW, true);
1072         if (IS_ERR(reply)) {
1073                 err = PTR_ERR(reply);
1074                 goto unlock;
1075         }
1076
1077         ovs_unlock();
1078         return genlmsg_reply(reply, info);
1079 unlock:
1080         ovs_unlock();
1081         return err;
1082 }
1083
1084 static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1085 {
1086         struct nlattr **a = info->attrs;
1087         struct ovs_header *ovs_header = info->userhdr;
1088         struct sw_flow_key key;
1089         struct sk_buff *reply;
1090         struct sw_flow *flow;
1091         struct datapath *dp;
1092         struct sw_flow_match match;
1093         int err;
1094
1095         if (likely(a[OVS_FLOW_ATTR_KEY])) {
1096                 ovs_match_init(&match, &key, NULL);
1097                 err = ovs_nla_get_match(&match, a[OVS_FLOW_ATTR_KEY], NULL);
1098                 if (unlikely(err))
1099                         return err;
1100         }
1101
1102         ovs_lock();
1103         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1104         if (unlikely(!dp)) {
1105                 err = -ENODEV;
1106                 goto unlock;
1107         }
1108
1109         if (unlikely(!a[OVS_FLOW_ATTR_KEY])) {
1110                 err = ovs_flow_tbl_flush(&dp->table);
1111                 goto unlock;
1112         }
1113
1114         flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1115         if (unlikely(!flow)) {
1116                 err = -ENOENT;
1117                 goto unlock;
1118         }
1119
1120         ovs_flow_tbl_remove(&dp->table, flow);
1121         ovs_unlock();
1122
1123         reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *) flow->sf_acts,
1124                                         info, false);
1125         if (likely(reply)) {
1126                 if (likely(!IS_ERR(reply))) {
1127                         rcu_read_lock();        /* To keep RCU checker happy. */
1128                         err = ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex,
1129                                                      reply, info->snd_portid,
1130                                                      info->snd_seq, 0,
1131                                                      OVS_FLOW_CMD_DEL);
1132                         rcu_read_unlock();
1133                         BUG_ON(err < 0);
1134
1135                         ovs_notify(&dp_flow_genl_family, reply, info);
1136                 } else {
1137                         netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 0, PTR_ERR(reply));
1138                 }
1139         }
1140
1141         ovs_flow_free(flow, true);
1142         return 0;
1143 unlock:
1144         ovs_unlock();
1145         return err;
1146 }
1147
1148 static int ovs_flow_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1149 {
1150         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1151         struct table_instance *ti;
1152         struct datapath *dp;
1153
1154         rcu_read_lock();
1155         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1156         if (!dp) {
1157                 rcu_read_unlock();
1158                 return -ENODEV;
1159         }
1160
1161         ti = rcu_dereference(dp->table.ti);
1162         for (;;) {
1163                 struct sw_flow *flow;
1164                 u32 bucket, obj;
1165
1166                 bucket = cb->args[0];
1167                 obj = cb->args[1];
1168                 flow = ovs_flow_tbl_dump_next(ti, &bucket, &obj);
1169                 if (!flow)
1170                         break;
1171
1172                 if (ovs_flow_cmd_fill_info(flow, ovs_header->dp_ifindex, skb,
1173                                            NETLINK_CB(cb->skb).portid,
1174                                            cb->nlh->nlmsg_seq, NLM_F_MULTI,
1175                                            OVS_FLOW_CMD_NEW) < 0)
1176                         break;
1177
1178                 cb->args[0] = bucket;
1179                 cb->args[1] = obj;
1180         }
1181         rcu_read_unlock();
1182         return skb->len;
1183 }
1184
1185 static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
1186         [OVS_FLOW_ATTR_KEY] = { .type = NLA_NESTED },
1187         [OVS_FLOW_ATTR_ACTIONS] = { .type = NLA_NESTED },
1188         [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
1189 };
1190
1191 static const struct genl_ops dp_flow_genl_ops[] = {
1192         { .cmd = OVS_FLOW_CMD_NEW,
1193           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1194           .policy = flow_policy,
1195           .doit = ovs_flow_cmd_new
1196         },
1197         { .cmd = OVS_FLOW_CMD_DEL,
1198           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1199           .policy = flow_policy,
1200           .doit = ovs_flow_cmd_del
1201         },
1202         { .cmd = OVS_FLOW_CMD_GET,
1203           .flags = 0,               /* OK for unprivileged users. */
1204           .policy = flow_policy,
1205           .doit = ovs_flow_cmd_get,
1206           .dumpit = ovs_flow_cmd_dump
1207         },
1208         { .cmd = OVS_FLOW_CMD_SET,
1209           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1210           .policy = flow_policy,
1211           .doit = ovs_flow_cmd_set,
1212         },
1213 };
1214
1215 static struct genl_family dp_flow_genl_family = {
1216         .id = GENL_ID_GENERATE,
1217         .hdrsize = sizeof(struct ovs_header),
1218         .name = OVS_FLOW_FAMILY,
1219         .version = OVS_FLOW_VERSION,
1220         .maxattr = OVS_FLOW_ATTR_MAX,
1221         .netnsok = true,
1222         .parallel_ops = true,
1223         .ops = dp_flow_genl_ops,
1224         .n_ops = ARRAY_SIZE(dp_flow_genl_ops),
1225         .mcgrps = &ovs_dp_flow_multicast_group,
1226         .n_mcgrps = 1,
1227 };
1228
1229 static size_t ovs_dp_cmd_msg_size(void)
1230 {
1231         size_t msgsize = NLMSG_ALIGN(sizeof(struct ovs_header));
1232
1233         msgsize += nla_total_size(IFNAMSIZ);
1234         msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
1235         msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats));
1236         msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
1237
1238         return msgsize;
1239 }
1240
1241 /* Called with ovs_mutex or RCU read lock. */
1242 static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1243                                 u32 portid, u32 seq, u32 flags, u8 cmd)
1244 {
1245         struct ovs_header *ovs_header;
1246         struct ovs_dp_stats dp_stats;
1247         struct ovs_dp_megaflow_stats dp_megaflow_stats;
1248         int err;
1249
1250         ovs_header = genlmsg_put(skb, portid, seq, &dp_datapath_genl_family,
1251                                    flags, cmd);
1252         if (!ovs_header)
1253                 goto error;
1254
1255         ovs_header->dp_ifindex = get_dpifindex(dp);
1256
1257         err = nla_put_string(skb, OVS_DP_ATTR_NAME, ovs_dp_name(dp));
1258         if (err)
1259                 goto nla_put_failure;
1260
1261         get_dp_stats(dp, &dp_stats, &dp_megaflow_stats);
1262         if (nla_put(skb, OVS_DP_ATTR_STATS, sizeof(struct ovs_dp_stats),
1263                         &dp_stats))
1264                 goto nla_put_failure;
1265
1266         if (nla_put(skb, OVS_DP_ATTR_MEGAFLOW_STATS,
1267                         sizeof(struct ovs_dp_megaflow_stats),
1268                         &dp_megaflow_stats))
1269                 goto nla_put_failure;
1270
1271         if (nla_put_u32(skb, OVS_DP_ATTR_USER_FEATURES, dp->user_features))
1272                 goto nla_put_failure;
1273
1274         return genlmsg_end(skb, ovs_header);
1275
1276 nla_put_failure:
1277         genlmsg_cancel(skb, ovs_header);
1278 error:
1279         return -EMSGSIZE;
1280 }
1281
1282 static struct sk_buff *ovs_dp_cmd_alloc_info(struct genl_info *info)
1283 {
1284         return genlmsg_new_unicast(ovs_dp_cmd_msg_size(), info, GFP_KERNEL);
1285 }
1286
1287 /* Called with rcu_read_lock or ovs_mutex. */
1288 static struct datapath *lookup_datapath(struct net *net,
1289                                         struct ovs_header *ovs_header,
1290                                         struct nlattr *a[OVS_DP_ATTR_MAX + 1])
1291 {
1292         struct datapath *dp;
1293
1294         if (!a[OVS_DP_ATTR_NAME])
1295                 dp = get_dp(net, ovs_header->dp_ifindex);
1296         else {
1297                 struct vport *vport;
1298
1299                 vport = ovs_vport_locate(net, nla_data(a[OVS_DP_ATTR_NAME]));
1300                 dp = vport && vport->port_no == OVSP_LOCAL ? vport->dp : NULL;
1301         }
1302         return dp ? dp : ERR_PTR(-ENODEV);
1303 }
1304
1305 static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *info)
1306 {
1307         struct datapath *dp;
1308
1309         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1310         if (IS_ERR(dp))
1311                 return;
1312
1313         WARN(dp->user_features, "Dropping previously announced user features\n");
1314         dp->user_features = 0;
1315 }
1316
1317 static void ovs_dp_change(struct datapath *dp, struct nlattr **a)
1318 {
1319         if (a[OVS_DP_ATTR_USER_FEATURES])
1320                 dp->user_features = nla_get_u32(a[OVS_DP_ATTR_USER_FEATURES]);
1321 }
1322
1323 static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
1324 {
1325         struct nlattr **a = info->attrs;
1326         struct vport_parms parms;
1327         struct sk_buff *reply;
1328         struct datapath *dp;
1329         struct vport *vport;
1330         struct ovs_net *ovs_net;
1331         int err, i;
1332
1333         err = -EINVAL;
1334         if (!a[OVS_DP_ATTR_NAME] || !a[OVS_DP_ATTR_UPCALL_PID])
1335                 goto err;
1336
1337         reply = ovs_dp_cmd_alloc_info(info);
1338         if (!reply)
1339                 return -ENOMEM;
1340
1341         err = -ENOMEM;
1342         dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1343         if (dp == NULL)
1344                 goto err_free_reply;
1345
1346         ovs_dp_set_net(dp, hold_net(sock_net(skb->sk)));
1347
1348         /* Allocate table. */
1349         err = ovs_flow_tbl_init(&dp->table);
1350         if (err)
1351                 goto err_free_dp;
1352
1353         dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
1354         if (!dp->stats_percpu) {
1355                 err = -ENOMEM;
1356                 goto err_destroy_table;
1357         }
1358
1359         dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
1360                             GFP_KERNEL);
1361         if (!dp->ports) {
1362                 err = -ENOMEM;
1363                 goto err_destroy_percpu;
1364         }
1365
1366         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
1367                 INIT_HLIST_HEAD(&dp->ports[i]);
1368
1369         /* Set up our datapath device. */
1370         parms.name = nla_data(a[OVS_DP_ATTR_NAME]);
1371         parms.type = OVS_VPORT_TYPE_INTERNAL;
1372         parms.options = NULL;
1373         parms.dp = dp;
1374         parms.port_no = OVSP_LOCAL;
1375         parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
1376
1377         ovs_dp_change(dp, a);
1378
1379         /* So far only local changes have been made, now need the lock. */
1380         ovs_lock();
1381
1382         vport = new_vport(&parms);
1383         if (IS_ERR(vport)) {
1384                 err = PTR_ERR(vport);
1385                 if (err == -EBUSY)
1386                         err = -EEXIST;
1387
1388                 if (err == -EEXIST) {
1389                         /* An outdated user space instance that does not understand
1390                          * the concept of user_features has attempted to create a new
1391                          * datapath and is likely to reuse it. Drop all user features.
1392                          */
1393                         if (info->genlhdr->version < OVS_DP_VER_FEATURES)
1394                                 ovs_dp_reset_user_features(skb, info);
1395                 }
1396
1397                 goto err_destroy_ports_array;
1398         }
1399
1400         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1401                                    info->snd_seq, 0, OVS_DP_CMD_NEW);
1402         BUG_ON(err < 0);
1403
1404         ovs_net = net_generic(ovs_dp_get_net(dp), ovs_net_id);
1405         list_add_tail_rcu(&dp->list_node, &ovs_net->dps);
1406
1407         ovs_unlock();
1408
1409         ovs_notify(&dp_datapath_genl_family, reply, info);
1410         return 0;
1411
1412 err_destroy_ports_array:
1413         ovs_unlock();
1414         kfree(dp->ports);
1415 err_destroy_percpu:
1416         free_percpu(dp->stats_percpu);
1417 err_destroy_table:
1418         ovs_flow_tbl_destroy(&dp->table, false);
1419 err_free_dp:
1420         release_net(ovs_dp_get_net(dp));
1421         kfree(dp);
1422 err_free_reply:
1423         kfree_skb(reply);
1424 err:
1425         return err;
1426 }
1427
1428 /* Called with ovs_mutex. */
1429 static void __dp_destroy(struct datapath *dp)
1430 {
1431         int i;
1432
1433         for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
1434                 struct vport *vport;
1435                 struct hlist_node *n;
1436
1437                 hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node)
1438                         if (vport->port_no != OVSP_LOCAL)
1439                                 ovs_dp_detach_port(vport);
1440         }
1441
1442         list_del_rcu(&dp->list_node);
1443
1444         /* OVSP_LOCAL is the datapath's internal port. We need to make sure that
1445          * all ports in the datapath are destroyed before the datapath is freed.
1446          */
1447         ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
1448
1449         /* RCU destroy the flow table */
1450         ovs_flow_tbl_destroy(&dp->table, true);
1451
1452         call_rcu(&dp->rcu, destroy_dp_rcu);
1453 }
1454
1455 static int ovs_dp_cmd_del(struct sk_buff *skb, struct genl_info *info)
1456 {
1457         struct sk_buff *reply;
1458         struct datapath *dp;
1459         int err;
1460
1461         reply = ovs_dp_cmd_alloc_info(info);
1462         if (!reply)
1463                 return -ENOMEM;
1464
1465         ovs_lock();
1466         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1467         err = PTR_ERR(dp);
1468         if (IS_ERR(dp))
1469                 goto err_unlock_free;
1470
1471         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1472                                    info->snd_seq, 0, OVS_DP_CMD_DEL);
1473         BUG_ON(err < 0);
1474
1475         __dp_destroy(dp);
1476         ovs_unlock();
1477
1478         ovs_notify(&dp_datapath_genl_family, reply, info);
1479
1480         return 0;
1481
1482 err_unlock_free:
1483         ovs_unlock();
1484         kfree_skb(reply);
1485         return err;
1486 }
1487
1488 static int ovs_dp_cmd_set(struct sk_buff *skb, struct genl_info *info)
1489 {
1490         struct sk_buff *reply;
1491         struct datapath *dp;
1492         int err;
1493
1494         reply = ovs_dp_cmd_alloc_info(info);
1495         if (!reply)
1496                 return -ENOMEM;
1497
1498         ovs_lock();
1499         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1500         err = PTR_ERR(dp);
1501         if (IS_ERR(dp))
1502                 goto err_unlock_free;
1503
1504         ovs_dp_change(dp, info->attrs);
1505
1506         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1507                                    info->snd_seq, 0, OVS_DP_CMD_NEW);
1508         BUG_ON(err < 0);
1509
1510         ovs_unlock();
1511         ovs_notify(&dp_datapath_genl_family, reply, info);
1512
1513         return 0;
1514
1515 err_unlock_free:
1516         ovs_unlock();
1517         kfree_skb(reply);
1518         return err;
1519 }
1520
1521 static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1522 {
1523         struct sk_buff *reply;
1524         struct datapath *dp;
1525         int err;
1526
1527         reply = ovs_dp_cmd_alloc_info(info);
1528         if (!reply)
1529                 return -ENOMEM;
1530
1531         rcu_read_lock();
1532         dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1533         if (IS_ERR(dp)) {
1534                 err = PTR_ERR(dp);
1535                 goto err_unlock_free;
1536         }
1537         err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1538                                    info->snd_seq, 0, OVS_DP_CMD_NEW);
1539         BUG_ON(err < 0);
1540         rcu_read_unlock();
1541
1542         return genlmsg_reply(reply, info);
1543
1544 err_unlock_free:
1545         rcu_read_unlock();
1546         kfree_skb(reply);
1547         return err;
1548 }
1549
1550 static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1551 {
1552         struct ovs_net *ovs_net = net_generic(sock_net(skb->sk), ovs_net_id);
1553         struct datapath *dp;
1554         int skip = cb->args[0];
1555         int i = 0;
1556
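        /* cb->args[0] holds the number of datapaths already dumped by a
         * previous pass; skip that many entries before filling the skb.
         */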
1557         rcu_read_lock();
1558         list_for_each_entry_rcu(dp, &ovs_net->dps, list_node) {
1559                 if (i >= skip &&
1560                     ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
1561                                          cb->nlh->nlmsg_seq, NLM_F_MULTI,
1562                                          OVS_DP_CMD_NEW) < 0)
1563                         break;
1564                 i++;
1565         }
1566         rcu_read_unlock();
1567
1568         cb->args[0] = i;
1569
1570         return skb->len;
1571 }
1572
1573 static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
1574         [OVS_DP_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1575         [OVS_DP_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1576         [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
1577 };
1578
1579 static const struct genl_ops dp_datapath_genl_ops[] = {
1580         { .cmd = OVS_DP_CMD_NEW,
1581           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1582           .policy = datapath_policy,
1583           .doit = ovs_dp_cmd_new
1584         },
1585         { .cmd = OVS_DP_CMD_DEL,
1586           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1587           .policy = datapath_policy,
1588           .doit = ovs_dp_cmd_del
1589         },
1590         { .cmd = OVS_DP_CMD_GET,
1591           .flags = 0,               /* OK for unprivileged users. */
1592           .policy = datapath_policy,
1593           .doit = ovs_dp_cmd_get,
1594           .dumpit = ovs_dp_cmd_dump
1595         },
1596         { .cmd = OVS_DP_CMD_SET,
1597           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1598           .policy = datapath_policy,
1599           .doit = ovs_dp_cmd_set,
1600         },
1601 };
1602
1603 static struct genl_family dp_datapath_genl_family = {
1604         .id = GENL_ID_GENERATE,
1605         .hdrsize = sizeof(struct ovs_header),
1606         .name = OVS_DATAPATH_FAMILY,
1607         .version = OVS_DATAPATH_VERSION,
1608         .maxattr = OVS_DP_ATTR_MAX,
1609         .netnsok = true,
1610         .parallel_ops = true,
1611         .ops = dp_datapath_genl_ops,
1612         .n_ops = ARRAY_SIZE(dp_datapath_genl_ops),
1613         .mcgrps = &ovs_dp_datapath_multicast_group,
1614         .n_mcgrps = 1,
1615 };
1616
1617 /* Called with ovs_mutex or RCU read lock. */
1618 static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
1619                                    u32 portid, u32 seq, u32 flags, u8 cmd)
1620 {
1621         struct ovs_header *ovs_header;
1622         struct ovs_vport_stats vport_stats;
1623         int err;
1624
1625         ovs_header = genlmsg_put(skb, portid, seq, &dp_vport_genl_family,
1626                                  flags, cmd);
1627         if (!ovs_header)
1628                 return -EMSGSIZE;
1629
1630         ovs_header->dp_ifindex = get_dpifindex(vport->dp);
1631
1632         if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
1633             nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
1634             nla_put_string(skb, OVS_VPORT_ATTR_NAME,
1635                            vport->ops->get_name(vport)))
1636                 goto nla_put_failure;
1637
1638         ovs_vport_get_stats(vport, &vport_stats);
1639         if (nla_put(skb, OVS_VPORT_ATTR_STATS, sizeof(struct ovs_vport_stats),
1640                     &vport_stats))
1641                 goto nla_put_failure;
1642
1643         if (ovs_vport_get_upcall_portids(vport, skb))
1644                 goto nla_put_failure;
1645
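        /* Append the vport's options.  Only -EMSGSIZE aborts the message;
         * any other error is ignored and the reply is finalized without
         * the options attribute.
         */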
1646         err = ovs_vport_get_options(vport, skb);
1647         if (err == -EMSGSIZE)
1648                 goto error;
1649
1650         return genlmsg_end(skb, ovs_header);
1651
1652 nla_put_failure:
1653         err = -EMSGSIZE;
1654 error:
1655         genlmsg_cancel(skb, ovs_header);
1656         return err;
1657 }
1658
1659 static struct sk_buff *ovs_vport_cmd_alloc_info(void)
1660 {
1661         return nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
1662 }
1663
1664 /* Called with ovs_mutex, only via ovs_dp_notify_wq(). */
1665 struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,
1666                                          u32 seq, u8 cmd)
1667 {
1668         struct sk_buff *skb;
1669         int retval;
1670
1671         skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
1672         if (!skb)
1673                 return ERR_PTR(-ENOMEM);
1674
1675         retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd);
1676         BUG_ON(retval < 0);
1677
1678         return skb;
1679 }
1680
1681 /* Called with ovs_mutex or RCU read lock. */
1682 static struct vport *lookup_vport(struct net *net,
1683                                   struct ovs_header *ovs_header,
1684                                   struct nlattr *a[OVS_VPORT_ATTR_MAX + 1])
1685 {
1686         struct datapath *dp;
1687         struct vport *vport;
1688
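        /* A vport may be identified either by its name or by the datapath
         * ifindex plus port number carried in the request.
         */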
1689         if (a[OVS_VPORT_ATTR_NAME]) {
1690                 vport = ovs_vport_locate(net, nla_data(a[OVS_VPORT_ATTR_NAME]));
1691                 if (!vport)
1692                         return ERR_PTR(-ENODEV);
1693                 if (ovs_header->dp_ifindex &&
1694                     ovs_header->dp_ifindex != get_dpifindex(vport->dp))
1695                         return ERR_PTR(-ENODEV);
1696                 return vport;
1697         } else if (a[OVS_VPORT_ATTR_PORT_NO]) {
1698                 u32 port_no = nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]);
1699
1700                 if (port_no >= DP_MAX_PORTS)
1701                         return ERR_PTR(-EFBIG);
1702
1703                 dp = get_dp(net, ovs_header->dp_ifindex);
1704                 if (!dp)
1705                         return ERR_PTR(-ENODEV);
1706
1707                 vport = ovs_vport_ovsl_rcu(dp, port_no);
1708                 if (!vport)
1709                         return ERR_PTR(-ENODEV);
1710                 return vport;
1711         } else
1712                 return ERR_PTR(-EINVAL);
1713 }
1714
1715 static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
1716 {
1717         struct nlattr **a = info->attrs;
1718         struct ovs_header *ovs_header = info->userhdr;
1719         struct vport_parms parms;
1720         struct sk_buff *reply;
1721         struct vport *vport;
1722         struct datapath *dp;
1723         u32 port_no;
1724         int err;
1725
1726         if (!a[OVS_VPORT_ATTR_NAME] || !a[OVS_VPORT_ATTR_TYPE] ||
1727             !a[OVS_VPORT_ATTR_UPCALL_PID])
1728                 return -EINVAL;
1729
1730         port_no = a[OVS_VPORT_ATTR_PORT_NO]
1731                 ? nla_get_u32(a[OVS_VPORT_ATTR_PORT_NO]) : 0;
1732         if (port_no >= DP_MAX_PORTS)
1733                 return -EFBIG;
1734
1735         reply = ovs_vport_cmd_alloc_info();
1736         if (!reply)
1737                 return -ENOMEM;
1738
1739         ovs_lock();
1740         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1741         err = -ENODEV;
1742         if (!dp)
1743                 goto exit_unlock_free;
1744
1745         if (port_no) {
1746                 vport = ovs_vport_ovsl(dp, port_no);
1747                 err = -EBUSY;
1748                 if (vport)
1749                         goto exit_unlock_free;
1750         } else {
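                /* No port number was requested: pick the lowest free one,
                 * starting at 1 since port 0 is the OVSP_LOCAL port.
                 */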
1751                 for (port_no = 1; ; port_no++) {
1752                         if (port_no >= DP_MAX_PORTS) {
1753                                 err = -EFBIG;
1754                                 goto exit_unlock_free;
1755                         }
1756                         vport = ovs_vport_ovsl(dp, port_no);
1757                         if (!vport)
1758                                 break;
1759                 }
1760         }
1761
1762         parms.name = nla_data(a[OVS_VPORT_ATTR_NAME]);
1763         parms.type = nla_get_u32(a[OVS_VPORT_ATTR_TYPE]);
1764         parms.options = a[OVS_VPORT_ATTR_OPTIONS];
1765         parms.dp = dp;
1766         parms.port_no = port_no;
1767         parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
1768
1769         vport = new_vport(&parms);
1770         err = PTR_ERR(vport);
1771         if (IS_ERR(vport))
1772                 goto exit_unlock_free;
1773
1774         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1775                                       info->snd_seq, 0, OVS_VPORT_CMD_NEW);
1776         BUG_ON(err < 0);
1777         ovs_unlock();
1778
1779         ovs_notify(&dp_vport_genl_family, reply, info);
1780         return 0;
1781
1782 exit_unlock_free:
1783         ovs_unlock();
1784         kfree_skb(reply);
1785         return err;
1786 }
1787
1788 static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
1789 {
1790         struct nlattr **a = info->attrs;
1791         struct sk_buff *reply;
1792         struct vport *vport;
1793         int err;
1794
1795         reply = ovs_vport_cmd_alloc_info();
1796         if (!reply)
1797                 return -ENOMEM;
1798
1799         ovs_lock();
1800         vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
1801         err = PTR_ERR(vport);
1802         if (IS_ERR(vport))
1803                 goto exit_unlock_free;
1804
1805         if (a[OVS_VPORT_ATTR_TYPE] &&
1806             nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type) {
1807                 err = -EINVAL;
1808                 goto exit_unlock_free;
1809         }
1810
1811         if (a[OVS_VPORT_ATTR_OPTIONS]) {
1812                 err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);
1813                 if (err)
1814                         goto exit_unlock_free;
1815         }
1816
1817
1818         if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
1819                 struct nlattr *ids = a[OVS_VPORT_ATTR_UPCALL_PID];
1820
1821                 err = ovs_vport_set_upcall_portids(vport, ids);
1822                 if (err)
1823                         goto exit_unlock_free;
1824         }
1825
1826         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1827                                       info->snd_seq, 0, OVS_VPORT_CMD_NEW);
1828         BUG_ON(err < 0);
1829
1830         ovs_unlock();
1831         ovs_notify(&dp_vport_genl_family, reply, info);
1832         return 0;
1833
1834 exit_unlock_free:
1835         ovs_unlock();
1836         kfree_skb(reply);
1837         return err;
1838 }
1839
1840 static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)
1841 {
1842         struct nlattr **a = info->attrs;
1843         struct sk_buff *reply;
1844         struct vport *vport;
1845         int err;
1846
1847         reply = ovs_vport_cmd_alloc_info();
1848         if (!reply)
1849                 return -ENOMEM;
1850
1851         ovs_lock();
1852         vport = lookup_vport(sock_net(skb->sk), info->userhdr, a);
1853         err = PTR_ERR(vport);
1854         if (IS_ERR(vport))
1855                 goto exit_unlock_free;
1856
1857         if (vport->port_no == OVSP_LOCAL) {
1858                 err = -EINVAL;
1859                 goto exit_unlock_free;
1860         }
1861
1862         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1863                                       info->snd_seq, 0, OVS_VPORT_CMD_DEL);
1864         BUG_ON(err < 0);
1865         ovs_dp_detach_port(vport);
1866         ovs_unlock();
1867
1868         ovs_notify(&dp_vport_genl_family, reply, info);
1869         return 0;
1870
1871 exit_unlock_free:
1872         ovs_unlock();
1873         kfree_skb(reply);
1874         return err;
1875 }
1876
1877 static int ovs_vport_cmd_get(struct sk_buff *skb, struct genl_info *info)
1878 {
1879         struct nlattr **a = info->attrs;
1880         struct ovs_header *ovs_header = info->userhdr;
1881         struct sk_buff *reply;
1882         struct vport *vport;
1883         int err;
1884
1885         reply = ovs_vport_cmd_alloc_info();
1886         if (!reply)
1887                 return -ENOMEM;
1888
1889         rcu_read_lock();
1890         vport = lookup_vport(sock_net(skb->sk), ovs_header, a);
1891         err = PTR_ERR(vport);
1892         if (IS_ERR(vport))
1893                 goto exit_unlock_free;
1894         err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
1895                                       info->snd_seq, 0, OVS_VPORT_CMD_NEW);
1896         BUG_ON(err < 0);
1897         rcu_read_unlock();
1898
1899         return genlmsg_reply(reply, info);
1900
1901 exit_unlock_free:
1902         rcu_read_unlock();
1903         kfree_skb(reply);
1904         return err;
1905 }
1906
1907 static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1908 {
1909         struct ovs_header *ovs_header = genlmsg_data(nlmsg_data(cb->nlh));
1910         struct datapath *dp;
1911         int bucket = cb->args[0], skip = cb->args[1];
1912         int i, j = 0;
1913
1914         rcu_read_lock();
1915         dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
1916         if (!dp) {
1917                 rcu_read_unlock();
1918                 return -ENODEV;
1919         }
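        /* Resume from the hash bucket and in-bucket offset that the
         * previous dump pass saved in cb->args[].
         */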
1920         for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
1921                 struct vport *vport;
1922
1923                 j = 0;
1924                 hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
1925                         if (j >= skip &&
1926                             ovs_vport_cmd_fill_info(vport, skb,
1927                                                     NETLINK_CB(cb->skb).portid,
1928                                                     cb->nlh->nlmsg_seq,
1929                                                     NLM_F_MULTI,
1930                                                     OVS_VPORT_CMD_NEW) < 0)
1931                                 goto out;
1932
1933                         j++;
1934                 }
1935                 skip = 0;
1936         }
1937 out:
1938         rcu_read_unlock();
1939
1940         cb->args[0] = i;
1941         cb->args[1] = j;
1942
1943         return skb->len;
1944 }
1945
1946 static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
1947         [OVS_VPORT_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
1948         [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
1949         [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
1950         [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
1951         [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
1952         [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
1953 };
1954
1955 static const struct genl_ops dp_vport_genl_ops[] = {
1956         { .cmd = OVS_VPORT_CMD_NEW,
1957           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1958           .policy = vport_policy,
1959           .doit = ovs_vport_cmd_new
1960         },
1961         { .cmd = OVS_VPORT_CMD_DEL,
1962           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1963           .policy = vport_policy,
1964           .doit = ovs_vport_cmd_del
1965         },
1966         { .cmd = OVS_VPORT_CMD_GET,
1967           .flags = 0,               /* OK for unprivileged users. */
1968           .policy = vport_policy,
1969           .doit = ovs_vport_cmd_get,
1970           .dumpit = ovs_vport_cmd_dump
1971         },
1972         { .cmd = OVS_VPORT_CMD_SET,
1973           .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
1974           .policy = vport_policy,
1975           .doit = ovs_vport_cmd_set,
1976         },
1977 };
1978
1979 struct genl_family dp_vport_genl_family = {
1980         .id = GENL_ID_GENERATE,
1981         .hdrsize = sizeof(struct ovs_header),
1982         .name = OVS_VPORT_FAMILY,
1983         .version = OVS_VPORT_VERSION,
1984         .maxattr = OVS_VPORT_ATTR_MAX,
1985         .netnsok = true,
1986         .parallel_ops = true,
1987         .ops = dp_vport_genl_ops,
1988         .n_ops = ARRAY_SIZE(dp_vport_genl_ops),
1989         .mcgrps = &ovs_dp_vport_multicast_group,
1990         .n_mcgrps = 1,
1991 };
1992
1993 static struct genl_family * const dp_genl_families[] = {
1994         &dp_datapath_genl_family,
1995         &dp_vport_genl_family,
1996         &dp_flow_genl_family,
1997         &dp_packet_genl_family,
1998 };
1999
2000 static void dp_unregister_genl(int n_families)
2001 {
2002         int i;
2003
2004         for (i = 0; i < n_families; i++)
2005                 genl_unregister_family(dp_genl_families[i]);
2006 }
2007
2008 static int dp_register_genl(void)
2009 {
2010         int err;
2011         int i;
2012
2013         for (i = 0; i < ARRAY_SIZE(dp_genl_families); i++) {
2014
2015                 err = genl_register_family(dp_genl_families[i]);
2016                 if (err)
2017                         goto error;
2018         }
2019
2020         return 0;
2021
2022 error:
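        /* Unregister only the families that were registered before the
         * failure.
         */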
2023         dp_unregister_genl(i);
2024         return err;
2025 }
2026
2027 static int __net_init ovs_init_net(struct net *net)
2028 {
2029         struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2030
2031         INIT_LIST_HEAD(&ovs_net->dps);
2032         INIT_WORK(&ovs_net->dp_notify_work, ovs_dp_notify_wq);
2033         return 0;
2034 }
2035
2036 static void __net_exit ovs_exit_net(struct net *net)
2037 {
2038         struct datapath *dp, *dp_next;
2039         struct ovs_net *ovs_net = net_generic(net, ovs_net_id);
2040
2041         ovs_lock();
2042         list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node)
2043                 __dp_destroy(dp);
2044         ovs_unlock();
2045
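        /* Make sure the per-net notify work is neither queued nor running
         * once the namespace is torn down.
         */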
2046         cancel_work_sync(&ovs_net->dp_notify_work);
2047 }
2048
2049 static struct pernet_operations ovs_net_ops = {
2050         .init = ovs_init_net,
2051         .exit = ovs_exit_net,
2052         .id   = &ovs_net_id,
2053         .size = sizeof(struct ovs_net),
2054 };
2055
2056 static int __init dp_init(void)
2057 {
2058         int err;
2059
2060         BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
2061
2062         pr_info("Open vSwitch switching datapath\n");
2063
2064         err = ovs_internal_dev_rtnl_link_register();
2065         if (err)
2066                 goto error;
2067
2068         err = ovs_flow_init();
2069         if (err)
2070                 goto error_unreg_rtnl_link;
2071
2072         err = ovs_vport_init();
2073         if (err)
2074                 goto error_flow_exit;
2075
2076         err = register_pernet_device(&ovs_net_ops);
2077         if (err)
2078                 goto error_vport_exit;
2079
2080         err = register_netdevice_notifier(&ovs_dp_device_notifier);
2081         if (err)
2082                 goto error_netns_exit;
2083
2084         err = dp_register_genl();
2085         if (err < 0)
2086                 goto error_unreg_notifier;
2087
2088         return 0;
2089
2090 error_unreg_notifier:
2091         unregister_netdevice_notifier(&ovs_dp_device_notifier);
2092 error_netns_exit:
2093         unregister_pernet_device(&ovs_net_ops);
2094 error_vport_exit:
2095         ovs_vport_exit();
2096 error_flow_exit:
2097         ovs_flow_exit();
2098 error_unreg_rtnl_link:
2099         ovs_internal_dev_rtnl_link_unregister();
2100 error:
2101         return err;
2102 }
2103
2104 static void dp_cleanup(void)
2105 {
2106         dp_unregister_genl(ARRAY_SIZE(dp_genl_families));
2107         unregister_netdevice_notifier(&ovs_dp_device_notifier);
2108         unregister_pernet_device(&ovs_net_ops);
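        /* Wait for outstanding RCU callbacks (e.g. destroy_dp_rcu) to run
         * before the flow and vport subsystems are torn down.
         */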
2109         rcu_barrier();
2110         ovs_vport_exit();
2111         ovs_flow_exit();
2112         ovs_internal_dev_rtnl_link_unregister();
2113 }
2114
2115 module_init(dp_init);
2116 module_exit(dp_cleanup);
2117
2118 MODULE_DESCRIPTION("Open vSwitch switching datapath");
2119 MODULE_LICENSE("GPL");