Merge branch 'v3.10/topic/misc' into linux-linaro-lsk
[firefly-linux-kernel-4.4.55.git] / net / ipv6 / ip6_output.c
1 /*
2  *      IPv6 output functions
3  *      Linux INET6 implementation
4  *
5  *      Authors:
6  *      Pedro Roque             <roque@di.fc.ul.pt>
7  *
8  *      Based on linux/net/ipv4/ip_output.c
9  *
10  *      This program is free software; you can redistribute it and/or
11  *      modify it under the terms of the GNU General Public License
12  *      as published by the Free Software Foundation; either version
13  *      2 of the License, or (at your option) any later version.
14  *
15  *      Changes:
16  *      A.N.Kuznetsov   :       airthmetics in fragmentation.
17  *                              extension headers are implemented.
18  *                              route changes now work.
19  *                              ip6_forward does not confuse sniffers.
20  *                              etc.
21  *
22  *      H. von Brand    :       Added missing #include <linux/string.h>
23  *      Imran Patel     :       frag id should be in NBO
24  *      Kazunori MIYAZAWA @USAGI
25  *                      :       add ip6_append_data and related functions
26  *                              for datagram xmit
27  */
28
29 #include <linux/errno.h>
30 #include <linux/kernel.h>
31 #include <linux/string.h>
32 #include <linux/socket.h>
33 #include <linux/net.h>
34 #include <linux/netdevice.h>
35 #include <linux/if_arp.h>
36 #include <linux/in6.h>
37 #include <linux/tcp.h>
38 #include <linux/route.h>
39 #include <linux/module.h>
40 #include <linux/slab.h>
41
42 #include <linux/netfilter.h>
43 #include <linux/netfilter_ipv6.h>
44
45 #include <net/sock.h>
46 #include <net/snmp.h>
47
48 #include <net/ipv6.h>
49 #include <net/ndisc.h>
50 #include <net/protocol.h>
51 #include <net/ip6_route.h>
52 #include <net/addrconf.h>
53 #include <net/rawv6.h>
54 #include <net/icmp.h>
55 #include <net/xfrm.h>
56 #include <net/checksum.h>
57 #include <linux/mroute6.h>
58
59 int __ip6_local_out(struct sk_buff *skb)
60 {
61         int len;
62
63         len = skb->len - sizeof(struct ipv6hdr);
64         if (len > IPV6_MAXPLEN)
65                 len = 0;
66         ipv6_hdr(skb)->payload_len = htons(len);
67
68         return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
69                        skb_dst(skb)->dev, dst_output);
70 }
71
/* Run the LOCAL_OUT hook and, if the packet was accepted, hand it to
 * the route's output function.
 */
int ip6_local_out(struct sk_buff *skb)
{
	int rc = __ip6_local_out(skb);

	/* A return of 1 from the hook means "okay, keep going". */
	if (likely(rc == 1))
		rc = dst_output(skb);

	return rc;
}
EXPORT_SYMBOL_GPL(ip6_local_out);
83
/* Deliver a packet to its layer-2 neighbour.
 *
 * Handles multicast loopback/pruning first, then resolves (creating on
 * demand) the neighbour entry for the route's next hop and transmits.
 * Consumes the skb on every path.
 */
static int ip6_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		/* Loop a copy back to local listeners when multicast
		 * loopback applies: either a multicast-router socket is
		 * present and the packet was not already forwarded, or a
		 * local socket has joined this group on this device.
		 */
		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
		    ((mroute6_socket(dev_net(dev), skb) &&
		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
					 &ipv6_hdr(skb)->saddr))) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
					newskb, NULL, newskb->dev,
					dev_loopback_xmit);

			/* hop_limit 0 means "must not leave this host";
			 * the looped-back copy above is all that is sent.
			 */
			if (ipv6_hdr(skb)->hop_limit == 0) {
				IP6_INC_STATS(dev_net(dev), idev,
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
				skb->len);

		/* Node-local scope multicast must never hit the wire. */
		if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
		    IPV6_ADDR_SCOPE_NODELOCAL &&
		    !(dev->flags & IFF_LOOPBACK)) {
			kfree_skb(skb);
			return 0;
		}
	}

	/* Resolve the neighbour under rcu_read_lock_bh(); create an
	 * entry on demand if the lookup misses.
	 */
	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		ret = dst_neigh_output(dst, neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}
149
150 static int ip6_finish_output(struct sk_buff *skb)
151 {
152         if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
153             dst_allfrag(skb_dst(skb)) ||
154             (IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
155                 return ip6_fragment(skb, ip6_finish_output2);
156         else
157                 return ip6_finish_output2(skb);
158 }
159
160 int ip6_output(struct sk_buff *skb)
161 {
162         struct net_device *dev = skb_dst(skb)->dev;
163         struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
164         if (unlikely(idev->cnf.disable_ipv6)) {
165                 IP6_INC_STATS(dev_net(dev), idev,
166                               IPSTATS_MIB_OUTDISCARDS);
167                 kfree_skb(skb);
168                 return 0;
169         }
170
171         return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
172                             ip6_finish_output,
173                             !(IP6CB(skb)->flags & IP6SKB_REROUTED));
174 }
175
176 /*
177  *      xmit an sk_buff (used by TCP, SCTP and DCCP)
178  */
179
/* Transmit a fully-built transport payload for a connected socket
 * (TCP/SCTP/DCCP): push extension headers and the IPv6 header, then
 * send through the LOCAL_OUT hook.
 *
 * @sk:     owning socket (supplies hop limit, priority, mark)
 * @skb:    payload; must carry a dst
 * @fl6:    flow describing saddr/daddr/proto/flowlabel
 * @opt:    optional extension headers to prepend
 * @tclass: traffic class for the IPv6 header
 *
 * Returns 0/NF verdict on success, -ENOBUFS on headroom allocation
 * failure, -EMSGSIZE if the packet exceeds the path MTU and may not
 * be fragmented.  Consumes the skb on every path.
 */
int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
	     struct ipv6_txoptions *opt, int tclass)
{
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl6->daddr;
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr;
	u8  proto = fl6->flowi6_proto;
	int seg_len = skb->len;
	int hlimit = -1;
	u32 mtu;

	if (opt) {
		unsigned int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		/* Reallocate headroom when the extension headers plus the
		 * IPv6 and link-layer headers do not fit in front of the
		 * payload.
		 */
		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (skb2 == NULL) {
				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			consume_skb(skb);
			skb = skb2;
			/* Charge the replacement skb to the socket. */
			skb_set_owner_w(skb, sk);
		}
		/* Push fragmentable then non-fragmentable extension headers;
		 * a routing header may rewrite first_hop.
		 */
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/*
	 *	Fill in the IPv6 header
	 */
	if (np)
		hlimit = np->hop_limit;
	/* Socket did not pin a hop limit: fall back to the route's. */
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);

	ip6_flow_hdr(hdr, tclass, fl6->flowlabel);

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	hdr->saddr = fl6->saddr;
	hdr->daddr = *first_hop;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	mtu = dst_mtu(dst);
	/* Send if it fits the MTU, local fragmentation is permitted,
	 * or the packet will be segmented in hardware (GSO).
	 */
	if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_OUT, skb->len);
		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
			       dst->dev, dst_output);
	}

	/* Too big and not allowed to fragment: report EMSGSIZE locally. */
	skb->dev = dst->dev;
	ipv6_local_error(sk, EMSGSIZE, fl6, mtu);
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}

EXPORT_SYMBOL(ip6_xmit);
261
/* Deliver a Router Alert packet to every raw socket registered for the
 * matching RA selector value.  Returns 1 if at least one socket consumed
 * the skb, 0 otherwise (the caller retains ownership of the skb).
 */
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		/* Match the selector and honour SO_BINDTODEVICE. */
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			/* Clone for every receiver except the last one
			 * seen; the final receiver gets the original skb,
			 * saving one copy.
			 */
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}
290
/* Classify a packet whose destination address is NDP-proxied.
 *
 * Returns:
 *   1  - NDISC message; hand to local input instead of forwarding
 *   0  - continue normal forwarding
 *  -1  - discard (link-local destinations cannot be proxied)
 */
static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	u8 nexthdr = hdr->nexthdr;
	__be16 frag_off;
	int offset;

	/* Walk any extension headers to reach the transport protocol. */
	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		/* Make sure at least the ICMPv6 type byte is linear. */
		if (!pskb_may_pull(skb, (skb_network_header(skb) +
					 offset + 1 - skb->data)))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			/* For reaction involving unicast neighbor discovery
			 * message destined to the proxied address, pass it to
			 * input function.
			 */
			return 1;
		default:
			break;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}
342
/* Final step of ip6_forward() once the netfilter FORWARD hook accepts:
 * hand the packet to the route's output function.
 */
static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}
347
/* Return true when @skb cannot be forwarded at @mtu and the sender must
 * be told with ICMPV6_PKT_TOOBIG.  NOTE: the guard order matters — the
 * conntrack frag_max_size check must precede the local_df check, since
 * defrag sets local_df but the recorded fragment size still governs.
 */
static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
	/* Fits as-is. */
	if (skb->len <= mtu)
		return false;

	/* ipv6 conntrack defrag sets max_frag_size + local_df */
	if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
		return true;

	/* Fragmentation is permitted for this packet. */
	if (skb->local_df)
		return false;

	/* GSO packet whose individual segments all fit the MTU. */
	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
		return false;

	return true;
}
365
/* Forward a received packet towards its destination.
 *
 * Performs the standard router checks — forwarding enabled, LRO, xfrm
 * policy, hop limit, Router Alert, proxy NDP, redirect generation,
 * source-address sanity, and path-MTU enforcement — then hands the
 * packet to the netfilter FORWARD hook with ip6_forward_finish() as the
 * final step.  Consumes the skb on every path.
 */
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(dst->dev);
	u32 mtu;

	if (net->ipv6.devconf_all->forwarding == 0)
		goto error;

	/* LRO-merged super-frames must not be forwarded; warn and drop. */
	if (skb_warn_if_lro(skb))
		goto drop;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	/* Only forward frames that were addressed to us at layer 2. */
	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	skb_forward_csum(skb);

	/*
	 *	We DO NOT make any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without ane WARRANTY that application will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not end-node, so that if packet contains
	 *	AH/ESP, we cannot make anything.
	 *	Defragmentation also would be mistake, RA packets
	 *	cannot be fragmented, because there is no warranty
	 *	that different fragments will go along one path. --ANK
	 */
	if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
		if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (net->ipv6.devconf_all->proxy_ndp &&
	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			IP6_INC_STATS(net, ip6_dst_idev(dst),
				      IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	/* xfrm may have rerouted the packet; reload the dst. */
	dst = skb_dst(skb);

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	   We don't send redirects to frames decapsulated from IPsec.
	 */
	if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
		struct in6_addr *target = NULL;
		struct inet_peer *peer;
		struct rt6_info *rt;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if (rt->rt6i_flags & RTF_GATEWAY)
			target = &rt->rt6i_gateway;
		else
			target = &hdr->daddr;

		peer = inet_getpeer_v6(net->ipv6.peers, &rt->rt6i_dst.addr, 1);

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (inet_peer_xrlim_allow(peer, 1*HZ))
			ndisc_send_redirect(skb, target);
		if (peer)
			inet_putpeer(peer);
	} else {
		int addrtype = ipv6_addr_type(&hdr->saddr);

		/* This check is security critical. */
		if (addrtype == IPV6_ADDR_ANY ||
		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
			goto error;
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
				    ICMPV6_NOT_NEIGHBOUR, 0);
			goto error;
		}
	}

	/* Every IPv6 link must support at least IPV6_MIN_MTU (1280). */
	mtu = dst_mtu(dst);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

	if (ip6_pkt_too_big(skb, mtu)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	/* skb_cow() may have copied the header; reload the pointer. */
	hdr = ipv6_hdr(skb);

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}
522
/* Propagate per-packet metadata (type, priority, dst, mark, scheduling
 * and netfilter state) from the original skb to a freshly built fragment.
 */
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	/* Each fragment takes its own reference on the route. */
	skb_dst_drop(to);
	skb_dst_set(to, dst_clone(skb_dst(from)));
	to->dev = from->dev;
	to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
	to->nf_trace = from->nf_trace;
#endif
	skb_copy_secmark(to, from);
}
542
/* Fragment @skb to the path MTU and pass each fragment to @output.
 *
 * Two strategies:
 *  - fast path: if the skb already carries a frag list whose pieces are
 *    correctly sized and 8-byte aligned, reuse those buffers and just
 *    prepend a fragment header to each;
 *  - slow path: allocate fresh skbs and copy the payload piecewise.
 *
 * Consumes @skb.  Returns 0 on success or a negative errno; sends
 * ICMPV6_PKT_TOOBIG when fragmentation is forbidden for this packet.
 */
int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info*)skb_dst(skb);
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	int hroom, troom;
	__be32 frag_id = 0;
	int ptr, offset = 0, err=0;
	u8 *prevhdr, nexthdr = 0;
	struct net *net = dev_net(skb_dst(skb)->dev);

	/* hlen = length of the unfragmentable part: everything up to and
	 * including the last header that must appear in every fragment.
	 */
	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb it not generated by a local socket.
	 */
	if (unlikely(!skb->local_df && skb->len > mtu) ||
		     (IP6CB(skb)->frag_max_size &&
		      IP6CB(skb)->frag_max_size > mtu)) {
		if (skb->sk && dst_allfrag(skb_dst(skb)))
			sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);

		skb->dev = skb_dst(skb)->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/* Honour a smaller per-socket fragment size if one is set. */
	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	/* From here on, mtu is the maximum payload per fragment. */
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_has_frag_list(skb)) {
		int first_len = skb_pagelen(skb);
		struct sk_buff *frag2;

		/* Fast path requires correctly sized, unshared pieces. */
		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			/* Transfer socket accounting to each fragment. */
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		/* Insert the fragment header between the unfragmentable
		 * part and the payload of the first fragment.
		 */
		__skb_pull(skb, hlen);
		fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		ipv6_select_ident(fh, rt);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		dst_hold(&rt->dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if(!err)
				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
				      IPSTATS_MIB_FRAGOKS);
			ip6_rt_put(rt);
			return 0;
		}

		/* Output failed mid-stream: free the fragments not sent. */
		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
			      IPSTATS_MIB_FRAGFAILS);
		ip6_rt_put(rt);
		return err;

slow_path_clean:
		/* Undo the socket-ownership transfer done above. */
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	/* Checksum must be finalized before the payload is split. */
	if ((skb->ip_summed == CHECKSUM_PARTIAL) &&
	    skb_checksum_help(skb))
		goto fail;

	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;
	hroom = LL_RESERVED_SPACE(rt->dst.dev);
	troom = rt->dst.dev->needed_tailroom;

	/*
	 *	Keep copying data until we run out.
	 */
	while(left > 0)	{
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)	{
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				      hroom + troom, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, hroom);
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		/* First fragment picks the identification; the rest reuse it. */
		if (!frag_id) {
			ipv6_select_ident(fh, rt);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGOKS);
	consume_skb(skb);
	return err;

fail:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}
828
829 static inline int ip6_rt_check(const struct rt6key *rt_key,
830                                const struct in6_addr *fl_addr,
831                                const struct in6_addr *addr_cache)
832 {
833         return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
834                 (addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
835 }
836
837 static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
838                                           struct dst_entry *dst,
839                                           const struct flowi6 *fl6)
840 {
841         struct ipv6_pinfo *np = inet6_sk(sk);
842         struct rt6_info *rt;
843
844         if (!dst)
845                 goto out;
846
847         if (dst->ops->family != AF_INET6) {
848                 dst_release(dst);
849                 return NULL;
850         }
851
852         rt = (struct rt6_info *)dst;
853         /* Yes, checking route validity in not connected
854          * case is not very simple. Take into account,
855          * that we do not support routing by source, TOS,
856          * and MSG_DONTROUTE            --ANK (980726)
857          *
858          * 1. ip6_rt_check(): If route was host route,
859          *    check that cached destination is current.
860          *    If it is network route, we still may
861          *    check its validity using saved pointer
862          *    to the last used address: daddr_cache.
863          *    We do not want to save whole address now,
864          *    (because main consumer of this service
865          *    is tcp, which has not this problem),
866          *    so that the last trick works only on connected
867          *    sockets.
868          * 2. oif also should be the same.
869          */
870         if (ip6_rt_check(&rt->rt6i_dst, &fl6->daddr, np->daddr_cache) ||
871 #ifdef CONFIG_IPV6_SUBTREES
872             ip6_rt_check(&rt->rt6i_src, &fl6->saddr, np->saddr_cache) ||
873 #endif
874             (fl6->flowi6_oif && fl6->flowi6_oif != dst->dev->ifindex)) {
875                 dst_release(dst);
876                 dst = NULL;
877         }
878
879 out:
880         return dst;
881 }
882
/*
 * ip6_dst_lookup_tail - finish a route lookup for @fl6 into *dst.
 *
 * If *dst is NULL a routing lookup is performed; otherwise the supplied
 * dst is used as-is.  A source address is selected when the flow does not
 * specify one.  On failure the dst is released, *dst is set to NULL and a
 * negative errno is returned; on success *dst holds a referenced dst.
 */
static int ip6_dst_lookup_tail(struct sock *sk,
			       struct dst_entry **dst, struct flowi6 *fl6)
{
	struct net *net = sock_net(sk);
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	struct neighbour *n;
	struct rt6_info *rt;
#endif
	int err;

	/* Caller may already have a dst (socket cache path); only do the
	 * routing table lookup when none was supplied.
	 */
	if (*dst == NULL)
		*dst = ip6_route_output(net, sk, fl6);

	/* ip6_route_output() never returns NULL; errors are carried in
	 * the dst itself.
	 */
	if ((err = (*dst)->error))
		goto out_err_release;

	/* Flow has no source address yet: derive one from the route,
	 * honouring the socket's address-selection preferences if any.
	 */
	if (ipv6_addr_any(&fl6->saddr)) {
		struct rt6_info *rt = (struct rt6_info *) *dst;
		err = ip6_route_get_saddr(net, rt, &fl6->daddr,
					  sk ? inet6_sk(sk)->srcprefs : 0,
					  &fl6->saddr);
		if (err)
			goto out_err_release;
	}

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/*
	 * Here if the dst entry we've looked up
	 * has a neighbour entry that is in the INCOMPLETE
	 * state and the src address from the flow is
	 * marked as OPTIMISTIC, we release the found
	 * dst entry and replace it instead with the
	 * dst entry of the nexthop router
	 */
	rt = (struct rt6_info *) *dst;
	rcu_read_lock_bh();
	n = __ipv6_neigh_lookup_noref(rt->dst.dev, rt6_nexthop(rt));
	/* A neighbour entry that exists but is not yet VALID triggers the
	 * optimistic-address redirect check below.
	 */
	err = n && !(n->nud_state & NUD_VALID) ? -EINVAL : 0;
	rcu_read_unlock_bh();

	if (err) {
		struct inet6_ifaddr *ifp;
		struct flowi6 fl_gw6;
		int redirect;

		ifp = ipv6_get_ifaddr(net, &fl6->saddr,
				      (*dst)->dev, 1);

		/* Only redirect via the default router when the source
		 * address is still in OPTIMISTIC DAD state.
		 */
		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
		if (ifp)
			in6_ifa_put(ifp);

		if (redirect) {
			/*
			 * We need to get the dst entry for the
			 * default router instead
			 */
			dst_release(*dst);
			memcpy(&fl_gw6, fl6, sizeof(struct flowi6));
			/* Zero daddr so the lookup resolves the default
			 * route rather than the original destination.
			 */
			memset(&fl_gw6.daddr, 0, sizeof(struct in6_addr));
			*dst = ip6_route_output(net, sk, &fl_gw6);
			if ((err = (*dst)->error))
				goto out_err_release;
		}
	}
#endif

	return 0;

out_err_release:
	if (err == -ENETUNREACH)
		IP6_INC_STATS_BH(net, NULL, IPSTATS_MIB_OUTNOROUTES);
	dst_release(*dst);
	*dst = NULL;
	return err;
}
959
960 /**
961  *      ip6_dst_lookup - perform route lookup on flow
962  *      @sk: socket which provides route info
963  *      @dst: pointer to dst_entry * for result
964  *      @fl6: flow to lookup
965  *
966  *      This function performs a route lookup on the given flow.
967  *
968  *      It returns zero on success, or a standard errno code on error.
969  */
970 int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi6 *fl6)
971 {
972         *dst = NULL;
973         return ip6_dst_lookup_tail(sk, dst, fl6);
974 }
975 EXPORT_SYMBOL_GPL(ip6_dst_lookup);
976
977 /**
978  *      ip6_dst_lookup_flow - perform route lookup on flow with ipsec
979  *      @sk: socket which provides route info
980  *      @fl6: flow to lookup
981  *      @final_dst: final destination address for ipsec lookup
982  *      @can_sleep: we are in a sleepable context
983  *
984  *      This function performs a route lookup on the given flow.
985  *
986  *      It returns a valid dst pointer on success, or a pointer encoded
987  *      error code.
988  */
989 struct dst_entry *ip6_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
990                                       const struct in6_addr *final_dst,
991                                       bool can_sleep)
992 {
993         struct dst_entry *dst = NULL;
994         int err;
995
996         err = ip6_dst_lookup_tail(sk, &dst, fl6);
997         if (err)
998                 return ERR_PTR(err);
999         if (final_dst)
1000                 fl6->daddr = *final_dst;
1001         if (can_sleep)
1002                 fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
1003
1004         return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
1005 }
1006 EXPORT_SYMBOL_GPL(ip6_dst_lookup_flow);
1007
1008 /**
1009  *      ip6_sk_dst_lookup_flow - perform socket cached route lookup on flow
1010  *      @sk: socket which provides the dst cache and route info
1011  *      @fl6: flow to lookup
1012  *      @final_dst: final destination address for ipsec lookup
1013  *      @can_sleep: we are in a sleepable context
1014  *
1015  *      This function performs a route lookup on the given flow with the
1016  *      possibility of using the cached route in the socket if it is valid.
1017  *      It will take the socket dst lock when operating on the dst cache.
1018  *      As a result, this function can only be used in process context.
1019  *
1020  *      It returns a valid dst pointer on success, or a pointer encoded
1021  *      error code.
1022  */
1023 struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
1024                                          const struct in6_addr *final_dst,
1025                                          bool can_sleep)
1026 {
1027         struct dst_entry *dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
1028         int err;
1029
1030         dst = ip6_sk_dst_check(sk, dst, fl6);
1031
1032         err = ip6_dst_lookup_tail(sk, &dst, fl6);
1033         if (err)
1034                 return ERR_PTR(err);
1035         if (final_dst)
1036                 fl6->daddr = *final_dst;
1037         if (can_sleep)
1038                 fl6->flowi6_flags |= FLOWI_FLAG_CAN_SLEEP;
1039
1040         return xfrm_lookup(sock_net(sk), dst, flowi6_to_flowi(fl6), sk, 0);
1041 }
1042 EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup_flow);
1043
/*
 * ip6_ufo_append_data - queue data as one large UFO skb.
 *
 * Used by ip6_append_data() when the device supports UDP fragmentation
 * offload: a single oversized skb holds the whole datagram and the
 * device/GSO layer segments it later.  On the first call (empty write
 * queue) the skb with headers and GSO metadata is created; subsequent
 * calls just append more payload to it.  Returns 0 or a negative errno.
 */
static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu,unsigned int flags,
			struct rt6_info *rt)

{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP large send offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		struct frag_hdr fhdr;

		/* Header space plus 20 bytes of slack. */
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb,fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		/* Checksum is completed by the offloading device. */
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;

		/* Specify the length of each IPv6 datagram fragment.
		 * It has to be a multiple of 8.
		 */
		skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
					     sizeof(struct frag_hdr)) & ~7;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		/* One fragment id is shared by all segments cut from this
		 * datagram; only fhdr.identification is consumed here.
		 */
		ipv6_select_ident(&fhdr, rt);
		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
		__skb_queue_tail(&sk->sk_write_queue, skb);
	}

	/* Append payload only; the transport header was accounted for in
	 * the skb_put() above.
	 */
	return skb_append_datato_frags(sk, skb, getfrag, from,
				       (length - transhdrlen));
}
1097
1098 static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
1099                                                gfp_t gfp)
1100 {
1101         return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1102 }
1103
1104 static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
1105                                                 gfp_t gfp)
1106 {
1107         return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1108 }
1109
1110 static void ip6_append_data_mtu(unsigned int *mtu,
1111                                 int *maxfraglen,
1112                                 unsigned int fragheaderlen,
1113                                 struct sk_buff *skb,
1114                                 struct rt6_info *rt,
1115                                 unsigned int orig_mtu)
1116 {
1117         if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
1118                 if (skb == NULL) {
1119                         /* first fragment, reserve header_len */
1120                         *mtu = orig_mtu - rt->dst.header_len;
1121
1122                 } else {
1123                         /*
1124                          * this fragment is not first, the headers
1125                          * space is regarded as data space.
1126                          */
1127                         *mtu = orig_mtu;
1128                 }
1129                 *maxfraglen = ((*mtu - fragheaderlen) & ~7)
1130                               + fragheaderlen - sizeof(struct frag_hdr);
1131         }
1132 }
1133
/*
 * ip6_append_data - append user data to the socket's pending (corked) queue.
 *
 * Builds MTU-sized skbs on sk->sk_write_queue, leaving room for the IPv6
 * header, extension headers and a fragment header.  The first call on an
 * empty write queue sets up the cork state (duplicated options, held
 * route, negotiated mtu); later calls reuse that state and ignore the
 * opt/hlimit/tclass/rt arguments.  @getfrag copies user data into the
 * skbs.  ip6_push_pending_frames() later transmits the queue.
 * Returns 0 on success or a negative errno.
 */
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
	struct rt6_info *rt, unsigned int flags, int dontfrag)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_cork *cork;
	struct sk_buff *skb, *skb_prev = NULL;
	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
	int exthdrlen;
	int dst_exthdrlen;
	int hh_len;
	int copy;
	int err;
	int offset = 0;
	__u8 tx_flags = 0;

	/* MSG_PROBE: path MTU discovery probe, nothing is queued. */
	if (flags&MSG_PROBE)
		return 0;
	cork = &inet->cork.base;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (WARN_ON(np->cork.opt))
				return -EINVAL;

			np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
			if (unlikely(np->cork.opt == NULL))
				return -ENOBUFS;

			np->cork.opt->tot_len = opt->tot_len;
			np->cork.opt->opt_flen = opt->opt_flen;
			np->cork.opt->opt_nflen = opt->opt_nflen;

			/* Duplicate each extension header so the cork owns
			 * copies independent of the caller's buffers; the
			 * copies are freed by ip6_cork_release().
			 */
			np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
							    sk->sk_allocation);
			if (opt->dst0opt && !np->cork.opt->dst0opt)
				return -ENOBUFS;

			np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
							    sk->sk_allocation);
			if (opt->dst1opt && !np->cork.opt->dst1opt)
				return -ENOBUFS;

			np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
							   sk->sk_allocation);
			if (opt->hopopt && !np->cork.opt->hopopt)
				return -ENOBUFS;

			np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
							    sk->sk_allocation);
			if (opt->srcrt && !np->cork.opt->srcrt)
				return -ENOBUFS;

			/* need source address above miyazawa*/
		}
		/* The cork holds its own reference on the route. */
		dst_hold(&rt->dst);
		cork->dst = &rt->dst;
		inet->cork.fl.u.ip6 = *fl6;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		/* Pick the working MTU: device MTU when probing PMTU,
		 * otherwise the (path) dst MTU; an XFRM tunnel uses the
		 * outer dst directly.
		 */
		if (rt->dst.flags & DST_XFRM_TUNNEL)
			mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
			      rt->dst.dev->mtu : dst_mtu(&rt->dst);
		else
			mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
			      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
		/* A smaller per-socket IPV6_MTU setting wins. */
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		cork->fragsize = mtu;
		if (dst_allfrag(rt->dst.path))
			cork->flags |= IPCORK_ALLFRAG;
		cork->length = 0;
		/* opt_flen bytes of fragmentable extension headers are
		 * counted into the first fragment's payload.
		 */
		exthdrlen = (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
	} else {
		/* Subsequent append: restore everything from the cork and
		 * ignore the per-call route/options/header arguments.
		 */
		rt = (struct rt6_info *)cork->dst;
		fl6 = &inet->cork.fl.u.ip6;
		opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		dst_exthdrlen = 0;
		mtu = cork->fragsize;
	}
	orig_mtu = mtu;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	/* Per-fragment header overhead: IPv6 header, nonfragmentable
	 * extension headers of the route and of the options.
	 */
	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
	/* Largest 8-byte-aligned fragment payload plus headers. */
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);

	/* Refuse to grow the corked datagram past the 64K IPv6 payload
	 * limit (jumbograms are not handled here).
	 */
	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen);
			return -EMSGSIZE;
		}
	}

	/* For UDP, check if TX timestamp is enabled */
	if (sk->sk_type == SOCK_DGRAM)
		sock_tx_timestamp(sk, &tx_flags);

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octects, in total).
	 *
	 * Note that we may need to "move" the data from the tail of
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	/* IPV6_DONTFRAG: report oversize datagrams instead of
	 * fragmenting them (UDP and raw sockets only).
	 */
	if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
					   sk->sk_protocol == IPPROTO_RAW)) {
		ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
		return -EMSGSIZE;
	}

	skb = skb_peek_tail(&sk->sk_write_queue);
	cork->length += length;
	/* UFO fast path: hand the whole oversized UDP datagram to the
	 * device as a single skb when it supports UDP offload.
	 */
	if (((length > mtu) ||
	     (skb && skb_has_frags(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO)) {
		err = ip6_ufo_append_data(sk, getfrag, from, length,
					  hh_len, fragheaderlen,
					  transhdrlen, mtu, flags, rt);
		if (err)
			goto error;
		return 0;
	}

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
alloc_new_skb:
			/* There's no room in the current skb */
			if (skb)
				fraggap = skb->len - maxfraglen;
			else
				fraggap = 0;
			/* update mtu and maxfraglen if necessary */
			if (skb == NULL || skb_prev == NULL)
				ip6_append_data_mtu(&mtu, &maxfraglen,
						    fragheaderlen, skb, rt,
						    orig_mtu);

			skb_prev = skb;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;

			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
			/* Without scatter-gather, MSG_MORE needs a full
			 * MTU buffer so later appends can fill it.
			 */
			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			alloclen += dst_exthdrlen;

			if (datalen != length + fraggap) {
				/*
				 * this is not the last fragment, the trailer
				 * space is regarded as data space.
				 */
				datalen += rt->dst.trailer_len;
			}

			alloclen += rt->dst.trailer_len;
			fraglen = datalen + fragheaderlen;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				/* First skb of the datagram: may block
				 * waiting for send buffer space.
				 */
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				/* Later skbs: allow up to twice sndbuf
				 * before giving up with ENOBUFS.
				 */
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
				else {
					/* Only the initial fragment
					 * is time stamped.
					 */
					tx_flags = 0;
				}
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			/* reserve for fragmentation and ipsec header */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
				    dst_exthdrlen);

			if (sk->sk_type == SOCK_DGRAM)
				skb_shinfo(skb)->tx_flags = tx_flags;

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				/* Move the overhanging tail of the previous
				 * skb into this one, keeping its checksum
				 * contribution consistent.
				 */
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;

			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			/* Headers are consumed by the first fragment only. */
			transhdrlen = 0;
			exthdrlen = 0;
			dst_exthdrlen = 0;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			/* No scatter-gather: copy into the linear area. */
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			/* Scatter-gather: append into the socket's shared
			 * page fragment, coalescing with the last frag
			 * when possible.
			 */
			int i = skb_shinfo(skb)->nr_frags;
			struct page_frag *pfrag = sk_page_frag(sk);

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error_efault:
	err = -EFAULT;
error:
	/* Undo the optimistic length accounting done above. */
	cork->length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}
EXPORT_SYMBOL_GPL(ip6_append_data);
1479
1480 static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
1481 {
1482         if (np->cork.opt) {
1483                 kfree(np->cork.opt->dst0opt);
1484                 kfree(np->cork.opt->dst1opt);
1485                 kfree(np->cork.opt->hopopt);
1486                 kfree(np->cork.opt->srcrt);
1487                 kfree(np->cork.opt);
1488                 np->cork.opt = NULL;
1489         }
1490
1491         if (inet->cork.base.dst) {
1492                 dst_release(inet->cork.base.dst);
1493                 inet->cork.base.dst = NULL;
1494                 inet->cork.base.flags &= ~IPCORK_ALLFRAG;
1495         }
1496         memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
1497 }
1498
/*
 * ip6_push_pending_frames - transmit the corked write queue as one datagram.
 *
 * Coalesces all queued skbs into the first one's frag_list, pushes the
 * extension headers and the IPv6 header built from the cork state, sends
 * the result via ip6_local_out() and releases the cork.  Returns 0 or a
 * negative errno.
 */
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst;
	struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
	unsigned char proto = fl6->flowi6_proto;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	/* Chain every remaining queued skb onto the first skb's frag_list,
	 * transferring their byte counts and ownership to it.
	 */
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Allow local fragmentation. */
	if (np->pmtudisc < IPV6_PMTUDISC_DO)
		skb->local_df = 1;

	/* A routing header may rewrite the destination below, so keep the
	 * final destination separately.
	 */
	*final_dst = fl6->daddr;
	__skb_pull(skb, skb_network_header_len(skb));
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	/* Finally prepend the IPv6 header itself. */
	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	ip6_flow_hdr(hdr, np->cork.tclass, fl6->flowlabel);
	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	hdr->saddr = fl6->saddr;
	hdr->daddr = *final_dst;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	skb_dst_set(skb, dst_clone(&rt->dst));
	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
	if (proto == IPPROTO_ICMPV6) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		ICMP6MSGOUT_INC_STATS(net, idev, icmp6_hdr(skb)->icmp6_type);
		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
	}

	err = ip6_local_out(skb);
	if (err) {
		/* Positive return values are congestion codes; map them
		 * to an errno (or success) before reporting.
		 */
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			goto error;
	}

out:
	/* Cork state is released on both success and failure. */
	ip6_cork_release(inet, np);
	return err;
error:
	IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	goto out;
}
EXPORT_SYMBOL_GPL(ip6_push_pending_frames);
1581
1582 void ip6_flush_pending_frames(struct sock *sk)
1583 {
1584         struct sk_buff *skb;
1585
1586         while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
1587                 if (skb_dst(skb))
1588                         IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
1589                                       IPSTATS_MIB_OUTDISCARDS);
1590                 kfree_skb(skb);
1591         }
1592
1593         ip6_cork_release(inet_sk(sk), inet6_sk(sk));
1594 }
1595 EXPORT_SYMBOL_GPL(ip6_flush_pending_frames);