
Linux/net/ipv4/ip_output.c

/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              The Internet Protocol (IP) output module.
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Donald Becker, <becker@super.org>
 *              Alan Cox, <Alan.Cox@linux.org>
 *              Richard Underwood
 *              Stefan Becker, <stefanb@yello.ping.de>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *      See ip_input.c for original log
 *
 *      Fixes:
 *              Alan Cox        :       Missing nonblock feature in ip_build_xmit.
 *              Mike Kilburn    :       htons() missing in ip_build_xmit.
 *              Bradford Johnson:       Fix faulty handling of some frames when
 *                                      no route is found.
 *              Alexander Demenshin:    Missing sk/skb free in ip_queue_xmit
 *                                      (in case the packet is not accepted by
 *                                      output firewall rules)
 *              Mike McLagan    :       Routing by source
 *              Alexey Kuznetsov:       use new route cache
 *              Andi Kleen:             Fix broken PMTU recovery and remove
 *                                      some redundant tests.
 *              Vitaly E. Lavrov:       Transparent proxy revived after a year
 *                                      in a coma.
 *              Andi Kleen      :       Replace ip_reply with ip_send_reply.
 *              Andi Kleen      :       Split fast and slow ip_build_xmit path
 *                                      for decreased register pressure on x86
 *                                      and more readability.
 *              Marc Boucher    :       When call_out_firewall returns FW_QUEUE,
 *                                      silently drop skb instead of failing with -EPERM.
 *              Detlev Wengorz  :       Copy protocol for fragments.
 *              Hirokazu Takahashi:     HW checksumming for outgoing UDP
 *                                      datagrams.
 *              Hirokazu Takahashi:     sendfile() on UDP works now.
 */

#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/lwtunnel.h>
#include <linux/bpf-cgroup.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

static int
ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
            unsigned int mtu,
            int (*output)(struct net *, struct sock *, struct sk_buff *));

/* Generate a checksum for an outgoing IP datagram. */
void ip_send_check(struct iphdr *iph)
{
        iph->check = 0;
        iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
EXPORT_SYMBOL(ip_send_check);
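
ip_fast_csum() is an arch-optimized helper. As a hedged illustration (ipv4_header_checksum is our own name, not a kernel symbol), the portable sketch below computes the same RFC 1071 one's-complement checksum over an ihl-word header, assuming the caller has already zeroed iph->check as ip_send_check() does:

#include <stdint.h>
#include <stddef.h>

/* 16-bit one's-complement sum over ihl 32-bit words. */
static uint16_t ipv4_header_checksum(const void *hdr, unsigned int ihl)
{
        const uint16_t *p = hdr;
        uint32_t sum = 0;
        size_t i;

        for (i = 0; i < ihl * 2; i++)   /* ihl counts 32-bit words */
                sum += p[i];
        while (sum >> 16)               /* fold the carries back in */
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;          /* one's complement */
}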

int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        struct iphdr *iph = ip_hdr(skb);

        iph->tot_len = htons(skb->len);
        ip_send_check(iph);

        /* If the egress device is enslaved to an L3 master device, pass
         * the skb to its handler for processing.
         */
        skb = l3mdev_ip_out(sk, skb);
        if (unlikely(!skb))
                return 0;

        skb->protocol = htons(ETH_P_IP);

        return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
                       net, sk, skb, NULL, skb_dst(skb)->dev,
                       dst_output);
}

int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        int err;

        err = __ip_local_out(net, sk, skb);
        if (likely(err == 1))
                err = dst_output(net, sk, skb);

        return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);

static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
        int ttl = inet->uc_ttl;

        if (ttl < 0)
                ttl = ip4_dst_hoplimit(dst);
        return ttl;
}

/*
 *              Add an IP header to a skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
                          __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
{
        struct inet_sock *inet = inet_sk(sk);
        struct rtable *rt = skb_rtable(skb);
        struct net *net = sock_net(sk);
        struct iphdr *iph;

        /* Build the IP header. */
        skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);
        iph->version  = 4;
        iph->ihl      = 5;
        iph->tos      = inet->tos;
        iph->ttl      = ip_select_ttl(inet, &rt->dst);
        iph->daddr    = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
        iph->saddr    = saddr;
        iph->protocol = sk->sk_protocol;
        if (ip_dont_fragment(sk, &rt->dst)) {
                iph->frag_off = htons(IP_DF);
                iph->id = 0;
        } else {
                iph->frag_off = 0;
                __ip_select_ident(net, iph, 1);
        }

        if (opt && opt->opt.optlen) {
                iph->ihl += opt->opt.optlen>>2;
                ip_options_build(skb, &opt->opt, daddr, rt, 0);
        }

        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;

        /* Send it out. */
        return ip_local_out(net, skb->sk, skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
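
For orientation, here is a hedged userspace sketch of the minimal header this function emits; struct ipv4_hdr and fill_min_header are illustrative stand-ins (not the kernel's struct iphdr), but the wire layout and the DF/id policy mirror the code above:

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>   /* htons */

/* Illustrative mirror of the 20-byte option-less IPv4 header. */
struct ipv4_hdr {
        uint8_t  ver_ihl;       /* version (high nibble) + ihl */
        uint8_t  tos;
        uint16_t tot_len;
        uint16_t id;
        uint16_t frag_off;
        uint8_t  ttl;
        uint8_t  protocol;
        uint16_t check;
        uint32_t saddr;
        uint32_t daddr;
} __attribute__((packed));

static void fill_min_header(struct ipv4_hdr *h, uint16_t payload_len,
                            uint32_t saddr, uint32_t daddr, int dont_frag)
{
        memset(h, 0, sizeof(*h));
        h->ver_ihl  = (4 << 4) | 5;          /* IPv4, 20-byte header */
        h->tot_len  = htons(sizeof(*h) + payload_len);
        h->ttl      = 64;                    /* a common default */
        h->protocol = 17;                    /* e.g. IPPROTO_UDP */
        h->saddr    = saddr;
        h->daddr    = daddr;
        if (dont_frag) {
                h->frag_off = htons(0x4000); /* IP_DF */
                h->id = 0;  /* atomic datagram: the id is meaningless */
        }                   /* else an IP id must be selected */
        /* the checksum is filled in last, as ip_send_check() does */
}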

static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        struct dst_entry *dst = skb_dst(skb);
        struct rtable *rt = (struct rtable *)dst;
        struct net_device *dev = dst->dev;
        unsigned int hh_len = LL_RESERVED_SPACE(dev);
        struct neighbour *neigh;
        u32 nexthop;

        if (rt->rt_type == RTN_MULTICAST) {
                IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len);
        } else if (rt->rt_type == RTN_BROADCAST)
                IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len);

        /* Be paranoid, rather than too clever. */
        if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
                struct sk_buff *skb2;

                skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
                if (!skb2) {
                        kfree_skb(skb);
                        return -ENOMEM;
                }
                if (skb->sk)
                        skb_set_owner_w(skb2, skb->sk);
                consume_skb(skb);
                skb = skb2;
        }

        if (lwtunnel_xmit_redirect(dst->lwtstate)) {
                int res = lwtunnel_xmit(skb);

                if (res < 0 || res == LWTUNNEL_XMIT_DONE)
                        return res;
        }

        rcu_read_lock_bh();
        nexthop = (__force u32) rt_nexthop(rt, ip_hdr(skb)->daddr);
        neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
        if (unlikely(!neigh))
                neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
        if (!IS_ERR(neigh)) {
                int res = dst_neigh_output(dst, neigh, skb);

                rcu_read_unlock_bh();
                return res;
        }
        rcu_read_unlock_bh();

        net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
                            __func__);
        kfree_skb(skb);
        return -EINVAL;
}

static int ip_finish_output_gso(struct net *net, struct sock *sk,
                                struct sk_buff *skb, unsigned int mtu)
{
        netdev_features_t features;
        struct sk_buff *segs;
        int ret = 0;

        /* Common case: seglen is <= mtu. */
        if (skb_gso_validate_mtu(skb, mtu))
                return ip_finish_output2(net, sk, skb);

        /* Slowpath - the GSO segment length exceeds the egress MTU.
         *
         * This can happen in several cases:
         *  - Forwarding of a TCP GRO skb, when the DF flag is not set.
         *  - Forwarding of an skb that arrived on a virtualization interface
         *    (virtio-net/vhost/tap) with TSO/GSO size set by another network
         *    stack.
         *  - Local GSO skb transmitted on a NETIF_F_TSO tunnel stacked over an
         *    interface with a smaller MTU.
         *  - Arriving GRO skb (or GSO skb in a virtualized environment) that is
         *    bridged to a NETIF_F_TSO tunnel stacked over an interface with an
         *    insufficient MTU.
         */
        features = netif_skb_features(skb);
        BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
        segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
        if (IS_ERR_OR_NULL(segs)) {
                kfree_skb(skb);
                return -ENOMEM;
        }

        consume_skb(skb);

        do {
                struct sk_buff *nskb = segs->next;
                int err;

                segs->next = NULL;
                err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);

                if (err && ret == 0)
                        ret = err;
                segs = nskb;
        } while (segs);

        return ret;
}

static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        unsigned int mtu;
        int ret;

        ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
        if (ret) {
                kfree_skb(skb);
                return ret;
        }

#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
        /* Policy lookup after SNAT yielded a new policy */
        if (skb_dst(skb)->xfrm) {
                IPCB(skb)->flags |= IPSKB_REROUTED;
                return dst_output(net, sk, skb);
        }
#endif
        mtu = ip_skb_dst_mtu(sk, skb);
        if (skb_is_gso(skb))
                return ip_finish_output_gso(net, sk, skb, mtu);

        if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU))
                return ip_fragment(net, sk, skb, mtu, ip_finish_output2);

        return ip_finish_output2(net, sk, skb);
}

static int ip_mc_finish_output(struct net *net, struct sock *sk,
                               struct sk_buff *skb)
{
        int ret;

        ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
        if (ret) {
                kfree_skb(skb);
                return ret;
        }

        return dev_loopback_xmit(net, sk, skb);
}

int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        struct rtable *rt = skb_rtable(skb);
        struct net_device *dev = rt->dst.dev;

        /*
         *      If the indicated interface is up and running, send the packet.
         */
        IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);

        /*
         *      Multicasts are looped back for other local users
         */

        if (rt->rt_flags&RTCF_MULTICAST) {
                if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
                /* Small optimization: do not loop back non-local frames
                   that returned after forwarding; they will be dropped
                   by ip_mr_input in any case.
                   Note that local frames are looped back to be delivered
                   to local recipients.

                   This check is duplicated in ip_mr_input at the moment.
                 */
                    &&
                    ((rt->rt_flags & RTCF_LOCAL) ||
                     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
                   ) {
                        struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
                        if (newskb)
                                NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
                                        net, sk, newskb, NULL, newskb->dev,
                                        ip_mc_finish_output);
                }

                /* Multicasts with ttl 0 must not go beyond the host */

                if (ip_hdr(skb)->ttl == 0) {
                        kfree_skb(skb);
                        return 0;
                }
        }

        if (rt->rt_flags&RTCF_BROADCAST) {
                struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
                if (newskb)
                        NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
                                net, sk, newskb, NULL, newskb->dev,
                                ip_mc_finish_output);
        }

        return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
                            net, sk, skb, NULL, skb->dev,
                            ip_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
}

int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        struct net_device *dev = skb_dst(skb)->dev;

        IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

        skb->dev = dev;
        skb->protocol = htons(ETH_P_IP);

        return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
                            net, sk, skb, NULL, dev,
                            ip_finish_output,
                            !(IPCB(skb)->flags & IPSKB_REROUTED));
}

/*
 * Copy saddr and daddr, possibly using 64-bit load/stores.
 * Equivalent to:
 *   iph->saddr = fl4->saddr;
 *   iph->daddr = fl4->daddr;
 */
static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
{
        BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
                     offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
        memcpy(&iph->saddr, &fl4->saddr,
               sizeof(fl4->saddr) + sizeof(fl4->daddr));
}
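
The single 8-byte memcpy() is only correct because daddr immediately follows saddr in both struct iphdr and struct flowi4; the BUILD_BUG_ON enforces the flowi4 side at compile time. A hedged standalone restatement of that invariant (toy structs, C11 static_assert):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Toy stand-ins; the real layouts live in the kernel headers. */
struct toy_iphdr  { uint32_t saddr, daddr; };
struct toy_flowi4 { uint32_t saddr, daddr; };

/* Same invariant the BUILD_BUG_ON in ip_copy_addrs() checks:
 * daddr sits exactly sizeof(saddr) bytes after saddr, so one
 * 8-byte copy moves both addresses at once. */
static_assert(offsetof(struct toy_flowi4, daddr) ==
              offsetof(struct toy_flowi4, saddr) + sizeof(uint32_t),
              "daddr must immediately follow saddr");
static_assert(offsetof(struct toy_iphdr, daddr) ==
              offsetof(struct toy_iphdr, saddr) + sizeof(uint32_t),
              "daddr must immediately follow saddr");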

/* Note: skb->sk can be different from sk, in case of tunnels */
int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
{
        struct inet_sock *inet = inet_sk(sk);
        struct net *net = sock_net(sk);
        struct ip_options_rcu *inet_opt;
        struct flowi4 *fl4;
        struct rtable *rt;
        struct iphdr *iph;
        int res;

        /* Skip all of this if the packet is already routed,
         * e.g. by something like SCTP.
         */
        rcu_read_lock();
        inet_opt = rcu_dereference(inet->inet_opt);
        fl4 = &fl->u.ip4;
        rt = skb_rtable(skb);
        if (rt)
                goto packet_routed;

        /* Make sure we can route this packet. */
        rt = (struct rtable *)__sk_dst_check(sk, 0);
        if (!rt) {
                __be32 daddr;

                /* Use correct destination address if we have options. */
                daddr = inet->inet_daddr;
                if (inet_opt && inet_opt->opt.srr)
                        daddr = inet_opt->opt.faddr;

                /* If this fails, the transport layer's retransmit mechanism
                 * will keep trying until the route appears or the connection
                 * times out.
                 */
                rt = ip_route_output_ports(net, fl4, sk,
                                           daddr, inet->inet_saddr,
                                           inet->inet_dport,
                                           inet->inet_sport,
                                           sk->sk_protocol,
                                           RT_CONN_FLAGS(sk),
                                           sk->sk_bound_dev_if);
                if (IS_ERR(rt))
                        goto no_route;
                sk_setup_caps(sk, &rt->dst);
        }
        skb_dst_set_noref(skb, &rt->dst);

packet_routed:
        if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
                goto no_route;

        /* OK, we know where to send it, allocate and build IP header. */
        skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
        skb_reset_network_header(skb);
        iph = ip_hdr(skb);
        *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
        if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
                iph->frag_off = htons(IP_DF);
        else
                iph->frag_off = 0;
        iph->ttl      = ip_select_ttl(inet, &rt->dst);
        iph->protocol = sk->sk_protocol;
        ip_copy_addrs(iph, fl4);

        /* The transport layer sets skb->h.foo itself. */

        if (inet_opt && inet_opt->opt.optlen) {
                iph->ihl += inet_opt->opt.optlen >> 2;
                ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
        }

        ip_select_ident_segs(net, skb, sk,
                             skb_shinfo(skb)->gso_segs ?: 1);

        /* TODO : should we use skb->sk here instead of sk ? */
        skb->priority = sk->sk_priority;
        skb->mark = sk->sk_mark;

        res = ip_local_out(net, sk, skb);
        rcu_read_unlock();
        return res;

no_route:
        rcu_read_unlock();
        IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
        kfree_skb(skb);
        return -EHOSTUNREACH;
}
EXPORT_SYMBOL(ip_queue_xmit);
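
The combined store *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | tos) writes version, ihl and tos in a single 16-bit access. A small hedged demonstration of why the trick is byte-order safe:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
        /* htons((4 << 12) | (5 << 8) | tos) produces, on the wire,
         * byte 0 = 0x45 (version = 4 in the high nibble, ihl = 5 in
         * the low nibble) and byte 1 = tos: exactly the first two
         * bytes of an option-less IPv4 header, on any host. */
        uint8_t tos = 0x10;
        uint16_t v = htons((4 << 12) | (5 << 8) | tos);
        uint8_t *b = (uint8_t *)&v;

        printf("byte0=0x%02x byte1=0x%02x\n", b[0], b[1]);
        /* prints: byte0=0x45 byte1=0x10 */
        return 0;
}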

static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
        to->pkt_type = from->pkt_type;
        to->priority = from->priority;
        to->protocol = from->protocol;
        skb_dst_drop(to);
        skb_dst_copy(to, from);
        to->dev = from->dev;
        to->mark = from->mark;

        /* Copy the flags to each fragment. */
        IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
        to->tc_index = from->tc_index;
#endif
        nf_copy(to, from);
#if IS_ENABLED(CONFIG_IP_VS)
        to->ipvs_property = from->ipvs_property;
#endif
        skb_copy_secmark(to, from);
}

static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                       unsigned int mtu,
                       int (*output)(struct net *, struct sock *, struct sk_buff *))
{
        struct iphdr *iph = ip_hdr(skb);

        if ((iph->frag_off & htons(IP_DF)) == 0)
                return ip_do_fragment(net, sk, skb, output);

        if (unlikely(!skb->ignore_df ||
                     (IPCB(skb)->frag_max_size &&
                      IPCB(skb)->frag_max_size > mtu))) {
                IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
                icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                          htonl(mtu));
                kfree_skb(skb);
                return -EMSGSIZE;
        }

        return ip_do_fragment(net, sk, skb, output);
}
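
This is the Path MTU discovery contract: a too-big packet carrying DF is never fragmented; instead the sender gets ICMP "fragmentation needed" carrying the next-hop MTU, and -EMSGSIZE is returned locally. A hedged, condensed restatement of the decision (the enum and function are illustrative, not kernel code):

#include <stdbool.h>

enum frag_verdict { FRAG_OK, FRAG_ICMP_TOO_BIG };

/* Condensed sketch of ip_fragment()'s policy: fragment unless DF
 * is set and the sender did not ask to ignore it (ignore_df), or
 * a recorded frag_max_size already exceeds the route MTU. */
static enum frag_verdict frag_policy(bool df, bool ignore_df,
                                     unsigned int frag_max_size,
                                     unsigned int mtu)
{
        if (!df)
                return FRAG_OK;
        if (!ignore_df || (frag_max_size && frag_max_size > mtu))
                return FRAG_ICMP_TOO_BIG; /* -EMSGSIZE to the caller */
        return FRAG_OK;
}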

/*
 *      This IP datagram is too large to be sent in one piece.  Break it up
 *      into smaller pieces (each one an IP header plus a block of the data
 *      of the original datagram) that will still fit in a single device
 *      frame, and queue such frames for sending.
 */

int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                   int (*output)(struct net *, struct sock *, struct sk_buff *))
{
        struct iphdr *iph;
        int ptr;
        struct sk_buff *skb2;
        unsigned int mtu, hlen, left, len, ll_rs;
        int offset;
        __be16 not_last_frag;
        struct rtable *rt = skb_rtable(skb);
        int err = 0;

        /* For offloaded checksums, clean up the checksum before
         * fragmentation. */
        if (skb->ip_summed == CHECKSUM_PARTIAL &&
            (err = skb_checksum_help(skb)))
                goto fail;

        /*
         *      Point into the IP datagram header.
         */

        iph = ip_hdr(skb);

        mtu = ip_skb_dst_mtu(sk, skb);
        if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
                mtu = IPCB(skb)->frag_max_size;

        /*
         *      Setup starting values.
         */

        hlen = iph->ihl * 4;
        mtu = mtu - hlen;       /* Size of data space */
        IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

        /* When frag_list is given, use it. First, check its validity:
         * some transformers could create a wrong frag_list or break an
         * existing one; this is not prohibited. In that case fall back
         * to copying.
         *
         * LATER: this step can be merged into the real generation of
         * fragments; we can switch to copying when we see the first
         * bad fragment.
         */
        if (skb_has_frag_list(skb)) {
                struct sk_buff *frag, *frag2;
                unsigned int first_len = skb_pagelen(skb);

                if (first_len - hlen > mtu ||
                    ((first_len - hlen) & 7) ||
                    ip_is_fragment(iph) ||
                    skb_cloned(skb))
                        goto slow_path;

                skb_walk_frags(skb, frag) {
                        /* Correct geometry. */
                        if (frag->len > mtu ||
                            ((frag->len & 7) && frag->next) ||
                            skb_headroom(frag) < hlen)
                                goto slow_path_clean;

                        /* Partially cloned skb? */
                        if (skb_shared(frag))
                                goto slow_path_clean;

                        BUG_ON(frag->sk);
                        if (skb->sk) {
                                frag->sk = skb->sk;
                                frag->destructor = sock_wfree;
                        }
                        skb->truesize -= frag->truesize;
                }

                /* Everything is OK. Generate! */

                err = 0;
                offset = 0;
                frag = skb_shinfo(skb)->frag_list;
                skb_frag_list_init(skb);
                skb->data_len = first_len - skb_headlen(skb);
                skb->len = first_len;
                iph->tot_len = htons(first_len);
                iph->frag_off = htons(IP_MF);
                ip_send_check(iph);

                for (;;) {
                        /* Prepare the header of the next frame,
                         * before the previous one goes down. */
                        if (frag) {
                                frag->ip_summed = CHECKSUM_NONE;
                                skb_reset_transport_header(frag);
                                __skb_push(frag, hlen);
                                skb_reset_network_header(frag);
                                memcpy(skb_network_header(frag), iph, hlen);
                                iph = ip_hdr(frag);
                                iph->tot_len = htons(frag->len);
                                ip_copy_metadata(frag, skb);
                                if (offset == 0)
                                        ip_options_fragment(frag);
                                offset += skb->len - hlen;
                                iph->frag_off = htons(offset>>3);
                                if (frag->next)
                                        iph->frag_off |= htons(IP_MF);
                                /* Ready, complete checksum */
                                ip_send_check(iph);
                        }

                        err = output(net, sk, skb);

                        if (!err)
                                IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
                        if (err || !frag)
                                break;

                        skb = frag;
                        frag = skb->next;
                        skb->next = NULL;
                }

                if (err == 0) {
                        IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
                        return 0;
                }

                while (frag) {
                        skb = frag->next;
                        kfree_skb(frag);
                        frag = skb;
                }
                IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
                return err;

slow_path_clean:
                skb_walk_frags(skb, frag2) {
                        if (frag2 == frag)
                                break;
                        frag2->sk = NULL;
                        frag2->destructor = NULL;
                        skb->truesize += frag2->truesize;
                }
        }

slow_path:
        iph = ip_hdr(skb);

        left = skb->len - hlen;         /* Space per frame */
        ptr = hlen;             /* Where to start from */

        ll_rs = LL_RESERVED_SPACE(rt->dst.dev);

        /*
         *      Fragment the datagram.
         */

        offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
        not_last_frag = iph->frag_off & htons(IP_MF);

        /*
         *      Keep copying data until we run out.
         */

        while (left > 0) {
                len = left;
                /* IF: it doesn't fit, use 'mtu' - the data space left */
                if (len > mtu)
                        len = mtu;
                /* IF: we are not sending up to and including the packet end
                   then align the next start on an eight-byte boundary */
                if (len < left) {
                        len &= ~7;
                }

                /* Allocate buffer */
                skb2 = alloc_skb(len + hlen + ll_rs, GFP_ATOMIC);
                if (!skb2) {
                        err = -ENOMEM;
                        goto fail;
                }

                /*
                 *      Set up data on packet
                 */

                ip_copy_metadata(skb2, skb);
                skb_reserve(skb2, ll_rs);
                skb_put(skb2, len + hlen);
                skb_reset_network_header(skb2);
                skb2->transport_header = skb2->network_header + hlen;

                /*
                 *      Charge the memory for the fragment to any owner
                 *      it might possess
                 */

                if (skb->sk)
                        skb_set_owner_w(skb2, skb->sk);

                /*
                 *      Copy the packet header into the new buffer.
                 */

                skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);

                /*
                 *      Copy a block of the IP datagram.
                 */
                if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
                        BUG();
                left -= len;

                /*
                 *      Fill in the new header fields.
                 */
                iph = ip_hdr(skb2);
                iph->frag_off = htons((offset >> 3));

                if (IPCB(skb)->flags & IPSKB_FRAG_PMTU)
                        iph->frag_off |= htons(IP_DF);

                /* ANK: dirty, but effective trick. Upgrade options only if
                 * the segment to be fragmented was THE FIRST (otherwise,
                 * options are already fixed), and do it ONCE on the initial
                 * skb, so that all the following fragments will inherit
                 * fixed options.
                 */
                if (offset == 0)
                        ip_options_fragment(skb);

                /*
                 *      Added AC: if we are fragmenting a fragment that's not
                 *                the last fragment, then keep the MF bit set
                 *                on each fragment.
                 */
                if (left > 0 || not_last_frag)
                        iph->frag_off |= htons(IP_MF);
                ptr += len;
                offset += len;

                /*
                 *      Put this fragment into the sending queue.
                 */
                iph->tot_len = htons(len + hlen);

                ip_send_check(iph);

                err = output(net, sk, skb2);
                if (err)
                        goto fail;

                IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
        }
        consume_skb(skb);
        IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
        return err;

fail:
        kfree_skb(skb);
        IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
        return err;
}
EXPORT_SYMBOL(ip_do_fragment);
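
Both paths above encode the fragment offset in 8-byte units in the low 13 bits of frag_off, with IP_MF and IP_DF above them; that is also why every non-final fragment is trimmed to a multiple of 8 (len &= ~7). A hedged host-order encode/decode sketch (the macro and function names are ours; the bit values are the standard ones):

#include <stdint.h>

#define IP_DF_BIT     0x4000   /* don't fragment */
#define IP_MF_BIT     0x2000   /* more fragments follow */
#define IP_OFFSET_MSK 0x1fff   /* offset, in 8-byte units */

/* Host-order helpers; the kernel stores the result with htons(). */
static uint16_t frag_encode(unsigned int byte_off, int more_frags)
{
        uint16_t v = (byte_off >> 3) & IP_OFFSET_MSK;

        if (more_frags)
                v |= IP_MF_BIT;
        return v;
}

static unsigned int frag_byte_offset(uint16_t frag_off)
{
        return (frag_off & IP_OFFSET_MSK) << 3;
}

/* e.g. the second 1480-byte fragment of a large datagram:
 * frag_encode(1480, 1) == (1480 >> 3) | IP_MF_BIT == 185 | 0x2000 */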

int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
        struct msghdr *msg = from;

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (!copy_from_iter_full(to, len, &msg->msg_iter))
                        return -EFAULT;
        } else {
                __wsum csum = 0;
                if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
                        return -EFAULT;
                skb->csum = csum_block_add(skb->csum, csum, odd);
        }
        return 0;
}
EXPORT_SYMBOL(ip_generic_getfrag);
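
csum_block_add() folds the sub-block checksum csum into skb->csum, compensating when the block began at an odd offset: the 16-bit one's-complement lanes are then misaligned, so the sub-sum must be rotated by 8 bits before being added. A hedged portable sketch of that folding (helper names are ours, not kernel symbols):

#include <stdint.h>

static uint32_t ror32(uint32_t v, int n)
{
        return (v >> n) | (v << (32 - n));
}

static uint32_t csum_add32(uint32_t a, uint32_t b)
{
        uint32_t s = a + b;

        return s + (s < a);     /* end-around carry */
}

/* Sketch of csum_block_add(): rotate the sub-block sum by 8 bits
 * when it started at an odd byte offset, then fold it in. */
static uint32_t block_add(uint32_t csum, uint32_t csum2, int offset)
{
        if (offset & 1)
                csum2 = ror32(csum2, 8);
        return csum_add32(csum, csum2);
}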

static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
        char *kaddr;
        __wsum csum;
        kaddr = kmap(page);
        csum = csum_partial(kaddr + offset, copy, 0);
        kunmap(page);
        return csum;
}

static inline int ip_ufo_append_data(struct sock *sk,
                        struct sk_buff_head *queue,
                        int getfrag(void *from, char *to, int offset, int len,
                               int odd, struct sk_buff *skb),
                        void *from, int length, int hh_len, int fragheaderlen,
                        int transhdrlen, int maxfraglen, unsigned int flags)
{
        struct sk_buff *skb;
        int err;

        /* The network device supports UDP fragmentation offload, so
         * create a single skb containing the complete UDP datagram.
         */
        skb = skb_peek_tail(queue);
        if (!skb) {
                skb = sock_alloc_send_skb(sk,
                        hh_len + fragheaderlen + transhdrlen + 20,
                        (flags & MSG_DONTWAIT), &err);

                if (!skb)
                        return err;

                /* reserve space for the hardware header */
                skb_reserve(skb, hh_len);

                /* create space for the UDP/IP header */
                skb_put(skb, fragheaderlen + transhdrlen);

                /* initialize the network header pointer */
                skb_reset_network_header(skb);

                /* initialize the protocol header pointer */
                skb->transport_header = skb->network_header + fragheaderlen;

                skb->csum = 0;

                __skb_queue_tail(queue, skb);
        } else if (skb_is_gso(skb)) {
                goto append;
        }

        skb->ip_summed = CHECKSUM_PARTIAL;
        /* specify the length of each IP datagram fragment */
        skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
        skb_shinfo(skb)->gso_type = SKB_GSO_UDP;

append:
        return skb_append_datato_frags(sk, skb, getfrag, from,
                                       (length - transhdrlen));
}

static int __ip_append_data(struct sock *sk,
                            struct flowi4 *fl4,
                            struct sk_buff_head *queue,
                            struct inet_cork *cork,
                            struct page_frag *pfrag,
                            int getfrag(void *from, char *to, int offset,
                                        int len, int odd, struct sk_buff *skb),
                            void *from, int length, int transhdrlen,
                            unsigned int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        struct sk_buff *skb;

        struct ip_options *opt = cork->opt;
        int hh_len;
        int exthdrlen;
        int mtu;
        int copy;
        int err;
        int offset = 0;
        unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
        int csummode = CHECKSUM_NONE;
        struct rtable *rt = (struct rtable *)cork->dst;
        u32 tskey = 0;

        skb = skb_peek_tail(queue);

        exthdrlen = !skb ? rt->dst.header_len : 0;
        mtu = cork->fragsize;
        if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
            sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
                tskey = sk->sk_tskey++;

        hh_len = LL_RESERVED_SPACE(rt->dst.dev);

        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
        maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;
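
        /* Worked example (illustrative numbers, not from the source):
         * with mtu = 1500 and 12 bytes of IP options, fragheaderlen is
         * 20 + 12 = 32, so (1500 - 32) & ~7 = 1464 payload bytes per
         * fragment and maxfraglen = 1464 + 32 = 1496. The rounding
         * keeps every non-final fragment's payload a multiple of 8,
         * as the frag_off encoding requires.
         */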

        if (cork->length + length > maxnonfragsize - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
                               mtu - (opt ? opt->optlen : 0));
                return -EMSGSIZE;
        }

        /*
         * transhdrlen > 0 means that this is the first fragment and we
         * want it not to be fragmented later.
         */
        if (transhdrlen &&
            length + fragheaderlen <= mtu &&
            rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) &&
            !(flags & MSG_MORE) &&
            !exthdrlen)
                csummode = CHECKSUM_PARTIAL;

        cork->length += length;
        if ((((length + fragheaderlen) > mtu) || (skb && skb_is_gso(skb))) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
            (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
                err = ip_ufo_append_data(sk, queue, getfrag, from, length,
                                         hh_len, fragheaderlen, transhdrlen,
                                         maxfraglen, flags);
                if (err)
                        goto error;
                return 0;
        }

        /* So, what's going on in the loop below?
         *
         * We use the calculated fragment length to generate a chain of
         * skbs; each segment is an IP fragment ready for sending to the
         * network once the appropriate IP header has been added.
         */

        if (!skb)
                goto alloc_new_skb;

        while (length > 0) {
                /* Check if the remaining data fits into current packet. */
                copy = mtu - skb->len;
                if (copy < length)
                        copy = maxfraglen - skb->len;
                if (copy <= 0) {
                        char *data;
                        unsigned int datalen;
                        unsigned int fraglen;
                        unsigned int fraggap;
                        unsigned int alloclen;
                        struct sk_buff *skb_prev;
alloc_new_skb:
                        skb_prev = skb;
                        if (skb_prev)
                                fraggap = skb_prev->len - maxfraglen;
                        else
                                fraggap = 0;

                        /*
                         * If remaining data exceeds the mtu,
                         * we know we need more fragment(s).
                         */
                        datalen = length + fraggap;
                        if (datalen > mtu - fragheaderlen)
                                datalen = maxfraglen - fragheaderlen;
                        fraglen = datalen + fragheaderlen;

                        if ((flags & MSG_MORE) &&
                            !(rt->dst.dev->features&NETIF_F_SG))
                                alloclen = mtu;
                        else
                                alloclen = fraglen;

                        alloclen += exthdrlen;

                        /* The last fragment gets additional space at the
                         * tail. Note that with MSG_MORE we overallocate
                         * on fragments, because we have no idea what
                         * fragment will be the last.
                         */
                        if (datalen == length + fraggap)
                                alloclen += rt->dst.trailer_len;

                        if (transhdrlen) {
                                skb = sock_alloc_send_skb(sk,
                                                alloclen + hh_len + 15,
                                                (flags & MSG_DONTWAIT), &err);
                        } else {
                                skb = NULL;
                                if (atomic_read(&sk->sk_wmem_alloc) <=
                                    2 * sk->sk_sndbuf)
                                        skb = sock_wmalloc(sk,
                                                           alloclen + hh_len + 15, 1,
                                                           sk->sk_allocation);
                                if (unlikely(!skb))
                                        err = -ENOBUFS;
                        }
                        if (!skb)
                                goto error;

                        /*
                         *      Fill in the control structures
                         */
                        skb->ip_summed = csummode;
                        skb->csum = 0;
                        skb_reserve(skb, hh_len);

                        /* only the initial fragment is time stamped */
                        skb_shinfo(skb)->tx_flags = cork->tx_flags;
                        cork->tx_flags = 0;
                        skb_shinfo(skb)->tskey = tskey;
                        tskey = 0;

                        /*
                         *      Find where to start putting bytes.
                         */
                        data = skb_put(skb, fraglen + exthdrlen);
                        skb_set_network_header(skb, exthdrlen);
                        skb->transport_header = (skb->network_header +
                                                 fragheaderlen);
                        data += fragheaderlen + exthdrlen;

                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(
                                        skb_prev, maxfraglen,
                                        data + transhdrlen, fraggap, 0);
                                skb_prev->csum = csum_sub(skb_prev->csum,
                                                          skb->csum);
                                data += fraggap;
                                pskb_trim_unique(skb_prev, maxfraglen);
                        }

                        copy = datalen - transhdrlen - fraggap;
                        if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
                                err = -EFAULT;
                                kfree_skb(skb);
                                goto error;
                        }

                        offset += copy;
                        length -= datalen - fraggap;
                        transhdrlen = 0;
                        exthdrlen = 0;
                        csummode = CHECKSUM_NONE;

                        /*
                         * Put the packet on the pending queue.
                         */
                        __skb_queue_tail(queue, skb);
                        continue;
                }

                if (copy > length)
                        copy = length;

                if (!(rt->dst.dev->features&NETIF_F_SG)) {
                        unsigned int off;

                        off = skb->len;
                        if (getfrag(from, skb_put(skb, copy),
                                        offset, copy, off, skb) < 0) {
                                __skb_trim(skb, off);
                                err = -EFAULT;
                                goto error;
                        }
                } else {
                        int i = skb_shinfo(skb)->nr_frags;

                        err = -ENOMEM;
                        if (!sk_page_frag_refill(sk, pfrag))
                                goto error;

                        if (!skb_can_coalesce(skb, i, pfrag->page,
                                              pfrag->offset)) {
                                err = -EMSGSIZE;
                                if (i == MAX_SKB_FRAGS)
                                        goto error;

                                __skb_fill_page_desc(skb, i, pfrag->page,
                                                     pfrag->offset, 0);
                                skb_shinfo(skb)->nr_frags = ++i;
                                get_page(pfrag->page);
                        }
                        copy = min_t(int, copy, pfrag->size - pfrag->offset);
                        if (getfrag(from,
                                    page_address(pfrag->page) + pfrag->offset,
                                    offset, copy, skb->len, skb) < 0)
                                goto error_efault;

                        pfrag->offset += copy;
                        skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
                        skb->len += copy;
                        skb->data_len += copy;
                        skb->truesize += copy;
                        atomic_add(copy, &sk->sk_wmem_alloc);
                }
                offset += copy;
                length -= copy;
        }

        return 0;

error_efault:
        err = -EFAULT;
error:
        cork->length -= length;
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
        return err;
}

static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
                         struct ipcm_cookie *ipc, struct rtable **rtp)
{
        struct ip_options_rcu *opt;
        struct rtable *rt;

        /*
         * setup for corking.
         */
        opt = ipc->opt;
        if (opt) {
                if (!cork->opt) {
                        cork->opt = kmalloc(sizeof(struct ip_options) + 40,
                                            sk->sk_allocation);
                        if (unlikely(!cork->opt))
                                return -ENOBUFS;
                }
                memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
                cork->flags |= IPCORK_OPT;
                cork->addr = ipc->addr;
        }
        rt = *rtp;
        if (unlikely(!rt))
                return -EFAULT;
        /*
         * We steal the reference to this route; the caller should not
         * release it.
         */
        *rtp = NULL;
        cork->fragsize = ip_sk_use_pmtu(sk) ?
                         dst_mtu(&rt->dst) : rt->dst.dev->mtu;
        cork->dst = &rt->dst;
        cork->length = 0;
        cork->ttl = ipc->ttl;
        cork->tos = ipc->tos;
        cork->priority = ipc->priority;
        cork->tx_flags = ipc->tx_flags;

        return 0;
}
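
The + 40 above is the architectural ceiling for IPv4 options: ihl is a 4-bit count of 32-bit words, so the header tops out at 15 * 4 = 60 bytes, 40 beyond the fixed 20. A hedged restatement of that bound (macro names are ours; C11 static_assert):

#include <assert.h>

#define IPV4_MAX_HDR_LEN  (15 * 4)  /* ihl: 4 bits of 32-bit words */
#define IPV4_MIN_HDR_LEN  20        /* fixed part of the header */

static_assert(IPV4_MAX_HDR_LEN - IPV4_MIN_HDR_LEN == 40,
              "matches the kmalloc() size in ip_setup_cork()");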

/*
 *      ip_append_data() and ip_append_page() can make one large IP datagram
 *      from many pieces of data. Each piece will be held on the socket
 *      until ip_push_pending_frames() is called. Each piece can be a page
 *      or non-page data.
 *
 *      Not only UDP, other transport protocols - e.g. raw sockets - can
 *      potentially use this interface.
 *
 *      LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
                   int getfrag(void *from, char *to, int offset, int len,
                               int odd, struct sk_buff *skb),
                   void *from, int length, int transhdrlen,
                   struct ipcm_cookie *ipc, struct rtable **rtp,
                   unsigned int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        int err;

        if (flags&MSG_PROBE)
                return 0;

        if (skb_queue_empty(&sk->sk_write_queue)) {
                err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
                if (err)
                        return err;
        } else {
                transhdrlen = 0;
        }

        return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
                                sk_page_frag(sk), getfrag,
                                from, length, transhdrlen, flags);
}
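
As a hedged sketch of how a datagram transport drives this corking interface (loosely modelled on what udp_sendmsg() does, with locking and most error handling omitted; example_send is not kernel code):

static int example_send(struct sock *sk, struct flowi4 *fl4,
                        struct msghdr *msg, int length,
                        struct ipcm_cookie *ipc, struct rtable **rtp,
                        unsigned int flags)
{
        int err;

        /* Queue the data; the first call also sets up the cork
         * from *rtp. */
        err = ip_append_data(sk, fl4, ip_generic_getfrag, msg,
                             length, sizeof(struct udphdr),
                             ipc, rtp, flags);
        if (err) {
                /* a real caller discards the partial queue here,
                 * e.g. via ip_flush_pending_frames(sk) */
                return err;
        }

        if (flags & MSG_MORE)
                return 0;       /* stay corked; more data will follow */

        /* Combine the queued pieces into one datagram and send it. */
        return ip_push_pending_frames(sk, fl4);
}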

ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
                       int offset, size_t size, int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        struct sk_buff *skb;
        struct rtable *rt;
        struct ip_options *opt = NULL;
        struct inet_cork *cork;
        int hh_len;
        int mtu;
        int len;
        int err;
        unsigned int maxfraglen, fragheaderlen, fraggap, maxnonfragsize;

        if (inet->hdrincl)
                return -EPERM;

        if (flags&MSG_PROBE)
                return 0;

        if (skb_queue_empty(&sk->sk_write_queue))
                return -EINVAL;

        cork = &inet->cork.base;
        rt = (struct rtable *)cork->dst;
        if (cork->flags & IPCORK_OPT)
                opt = cork->opt;

        if (!(rt->dst.dev->features&NETIF_F_SG))
                return -EOPNOTSUPP;

        hh_len = LL_RESERVED_SPACE(rt->dst.dev);
        mtu = cork->fragsize;

        fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
        maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
        maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;

        if (cork->length + size > maxnonfragsize - fragheaderlen) {
                ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
                               mtu - (opt ? opt->optlen : 0));
                return -EMSGSIZE;
        }

        skb = skb_peek_tail(&sk->sk_write_queue);
        if (!skb)
                return -EINVAL;

        if ((size + skb->len > mtu) &&
            (sk->sk_protocol == IPPROTO_UDP) &&
            (rt->dst.dev->features & NETIF_F_UFO)) {
                if (skb->ip_summed != CHECKSUM_PARTIAL)
                        return -EOPNOTSUPP;

                skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
                skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
        }
        cork->length += size;

        while (size > 0) {
                if (skb_is_gso(skb)) {
                        len = size;
                } else {

                        /* Check if the remaining data fits into current packet. */
                        len = mtu - skb->len;
                        if (len < size)
                                len = maxfraglen - skb->len;
                }
                if (len <= 0) {
                        struct sk_buff *skb_prev;
                        int alloclen;

                        skb_prev = skb;
                        fraggap = skb_prev->len - maxfraglen;

                        alloclen = fragheaderlen + hh_len + fraggap + 15;
                        skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
                        if (unlikely(!skb)) {
                                err = -ENOBUFS;
                                goto error;
                        }

                        /*
                         *      Fill in the control structures
                         */
                        skb->ip_summed = CHECKSUM_NONE;
                        skb->csum = 0;
                        skb_reserve(skb, hh_len);

                        /*
                         *      Find where to start putting bytes.
                         */
                        skb_put(skb, fragheaderlen + fraggap);
                        skb_reset_network_header(skb);
                        skb->transport_header = (skb->network_header +
                                                 fragheaderlen);
                        if (fraggap) {
                                skb->csum = skb_copy_and_csum_bits(skb_prev,
                                                                   maxfraglen,
                                                    skb_transport_header(skb),
                                                                   fraggap, 0);
                                skb_prev->csum = csum_sub(skb_prev->csum,
                                                          skb->csum);
                                pskb_trim_unique(skb_prev, maxfraglen);
                        }

                        /*
                         * Put the packet on the pending queue.
                         */
                        __skb_queue_tail(&sk->sk_write_queue, skb);
                        continue;
                }

                if (len > size)
                        len = size;

                if (skb_append_pagefrags(skb, page, offset, len)) {
                        err = -EMSGSIZE;
                        goto error;
                }

                if (skb->ip_summed == CHECKSUM_NONE) {
                        __wsum csum;
                        csum = csum_page(page, offset, len);
                        skb->csum = csum_block_add(skb->csum, csum, skb->len);
                }

                skb->len += len;
                skb->data_len += len;
                skb->truesize += len;
                atomic_add(len, &sk->sk_wmem_alloc);
                offset += len;
                size -= len;
        }
        return 0;

error:
        cork->length -= size;
        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
        return err;
}

static void ip_cork_release(struct inet_cork *cork)
{
        cork->flags &= ~IPCORK_OPT;
        kfree(cork->opt);
        cork->opt = NULL;
        dst_release(cork->dst);
        cork->dst = NULL;
}
1382 
1383 /*
1384  *      Combined all pending IP fragments on the socket as one IP datagram
1385  *      and push them out.
1386  */
1387 struct sk_buff *__ip_make_skb(struct sock *sk,
1388                               struct flowi4 *fl4,
1389                               struct sk_buff_head *queue,
1390                               struct inet_cork *cork)
1391 {
1392         struct sk_buff *skb, *tmp_skb;
1393         struct sk_buff **tail_skb;
1394         struct inet_sock *inet = inet_sk(sk);
1395         struct net *net = sock_net(sk);
1396         struct ip_options *opt = NULL;
1397         struct rtable *rt = (struct rtable *)cork->dst;
1398         struct iphdr *iph;
1399         __be16 df = 0;
1400         __u8 ttl;
1401 
1402         skb = __skb_dequeue(queue);
1403         if (!skb)
1404                 goto out;
1405         tail_skb = &(skb_shinfo(skb)->frag_list);
1406 
1407         /* move skb->data to ip header from ext header */
1408         if (skb->data < skb_network_header(skb))
1409                 __skb_pull(skb, skb_network_offset(skb));
1410         while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
1411                 __skb_pull(tmp_skb, skb_network_header_len(skb));
1412                 *tail_skb = tmp_skb;
1413                 tail_skb = &(tmp_skb->next);
1414                 skb->len += tmp_skb->len;
1415                 skb->data_len += tmp_skb->len;
1416                 skb->truesize += tmp_skb->truesize;
1417                 tmp_skb->destructor = NULL;
1418                 tmp_skb->sk = NULL;
1419         }
1420 
1421         /* Unless the user demanded real PMTU discovery (IP_PMTUDISC_DO),
1422          * we allow the frame generated here to be fragmented.  No matter
1423          * how transforms change the size of the packet, it will come out.
1424          */
1425         skb->ignore_df = ip_sk_ignore_df(sk);
1426 
1427         /* The DF bit is set when we want to see DF on outgoing frames.
1428          * If ignore_df is also set, we still allow this frame to be
1429          * fragmented locally. */
1430         if (inet->pmtudisc == IP_PMTUDISC_DO ||
1431             inet->pmtudisc == IP_PMTUDISC_PROBE ||
1432             (skb->len <= dst_mtu(&rt->dst) &&
1433              ip_dont_fragment(sk, &rt->dst)))
1434                 df = htons(IP_DF);
1435 
1436         if (cork->flags & IPCORK_OPT)
1437                 opt = cork->opt;
1438 
1439         if (cork->ttl != 0)
1440                 ttl = cork->ttl;
1441         else if (rt->rt_type == RTN_MULTICAST)
1442                 ttl = inet->mc_ttl;
1443         else
1444                 ttl = ip_select_ttl(inet, &rt->dst);
1445 
1446         iph = ip_hdr(skb);
1447         iph->version = 4;
1448         iph->ihl = 5;
1449         iph->tos = (cork->tos != -1) ? cork->tos : inet->tos;
1450         iph->frag_off = df;
1451         iph->ttl = ttl;
1452         iph->protocol = sk->sk_protocol;
1453         ip_copy_addrs(iph, fl4);
1454         ip_select_ident(net, skb, sk);
1455 
1456         if (opt) {
1457                 iph->ihl += opt->optlen>>2;
1458                 ip_options_build(skb, opt, cork->addr, rt, 0);
1459         }
1460 
1461         skb->priority = (cork->tos != -1) ? cork->priority : sk->sk_priority;
1462         skb->mark = sk->sk_mark;
1463         /*
1464          * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
1465          * on dst refcount
1466          */
1467         cork->dst = NULL;
1468         skb_dst_set(skb, &rt->dst);
1469 
1470         if (iph->protocol == IPPROTO_ICMP)
1471                 icmp_out_count(net, ((struct icmphdr *)
1472                         skb_transport_header(skb))->type);
1473 
1474         ip_cork_release(cork);
1475 out:
1476         return skb;
1477 }
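
/* [Editor's sketch -- not part of ip_output.c.]
 * After __ip_make_skb(), all but the first queued skb hang off
 * skb_shinfo(head)->frag_list of the returned head skb, which is why
 * ip_fragment() can often split the datagram later without copying.
 * Walking that list with the skb_walk_frags() helper from
 * <linux/skbuff.h>; the function name and `head` are hypothetical:
 */
static unsigned int frag_list_bytes(const struct sk_buff *head)
{
	struct sk_buff *frag;
	unsigned int bytes = 0;

	skb_walk_frags(head, frag)
		bytes += frag->len;
	return bytes;
}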
1478 
1479 int ip_send_skb(struct net *net, struct sk_buff *skb)
1480 {
1481         int err;
1482 
1483         err = ip_local_out(net, skb->sk, skb);
1484         if (err) {
1485                 if (err > 0)
1486                         err = net_xmit_errno(err);
1487                 if (err)
1488                         IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
1489         }
1490 
1491         return err;
1492 }
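
/* [Editor's note -- illustrative, not part of ip_output.c.]
 * ip_local_out() can return positive NET_XMIT_* codes from the queueing
 * discipline; net_xmit_errno() folds them so that NET_XMIT_CN (congestion
 * notification) becomes 0 (treated as best-effort success, no OUTDISCARDS
 * bump) and anything else becomes -ENOBUFS.  A caller therefore only ever
 * sees 0 or a negative errno:
 */
	int err = ip_send_skb(net, skb);
	if (err)
		pr_debug("ip_send_skb failed: %d\n", err);  /* always < 0 here */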
1493 
1494 int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
1495 {
1496         struct sk_buff *skb;
1497 
1498         skb = ip_finish_skb(sk, fl4);
1499         if (!skb)
1500                 return 0;
1501 
1502         /* Netfilter gets the whole, not yet fragmented skb. */
1503         return ip_send_skb(sock_net(sk), skb);
1504 }
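
/* [Editor's sketch -- not part of ip_output.c.]
 * The corked sequence these helpers serve, in the shape used by
 * ip_send_unicast_reply() further down: append, and on failure flush the
 * partial datagram.  Error handling is simplified and the function name
 * is hypothetical; `getfrag`, `data`, `ipc` and `rtp` are assumed to be
 * set up by the caller:
 */
static int corked_send_sketch(struct sock *sk, struct flowi4 *fl4,
			      int getfrag(void *, char *, int, int, int,
					  struct sk_buff *),
			      void *data, int len, struct ipcm_cookie *ipc,
			      struct rtable **rtp)
{
	int err;

	err = ip_append_data(sk, fl4, getfrag, data, len, 0,
			     ipc, rtp, MSG_DONTWAIT);
	if (err) {
		ip_flush_pending_frames(sk);	/* drop the partial datagram */
		return err;
	}
	return ip_push_pending_frames(sk, fl4);
}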
1505 
1506 /*
1507  *      Throw away all pending data on the socket.
1508  */
1509 static void __ip_flush_pending_frames(struct sock *sk,
1510                                       struct sk_buff_head *queue,
1511                                       struct inet_cork *cork)
1512 {
1513         struct sk_buff *skb;
1514 
1515         while ((skb = __skb_dequeue_tail(queue)) != NULL)
1516                 kfree_skb(skb);
1517 
1518         ip_cork_release(cork);
1519 }
1520 
1521 void ip_flush_pending_frames(struct sock *sk)
1522 {
1523         __ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
1524 }
1525 
1526 struct sk_buff *ip_make_skb(struct sock *sk,
1527                             struct flowi4 *fl4,
1528                             int getfrag(void *from, char *to, int offset,
1529                                         int len, int odd, struct sk_buff *skb),
1530                             void *from, int length, int transhdrlen,
1531                             struct ipcm_cookie *ipc, struct rtable **rtp,
1532                             unsigned int flags)
1533 {
1534         struct inet_cork cork;
1535         struct sk_buff_head queue;
1536         int err;
1537 
1538         if (flags & MSG_PROBE)
1539                 return NULL;
1540 
1541         __skb_queue_head_init(&queue);
1542 
1543         cork.flags = 0;
1544         cork.addr = 0;
1545         cork.opt = NULL;
1546         err = ip_setup_cork(sk, &cork, ipc, rtp);
1547         if (err)
1548                 return ERR_PTR(err);
1549 
1550         err = __ip_append_data(sk, fl4, &queue, &cork,
1551                                &current->task_frag, getfrag,
1552                                from, length, transhdrlen, flags);
1553         if (err) {
1554                 __ip_flush_pending_frames(sk, &queue, &cork);
1555                 return ERR_PTR(err);
1556         }
1557 
1558         return __ip_make_skb(sk, fl4, &queue, &cork);
1559 }
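
/* [Editor's sketch -- not part of ip_output.c.]
 * The uncorked fast path pairs ip_make_skb() with ip_send_skb(), loosely
 * the shape udp_sendmsg() uses.  Note the two return conventions of
 * ip_make_skb(): NULL means MSG_PROBE (nothing queued), ERR_PTR() carries
 * a real error.  The function name and the choice of ip_generic_getfrag()
 * with a udphdr-sized transport header are illustrative assumptions:
 */
static int fast_path_send_sketch(struct sock *sk, struct flowi4 *fl4,
				 struct msghdr *msg, int len,
				 struct ipcm_cookie *ipc, struct rtable **rtp)
{
	struct sk_buff *skb;

	skb = ip_make_skb(sk, fl4, ip_generic_getfrag, msg, len,
			  sizeof(struct udphdr), ipc, rtp, msg->msg_flags);
	if (!skb)
		return 0;		/* MSG_PROBE: nothing was queued */
	if (IS_ERR(skb))
		return PTR_ERR(skb);
	return ip_send_skb(sock_net(sk), skb);
}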
1560 
1561 /*
1562  *      Fetch data from kernel space and fill in checksum if needed.
1563  */
1564 static int ip_reply_glue_bits(void *dptr, char *to, int offset,
1565                               int len, int odd, struct sk_buff *skb)
1566 {
1567         __wsum csum;
1568 
1569         csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
1570         skb->csum = csum_block_add(skb->csum, csum, odd);
1571         return 0;
1572 }
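
/* [Editor's sketch -- not part of ip_output.c.]
 * ip_reply_glue_bits() above is one instance of the getfrag() callback
 * that __ip_append_data() invokes to pull `len` bytes at `offset` of the
 * caller's data (`from` is opaque caller state) into the skb at `to`;
 * `odd` is the byte parity of the destination so a software checksum
 * stays foldable.  A hypothetical variant that skips checksumming, which
 * is only valid while skb->ip_summed != CHECKSUM_NONE:
 */
static int nocsum_getfrag(void *from, char *to, int offset,
			  int len, int odd, struct sk_buff *skb)
{
	memcpy(to, (char *)from + offset, len);
	return 0;	/* a non-zero return aborts the append with that error */
}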
1573 
1574 /*
1575  *	Generic function to send a packet in reply to another packet.
1576  *	So far it is used only to send TCP resets and ACKs.
1577  */
1578 void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
1579                            const struct ip_options *sopt,
1580                            __be32 daddr, __be32 saddr,
1581                            const struct ip_reply_arg *arg,
1582                            unsigned int len)
1583 {
1584         struct ip_options_data replyopts;
1585         struct ipcm_cookie ipc;
1586         struct flowi4 fl4;
1587         struct rtable *rt = skb_rtable(skb);
1588         struct net *net = sock_net(sk);
1589         struct sk_buff *nskb;
1590         int err;
1591         int oif;
1592 
1593         if (__ip_options_echo(&replyopts.opt.opt, skb, sopt))
1594                 return;
1595 
1596         ipc.addr = daddr;
1597         ipc.opt = NULL;
1598         ipc.tx_flags = 0;
1599         ipc.ttl = 0;
1600         ipc.tos = -1;
1601 
1602         if (replyopts.opt.opt.optlen) {
1603                 ipc.opt = &replyopts.opt;
1604 
1605                 if (replyopts.opt.opt.srr)
1606                         daddr = replyopts.opt.opt.faddr;
1607         }
1608 
1609         oif = arg->bound_dev_if;
1610         if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
1611                 oif = skb->skb_iif;
1612 
1613         flowi4_init_output(&fl4, oif,
1614                            IP4_REPLY_MARK(net, skb->mark),
1615                            RT_TOS(arg->tos),
1616                            RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
1617                            ip_reply_arg_flowi_flags(arg),
1618                            daddr, saddr,
1619                            tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
1620                            arg->uid);
1621         security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
1622         rt = ip_route_output_key(net, &fl4);
1623         if (IS_ERR(rt))
1624                 return;
1625 
1626         inet_sk(sk)->tos = arg->tos;
1627 
1628         sk->sk_priority = skb->priority;
1629         sk->sk_protocol = ip_hdr(skb)->protocol;
1630         sk->sk_bound_dev_if = arg->bound_dev_if;
1631         sk->sk_sndbuf = sysctl_wmem_default;
1632         sk->sk_mark = fl4.flowi4_mark;
1633         err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
1634                              len, 0, &ipc, &rt, MSG_DONTWAIT);
1635         if (unlikely(err)) {
1636                 ip_flush_pending_frames(sk);
1637                 goto out;
1638         }
1639 
1640         nskb = skb_peek(&sk->sk_write_queue);
1641         if (nskb) {
1642                 if (arg->csumoffset >= 0)
1643                         *((__sum16 *)skb_transport_header(nskb) +
1644                           arg->csumoffset) = csum_fold(csum_add(nskb->csum,
1645                                                                 arg->csum));
1646                 nskb->ip_summed = CHECKSUM_NONE;
1647                 ip_push_pending_frames(sk, &fl4);
1648         }
1649 out:
1650         ip_rt_put(rt);
1651 }
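
/* [Editor's sketch -- not part of ip_output.c.]
 * Roughly how tcp_v4_send_reset() in net/ipv4/tcp_ipv4.c drives this
 * helper: it prebuilds a TCP header, seeds arg.csum with the pseudo-header
 * sum, and points csumoffset at the 16-bit slot of the TCP checksum field
 * so the branch on arg->csumoffset above can patch it in.  Simplified and
 * hypothetical (a real caller fills `rep` with the RST fields and would
 * need <linux/tcp.h>); `sopt` is the parsed options of the received skb:
 */
static void send_reset_sketch(struct sock *ctl_sk, struct sk_buff *skb,
			      const struct ip_options *sopt)
{
	struct tcphdr rep = { };	/* would be filled with RST fields */
	struct ip_reply_arg arg = { };

	arg.iov[0].iov_base = &rep;
	arg.iov[0].iov_len  = sizeof(rep);
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr, ip_hdr(skb)->saddr,
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	/* reply goes back to the sender: daddr/saddr are swapped */
	ip_send_unicast_reply(ctl_sk, skb, sopt, ip_hdr(skb)->saddr,
			      ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
}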
1652 
1653 void __init ip_init(void)
1654 {
1655         ip_rt_init();
1656         inet_initpeers();
1657 
1658 #if defined(CONFIG_IP_MULTICAST)
1659         igmp_mc_init();
1660 #endif
1661 }
1662 
