
Linux/include/linux/skbuff.h

  1 /*
  2  *      Definitions for the 'struct sk_buff' memory handlers.
  3  *
  4  *      Authors:
  5  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
  6  *              Florian La Roche, <rzsfl@rz.uni-sb.de>
  7  *
  8  *      This program is free software; you can redistribute it and/or
  9  *      modify it under the terms of the GNU General Public License
 10  *      as published by the Free Software Foundation; either version
 11  *      2 of the License, or (at your option) any later version.
 12  */
 13  
 14 #ifndef _LINUX_SKBUFF_H
 15 #define _LINUX_SKBUFF_H
 16 
 17 #include <linux/config.h>
 18 #include <linux/kernel.h>
 19 #include <linux/sched.h>
 20 #include <linux/time.h>
 21 #include <linux/cache.h>
 22 
 23 #include <asm/atomic.h>
 24 #include <asm/types.h>
 25 #include <linux/spinlock.h>
 26 #include <linux/mm.h>
 27 #include <linux/highmem.h>
 28 
 29 #define HAVE_ALLOC_SKB          /* For the drivers to know */
 30 #define HAVE_ALIGNABLE_SKB      /* Ditto 8)                */
 31 #define SLAB_SKB                /* Slabified skbuffs       */
 32 
 33 #define CHECKSUM_NONE 0
 34 #define CHECKSUM_HW 1
 35 #define CHECKSUM_UNNECESSARY 2
 36 
 37 #define SKB_DATA_ALIGN(X)       (((X) + (SMP_CACHE_BYTES-1)) & ~(SMP_CACHE_BYTES-1))
 38 #define SKB_MAX_ORDER(X,ORDER)  (((PAGE_SIZE<<(ORDER)) - (X) - sizeof(struct skb_shared_info))&~(SMP_CACHE_BYTES-1))
 39 #define SKB_MAX_HEAD(X)         (SKB_MAX_ORDER((X),0))
 40 #define SKB_MAX_ALLOC           (SKB_MAX_ORDER(0,2))
 41 
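A worked example of the macros above (editor's sketch, assuming SMP_CACHE_BYTES == 32
and PAGE_SIZE == 4096; both values vary by architecture):

/*
 * SKB_DATA_ALIGN(100)  == (100 + 31) & ~31 == 128
 *      -- requested sizes are rounded up to a full cache line.
 *
 * SKB_MAX_ORDER(0, 2)  == ((4096 << 2) - 0 - sizeof(struct skb_shared_info)) & ~31
 *      -- the largest payload of an order-2 page allocation, after
 *         reserving room for the struct skb_shared_info at skb->end.
 */
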
 42 /* A. Checksumming of received packets by device.
 43  *
 44  *      NONE: device failed to checksum this packet.
 45  *              skb->csum is undefined.
 46  *
  47  *      UNNECESSARY: device parsed the packet and verified the checksum.
  48  *              skb->csum is undefined.
  49  *            It is a bad option, but, unfortunately, many vendors do this.
  50  *            Apparently with the secret goal of selling you a new device
  51  *            when you add a new protocol to your host, e.g. IPv6. 8)
 52  *
  53  *      HW: the most generic way. Device supplied the checksum of the
  54  *          _whole_ packet as seen by netif_rx in skb->csum.
  55  *          NOTE: even if a device supports only some protocols but
  56  *          is able to produce some skb->csum, it MUST use HW,
  57  *          not UNNECESSARY.
 58  *
 59  * B. Checksumming on output.
 60  *
 61  *      NONE: skb is checksummed by protocol or csum is not required.
 62  *
 63  *      HW: device is required to csum packet as seen by hard_start_xmit
 64  *      from skb->h.raw to the end and to record the checksum
 65  *      at skb->h.raw+skb->csum.
 66  *
 67  *      Device must show its capabilities in dev->features, set
 68  *      at device setup time.
  69  *      NETIF_F_HW_CSUM - a clever device; it is able to checksum
  70  *                        everything.
  71  *      NETIF_F_NO_CSUM - loopback or reliable single hop media.
  72  *      NETIF_F_IP_CSUM - device is dumb. It is able to csum only
  73  *                        TCP/UDP over IPv4. Sigh. Vendors like this
  74  *                        way for some unknown reason. Though, see the
  75  *                        comment above about CHECKSUM_UNNECESSARY. 8)
 76  *
 77  *      Any questions? No questions, good.              --ANK
 78  */
 79 
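A sketch of how a receive path might honour these rules, for a hypothetical NIC
whose hardware checksums every frame. mydev_rx, buf and hw_csum are illustrative
names, not a real driver API:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>

static void mydev_rx(struct net_device *dev, unsigned char *buf,
                     unsigned int len, unsigned int hw_csum)
{
        struct sk_buff *skb = dev_alloc_skb(len + 2);

        if (skb == NULL)
                return;                         /* drop on allocation failure */
        skb_reserve(skb, 2);                    /* align the IP header */
        memcpy(skb_put(skb, len), buf, len);
        skb->dev = dev;
        skb->protocol = eth_type_trans(skb, dev);
        skb->csum = hw_csum;                    /* csum of the whole packet */
        skb->ip_summed = CHECKSUM_HW;           /* not CHECKSUM_UNNECESSARY */
        netif_rx(skb);
}
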
 80 #ifdef __i386__
 81 #define NET_CALLER(arg) (*(((void**)&arg)-1))
 82 #else
 83 #define NET_CALLER(arg) __builtin_return_address(0)
 84 #endif
 85 
 86 #ifdef CONFIG_NETFILTER
 87 struct nf_conntrack {
 88         atomic_t use;
 89         void (*destroy)(struct nf_conntrack *);
 90 };
 91 
 92 struct nf_ct_info {
 93         struct nf_conntrack *master;
 94 };
 95 #endif
 96 
 97 struct sk_buff_head {
 98         /* These two members must be first. */
 99         struct sk_buff  * next;
100         struct sk_buff  * prev;
101 
102         __u32           qlen;
103         spinlock_t      lock;
104 };
105 
106 struct sk_buff;
107 
108 #define MAX_SKB_FRAGS 6
109 
110 typedef struct skb_frag_struct skb_frag_t;
111 
112 struct skb_frag_struct
113 {
114         struct page *page;
115         __u16 page_offset;
116         __u16 size;
117 };
118 
119 /* This data is invariant across clones and lives at
 120  * the end of the header data, i.e. at skb->end.
121  */
122 struct skb_shared_info {
123         atomic_t        dataref;
124         unsigned int    nr_frags;
125         struct sk_buff  *frag_list;
126         skb_frag_t      frags[MAX_SKB_FRAGS];
127 };
128 
129 struct sk_buff {
130         /* These two members must be first. */
131         struct sk_buff  * next;                 /* Next buffer in list                          */
132         struct sk_buff  * prev;                 /* Previous buffer in list                      */
133 
134         struct sk_buff_head * list;             /* List we are on                               */
135         struct sock     *sk;                    /* Socket we are owned by                       */
136         struct timeval  stamp;                  /* Time we arrived                              */
137         struct net_device       *dev;           /* Device we arrived on/are leaving by          */
138         struct net_device       *real_dev;      /* For support of point to point protocols 
139                                                    (e.g. 802.3ad) over bonding, we must save the
140                                                    physical device that got the packet before
141                                                    replacing skb->dev with the virtual device.  */
142 
143         /* Transport layer header */
144         union
145         {
146                 struct tcphdr   *th;
147                 struct udphdr   *uh;
148                 struct icmphdr  *icmph;
149                 struct igmphdr  *igmph;
150                 struct iphdr    *ipiph;
151                 struct spxhdr   *spxh;
152                 unsigned char   *raw;
153         } h;
154 
155         /* Network layer header */
156         union
157         {
158                 struct iphdr    *iph;
159                 struct ipv6hdr  *ipv6h;
160                 struct arphdr   *arph;
161                 struct ipxhdr   *ipxh;
162                 unsigned char   *raw;
163         } nh;
164   
165         /* Link layer header */
166         union 
167         {       
168                 struct ethhdr   *ethernet;
169                 unsigned char   *raw;
170         } mac;
171 
172         struct  dst_entry *dst;
173 
174         /* 
175          * This is the control buffer. It is free to use for every
176          * layer. Please put your private variables there. If you
177          * want to keep them across layers you have to do a skb_clone()
178          * first. This is owned by whoever has the skb queued ATM.
179          */ 
180         char            cb[48];  
181 
182         unsigned int    len;                    /* Length of actual data                        */
183         unsigned int    data_len;
184         unsigned int    csum;                   /* Checksum                                     */
185         unsigned char   __unused,               /* Dead field, may be reused                    */
186                         cloned,                 /* head may be cloned (check refcnt to be sure). */
187                         pkt_type,               /* Packet class                                 */
188                         ip_summed;              /* Driver fed us an IP checksum                 */
189         __u32           priority;               /* Packet queueing priority                     */
190         atomic_t        users;                  /* User count - see datagram.c,tcp.c            */
191         unsigned short  protocol;               /* Packet protocol from driver.                 */
192         unsigned short  security;               /* Security level of packet                     */
193         unsigned int    truesize;               /* Buffer size                                  */
194 
195         unsigned char   *head;                  /* Head of buffer                               */
196         unsigned char   *data;                  /* Data head pointer                            */
197         unsigned char   *tail;                  /* Tail pointer                                 */
198         unsigned char   *end;                   /* End pointer                                  */
199 
200         void            (*destructor)(struct sk_buff *);        /* Destruct function            */
201 #ifdef CONFIG_NETFILTER
202         /* Can be used for communication between hooks. */
203         unsigned long   nfmark;
204         /* Cache info */
205         __u32           nfcache;
206         /* Associated connection, if any */
207         struct nf_ct_info *nfct;
208 #ifdef CONFIG_NETFILTER_DEBUG
209         unsigned int nf_debug;
210 #endif
211 #endif /*CONFIG_NETFILTER*/
212 
213 #if defined(CONFIG_HIPPI)
214         union{
215                 __u32   ifield;
216         } private;
217 #endif
218 
219 #ifdef CONFIG_NET_SCHED
220        __u32           tc_index;               /* traffic control index */
221 #endif
222 };
223 
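A sketch of how the header unions are typically used once the stack has set
them: skb->nh.iph, skb->h.th and skb->data are different typed views into the
same linear buffer. is_tcp_syn is an illustrative name:

#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/in.h>

static int is_tcp_syn(const struct sk_buff *skb)
{
        struct iphdr  *iph = skb->nh.iph;       /* network-layer view */
        struct tcphdr *th  = skb->h.th;         /* transport-layer view */

        return iph->protocol == IPPROTO_TCP && th->syn && !th->ack;
}
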
224 #ifdef __KERNEL__
225 /*
226  *      Handling routines are only of interest to the kernel
227  */
228 #include <linux/slab.h>
229 
230 #include <asm/system.h>
231 
232 extern void                     __kfree_skb(struct sk_buff *skb);
233 extern struct sk_buff *         alloc_skb(unsigned int size, int priority);
234 extern void                     kfree_skbmem(struct sk_buff *skb);
235 extern struct sk_buff *         skb_clone(struct sk_buff *skb, int priority);
236 extern struct sk_buff *         skb_copy(const struct sk_buff *skb, int priority);
237 extern struct sk_buff *         pskb_copy(struct sk_buff *skb, int gfp_mask);
238 extern int                      pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask);
239 extern struct sk_buff *         skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom);
240 extern struct sk_buff *         skb_copy_expand(const struct sk_buff *skb, 
241                                                 int newheadroom,
242                                                 int newtailroom,
243                                                 int priority);
244 extern struct sk_buff *         skb_pad(struct sk_buff *skb, int pad);
245 #define dev_kfree_skb(a)        kfree_skb(a)
246 extern void     skb_over_panic(struct sk_buff *skb, int len, void *here);
247 extern void     skb_under_panic(struct sk_buff *skb, int len, void *here);
248 
249 /* Internal */
250 #define skb_shinfo(SKB)         ((struct skb_shared_info *)((SKB)->end))
251 
252 /**
253  *      skb_queue_empty - check if a queue is empty
254  *      @list: queue head
255  *
256  *      Returns true if the queue is empty, false otherwise.
257  */
258  
259 static inline int skb_queue_empty(struct sk_buff_head *list)
260 {
261         return (list->next == (struct sk_buff *) list);
262 }
263 
264 /**
265  *      skb_get - reference buffer
266  *      @skb: buffer to reference
267  *
268  *      Makes another reference to a socket buffer and returns a pointer
269  *      to the buffer.
270  */
271  
272 static inline struct sk_buff *skb_get(struct sk_buff *skb)
273 {
274         atomic_inc(&skb->users);
275         return skb;
276 }
277 
278 /*
 279  * If users==1, we are the only owner and can avoid a redundant
 280  * atomic operation.
281  */
282  
283 /**
284  *      kfree_skb - free an sk_buff
285  *      @skb: buffer to free
286  *
287  *      Drop a reference to the buffer and free it if the usage count has
288  *      hit zero.
289  */
290  
291 static inline void kfree_skb(struct sk_buff *skb)
292 {
293         if (likely(atomic_read(&skb->users) == 1))
294                 smp_rmb();
295         else if (likely(!atomic_dec_and_test(&skb->users)))
296                 return;
297         __kfree_skb(skb);
298 }
299 
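The reference-counting contract in one sketch: every skb_get() must be balanced
by a kfree_skb(), and the buffer is destroyed only when the last reference is
dropped. hold_and_release is an illustrative name:

static void hold_and_release(struct sk_buff *skb)
{
        struct sk_buff *ref = skb_get(skb);     /* users: 1 -> 2 */

        kfree_skb(ref);                         /* users: 2 -> 1, kept alive */
        kfree_skb(skb);                         /* users: 1 -> 0, __kfree_skb */
}
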
300 /**
301  *      skb_cloned - is the buffer a clone
302  *      @skb: buffer to check
303  *
304  *      Returns true if the buffer was generated with skb_clone() and is
305  *      one of multiple shared copies of the buffer. Cloned buffers are
306  *      shared data so must not be written to under normal circumstances.
307  */
308 
309 static inline int skb_cloned(struct sk_buff *skb)
310 {
311         return skb->cloned && atomic_read(&skb_shinfo(skb)->dataref) != 1;
312 }
313 
314 /**
315  *      skb_shared - is the buffer shared
316  *      @skb: buffer to check
317  *
318  *      Returns true if more than one person has a reference to this
319  *      buffer.
320  */
321  
322 static inline int skb_shared(struct sk_buff *skb)
323 {
324         return (atomic_read(&skb->users) != 1);
325 }
326 
327 /** 
328  *      skb_share_check - check if buffer is shared and if so clone it
329  *      @skb: buffer to check
330  *      @pri: priority for memory allocation
331  *      
332  *      If the buffer is shared the buffer is cloned and the old copy
333  *      drops a reference. A new clone with a single reference is returned.
 334  *      If the buffer is not shared the original buffer is returned. When
 335  *      called from interrupt context or with spinlocks held, @pri must
 336  *      be %GFP_ATOMIC.
337  *
338  *      NULL is returned on a memory allocation failure.
339  */
340  
341 static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
342 {
343         if (skb_shared(skb)) {
344                 struct sk_buff *nskb;
345                 nskb = skb_clone(skb, pri);
346                 kfree_skb(skb);
347                 return nskb;
348         }
349         return skb;
350 }
351 
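Typical use on a receive path that wants to modify the buffer: a sketch of a
hypothetical handler running in softirq context (hence %GFP_ATOMIC). my_rcv is
an illustrative name:

static int my_rcv(struct sk_buff *skb)
{
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (skb == NULL)
                return 0;       /* clone failed; old reference already dropped */

        /* ... safe to modify skb metadata (see skb_cloned() for the data) ... */
        kfree_skb(skb);
        return 0;
}
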
352 
353 /*
354  *      Copy shared buffers into a new sk_buff. We effectively do COW on
 355  *      packets to handle cases where we have a local reader and a
 356  *      forward path, and a couple of other messy ones. The usual one is
 357  *      tcpdumping a packet that's being forwarded.
358  */
359  
360 /**
361  *      skb_unshare - make a copy of a shared buffer
362  *      @skb: buffer to check
363  *      @pri: priority for memory allocation
364  *
365  *      If the socket buffer is a clone then this function creates a new
366  *      copy of the data, drops a reference count on the old copy and returns
367  *      the new copy with the reference count at 1. If the buffer is not a clone
368  *      the original buffer is returned. When called with a spinlock held or
 369  *      from interrupt context, @pri must be %GFP_ATOMIC.
370  *
371  *      %NULL is returned on a memory allocation failure.
372  */
373  
374 static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
375 {
376         struct sk_buff *nskb;
377         if(!skb_cloned(skb))
378                 return skb;
379         nskb=skb_copy(skb, pri);
380         kfree_skb(skb);         /* Free our shared copy */
381         return nskb;
382 }
383 
384 /**
385  *      skb_peek
386  *      @list_: list to peek at
387  *
388  *      Peek an &sk_buff. Unlike most other operations you _MUST_
389  *      be careful with this one. A peek leaves the buffer on the
390  *      list and someone else may run off with it. You must hold
391  *      the appropriate locks or have a private queue to do this.
392  *
393  *      Returns %NULL for an empty list or a pointer to the head element.
394  *      The reference count is not incremented and the reference is therefore
395  *      volatile. Use with caution.
396  */
397  
398 static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
399 {
400         struct sk_buff *list = ((struct sk_buff *)list_)->next;
401         if (list == (struct sk_buff *)list_)
402                 list = NULL;
403         return list;
404 }
405 
406 /**
407  *      skb_peek_tail
408  *      @list_: list to peek at
409  *
410  *      Peek an &sk_buff. Unlike most other operations you _MUST_
411  *      be careful with this one. A peek leaves the buffer on the
412  *      list and someone else may run off with it. You must hold
413  *      the appropriate locks or have a private queue to do this.
414  *
415  *      Returns %NULL for an empty list or a pointer to the tail element.
416  *      The reference count is not incremented and the reference is therefore
417  *      volatile. Use with caution.
418  */
419 
420 static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
421 {
422         struct sk_buff *list = ((struct sk_buff *)list_)->prev;
423         if (list == (struct sk_buff *)list_)
424                 list = NULL;
425         return list;
426 }
427 
428 /**
429  *      skb_queue_len   - get queue length
430  *      @list_: list to measure
431  *
432  *      Return the length of an &sk_buff queue. 
433  */
434  
435 static inline __u32 skb_queue_len(struct sk_buff_head *list_)
436 {
437         return(list_->qlen);
438 }
439 
440 static inline void skb_queue_head_init(struct sk_buff_head *list)
441 {
442         spin_lock_init(&list->lock);
443         list->prev = (struct sk_buff *)list;
444         list->next = (struct sk_buff *)list;
445         list->qlen = 0;
446 }
447 
448 /*
449  *      Insert an sk_buff at the start of a list.
450  *
451  *      The "__skb_xxxx()" functions are the non-atomic ones that
452  *      can only be called with interrupts disabled.
453  */
454 
455 /**
456  *      __skb_queue_head - queue a buffer at the list head
457  *      @list: list to use
458  *      @newsk: buffer to queue
459  *
460  *      Queue a buffer at the start of a list. This function takes no locks
461  *      and you must therefore hold required locks before calling it.
462  *
463  *      A buffer cannot be placed on two lists at the same time.
464  */     
465  
466 static inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
467 {
468         struct sk_buff *prev, *next;
469 
470         newsk->list = list;
471         list->qlen++;
472         prev = (struct sk_buff *)list;
473         next = prev->next;
474         newsk->next = next;
475         newsk->prev = prev;
476         next->prev = newsk;
477         prev->next = newsk;
478 }
479 
480 
481 /**
482  *      skb_queue_head - queue a buffer at the list head
483  *      @list: list to use
484  *      @newsk: buffer to queue
485  *
486  *      Queue a buffer at the start of the list. This function takes the
 487  *      list lock and can be used safely with other locking &sk_buff
 488  *      functions.
489  *
490  *      A buffer cannot be placed on two lists at the same time.
491  */     
492 
493 static inline void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
494 {
495         unsigned long flags;
496 
497         spin_lock_irqsave(&list->lock, flags);
498         __skb_queue_head(list, newsk);
499         spin_unlock_irqrestore(&list->lock, flags);
500 }
501 
502 /**
503  *      __skb_queue_tail - queue a buffer at the list tail
504  *      @list: list to use
505  *      @newsk: buffer to queue
506  *
507  *      Queue a buffer at the end of a list. This function takes no locks
508  *      and you must therefore hold required locks before calling it.
509  *
510  *      A buffer cannot be placed on two lists at the same time.
511  */     
512  
513 
514 static inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
515 {
516         struct sk_buff *prev, *next;
517 
518         newsk->list = list;
519         list->qlen++;
520         next = (struct sk_buff *)list;
521         prev = next->prev;
522         newsk->next = next;
523         newsk->prev = prev;
524         next->prev = newsk;
525         prev->next = newsk;
526 }
527 
528 /**
529  *      skb_queue_tail - queue a buffer at the list tail
530  *      @list: list to use
531  *      @newsk: buffer to queue
532  *
533  *      Queue a buffer at the tail of the list. This function takes the
 534  *      list lock and can be used safely with other locking &sk_buff
 535  *      functions.
536  *
537  *      A buffer cannot be placed on two lists at the same time.
538  */     
539 
540 static inline void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
541 {
542         unsigned long flags;
543 
544         spin_lock_irqsave(&list->lock, flags);
545         __skb_queue_tail(list, newsk);
546         spin_unlock_irqrestore(&list->lock, flags);
547 }
548 
549 /**
550  *      __skb_dequeue - remove from the head of the queue
551  *      @list: list to dequeue from
552  *
553  *      Remove the head of the list. This function does not take any locks
554  *      so must be used with appropriate locks held only. The head item is
555  *      returned or %NULL if the list is empty.
556  */
557 
558 static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
559 {
560         struct sk_buff *next, *prev, *result;
561 
562         prev = (struct sk_buff *) list;
563         next = prev->next;
564         result = NULL;
565         if (next != prev) {
566                 result = next;
567                 next = next->next;
568                 list->qlen--;
569                 next->prev = prev;
570                 prev->next = next;
571                 result->next = NULL;
572                 result->prev = NULL;
573                 result->list = NULL;
574         }
575         return result;
576 }
577 
578 /**
579  *      skb_dequeue - remove from the head of the queue
580  *      @list: list to dequeue from
581  *
582  *      Remove the head of the list. The list lock is taken so the function
583  *      may be used safely with other locking list functions. The head item is
584  *      returned or %NULL if the list is empty.
585  */
586 
587 static inline struct sk_buff *skb_dequeue(struct sk_buff_head *list)
588 {
589         unsigned long flags;
590         struct sk_buff *result;
591 
592         spin_lock_irqsave(&list->lock, flags);
593         result = __skb_dequeue(list);
594         spin_unlock_irqrestore(&list->lock, flags);
595         return result;
596 }
597 
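A sketch of the locked queue primitives in use. skb_queue_tail() and
skb_dequeue() take the queue's own spinlock with interrupts disabled, so
producer and consumer need no extra locking. my_backlog and the function names
are illustrative:

static struct sk_buff_head my_backlog;

static void my_init(void)
{
        skb_queue_head_init(&my_backlog);
}

static void my_enqueue(struct sk_buff *skb)
{
        skb_queue_tail(&my_backlog, skb);       /* FIFO order */
}

static void my_drain(void)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&my_backlog)) != NULL)
                kfree_skb(skb);                 /* or hand it on */
}
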
598 /*
599  *      Insert a packet on a list.
600  */
601 
602 static inline void __skb_insert(struct sk_buff *newsk,
603         struct sk_buff * prev, struct sk_buff *next,
604         struct sk_buff_head * list)
605 {
606         newsk->next = next;
607         newsk->prev = prev;
608         next->prev = newsk;
609         prev->next = newsk;
610         newsk->list = list;
611         list->qlen++;
612 }
613 
614 /**
615  *      skb_insert      -       insert a buffer
616  *      @old: buffer to insert before
617  *      @newsk: buffer to insert
618  *
619  *      Place a packet before a given packet in a list. The list locks are taken
 620  *      and this function is atomic with respect to other list locked calls.
621  *      A buffer cannot be placed on two lists at the same time.
622  */
623 
624 static inline void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
625 {
626         unsigned long flags;
627 
628         spin_lock_irqsave(&old->list->lock, flags);
629         __skb_insert(newsk, old->prev, old, old->list);
630         spin_unlock_irqrestore(&old->list->lock, flags);
631 }
632 
633 /*
634  *      Place a packet after a given packet in a list.
635  */
636 
637 static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
638 {
639         __skb_insert(newsk, old, old->next, old->list);
640 }
641 
642 /**
643  *      skb_append      -       append a buffer
644  *      @old: buffer to insert after
645  *      @newsk: buffer to insert
646  *
647  *      Place a packet after a given packet in a list. The list locks are taken
648  *      and this function is atomic with respect to other list locked calls.
649  *      A buffer cannot be placed on two lists at the same time.
650  */
651 
652 
653 static inline void skb_append(struct sk_buff *old, struct sk_buff *newsk)
654 {
655         unsigned long flags;
656 
657         spin_lock_irqsave(&old->list->lock, flags);
658         __skb_append(old, newsk);
659         spin_unlock_irqrestore(&old->list->lock, flags);
660 }
661 
662 /*
663  * remove sk_buff from list. _Must_ be called atomically, and with
664  * the list known..
665  */
666  
667 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
668 {
669         struct sk_buff * next, * prev;
670 
671         list->qlen--;
672         next = skb->next;
673         prev = skb->prev;
674         skb->next = NULL;
675         skb->prev = NULL;
676         skb->list = NULL;
677         next->prev = prev;
678         prev->next = next;
679 }
680 
681 /**
682  *      skb_unlink      -       remove a buffer from a list
683  *      @skb: buffer to remove
684  *
 685  *      Remove a packet from the list it is on. The list locks are taken
 686  *      and this function is atomic with respect to other list locked calls.
687  *      
688  *      Works even without knowing the list it is sitting on, which can be 
689  *      handy at times. It also means that THE LIST MUST EXIST when you 
690  *      unlink. Thus a list must have its contents unlinked before it is
691  *      destroyed.
692  */
693 
694 static inline void skb_unlink(struct sk_buff *skb)
695 {
696         struct sk_buff_head *list = skb->list;
697 
698         if(list) {
699                 unsigned long flags;
700 
701                 spin_lock_irqsave(&list->lock, flags);
702                 if(skb->list == list)
703                         __skb_unlink(skb, skb->list);
704                 spin_unlock_irqrestore(&list->lock, flags);
705         }
706 }
707 
708 /* XXX: more streamlined implementation */
709 
710 /**
711  *      __skb_dequeue_tail - remove from the tail of the queue
712  *      @list: list to dequeue from
713  *
714  *      Remove the tail of the list. This function does not take any locks
715  *      so must be used with appropriate locks held only. The tail item is
716  *      returned or %NULL if the list is empty.
717  */
718 
719 static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
720 {
721         struct sk_buff *skb = skb_peek_tail(list); 
722         if (skb)
723                 __skb_unlink(skb, list);
724         return skb;
725 }
726 
727 /**
 728  *      skb_dequeue_tail - remove from the tail of the queue
 729  *      @list: list to dequeue from
 730  *
 731  *      Remove the tail of the list. The list lock is taken so the function
732  *      may be used safely with other locking list functions. The tail item is
733  *      returned or %NULL if the list is empty.
734  */
735 
736 static inline struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
737 {
738         unsigned long flags;
739         struct sk_buff *result;
740 
741         spin_lock_irqsave(&list->lock, flags);
742         result = __skb_dequeue_tail(list);
743         spin_unlock_irqrestore(&list->lock, flags);
744         return result;
745 }
746 
747 static inline int skb_is_nonlinear(const struct sk_buff *skb)
748 {
749         return skb->data_len;
750 }
751 
752 static inline unsigned int skb_headlen(const struct sk_buff *skb)
753 {
754         return skb->len - skb->data_len;
755 }
756 
757 #define SKB_PAGE_ASSERT(skb) do { if (skb_shinfo(skb)->nr_frags) out_of_line_bug(); } while (0)
758 #define SKB_FRAG_ASSERT(skb) do { if (skb_shinfo(skb)->frag_list) out_of_line_bug(); } while (0)
759 #define SKB_LINEAR_ASSERT(skb) do { if (skb_is_nonlinear(skb)) out_of_line_bug(); } while (0)
760 
761 /*
762  *      Add data to an sk_buff
763  */
764  
765 static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
766 {
767         unsigned char *tmp=skb->tail;
768         SKB_LINEAR_ASSERT(skb);
769         skb->tail+=len;
770         skb->len+=len;
771         return tmp;
772 }
773 
774 /**
775  *      skb_put - add data to a buffer
776  *      @skb: buffer to use 
777  *      @len: amount of data to add
778  *
779  *      This function extends the used data area of the buffer. If this would
780  *      exceed the total buffer size the kernel will panic. A pointer to the
781  *      first byte of the extra data is returned.
782  */
783  
784 static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
785 {
786         unsigned char *tmp=skb->tail;
787         SKB_LINEAR_ASSERT(skb);
788         skb->tail+=len;
789         skb->len+=len;
790         if(skb->tail>skb->end) {
791                 skb_over_panic(skb, len, current_text_addr());
792         }
793         return tmp;
794 }
795 
796 static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
797 {
798         skb->data-=len;
799         skb->len+=len;
800         return skb->data;
801 }
802 
803 /**
804  *      skb_push - add data to the start of a buffer
805  *      @skb: buffer to use 
806  *      @len: amount of data to add
807  *
808  *      This function extends the used data area of the buffer at the buffer
809  *      start. If this would exceed the total buffer headroom the kernel will
810  *      panic. A pointer to the first byte of the extra data is returned.
811  */
812 
813 static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
814 {
815         skb->data-=len;
816         skb->len+=len;
817         if(skb->data<skb->head) {
818                 skb_under_panic(skb, len, current_text_addr());
819         }
820         return skb->data;
821 }
822 
823 static inline char *__skb_pull(struct sk_buff *skb, unsigned int len)
824 {
825         skb->len-=len;
826         if (skb->len < skb->data_len)
827                 out_of_line_bug();
828         return  skb->data+=len;
829 }
830 
831 /**
832  *      skb_pull - remove data from the start of a buffer
833  *      @skb: buffer to use 
834  *      @len: amount of data to remove
835  *
836  *      This function removes data from the start of a buffer, returning
837  *      the memory to the headroom. A pointer to the next data in the buffer
838  *      is returned. Once the data has been pulled future pushes will overwrite
839  *      the old data.
840  */
841 
842 static inline unsigned char * skb_pull(struct sk_buff *skb, unsigned int len)
843 {       
844         if (len > skb->len)
845                 return NULL;
846         return __skb_pull(skb,len);
847 }
848 
849 extern unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta);
850 
851 static inline char *__pskb_pull(struct sk_buff *skb, unsigned int len)
852 {
853         if (len > skb_headlen(skb) &&
854             __pskb_pull_tail(skb, len-skb_headlen(skb)) == NULL)
855                 return NULL;
856         skb->len -= len;
857         return  skb->data += len;
858 }
859 
860 static inline unsigned char * pskb_pull(struct sk_buff *skb, unsigned int len)
861 {       
862         if (len > skb->len)
863                 return NULL;
864         return __pskb_pull(skb,len);
865 }
866 
867 static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
868 {
869         if (len <= skb_headlen(skb))
870                 return 1;
871         if (len > skb->len)
872                 return 0;
873         return (__pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL);
874 }
875 
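A sketch of defensive header parsing: pskb_may_pull() makes sure the requested
bytes are in the linear area before they are dereferenced, and it may
reallocate the header, so pointers into skb->data must be re-read after each
call. parse_ip is an illustrative name:

#include <linux/skbuff.h>
#include <linux/ip.h>

static int parse_ip(struct sk_buff *skb)
{
        struct iphdr *iph;

        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                return -1;                      /* truncated packet */

        iph = (struct iphdr *)skb->data;
        if (!pskb_may_pull(skb, iph->ihl * 4))
                return -1;                      /* options truncated */

        skb->nh.iph = (struct iphdr *)skb->data; /* re-read: head may have moved */
        return 0;
}
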
876 /**
877  *      skb_headroom - bytes at buffer head
878  *      @skb: buffer to check
879  *
880  *      Return the number of bytes of free space at the head of an &sk_buff.
881  */
882  
883 static inline int skb_headroom(const struct sk_buff *skb)
884 {
885         return skb->data-skb->head;
886 }
887 
888 /**
889  *      skb_tailroom - bytes at buffer end
890  *      @skb: buffer to check
891  *
 892  *      Return the number of bytes of free space at the tail of an &sk_buff.
893  */
894 
895 static inline int skb_tailroom(const struct sk_buff *skb)
896 {
897         return skb_is_nonlinear(skb) ? 0 : skb->end-skb->tail;
898 }
899 
900 /**
901  *      skb_reserve - adjust headroom
902  *      @skb: buffer to alter
903  *      @len: bytes to move
904  *
905  *      Increase the headroom of an empty &sk_buff by reducing the tail
906  *      room. This is only allowed for an empty buffer.
907  */
908 
909 static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
910 {
911         skb->data+=len;
912         skb->tail+=len;
913 }
914 
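The classic buffer-layout dance as a sketch: reserve headroom first (only legal
while the buffer is empty), skb_put() the payload at the tail, then let each
lower layer skb_push() its header in front. build_packet and the 32-byte figure
are illustrative:

#include <linux/skbuff.h>
#include <linux/string.h>

static struct sk_buff *build_packet(const void *payload, unsigned int len)
{
        struct sk_buff *skb = alloc_skb(len + 32, GFP_ATOMIC);

        if (skb == NULL)
                return NULL;
        skb_reserve(skb, 32);                   /* headroom for headers */
        memcpy(skb_put(skb, len), payload, len);
        /* each layer may now do: hdr = skb_push(skb, sizeof(*hdr)); */
        return skb;
}
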
915 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
916 
917 static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
918 {
919         if (!skb->data_len) {
920                 skb->len = len;
921                 skb->tail = skb->data+len;
922         } else {
923                 ___pskb_trim(skb, len, 0);
924         }
925 }
926 
927 /**
928  *      skb_trim - remove end from a buffer
929  *      @skb: buffer to alter
930  *      @len: new length
931  *
932  *      Cut the length of a buffer down by removing data from the tail. If
933  *      the buffer is already under the length specified it is not modified.
934  */
935 
936 static inline void skb_trim(struct sk_buff *skb, unsigned int len)
937 {
938         if (skb->len > len) {
939                 __skb_trim(skb, len);
940         }
941 }
942 
943 
944 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
945 {
946         if (!skb->data_len) {
947                 skb->len = len;
948                 skb->tail = skb->data+len;
949                 return 0;
950         } else {
951                 return ___pskb_trim(skb, len, 1);
952         }
953 }
954 
955 static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
956 {
957         if (len < skb->len)
958                 return __pskb_trim(skb, len);
959         return 0;
960 }
961 
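A sketch of the usual trimming case: an ethernet frame padded to the 60-byte
minimum is cut back to the length the IP header claims; pskb_trim() also copes
with non-linear buffers. strip_eth_padding is illustrative and assumes
skb->nh.iph has already been set:

#include <linux/skbuff.h>
#include <linux/ip.h>

static int strip_eth_padding(struct sk_buff *skb)
{
        unsigned int ip_len = ntohs(skb->nh.iph->tot_len);

        if (skb->len > ip_len)
                return pskb_trim(skb, ip_len);  /* drop trailing padding */
        return 0;
}
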
962 /**
963  *      skb_orphan - orphan a buffer
964  *      @skb: buffer to orphan
965  *
966  *      If a buffer currently has an owner then we call the owner's
967  *      destructor function and make the @skb unowned. The buffer continues
968  *      to exist but is no longer charged to its former owner.
969  */
970 
971 
972 static inline void skb_orphan(struct sk_buff *skb)
973 {
974         if (skb->destructor)
975                 skb->destructor(skb);
976         skb->destructor = NULL;
977         skb->sk = NULL;
978 }
979 
980 /**
 981  *      skb_queue_purge - empty a list
982  *      @list: list to empty
983  *
984  *      Delete all buffers on an &sk_buff list. Each buffer is removed from
985  *      the list and one reference dropped. This function takes the list
986  *      lock and is atomic with respect to other list locking functions.
987  */
988 
989 
990 static inline void skb_queue_purge(struct sk_buff_head *list)
991 {
992         struct sk_buff *skb;
993         while ((skb=skb_dequeue(list))!=NULL)
994                 kfree_skb(skb);
995 }
996 
997 /**
 998  *      __skb_queue_purge - empty a list
999  *      @list: list to empty
1000  *
1001  *      Delete all buffers on an &sk_buff list. Each buffer is removed from
1002  *      the list and one reference dropped. This function does not take the
1003  *      list lock and the caller must hold the relevant locks to use it.
1004  */
1005 
1006 
1007 static inline void __skb_queue_purge(struct sk_buff_head *list)
1008 {
1009         struct sk_buff *skb;
1010         while ((skb=__skb_dequeue(list))!=NULL)
1011                 kfree_skb(skb);
1012 }
1013 
1014 /**
1015  *      __dev_alloc_skb - allocate an skbuff for sending
1016  *      @length: length to allocate
1017  *      @gfp_mask: get_free_pages mask, passed to alloc_skb
1018  *
1019  *      Allocate a new &sk_buff and assign it a usage count of one. The
1020  *      buffer has unspecified headroom built in. Users should allocate
1021  *      the headroom they think they need without accounting for the
1022  *      built in space. The built in space is used for optimisations.
1023  *
 1024  *      %NULL is returned if there is no free memory.
1025  */
1026  
1027 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
1028                                               int gfp_mask)
1029 {
1030         struct sk_buff *skb;
1031 
1032         skb = alloc_skb(length+16, gfp_mask);
1033         if (skb)
1034                 skb_reserve(skb,16);
1035         return skb;
1036 }
1037 
1038 /**
1039  *      dev_alloc_skb - allocate an skbuff for sending
1040  *      @length: length to allocate
1041  *
1042  *      Allocate a new &sk_buff and assign it a usage count of one. The
1043  *      buffer has unspecified headroom built in. Users should allocate
1044  *      the headroom they think they need without accounting for the
1045  *      built in space. The built in space is used for optimisations.
1046  *
 1047  *      %NULL is returned if there is no free memory. Although this function
1048  *      allocates memory it can be called from an interrupt.
1049  */
1050  
1051 static inline struct sk_buff *dev_alloc_skb(unsigned int length)
1052 {
1053         return __dev_alloc_skb(length, GFP_ATOMIC);
1054 }
1055 
1056 /**
1057  *      skb_cow - copy header of skb when it is required
1058  *      @skb: buffer to cow
1059  *      @headroom: needed headroom
1060  *
1061  *      If the skb passed lacks sufficient headroom or its data part
1062  *      is shared, data is reallocated. If reallocation fails, an error
1063  *      is returned and original skb is not changed.
1064  *
 1065  *      The result is an skb with a writable area from skb->head to
 1066  *      skb->tail and at least @headroom bytes of space at the head.
1067  */
1068 
1069 static inline int
1070 skb_cow(struct sk_buff *skb, unsigned int headroom)
1071 {
1072         int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb);
1073 
1074         if (delta < 0)
1075                 delta = 0;
1076 
1077         if (delta || skb_cloned(skb))
1078                 return pskb_expand_head(skb, (delta+15)&~15, 0, GFP_ATOMIC);
1079         return 0;
1080 }
1081 
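A sketch of the canonical skb_cow() pattern on a forwarding path: make the
header private and ensure room for the outgoing device's link header before
editing it. forward_fixup is illustrative; real code also updates iph->check:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/ip.h>

static int forward_fixup(struct sk_buff *skb, struct net_device *out)
{
        struct iphdr *iph;

        if (skb_cow(skb, out->hard_header_len) != 0)
                return -1;                      /* reallocation failed */

        iph = skb->nh.iph;                      /* now private and writable */
        iph->ttl--;                             /* checksum fixup omitted */
        return 0;
}
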
1082 /**
1083  *      skb_padto       - pad an skbuff up to a minimal size
1084  *      @skb: buffer to pad
1085  *      @len: minimal length
1086  *
1087  *      Pads up a buffer to ensure the trailing bytes exist and are
1088  *      blanked. If the buffer already contains sufficient data it
1089  *      is untouched. Returns the buffer, which may be a replacement
 1090  *      for the original, or %NULL for out of memory, in which case
1091  *      the original buffer is still freed.
1092  */
1093  
1094 static inline struct sk_buff *skb_padto(struct sk_buff *skb, unsigned int len)
1095 {
1096         unsigned int size = skb->len;
1097         if(likely(size >= len))
1098                 return skb;
1099         return skb_pad(skb, len-size);
1100 }
1101 
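A sketch of skb_padto() in a transmit routine for hardware that cannot pad runt
frames itself; note the quirk that the original buffer is already freed when
%NULL comes back. my_start_xmit is an illustrative name:

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>

static int my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        skb = skb_padto(skb, ETH_ZLEN);         /* pad to 60 bytes */
        if (skb == NULL)
                return 0;       /* out of memory; old skb already freed */

        /* ... hand skb to the hardware ... */
        return 0;
}
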
1102 /**
1103  *      skb_linearize - convert paged skb to linear one
 1104  *      @skb: buffer to linearize
1105  *      @gfp: allocation mode
1106  *
1107  *      If there is no free memory -ENOMEM is returned, otherwise zero
1108  *      is returned and the old skb data released.  */
1109 int skb_linearize(struct sk_buff *skb, int gfp);
1110 
1111 static inline void *kmap_skb_frag(const skb_frag_t *frag)
1112 {
1113 #ifdef CONFIG_HIGHMEM
1114         if (in_irq())
1115                 out_of_line_bug();
1116 
1117         local_bh_disable();
1118 #endif
1119         return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
1120 }
1121 
1122 static inline void kunmap_skb_frag(void *vaddr)
1123 {
1124         kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
1125 #ifdef CONFIG_HIGHMEM
1126         local_bh_enable();
1127 #endif
1128 }
1129 
1130 #define skb_queue_walk(queue, skb) \
1131                 for (skb = (queue)->next;                       \
1132                      (skb != (struct sk_buff *)(queue));        \
1133                      skb=skb->next)
1134 
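skb_queue_walk() itself takes no locks and no references, so a sketch of a safe
read-only walk holds the queue lock for the duration. count_small and the
64-byte threshold are illustrative:

static unsigned int count_small(struct sk_buff_head *list)
{
        struct sk_buff *skb;
        unsigned int n = 0;
        unsigned long flags;

        spin_lock_irqsave(&list->lock, flags);
        skb_queue_walk(list, skb) {
                if (skb->len < 64)
                        n++;
        }
        spin_unlock_irqrestore(&list->lock, flags);
        return n;
}
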
1135 
1136 extern struct sk_buff *         skb_recv_datagram(struct sock *sk,unsigned flags,int noblock, int *err);
1137 extern unsigned int             datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait);
1138 extern int                      skb_copy_datagram(const struct sk_buff *from, int offset, char *to,int size);
1139 extern int                      skb_copy_datagram_iovec(const struct sk_buff *from, int offset, struct iovec *to,int size);
1140 extern int                      skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int *csump);
1141 extern int                      skb_copy_and_csum_datagram_iovec(const struct sk_buff *skb, int hlen, struct iovec *iov);
1142 extern void                     skb_free_datagram(struct sock * sk, struct sk_buff *skb);
1143 
1144 extern unsigned int             skb_checksum(const struct sk_buff *skb, int offset, int len, unsigned int csum);
1145 extern int                      skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
1146 extern unsigned int             skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int csum);
1147 extern void                     skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
1148 
1149 extern void skb_init(void);
1150 extern void skb_add_mtu(int mtu);
1151 
1152 #ifdef CONFIG_NETFILTER
1153 static inline void
1154 nf_conntrack_put(struct nf_ct_info *nfct)
1155 {
1156         if (nfct && atomic_dec_and_test(&nfct->master->use))
1157                 nfct->master->destroy(nfct->master);
1158 }
1159 static inline void
1160 nf_conntrack_get(struct nf_ct_info *nfct)
1161 {
1162         if (nfct)
1163                 atomic_inc(&nfct->master->use);
1164 }
1165 static inline void
1166 nf_reset(struct sk_buff *skb)
1167 {
1168         nf_conntrack_put(skb->nfct);
1169         skb->nfct = NULL;
1170 #ifdef CONFIG_NETFILTER_DEBUG
1171         skb->nf_debug = 0;
1172 #endif
1173 }
1174 #else /* CONFIG_NETFILTER */
1175 static inline void nf_reset(struct sk_buff *skb) {}
1176 #endif /* CONFIG_NETFILTER */
1177 
1178 #endif  /* __KERNEL__ */
1179 #endif  /* _LINUX_SKBUFF_H */
1180 
