
Linux/include/linux/skbuff.h

  1 /*
  2  *      Definitions for the 'struct sk_buff' memory handlers.
  3  *
  4  *      Authors:
  5  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
  6  *              Florian La Roche, <rzsfl@rz.uni-sb.de>
  7  *
  8  *      This program is free software; you can redistribute it and/or
  9  *      modify it under the terms of the GNU General Public License
 10  *      as published by the Free Software Foundation; either version
 11  *      2 of the License, or (at your option) any later version.
 12  */
 13  
 14 #ifndef _LINUX_SKBUFF_H
 15 #define _LINUX_SKBUFF_H
 16 
 17 #include <linux/config.h>
 18 #include <linux/time.h>
 19 
 20 #include <asm/atomic.h>
 21 #include <asm/types.h>
 22 #include <asm/spinlock.h>
 23 
 24 #define HAVE_ALLOC_SKB          /* For the drivers to know */
 25 #define HAVE_ALIGNABLE_SKB      /* Ditto 8)                */
 26 #define SLAB_SKB                /* Slabified skbuffs       */
 27 
 28 #define CHECKSUM_NONE 0
 29 #define CHECKSUM_HW 1
 30 #define CHECKSUM_UNNECESSARY 2
 31 
 32 struct sk_buff_head {
 33         struct sk_buff  * next;
 34         struct sk_buff  * prev;
 35         __u32           qlen;           /* Must be same length as a pointer
 36                                            for debugging use */
 37 };
 38 
 39 struct sk_buff {
 40         struct sk_buff  * next;                 /* Next buffer in list                          */
 41         struct sk_buff  * prev;                 /* Previous buffer in list                      */
 42         struct sk_buff_head * list;             /* List we are on                               */
 43         struct sock     *sk;                    /* Socket we are owned by                       */
 44         struct timeval  stamp;                  /* Time we arrived                              */
 45         struct device   *dev;                   /* Device we arrived on/are leaving by          */
 46 
 47         /* Transport layer header */
 48         union
 49         {
 50                 struct tcphdr   *th;
 51                 struct udphdr   *uh;
 52                 struct icmphdr  *icmph;
 53                 struct igmphdr  *igmph;
 54                 struct iphdr    *ipiph;
 55                 struct spxhdr   *spxh;
 56                 unsigned char   *raw;
 57         } h;
 58 
 59         /* Network layer header */
 60         union
 61         {
 62                 struct iphdr    *iph;
 63                 struct ipv6hdr  *ipv6h;
 64                 struct arphdr   *arph;
 65                 struct ipxhdr   *ipxh;
 66                 unsigned char   *raw;
 67         } nh;
 68   
 69         /* Link layer header */
 70         union 
 71         {       
 72                 struct ethhdr   *ethernet;
 73                 unsigned char   *raw;
 74         } mac;
 75 
 76         struct  dst_entry *dst;
 77 
 78         char            cb[48];                 /* Control buffer. Used by layers for private data */
 79 
 80         unsigned int    len;                    /* Length of actual data                        */
 81         unsigned int    csum;                   /* Checksum                                     */
 82         volatile char   used;                   /* Data moved to user and not MSG_PEEK          */
 83         unsigned char   is_clone,               /* We are a clone                               */
 84                         cloned,                 /* head may be cloned (check refcnt to be sure). */
 85                         pkt_type,               /* Packet class                                 */
 86                         pkt_bridged,            /* Tracker for bridging                         */
 87                         ip_summed;              /* Driver fed us an IP checksum                 */
 88         __u32           priority;               /* Packet queueing priority                     */
 89         atomic_t        users;                  /* User count - see datagram.c,tcp.c            */
 90         unsigned short  protocol;               /* Packet protocol from driver.                 */
 91         unsigned short  security;               /* Security level of packet                     */
 92         unsigned int    truesize;               /* Buffer size                                  */
 93 
 94         unsigned char   *head;                  /* Head of buffer                               */
 95         unsigned char   *data;                  /* Data head pointer                            */
 96         unsigned char   *tail;                  /* Tail pointer                                 */
 97         unsigned char   *end;                   /* End pointer                                  */
 98         void            (*destructor)(struct sk_buff *);        /* Destruct function            */
 99 #ifdef CONFIG_IP_FIREWALL
100         __u32           fwmark;                 /* Label made by fwchains, used by pktsched     */
101 #endif
102 #if defined(CONFIG_SHAPER) || defined(CONFIG_SHAPER_MODULE)
103         __u32           shapelatency;           /* Latency on frame */
104         __u32           shapeclock;             /* Time it should go out */
105         __u32           shapelen;               /* Frame length in clocks */
106         __u32           shapestamp;             /* Stamp for shaper    */
107         __u16           shapepend;              /* Pending */
108 #endif
109 
110 #if defined(CONFIG_HIPPI)
111         union{
112                 __u32   ifield;
113         } private;
114 #endif
115 };
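
/*
 * Illustrative sketch (hypothetical helper, not part of the kernel API):
 * the four buffer pointers above always satisfy head <= data <= tail <= end.
 * Headroom is data - head, tailroom is end - tail, and len is tail - data.
 * The helper below merely restates that invariant as a runtime check.
 */
static __inline__ int skb_layout_is_sane(struct sk_buff *skb)
{
        return skb->head <= skb->data &&
               skb->data <= skb->tail &&
               skb->tail <= skb->end  &&
               skb->len  == (unsigned int) (skb->tail - skb->data);
}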
116 
117 /* These are just the default values; they are run-time configurable.
118  * FIXME: Probably the config option should go away. -- erics
119  */
120 #ifdef CONFIG_SKB_LARGE
121 #define SK_WMEM_MAX     65535
122 #define SK_RMEM_MAX     65535
123 #else
124 #define SK_WMEM_MAX     32767
125 #define SK_RMEM_MAX     32767
126 #endif
127 
128 #ifdef __KERNEL__
129 /*
130  *      Handling routines are only of interest to the kernel
131  */
132 #include <linux/malloc.h>
133 
134 #include <asm/system.h>
135 
136 extern void                     __kfree_skb(struct sk_buff *skb);
137 extern void                     skb_queue_head_init(struct sk_buff_head *list);
138 extern void                     skb_queue_head(struct sk_buff_head *list,struct sk_buff *buf);
139 extern void                     skb_queue_tail(struct sk_buff_head *list,struct sk_buff *buf);
140 extern struct sk_buff *         skb_dequeue(struct sk_buff_head *list);
141 extern void                     skb_insert(struct sk_buff *old,struct sk_buff *newsk);
142 extern void                     skb_append(struct sk_buff *old,struct sk_buff *newsk);
143 extern void                     skb_unlink(struct sk_buff *buf);
144 extern __u32                    skb_queue_len(struct sk_buff_head *list);
145 extern struct sk_buff *         skb_peek_copy(struct sk_buff_head *list);
146 extern struct sk_buff *         alloc_skb(unsigned int size, int priority);
147 extern struct sk_buff *         dev_alloc_skb(unsigned int size);
148 extern void                     kfree_skbmem(struct sk_buff *skb);
149 extern struct sk_buff *         skb_clone(struct sk_buff *skb, int priority);
150 extern struct sk_buff *         skb_copy(struct sk_buff *skb, int priority);
151 extern struct sk_buff *         skb_realloc_headroom(struct sk_buff *skb, int newheadroom);
152 extern struct sk_buff *         skb_pad(struct sk_buff *skb, int pad);
153 #define dev_kfree_skb(a)        kfree_skb(a)
154 extern unsigned char *          skb_put(struct sk_buff *skb, unsigned int len);
155 extern unsigned char *          skb_push(struct sk_buff *skb, unsigned int len);
156 extern unsigned char *          skb_pull(struct sk_buff *skb, unsigned int len);
157 extern int                      skb_headroom(struct sk_buff *skb);
158 extern int                      skb_tailroom(struct sk_buff *skb);
159 extern void                     skb_reserve(struct sk_buff *skb, unsigned int len);
160 extern void                     skb_trim(struct sk_buff *skb, unsigned int len);
161 extern void     skb_over_panic(struct sk_buff *skb, int len, void *here);
162 extern void     skb_under_panic(struct sk_buff *skb, int len, void *here);
163 
164 /* Internal */
165 extern __inline__ atomic_t *skb_datarefp(struct sk_buff *skb)
166 {
167         return (atomic_t *)(skb->end);
168 }
169 
170 extern __inline__ int skb_queue_empty(struct sk_buff_head *list)
171 {
172         return (list->next == (struct sk_buff *) list);
173 }
174 
175 extern __inline__ void kfree_skb(struct sk_buff *skb)
176 {
177         if (atomic_dec_and_test(&skb->users))
178                 __kfree_skb(skb);
179 }
180 
181 /* Use this if you didn't touch the skb state [for fast switching] */
182 extern __inline__ void kfree_skb_fast(struct sk_buff *skb)
183 {
184         if (atomic_dec_and_test(&skb->users))
185                 kfree_skbmem(skb);      
186 }
187 
188 extern __inline__ int skb_cloned(struct sk_buff *skb)
189 {
190         return skb->cloned && atomic_read(skb_datarefp(skb)) != 1;
191 }
192 
193 extern __inline__ int skb_shared(struct sk_buff *skb)
194 {
195         return (atomic_read(&skb->users) != 1);
196 }
197 
198 /*
199  *      Copy shared buffers into a new sk_buff. We effectively do COW on
200  *      packets to handle cases where we have both a local reader and a
201  *      forwarding path, and a couple of other messy ones. The typical one
202  *      is tcpdumping a packet that's being forwarded.
203  */
204  
205 extern __inline__ struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
206 {
207         struct sk_buff *nskb;
208         if(!skb_cloned(skb))
209                 return skb;
210         nskb=skb_copy(skb, pri);
211         kfree_skb(skb);         /* Free our shared copy */
212         return nskb;
213 }
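
/*
 * Usage sketch (illustrative only; my_prepare_to_modify is a hypothetical
 * caller): take a private copy of possibly shared packet data before
 * modifying it in place, e.g. on a forwarding path while tcpdump may
 * still hold a clone.
 */
static __inline__ struct sk_buff *my_prepare_to_modify(struct sk_buff *skb)
{
        skb = skb_unshare(skb, GFP_ATOMIC);
        /* NULL means the copy failed; skb_unshare() has already consumed
           the original via kfree_skb().  Otherwise the data area is now
           private and safe to write. */
        return skb;
}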
214 
215 /*
216  *      Peek an sk_buff. Unlike most other operations you _MUST_
217  *      be careful with this one. A peek leaves the buffer on the
218  *      list and someone else may run off with it. On an interrupt-driven
219  *      system: cli(), peek the buffer, copy the data, then sti(). (A usage
220  *      sketch follows skb_peek_tail() below.) */
221  
222 extern __inline__ struct sk_buff *skb_peek(struct sk_buff_head *list_)
223 {
224         struct sk_buff *list = ((struct sk_buff *)list_)->next;
225         if (list == (struct sk_buff *)list_)
226                 list = NULL;
227         return list;
228 }
229 
230 extern __inline__ struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
231 {
232         struct sk_buff *list = ((struct sk_buff *)list_)->prev;
233         if (list == (struct sk_buff *)list_)
234                 list = NULL;
235         return list;
236 }
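
/*
 * Peek sketch (illustrative only; my_peek_first_byte is hypothetical):
 * keep interrupts off around the whole peek-and-copy, because the buffer
 * stays on the list and another context may dequeue and free it the
 * moment interrupts are enabled again.
 */
static __inline__ int my_peek_first_byte(struct sk_buff_head *list, unsigned char *out)
{
        unsigned long flags;
        struct sk_buff *skb;
        int ret = -1;

        save_flags(flags);
        cli();
        skb = skb_peek(list);
        if (skb != NULL && skb->len > 0) {
                *out = skb->data[0];
                ret = 0;
        }
        restore_flags(flags);
        return ret;
}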
237 
238 /*
239  *      Return the length of an sk_buff queue
240  */
241  
242 extern __inline__ __u32 skb_queue_len(struct sk_buff_head *list_)
243 {
244         return(list_->qlen);
245 }
246 
247 extern __inline__ void skb_queue_head_init(struct sk_buff_head *list)
248 {
249         list->prev = (struct sk_buff *)list;
250         list->next = (struct sk_buff *)list;
251         list->qlen = 0;
252 }
253 
254 /*
255  *      Insert an sk_buff at the start of a list.
256  *
257  *      The "__skb_xxxx()" functions are the non-atomic ones that
258  *      can only be called with interrupts disabled.
259  */
260 
261 extern __inline__ void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
262 {
263         struct sk_buff *prev, *next;
264 
265         newsk->list = list;
266         list->qlen++;
267         prev = (struct sk_buff *)list;
268         next = prev->next;
269         newsk->next = next;
270         newsk->prev = prev;
271         next->prev = newsk;
272         prev->next = newsk;
273 }
274 
275 extern spinlock_t skb_queue_lock;
276 
277 extern __inline__ void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
278 {
279         unsigned long flags;
280 
281         spin_lock_irqsave(&skb_queue_lock, flags);
282         __skb_queue_head(list, newsk);
283         spin_unlock_irqrestore(&skb_queue_lock, flags);
284 }
285 
286 /*
287  *      Insert an sk_buff at the end of a list.
288  */
289 
290 extern __inline__ void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
291 {
292         struct sk_buff *prev, *next;
293 
294         newsk->list = list;
295         list->qlen++;
296         next = (struct sk_buff *)list;
297         prev = next->prev;
298         newsk->next = next;
299         newsk->prev = prev;
300         next->prev = newsk;
301         prev->next = newsk;
302 }
303 
304 extern __inline__ void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
305 {
306         unsigned long flags;
307 
308         spin_lock_irqsave(&skb_queue_lock, flags);
309         __skb_queue_tail(list, newsk);
310         spin_unlock_irqrestore(&skb_queue_lock, flags);
311 }
312 
313 /*
314  *      Remove an sk_buff from a list.
315  */
316 
317 extern __inline__ struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
318 {
319         struct sk_buff *next, *prev, *result;
320 
321         prev = (struct sk_buff *) list;
322         next = prev->next;
323         result = NULL;
324         if (next != prev) {
325                 result = next;
326                 next = next->next;
327                 list->qlen--;
328                 next->prev = prev;
329                 prev->next = next;
330                 result->next = NULL;
331                 result->prev = NULL;
332                 result->list = NULL;
333         }
334         return result;
335 }
336 
337 extern __inline__ struct sk_buff *skb_dequeue(struct sk_buff_head *list)
338 {
339         unsigned long flags;
340         struct sk_buff *result;
341 
342         spin_lock_irqsave(&skb_queue_lock, flags);
343         result = __skb_dequeue(list);
344         spin_unlock_irqrestore(&skb_queue_lock, flags);
345         return result;
346 }
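
/*
 * Queue usage sketch (illustrative only; the my_* driver code is
 * hypothetical): the plain skb_queue_tail()/skb_dequeue() wrappers take
 * skb_queue_lock themselves, so they are safe from both interrupt and
 * process context.  The __ variants are only for callers that already
 * hold that lock with interrupts disabled.
 */
static struct sk_buff_head my_rx_queue;         /* skb_queue_head_init() it before use */

static __inline__ void my_rx_irq(struct sk_buff *skb)
{
        skb_queue_tail(&my_rx_queue, skb);      /* atomic enqueue */
}

static __inline__ void my_rx_drain(void)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&my_rx_queue)) != NULL)
                kfree_skb(skb);                 /* process, then drop our reference */
}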
347 
348 /*
349  *      Insert a packet on a list.
350  */
351 
352 extern __inline__ void __skb_insert(struct sk_buff *newsk,
353         struct sk_buff * prev, struct sk_buff *next,
354         struct sk_buff_head * list)
355 {
356         newsk->next = next;
357         newsk->prev = prev;
358         next->prev = newsk;
359         prev->next = newsk;
360         newsk->list = list;
361         list->qlen++;
362 }
363 
364 /*
365  *      Place a packet before a given packet in a list
366  */
367 extern __inline__ void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
368 {
369         unsigned long flags;
370 
371         spin_lock_irqsave(&skb_queue_lock, flags);
372         __skb_insert(newsk, old->prev, old, old->list);
373         spin_unlock_irqrestore(&skb_queue_lock, flags);
374 }
375 
376 /*
377  *      Place a packet after a given packet in a list.
378  */
379 
380 extern __inline__ void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
381 {
382         __skb_insert(newsk, old, old->next, old->list);
383 }
384 
385 extern __inline__ void skb_append(struct sk_buff *old, struct sk_buff *newsk)
386 {
387         unsigned long flags;
388 
389         spin_lock_irqsave(&skb_queue_lock, flags);
390         __skb_append(old, newsk);
391         spin_unlock_irqrestore(&skb_queue_lock, flags);
392 }
393 
394 /*
395  * remove sk_buff from list. _Must_ be called atomically, and with
396  * the list known.
397  */
398 extern __inline__ void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
399 {
400         struct sk_buff * next, * prev;
401 
402         list->qlen--;
403         next = skb->next;
404         prev = skb->prev;
405         skb->next = NULL;
406         skb->prev = NULL;
407         skb->list = NULL;
408         next->prev = prev;
409         prev->next = next;
410 }
411 
412 /*
413  *      Remove an sk_buff from whatever list it is on. The caller does not need
414  *      to know which list that is, which can be handy at times. It also means
415  *      that THE LIST MUST STILL EXIST when you unlink: a list must have its
416  *      contents unlinked _FIRST_, before the list itself goes away.
417  */
418 
419 extern __inline__ void skb_unlink(struct sk_buff *skb)
420 {
421         unsigned long flags;
422 
423         spin_lock_irqsave(&skb_queue_lock, flags);
424         if(skb->list)
425                 __skb_unlink(skb, skb->list);
426         spin_unlock_irqrestore(&skb_queue_lock, flags);
427 }
428 
429 /* XXX: more streamlined implementation */
430 extern __inline__ struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
431 {
432         struct sk_buff *skb = skb_peek_tail(list); 
433         if (skb)
434                 __skb_unlink(skb, list);
435         return skb;
436 }
437 
438 extern __inline__ struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
439 {
440         unsigned long flags;
441         struct sk_buff *result;
442 
443         spin_lock_irqsave(&skb_queue_lock, flags);
444         result = __skb_dequeue_tail(list);
445         spin_unlock_irqrestore(&skb_queue_lock, flags);
446         return result;
447 }
448 
449 /*
450  *      Add data to an sk_buff
451  */
452  
453 extern __inline__ unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
454 {
455         unsigned char *tmp=skb->tail;
456         skb->tail+=len;
457         skb->len+=len;
458         return tmp;
459 }
460 
461 extern __inline__ unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
462 {
463         unsigned char *tmp=skb->tail;
464         skb->tail+=len;
465         skb->len+=len;
466         if(skb->tail>skb->end)
467         {
468                 __label__ here; 
469                 skb_over_panic(skb, len, &&here); 
470 here:           ;
471         }
472         return tmp;
473 }
474 
475 extern __inline__ unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
476 {
477         skb->data-=len;
478         skb->len+=len;
479         return skb->data;
480 }
481 
482 extern __inline__ unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
483 {
484         skb->data-=len;
485         skb->len+=len;
486         if(skb->data<skb->head)
487         {
488                 __label__ here;
489                 skb_under_panic(skb, len, &&here);
490 here:           ;
491         }
492         return skb->data;
493 }
494 
495 extern __inline__ unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
496 {
497         skb->len-=len;
498         return  skb->data+=len;
499 }
500 
501 extern __inline__ unsigned char * skb_pull(struct sk_buff *skb, unsigned int len)
502 {       
503         if (len > skb->len)
504                 return NULL;
505         return __skb_pull(skb,len);
506 }
507 
508 extern __inline__ int skb_headroom(struct sk_buff *skb)
509 {
510         return skb->data-skb->head;
511 }
512 
513 extern __inline__ int skb_tailroom(struct sk_buff *skb)
514 {
515         return skb->end-skb->tail;
516 }
517 
518 extern __inline__ void skb_reserve(struct sk_buff *skb, unsigned int len)
519 {
520         skb->data+=len;
521         skb->tail+=len;
522 }
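
/*
 * Fill-pattern sketch (illustrative only; my_build_frame, the 16 byte
 * reserve and the 14 byte header size are just examples): reserve headroom
 * first, then skb_put() the payload and skb_push() each lower-layer header
 * in front of it.
 */
static __inline__ struct sk_buff *my_build_frame(unsigned char *payload, unsigned int len)
{
        struct sk_buff *skb = alloc_skb(len + 16, GFP_ATOMIC);

        if (skb == NULL)
                return NULL;
        skb_reserve(skb, 16);                   /* leave room for headers */
        memcpy(skb_put(skb, len), payload, len);/* append payload; skb->len grows */
        skb_push(skb, 14);                      /* prepend e.g. an Ethernet header */
        return skb;
}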
523 
524 extern __inline__ void __skb_trim(struct sk_buff *skb, unsigned int len)
525 {
526         skb->len = len;
527         skb->tail = skb->data+len;
528 }
529 
530 extern __inline__ void skb_trim(struct sk_buff *skb, unsigned int len)
531 {
532         if (skb->len > len) {
533                 __skb_trim(skb, len);
534         }
535 }
536 
537 extern __inline__ void skb_orphan(struct sk_buff *skb)
538 {
539         if (skb->destructor)
540                 skb->destructor(skb);
541         skb->destructor = NULL;
542         skb->sk = NULL;
543 }
544 
545 extern __inline__ void skb_queue_purge(struct sk_buff_head *list)
546 {
547         struct sk_buff *skb;
548         while ((skb=skb_dequeue(list))!=NULL)
549                 kfree_skb(skb);
550 }
551 
552 extern __inline__ struct sk_buff *dev_alloc_skb(unsigned int length)
553 {
554         struct sk_buff *skb;
555 
556         skb = alloc_skb(length+16, GFP_ATOMIC);
557         if (skb)
558                 skb_reserve(skb,16);
559         return skb;
560 }
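
/*
 * Receive-path sketch (illustrative only; my_rx_to_skb is hypothetical):
 * dev_alloc_skb() above already reserves 16 bytes of headroom, so a driver
 * typically just copies the frame in with skb_put().  The extra 2 byte
 * reserve is a common, optional trick to align the IP header that follows
 * a 14 byte Ethernet header.
 */
static __inline__ struct sk_buff *my_rx_to_skb(unsigned char *frame, unsigned int len)
{
        struct sk_buff *skb = dev_alloc_skb(len + 2);

        if (skb == NULL)
                return NULL;                    /* out of memory: drop the frame */
        skb_reserve(skb, 2);                    /* align the IP header */
        memcpy(skb_put(skb, len), frame, len);  /* copy the received frame */
        return skb;
}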
561 
562 extern __inline__ struct sk_buff *
563 skb_cow(struct sk_buff *skb, unsigned int headroom)
564 {
565         headroom = (headroom+15)&~15;
566 
567         if ((unsigned)skb_headroom(skb) < headroom || skb_cloned(skb)) {
568                 struct sk_buff *skb2;
569                 
570                 if ((unsigned)skb_headroom(skb) < headroom)
571                         skb2 = skb_realloc_headroom(skb, headroom);
572                 else
573                         skb2 = skb_copy(skb, GFP_ATOMIC);
574                 kfree_skb(skb);
575                 skb = skb2;
576         }
577         return skb;
578 }
579 
580 /**
581  *      skb_padto       - pad an skbuff up to a minimal size
582  *      @skb: buffer to pad
583  *      @len: minimal length
584  *
585  *      Pads up a buffer to ensure the trailing bytes exist and are
586  *      blanked. If the buffer already contains sufficient data it
587  *      is untouched. Returns the buffer, which may be a replacement
588  *      for the original, or NULL for out of memory - in which case
589  *      the original buffer is still freed.
590  */
591  
592 static inline struct sk_buff *skb_padto(struct sk_buff *skb, unsigned int len)
593 {
594         unsigned int size = skb->len;
595         if(size >= len)
596                 return skb;
597         return skb_pad(skb, len-size);
598 }
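
/*
 * Padding sketch (illustrative only; my_pad_eth_frame is hypothetical):
 * a typical use of skb_padto() is padding runt Ethernet frames up to the
 * 60 byte minimum before transmission.  As documented above, a NULL
 * return means the allocation failed and the original buffer has already
 * been freed.
 */
static __inline__ struct sk_buff *my_pad_eth_frame(struct sk_buff *skb)
{
        return skb_padto(skb, 60);
}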
599 
600 extern struct sk_buff *         skb_recv_datagram(struct sock *sk,unsigned flags,int noblock, int *err);
601 extern unsigned int             datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait);
602 extern int                      skb_copy_datagram(struct sk_buff *from, int offset, char *to,int size);
603 extern int                      skb_copy_datagram_iovec(struct sk_buff *from, int offset, struct iovec *to,int size);
604 extern void                     skb_free_datagram(struct sock * sk, struct sk_buff *skb);
605 
606 extern void skb_init(void);
607 extern void skb_add_mtu(int mtu);
608 
609 #endif  /* __KERNEL__ */
610 #endif  /* _LINUX_SKBUFF_H */
611 
