LCOV - code coverage report
Current view: top level - include/linux - skbuff.h (source / functions)
Test: coverage.info    Lines:     5 /     5  (100.0 %)
Date: 2017-01-25       Functions: 0 /     0  (-)

          Line data    Source code
       1             : /*
       2             :  *      Definitions for the 'struct sk_buff' memory handlers.
       3             :  *
       4             :  *      Authors:
       5             :  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
       6             :  *              Florian La Roche, <rzsfl@rz.uni-sb.de>
       7             :  *
       8             :  *      This program is free software; you can redistribute it and/or
       9             :  *      modify it under the terms of the GNU General Public License
      10             :  *      as published by the Free Software Foundation; either version
      11             :  *      2 of the License, or (at your option) any later version.
      12             :  */
      13             : 
      14             : #ifndef _LINUX_SKBUFF_H
      15             : #define _LINUX_SKBUFF_H
      16             : 
      17             : #include <linux/kernel.h>
      18             : #include <linux/kmemcheck.h>
      19             : #include <linux/compiler.h>
      20             : #include <linux/time.h>
      21             : #include <linux/cache.h>
      22             : 
      23             : #include <asm/atomic.h>
      24             : #include <asm/types.h>
      25             : #include <linux/spinlock.h>
      26             : #include <linux/net.h>
      27             : #include <linux/textsearch.h>
      28             : #include <net/checksum.h>
      29             : #include <linux/rcupdate.h>
      30             : #include <linux/dmaengine.h>
      31             : #include <linux/hrtimer.h>
      32             : 
      33             : /* Don't change this without changing skb_csum_unnecessary! */
      34             : #define CHECKSUM_NONE 0
      35             : #define CHECKSUM_UNNECESSARY 1
      36             : #define CHECKSUM_COMPLETE 2
      37             : #define CHECKSUM_PARTIAL 3
      38             : 
      39             : #define SKB_DATA_ALIGN(X)       (((X) + (SMP_CACHE_BYTES - 1)) & \
      40             :                                  ~(SMP_CACHE_BYTES - 1))
      41             : #define SKB_WITH_OVERHEAD(X)    \
      42             :         ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
      43             : #define SKB_MAX_ORDER(X, ORDER) \
      44             :         SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
      45             : #define SKB_MAX_HEAD(X)         (SKB_MAX_ORDER((X), 0))
      46             : #define SKB_MAX_ALLOC           (SKB_MAX_ORDER(0, 2))
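
/*
 * Worked example (illustrative, not part of skbuff.h): with
 * SMP_CACHE_BYTES == 64, SKB_DATA_ALIGN(100) == (100 + 63) & ~63 == 128,
 * i.e. sizes are rounded up to the next cache-line multiple.
 * SKB_WITH_OVERHEAD() then subtracts the aligned struct skb_shared_info
 * that lives at the end of every data buffer, so SKB_MAX_HEAD(0) is the
 * largest payload that fits in a single order-0 page.
 */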
      47             : 
      48             : /* A. Checksumming of received packets by device.
      49             :  *
      50             :  *      NONE: device failed to checksum this packet.
      51             :  *              skb->csum is undefined.
      52             :  *
      53             :  *      UNNECESSARY: device parsed the packet and verified the checksum.
      54             :  *              skb->csum is undefined.
      55             :  *            This is a bad option, but, unfortunately, many vendors do this,
      56             :  *            apparently with the secret goal of selling you a new device
      57             :  *            when you add a new protocol to your host, e.g. IPv6. 8)
      58             :  *
      59             :  *      COMPLETE: the most generic way. Device supplied checksum of _all_
      60             :  *          the packet as seen by netif_rx in skb->csum.
      61             :  *          NOTE: Even if a device supports only some protocols but
      62             :  *          is able to produce some skb->csum, it MUST use COMPLETE,
      63             :  *          not UNNECESSARY.
      64             :  *
      65             :  *      PARTIAL: identical to the case for output below.  This may occur
      66             :  *          on a packet received directly from another Linux OS, e.g.,
      67             :  *          a virtualised Linux kernel on the same host.  The packet can
      68             :  *          be treated in the same way as UNNECESSARY except that on
      69             :  *          output (i.e., forwarding) the checksum must be filled in
      70             :  *          by the OS or the hardware.
      71             :  *
      72             :  * B. Checksumming on output.
      73             :  *
      74             :  *      NONE: skb is checksummed by protocol or csum is not required.
      75             :  *
      76             :  *      PARTIAL: device is required to csum packet as seen by hard_start_xmit
      77             :  *      from skb->csum_start to the end and to record the checksum
      78             :  *      at skb->csum_start + skb->csum_offset.
      79             :  *
      80             :  *      Device must show its capabilities in dev->features, set
      81             :  *      at device setup time.
      82             :  *      NETIF_F_HW_CSUM - a clever device; it is able to checksum
      83             :  *                        everything.
      84             :  *      NETIF_F_NO_CSUM - loopback or reliable single hop media.
      85             :  *      NETIF_F_IP_CSUM - the device is dumb: it is able to csum only
      86             :  *                        TCP/UDP over IPv4. Sigh. Vendors like it this
      87             :  *                        way for some unknown reason. Though, see the comment above
      88             :  *                        about CHECKSUM_UNNECESSARY. 8)
      89             :  *      NETIF_F_IPV6_CSUM - about as dumb as the last one but does IPv6 instead.
      90             :  *
      91             :  *      Any questions? No questions, good.              --ANK
      92             :  */
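
/*
 * Illustrative sketch, not part of skbuff.h: how a receive path might
 * act on the ip_summed values documented above.  The helper name
 * my_rx_csum_verified() is hypothetical.
 */
static inline int my_rx_csum_verified(__u8 ip_summed)
{
        switch (ip_summed) {
        case CHECKSUM_UNNECESSARY:
                return 1;       /* device claims it verified the checksum */
        case CHECKSUM_COMPLETE:
                /* skb->csum holds a sum over the whole packet; the
                 * caller must still fold it against the pseudo-header. */
                return 0;
        default:
                return 0;       /* CHECKSUM_NONE: verify in software */
        }
}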
      93             : 
      94             : struct net_device;
      95             : struct scatterlist;
      96             : struct pipe_inode_info;
      97             : 
      98             : #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
      99             : struct nf_conntrack {
     100             :         atomic_t use;
     101             : };
     102             : #endif
     103             : 
     104             : #ifdef CONFIG_BRIDGE_NETFILTER
     105             : struct nf_bridge_info {
     106             :         atomic_t use;
     107             :         struct net_device *physindev;
     108             :         struct net_device *physoutdev;
     109             :         unsigned int mask;
     110             :         unsigned long data[32 / sizeof(unsigned long)];
     111             : };
     112             : #endif
     113             : 
     114             : struct sk_buff_head {
     115             :         /* These two members must be first. */
     116             :         struct sk_buff  *next;
     117             :         struct sk_buff  *prev;
     118             : 
     119             :         __u32           qlen;
     120             :         spinlock_t      lock;
     121             : };
     122             : 
     123             : struct sk_buff;
     124             : 
     125             : /* To allow a 64K frame to be packed as a single skb without frag_list */
     126             : #define MAX_SKB_FRAGS (65536/PAGE_SIZE + 2)
     127             : 
     128             : typedef struct skb_frag_struct skb_frag_t;
     129             : 
     130             : struct skb_frag_struct {
     131             :         struct page *page;
     132             :         __u32 page_offset;
     133             :         __u32 size;
     134             : };
     135             : 
     136             : #define HAVE_HW_TIME_STAMP
     137             : 
     138             : /**
     139             :  * struct skb_shared_hwtstamps - hardware time stamps
     140             :  * @hwtstamp:   hardware time stamp transformed into a duration
     141             :  *              since an arbitrary point in time
     142             :  * @syststamp:  hwtstamp transformed to system time base
     143             :  *
     144             :  * Software time stamps generated by ktime_get_real() are stored in
     145             :  * skb->tstamp. The relation between the different kinds of time
     146             :  * stamps is as follows:
     147             :  *
     148             :  * syststamp and tstamp can be compared against each other in
     149             :  * arbitrary combinations.  The accuracy of a
     150             :  * syststamp/tstamp/"syststamp from other device" comparison is
     151             :  * limited by the accuracy of the transformation into system time
     152             :  * base. This depends on the device driver and its underlying
     153             :  * hardware.
     154             :  *
     155             :  * hwtstamps can only be compared against other hwtstamps from
     156             :  * the same device.
     157             :  *
     158             :  * This structure is attached to packets as part of the
     159             :  * &skb_shared_info. Use skb_hwtstamps() to get a pointer.
     160             :  */
     161             : struct skb_shared_hwtstamps {
     162             :         ktime_t hwtstamp;
     163             :         ktime_t syststamp;
     164             : };
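
/*
 * Minimal sketch (not part of skbuff.h): per the comparison rules above,
 * syststamp shares a time base with software stamps, so a delta between
 * them is meaningful.  The helper name my_stamp_delta_ns() is hypothetical.
 */
static inline s64 my_stamp_delta_ns(const struct skb_shared_hwtstamps *hwts,
                                    ktime_t sw_tstamp)
{
        /* valid only if the driver filled in syststamp */
        return ktime_to_ns(ktime_sub(hwts->syststamp, sw_tstamp));
}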
     165             : 
     166             : /**
     167             :  * struct skb_shared_tx - instructions for time stamping of outgoing packets
     168             :  * @hardware:           generate hardware time stamp
     169             :  * @software:           generate software time stamp
     170             :  * @in_progress:        device driver is going to provide
     171             :  *                      hardware time stamp
     172             :  * @flags:              all shared_tx flags
     173             :  *
     174             :  * These flags are attached to packets as part of the
     175             :  * &skb_shared_info. Use skb_tx() to get a pointer.
     176             :  */
     177             : union skb_shared_tx {
     178             :         struct {
     179             :                 __u8    hardware:1,
     180             :                         software:1,
     181             :                         in_progress:1;
     182             :         };
     183             :         __u8 flags;
     184             : };
     185             : 
     186             : /* This data is invariant across clones and lives at
     187             :  * the end of the header data, i.e. at skb->end.
     188             :  */
     189             : struct skb_shared_info {
     190             :         atomic_t        dataref;
     191             :         unsigned short  nr_frags;
     192             :         unsigned short  gso_size;
     193             :         /* Warning: this field is not always filled in (UFO)! */
     194             :         unsigned short  gso_segs;
     195             :         unsigned short  gso_type;
     196             :         __be32          ip6_frag_id;
     197             :         union skb_shared_tx tx_flags;
     198             :         struct sk_buff  *frag_list;
     199             :         struct skb_shared_hwtstamps hwtstamps;
     200             :         skb_frag_t      frags[MAX_SKB_FRAGS];
     201             :         /* Intermediate layers must ensure that destructor_arg
     202             :          * remains valid until skb destructor */
     203             :         void *          destructor_arg;
     204             : };
     205             : 
     206             : /* We divide dataref into two halves.  The higher 16 bits hold references
     207             :  * to the payload part of skb->data.  The lower 16 bits hold references to
     208             :  * the entire skb->data.  A clone of a headerless skb holds the length of
     209             :  * the header in skb->hdr_len.
     210             :  *
     211             :  * All users must obey the rule that the skb->data reference count must be
     212             :  * greater than or equal to the payload reference count.
     213             :  *
     214             :  * Holding a reference to the payload part means that the user does not
     215             :  * care about modifications to the header part of skb->data.
     216             :  */
     217             : #define SKB_DATAREF_SHIFT 16
     218             : #define SKB_DATAREF_MASK ((1 << SKB_DATAREF_SHIFT) - 1)
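
/*
 * Illustrative decode of the two halves described above (not part of
 * skbuff.h).  Given dataref = atomic_read(&skb_shinfo(skb)->dataref):
 *
 *      payload_refs = dataref >> SKB_DATAREF_SHIFT;
 *      total_refs   = dataref &  SKB_DATAREF_MASK;
 *
 * skb_header_cloned() below applies exactly this split: the header is
 * still private when total_refs - payload_refs == 1.
 */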
     219             : 
     220             : 
     221             : enum {
     222             :         SKB_FCLONE_UNAVAILABLE,
     223             :         SKB_FCLONE_ORIG,
     224             :         SKB_FCLONE_CLONE,
     225             : };
     226             : 
     227             : enum {
     228             :         SKB_GSO_TCPV4 = 1 << 0,
     229             :         SKB_GSO_UDP = 1 << 1,
     230             : 
     231             :         /* This indicates the skb is from an untrusted source. */
     232             :         SKB_GSO_DODGY = 1 << 2,
     233             : 
     234             :         /* This indicates the tcp segment has CWR set. */
     235             :         SKB_GSO_TCP_ECN = 1 << 3,
     236             : 
     237             :         SKB_GSO_TCPV6 = 1 << 4,
     238             : 
     239             :         SKB_GSO_FCOE = 1 << 5,
     240             : };
     241             : 
     242             : #if BITS_PER_LONG > 32
     243             : #define NET_SKBUFF_DATA_USES_OFFSET 1
     244             : #endif
     245             : 
     246             : #ifdef NET_SKBUFF_DATA_USES_OFFSET
     247           1 : typedef unsigned int sk_buff_data_t;
     248           4 : #else
     249             : typedef unsigned char *sk_buff_data_t;
     250             : #endif
     251             : 
     252             : /** 
     253             :  *      struct sk_buff - socket buffer
     254             :  *      @next: Next buffer in list
     255             :  *      @prev: Previous buffer in list
     256             :  *      @sk: Socket we are owned by
     257             :  *      @tstamp: Time we arrived
     258             :  *      @dev: Device we arrived on/are leaving by
     259             :  *      @transport_header: Transport layer header
     260             :  *      @network_header: Network layer header
     261             :  *      @mac_header: Link layer header
     262             :  *      @_skb_dst: destination entry
     263             :  *      @sp: the security path, used for xfrm
     264             :  *      @cb: Control buffer. Free for use by every layer. Put private vars here
     265             :  *      @len: Length of actual data
     266             :  *      @data_len: Data length
     267             :  *      @mac_len: Length of link layer header
     268             :  *      @hdr_len: writable header length of cloned skb
     269             :  *      @csum: Checksum (must include start/offset pair)
     270             :  *      @csum_start: Offset from skb->head where checksumming should start
     271             :  *      @csum_offset: Offset from csum_start where checksum should be stored
     272             :  *      @local_df: allow local fragmentation
     273             :  *      @cloned: Head may be cloned (check refcnt to be sure)
     274             :  *      @nohdr: Payload reference only, must not modify header
     275             :  *      @pkt_type: Packet class
     276             :  *      @fclone: skbuff clone status
     277             :  *      @ip_summed: Driver fed us an IP checksum
     278             :  *      @priority: Packet queueing priority
     279             :  *      @users: User count - see {datagram,tcp}.c
     280             :  *      @protocol: Packet protocol from driver
     281             :  *      @truesize: Buffer size 
     282             :  *      @head: Head of buffer
     283             :  *      @data: Data head pointer
     284             :  *      @tail: Tail pointer
     285             :  *      @end: End pointer
     286             :  *      @destructor: Destruct function
     287             :  *      @mark: Generic packet mark
     288             :  *      @nfct: Associated connection, if any
     289             :  *      @ipvs_property: skbuff is owned by ipvs
     290             :  *      @peeked: this packet has been seen already, so stats have been
     291             :  *              done for it, don't do them again
     292             :  *      @nf_trace: netfilter packet trace flag
     293             :  *      @nfctinfo: Relationship of this skb to the connection
     294             :  *      @nfct_reasm: netfilter conntrack re-assembly pointer
     295             :  *      @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
     296             :  *      @skb_iif: ifindex of device we arrived on
     297             :  *      @queue_mapping: Queue mapping for multiqueue devices
     298             :  *      @tc_index: Traffic control index
     299             :  *      @tc_verd: traffic control verdict
     300             :  *      @ndisc_nodetype: router type (from link layer)
     301             :  *      @dma_cookie: a cookie to one of several possible DMA operations
     302             :  *              done by skb DMA functions
     303             :  *      @secmark: security marking
     304             :  *      @vlan_tci: vlan tag control information
     305             :  */
     306             : 
     307             : struct sk_buff {
     308             :         /* These two members must be first. */
     309             :         struct sk_buff          *next;
     310             :         struct sk_buff          *prev;
     311             : 
     312             :         ktime_t                 tstamp;
     313             : 
     314             :         struct sock             *sk;
     315             :         struct net_device       *dev;
     316             : 
     317             :         /*
     318             :          * This is the control buffer. It is free to use for every
     319             :          * layer. Please put your private variables there. If you
     320             :          * want to keep them across layers you have to do a skb_clone()
     321             :          * first. This is owned by whoever has the skb queued ATM.
     322             :          */
     323             :         char                    cb[48] __aligned(8);
     324             : 
     325             :         unsigned long           _skb_dst;
     326             : #ifdef CONFIG_XFRM
     327             :         struct  sec_path        *sp;
     328             : #endif
     329             :         unsigned int            len,
     330             :                                 data_len;
     331             :         __u16                   mac_len,
     332             :                                 hdr_len;
     333             :         union {
     334             :                 __wsum          csum;
     335             :                 struct {
     336             :                         __u16   csum_start;
     337             :                         __u16   csum_offset;
     338             :                 };
     339             :         };
     340             :         __u32                   priority;
     341             :         kmemcheck_bitfield_begin(flags1);
     342             :         __u8                    local_df:1,
     343             :                                 cloned:1,
     344             :                                 ip_summed:2,
     345             :                                 nohdr:1,
     346             :                                 nfctinfo:3;
     347             :         __u8                    pkt_type:3,
     348             :                                 fclone:2,
     349             :                                 ipvs_property:1,
     350             :                                 peeked:1,
     351             :                                 nf_trace:1;
     352             :         __be16                  protocol:16;
     353             :         kmemcheck_bitfield_end(flags1);
     354             : 
     355             :         void                    (*destructor)(struct sk_buff *skb);
     356             : #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
     357             :         struct nf_conntrack     *nfct;
     358             :         struct sk_buff          *nfct_reasm;
     359             : #endif
     360             : #ifdef CONFIG_BRIDGE_NETFILTER
     361             :         struct nf_bridge_info   *nf_bridge;
     362             : #endif
     363             : 
     364             :         int                     skb_iif;
     365             : #ifdef CONFIG_NET_SCHED
     366             :         __u16                   tc_index;       /* traffic control index */
     367             : #ifdef CONFIG_NET_CLS_ACT
     368             :         __u16                   tc_verd;        /* traffic control verdict */
     369             : #endif
     370             : #endif
     371             : 
     372             :         kmemcheck_bitfield_begin(flags2);
     373             :         __u16                   queue_mapping:16;
     374             : #ifdef CONFIG_IPV6_NDISC_NODETYPE
     375             :         __u8                    ndisc_nodetype:2;
     376             : #endif
     377             :         kmemcheck_bitfield_end(flags2);
     378             : 
     379             :         /* 0/14 bit hole */
     380             : 
     381             : #ifdef CONFIG_NET_DMA
     382             :         dma_cookie_t            dma_cookie;
     383             : #endif
     384             : #ifdef CONFIG_NETWORK_SECMARK
     385             :         __u32                   secmark;
     386             : #endif
     387             :         union {
     388             :                 __u32           mark;
     389             :                 __u32           dropcount;
     390             :         };
     391             : 
     392             :         __u16                   vlan_tci;
     393             : 
     394             :         sk_buff_data_t          transport_header;
     395             :         sk_buff_data_t          network_header;
     396             :         sk_buff_data_t          mac_header;
     397             :         /* These elements must be at the end, see alloc_skb() for details.  */
     398             :         sk_buff_data_t          tail;
     399             :         sk_buff_data_t          end;
     400             :         unsigned char           *head,
     401             :                                 *data;
     402             :         unsigned int            truesize;
     403             :         atomic_t                users;
     404             : };
     405             : 
     406             : #ifdef __KERNEL__
     407             : /*
     408             :  *      Handling routines are only of interest to the kernel
     409             :  */
     410             : #include <linux/slab.h>
     411             : 
     412             : #include <asm/system.h>
     413             : 
     414           1 : static inline struct dst_entry *skb_dst(const struct sk_buff *skb)
     415             : {
     416             :         return (struct dst_entry *)skb->_skb_dst;
     417             : }
     418             : 
     419             : static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst)
     420             : {
     421             :         skb->_skb_dst = (unsigned long)dst;
     422             : }
     423             : 
     424           1 : static inline struct rtable *skb_rtable(const struct sk_buff *skb)
     425             : {
     426             :         return (struct rtable *)skb_dst(skb);
     427             : }
     428             : 
     429             : extern void kfree_skb(struct sk_buff *skb);
     430             : extern void consume_skb(struct sk_buff *skb);
     431             : extern void            __kfree_skb(struct sk_buff *skb);
     432             : extern struct sk_buff *__alloc_skb(unsigned int size,
     433             :                                    gfp_t priority, int fclone, int node);
     434             : static inline struct sk_buff *alloc_skb(unsigned int size,
     435             :                                         gfp_t priority)
     436             : {
     437             :         return __alloc_skb(size, priority, 0, -1);
     438             : }
     439             : 
     440             : static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
     441             :                                                gfp_t priority)
     442             : {
     443             :         return __alloc_skb(size, priority, 1, -1);
     444             : }
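
/*
 * Minimal usage sketch, not part of skbuff.h; my_build_packet() is a
 * hypothetical caller.  GFP_KERNEL is only valid in process context;
 * use GFP_ATOMIC from interrupt or spinlocked paths.
 */
static inline struct sk_buff *my_build_packet(unsigned int len)
{
        struct sk_buff *skb = alloc_skb(len, GFP_KERNEL);

        if (!skb)
                return NULL;            /* allocation failure */
        /* ... reserve headroom and fill in data here ... */
        return skb;
}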
     445             : 
     446             : extern int skb_recycle_check(struct sk_buff *skb, int skb_size);
     447             : 
     448             : extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
     449             : extern struct sk_buff *skb_clone(struct sk_buff *skb,
     450             :                                  gfp_t priority);
     451             : extern struct sk_buff *skb_copy(const struct sk_buff *skb,
     452             :                                 gfp_t priority);
     453             : extern struct sk_buff *pskb_copy(struct sk_buff *skb,
     454             :                                  gfp_t gfp_mask);
     455             : extern int             pskb_expand_head(struct sk_buff *skb,
     456             :                                         int nhead, int ntail,
     457             :                                         gfp_t gfp_mask);
     458             : extern struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
     459             :                                             unsigned int headroom);
     460             : extern struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
     461             :                                        int newheadroom, int newtailroom,
     462             :                                        gfp_t priority);
     463             : extern int             skb_to_sgvec(struct sk_buff *skb,
     464             :                                     struct scatterlist *sg, int offset,
     465             :                                     int len);
     466             : extern int             skb_cow_data(struct sk_buff *skb, int tailbits,
     467             :                                     struct sk_buff **trailer);
     468             : extern int             skb_pad(struct sk_buff *skb, int pad);
     469             : #define dev_kfree_skb(a)        consume_skb(a)
     470             : #define dev_consume_skb(a)      kfree_skb_clean(a)
     471             : extern void           skb_over_panic(struct sk_buff *skb, int len,
     472             :                                      void *here);
     473             : extern void           skb_under_panic(struct sk_buff *skb, int len,
     474             :                                       void *here);
     475             : 
     476             : extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
     477             :                         int getfrag(void *from, char *to, int offset,
     478             :                         int len,int odd, struct sk_buff *skb),
     479             :                         void *from, int length);
     480             : 
     481             : struct skb_seq_state {
     482             :         __u32           lower_offset;
     483             :         __u32           upper_offset;
     484             :         __u32           frag_idx;
     485             :         __u32           stepped_offset;
     486             :         struct sk_buff  *root_skb;
     487             :         struct sk_buff  *cur_skb;
     488             :         __u8            *frag_data;
     489             : };
     490             : 
     491             : extern void           skb_prepare_seq_read(struct sk_buff *skb,
     492             :                                            unsigned int from, unsigned int to,
     493             :                                            struct skb_seq_state *st);
     494             : extern unsigned int   skb_seq_read(unsigned int consumed, const u8 **data,
     495             :                                    struct skb_seq_state *st);
     496             : extern void           skb_abort_seq_read(struct skb_seq_state *st);
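
/*
 * Illustrative sketch of the sequential-read calling sequence above
 * (not part of skbuff.h).  Walks bytes [from, to) of a possibly
 * non-linear skb; my_consume() is a hypothetical callback.
 */
static inline void my_walk_skb(struct sk_buff *skb,
                               unsigned int from, unsigned int to,
                               void (*my_consume)(const u8 *data,
                                                  unsigned int len))
{
        struct skb_seq_state st;
        const u8 *data;
        unsigned int len, consumed = 0;

        skb_prepare_seq_read(skb, from, to, &st);
        /* skb_seq_read() hands back one contiguous chunk per call and
         * returns 0 once the requested range is exhausted. */
        while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
                my_consume(data, len);
                consumed += len;
        }
        /* skb_abort_seq_read(&st) would only be needed to stop early. */
}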
     497             : 
     498             : extern unsigned int   skb_find_text(struct sk_buff *skb, unsigned int from,
     499             :                                     unsigned int to, struct ts_config *config,
     500             :                                     struct ts_state *state);
     501             : 
     502             : #ifdef NET_SKBUFF_DATA_USES_OFFSET
     503             : static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
     504             : {
     505             :         return skb->head + skb->end;
     506             : }
     507             : #else
     508             : static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
     509             : {
     510             :         return skb->end;
     511             : }
     512             : #endif
     513             : 
     514             : /* Internal */
     515             : #define skb_shinfo(SKB) ((struct skb_shared_info *)(skb_end_pointer(SKB)))
     516             : 
     517             : static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
     518             : {
     519             :         return &skb_shinfo(skb)->hwtstamps;
     520             : }
     521             : 
     522             : static inline union skb_shared_tx *skb_tx(struct sk_buff *skb)
     523             : {
     524             :         return &skb_shinfo(skb)->tx_flags;
     525             : }
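
/*
 * Minimal sketch (not part of skbuff.h): requesting time stamps on an
 * outgoing skb via the shared-tx flags documented above.
 */
static inline void my_request_tx_timestamps(struct sk_buff *skb)
{
        union skb_shared_tx *tx = skb_tx(skb);

        tx->hardware = 1;       /* ask the device for a hardware stamp */
        tx->software = 1;       /* and for a ktime_get_real() stamp too */
}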
     526             : 
     527             : /**
     528             :  *      skb_queue_empty - check if a queue is empty
     529             :  *      @list: queue head
     530             :  *
     531             :  *      Returns true if the queue is empty, false otherwise.
     532             :  */
     533             : static inline int skb_queue_empty(const struct sk_buff_head *list)
     534             : {
     535             :         return list->next == (struct sk_buff *)list;
     536             : }
     537             : 
     538             : /**
     539             :  *      skb_queue_is_last - check if skb is the last entry in the queue
     540             :  *      @list: queue head
     541             :  *      @skb: buffer
     542             :  *
     543             :  *      Returns true if @skb is the last buffer on the list.
     544             :  */
     545             : static inline bool skb_queue_is_last(const struct sk_buff_head *list,
     546             :                                      const struct sk_buff *skb)
     547             : {
     548             :         return (skb->next == (struct sk_buff *) list);
     549             : }
     550             : 
     551             : /**
     552             :  *      skb_queue_is_first - check if skb is the first entry in the queue
     553             :  *      @list: queue head
     554             :  *      @skb: buffer
     555             :  *
     556             :  *      Returns true if @skb is the first buffer on the list.
     557             :  */
     558             : static inline bool skb_queue_is_first(const struct sk_buff_head *list,
     559             :                                       const struct sk_buff *skb)
     560             : {
     561             :         return (skb->prev == (struct sk_buff *) list);
     562             : }
     563             : 
     564             : /**
     565             :  *      skb_queue_next - return the next packet in the queue
     566             :  *      @list: queue head
     567             :  *      @skb: current buffer
     568             :  *
     569             :  *      Return the next packet in @list after @skb.  It is only valid to
     570             :  *      call this if skb_queue_is_last() evaluates to false.
     571             :  */
     572             : static inline struct sk_buff *skb_queue_next(const struct sk_buff_head *list,
     573             :                                              const struct sk_buff *skb)
     574             : {
     575             :         /* This BUG_ON may seem severe, but if we just return then we
     576             :          * are going to dereference garbage.
     577             :          */
     578             :         BUG_ON(skb_queue_is_last(list, skb));
     579             :         return skb->next;
     580             : }
     581             : 
     582             : /**
     583             :  *      skb_queue_prev - return the prev packet in the queue
     584             :  *      @list: queue head
     585             :  *      @skb: current buffer
     586             :  *
     587             :  *      Return the prev packet in @list before @skb.  It is only valid to
     588             :  *      call this if skb_queue_is_first() evaluates to false.
     589             :  */
     590             : static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
     591             :                                              const struct sk_buff *skb)
     592             : {
     593             :         /* This BUG_ON may seem severe, but if we just return then we
     594             :          * are going to dereference garbage.
     595             :          */
     596             :         BUG_ON(skb_queue_is_first(list, skb));
     597             :         return skb->prev;
     598             : }
     599             : 
     600             : /**
     601             :  *      skb_get - reference buffer
     602             :  *      @skb: buffer to reference
     603             :  *
     604             :  *      Makes another reference to a socket buffer and returns a pointer
     605             :  *      to the buffer.
     606             :  */
     607             : static inline struct sk_buff *skb_get(struct sk_buff *skb)
     608             : {
     609             :         atomic_inc(&skb->users);
     610             :         return skb;
     611             : }
     612             : 
     613             : /*
     614             :  * If users == 1, we are the only owner and can avoid redundant
     615             :  * atomic change.
     616             :  */
     617             : 
     618             : /**
     619             :  *      skb_cloned - is the buffer a clone
     620             :  *      @skb: buffer to check
     621             :  *
     622             :  *      Returns true if the buffer was generated with skb_clone() and is
     623             :  *      one of multiple shared copies of the buffer. Cloned buffers are
     624             :  *      shared data so must not be written to under normal circumstances.
     625             :  */
     626             : static inline int skb_cloned(const struct sk_buff *skb)
     627             : {
     628             :         return skb->cloned &&
     629             :                (atomic_read(&skb_shinfo(skb)->dataref) & SKB_DATAREF_MASK) != 1;
     630             : }
     631             : 
     632             : /**
     633             :  *      skb_header_cloned - is the header a clone
     634             :  *      @skb: buffer to check
     635             :  *
     636             :  *      Returns true if modifying the header part of the buffer requires
     637             :  *      the data to be copied.
     638             :  */
     639             : static inline int skb_header_cloned(const struct sk_buff *skb)
     640             : {
     641             :         int dataref;
     642             : 
     643             :         if (!skb->cloned)
     644             :                 return 0;
     645             : 
     646             :         dataref = atomic_read(&skb_shinfo(skb)->dataref);
     647             :         dataref = (dataref & SKB_DATAREF_MASK) - (dataref >> SKB_DATAREF_SHIFT);
     648             :         return dataref != 1;
     649             : }
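
/*
 * Illustrative sketch, not part of skbuff.h: ensuring the header is
 * private before modifying it in place, roughly what the stack's
 * copy-on-write helpers do with the primitives above.
 */
static inline int my_make_header_writable(struct sk_buff *skb)
{
        if (skb_header_cloned(skb))
                return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
        return 0;       /* header already private, nothing to do */
}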
     650             : 
     651             : /**
     652             :  *      skb_header_release - release reference to header
     653             :  *      @skb: buffer to operate on
     654             :  *
     655             :  *      Drop a reference to the header part of the buffer.  This is done
     656             :  *      by acquiring a payload reference.  You must not read from the header
     657             :  *      part of skb->data after this.
     658             :  */
     659             : static inline void skb_header_release(struct sk_buff *skb)
     660             : {
     661             :         BUG_ON(skb->nohdr);
     662             :         skb->nohdr = 1;
     663             :         atomic_add(1 << SKB_DATAREF_SHIFT, &skb_shinfo(skb)->dataref);
     664             : }
     665             : 
     666             : /**
     667             :  *      skb_shared - is the buffer shared
     668             :  *      @skb: buffer to check
     669             :  *
     670             :  *      Returns true if more than one person has a reference to this
     671             :  *      buffer.
     672             :  */
     673             : static inline int skb_shared(const struct sk_buff *skb)
     674             : {
     675             :         return atomic_read(&skb->users) != 1;
     676             : }
     677             : 
     678             : /**
     679             :  *      skb_share_check - check if buffer is shared and if so clone it
     680             :  *      @skb: buffer to check
     681             :  *      @pri: priority for memory allocation
     682             :  *
     683             :  *      If the buffer is shared the buffer is cloned and the old copy
     684             :  *      drops a reference. A new clone with a single reference is returned.
     685             :  *      If the buffer is not shared the original buffer is returned. When
     686             :  *      being called from interrupt context or with spinlocks held, @pri must
     687             :  *      be %GFP_ATOMIC.
     688             :  *
     689             :  *      NULL is returned on a memory allocation failure.
     690             :  */
     691             : static inline struct sk_buff *skb_share_check(struct sk_buff *skb,
     692             :                                               gfp_t pri)
     693             : {
     694             :         might_sleep_if(pri & __GFP_WAIT);
     695             :         if (skb_shared(skb)) {
     696             :                 struct sk_buff *nskb = skb_clone(skb, pri);
     697             :                 kfree_skb(skb);
     698             :                 skb = nskb;
     699             :         }
     700             :         return skb;
     701             : }
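
/*
 * Minimal sketch (not part of skbuff.h) of the usual pattern in a
 * packet handler that may be handed shared buffers.
 */
static inline struct sk_buff *my_rcv_prepare(struct sk_buff *skb)
{
        /* GFP_ATOMIC because receive handlers run in softirq context */
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb)
                return NULL;    /* clone failed; the original was freed */
        return skb;
}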
     702             : 
     703             : /*
     704             :  *      Copy shared buffers into a new sk_buff. We effectively do COW on
     705             :  *      packets to handle cases where we have a local reader and forward
     706             :  *      and a couple of other messy ones. The normal one is tcpdumping
     707             :  *      a packet that's being forwarded.
     708             :  */
     709             : 
     710             : /**
     711             :  *      skb_unshare - make a copy of a shared buffer
     712             :  *      @skb: buffer to check
     713             :  *      @pri: priority for memory allocation
     714             :  *
     715             :  *      If the socket buffer is a clone then this function creates a new
     716             :  *      copy of the data, drops a reference count on the old copy and returns
     717             :  *      the new copy with the reference count at 1. If the buffer is not a clone
     718             :  *      the original buffer is returned. When called with a spinlock held or
     719             :  *      from interrupt state, @pri must be %GFP_ATOMIC.
     720             :  *
     721             :  *      %NULL is returned on a memory allocation failure.
     722             :  */
     723             : static inline struct sk_buff *skb_unshare(struct sk_buff *skb,
     724             :                                           gfp_t pri)
     725             : {
     726             :         might_sleep_if(pri & __GFP_WAIT);
     727             :         if (skb_cloned(skb)) {
     728             :                 struct sk_buff *nskb = skb_copy(skb, pri);
     729             :                 kfree_skb(skb); /* Free our shared copy */
     730             :                 skb = nskb;
     731             :         }
     732             :         return skb;
     733             : }
     734             : 
     735             : /**
     736             :  *      skb_peek - peek at the head of a queue
     737             :  *      @list_: list to peek at
     738             :  *
     739             :  *      Peek an &sk_buff. Unlike most other operations you _MUST_
     740             :  *      be careful with this one. A peek leaves the buffer on the
     741             :  *      list and someone else may run off with it. You must hold
     742             :  *      the appropriate locks or have a private queue to do this.
     743             :  *
     744             :  *      Returns %NULL for an empty list or a pointer to the head element.
     745             :  *      The reference count is not incremented and the reference is therefore
     746             :  *      volatile. Use with caution.
     747             :  */
     748             : static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
     749             : {
     750             :         struct sk_buff *list = ((struct sk_buff *)list_)->next;
     751             :         if (list == (struct sk_buff *)list_)
     752             :                 list = NULL;
     753             :         return list;
     754             : }
     755             : 
     756             : /**
     757             :  *      skb_peek_tail - peek at the tail of a queue
     758             :  *      @list_: list to peek at
     759             :  *
     760             :  *      Peek an &sk_buff. Unlike most other operations you _MUST_
     761             :  *      be careful with this one. A peek leaves the buffer on the
     762             :  *      list and someone else may run off with it. You must hold
     763             :  *      the appropriate locks or have a private queue to do this.
     764             :  *
     765             :  *      Returns %NULL for an empty list or a pointer to the tail element.
     766             :  *      The reference count is not incremented and the reference is therefore
     767             :  *      volatile. Use with caution.
     768             :  */
     769             : static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
     770             : {
     771             :         struct sk_buff *list = ((struct sk_buff *)list_)->prev;
     772             :         if (list == (struct sk_buff *)list_)
     773             :                 list = NULL;
     774             :         return list;
     775             : }
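
/*
 * Illustrative sketch, not part of skbuff.h: walking a queue with the
 * helpers above.  The caller must hold the lock protecting @list for
 * the whole walk; match() is a hypothetical predicate.
 */
static inline struct sk_buff *my_find_in_queue(struct sk_buff_head *list,
                                               int (*match)(const struct sk_buff *))
{
        struct sk_buff *skb = skb_peek(list);

        while (skb) {
                if (match(skb))
                        return skb;
                if (skb_queue_is_last(list, skb))
                        break;  /* skb_queue_next() is invalid past here */
                skb = skb_queue_next(list, skb);
        }
        return NULL;
}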
     776             : 
     777             : /**
     778             :  *      skb_queue_len   - get queue length
     779             :  *      @list_: list to measure
     780             :  *
     781             :  *      Return the length of an &sk_buff queue.
     782             :  */
     783             : static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
     784             : {
     785             :         return list_->qlen;
     786             : }
     787             : 
     788             : /**
     789             :  *      __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
     790             :  *      @list: queue to initialize
     791             :  *
     792             :  *      This initializes only the list and queue length aspects of
     793             :  *      an sk_buff_head object.  This allows initializing the list
     794             :  *      aspects of an sk_buff_head without reinitializing things like
     795             :  *      the spinlock.  It can also be used for on-stack sk_buff_head
     796             :  *      objects where the spinlock is known to not be used.
     797             :  */
     798             : static inline void __skb_queue_head_init(struct sk_buff_head *list)
     799             : {
     800             :         list->prev = list->next = (struct sk_buff *)list;
     801             :         list->qlen = 0;
     802             : }
     803             : 
     804             : /*
     805             :  * This function creates a split out lock class for each invocation;
     806             :  * this is needed for now since a whole lot of users of the skb-queue
     807             :  * infrastructure in drivers have different locking usage (in hardirq)
     808             :  * than the networking core (in softirq only). In the long run either the
     809             :  * network layer or drivers will need annotation to consolidate the
     810             :  * main types of usage into 3 classes.
     811             :  */
     812             : static inline void skb_queue_head_init(struct sk_buff_head *list)
     813             : {
     814             :         spin_lock_init(&list->lock);
     815             :         __skb_queue_head_init(list);
     816             : }
     817             : 
     818             : static inline void skb_queue_head_init_class(struct sk_buff_head *list,
     819             :                 struct lock_class_key *class)
     820             : {
     821             :         skb_queue_head_init(list);
     822             :         lockdep_set_class(&list->lock, class);
     823             : }
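
/*
 * Minimal sketch (not part of skbuff.h): giving a driver-private queue
 * its own lockdep class, as suggested above.  The key must have static
 * storage duration; my_driver_queue_init() is hypothetical.
 */
static inline void my_driver_queue_init(struct sk_buff_head *rxq)
{
        static struct lock_class_key my_rxq_key;

        skb_queue_head_init_class(rxq, &my_rxq_key);
}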
     824             : 
     825             : /*
     826             :  *      Insert an sk_buff on a list.
     827             :  *
     828             :  *      The "__skb_xxxx()" functions are the non-atomic ones that
     829             :  *      can only be called with interrupts disabled.
     830             :  */
     831             : extern void        skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list);
     832             : static inline void __skb_insert(struct sk_buff *newsk,
     833             :                                 struct sk_buff *prev, struct sk_buff *next,
     834             :                                 struct sk_buff_head *list)
     835             : {
     836             :         newsk->next = next;
     837             :         newsk->prev = prev;
     838             :         next->prev  = prev->next = newsk;
     839             :         list->qlen++;
     840             : }
     841             : 
     842             : static inline void __skb_queue_splice(const struct sk_buff_head *list,
     843             :                                       struct sk_buff *prev,
     844             :                                       struct sk_buff *next)
     845             : {
     846             :         struct sk_buff *first = list->next;
     847             :         struct sk_buff *last = list->prev;
     848             : 
     849             :         first->prev = prev;
     850             :         prev->next = first;
     851             : 
     852             :         last->next = next;
     853             :         next->prev = last;
     854             : }
     855             : 
     856             : /**
     857             :  *      skb_queue_splice - join two skb lists; this is designed for stacks
     858             :  *      @list: the new list to add
     859             :  *      @head: the place to add it in the first list
     860             :  */
     861             : static inline void skb_queue_splice(const struct sk_buff_head *list,
     862             :                                     struct sk_buff_head *head)
     863             : {
     864             :         if (!skb_queue_empty(list)) {
     865             :                 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
     866             :                 head->qlen += list->qlen;
     867             :         }
     868             : }
     869             : 
     870             : /**
     871             :  *      skb_queue_splice_init - join two skb lists and reinitialise the emptied list
     872             :  *      @list: the new list to add
     873             :  *      @head: the place to add it in the first list
     874             :  *
     875             :  *      The list at @list is reinitialised
     876             :  */
     877             : static inline void skb_queue_splice_init(struct sk_buff_head *list,
     878             :                                          struct sk_buff_head *head)
     879             : {
     880             :         if (!skb_queue_empty(list)) {
     881             :                 __skb_queue_splice(list, (struct sk_buff *) head, head->next);
     882             :                 head->qlen += list->qlen;
     883             :                 __skb_queue_head_init(list);
     884             :         }
     885             : }
     886             : 
     887             : /**
     888             :  *      skb_queue_splice_tail - join two skb lists, each list being a queue
     889             :  *      @list: the new list to add
     890             :  *      @head: the place to add it in the first list
     891             :  */
     892             : static inline void skb_queue_splice_tail(const struct sk_buff_head *list,
     893             :                                          struct sk_buff_head *head)
     894             : {
     895             :         if (!skb_queue_empty(list)) {
     896             :                 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
     897             :                 head->qlen += list->qlen;
     898             :         }
     899             : }
     900             : 
     901             : /**
     902             :  *      skb_queue_splice_tail_init - join two skb lists and reinitialise the emptied list
     903             :  *      @list: the new list to add
     904             :  *      @head: the place to add it in the first list
     905             :  *
     906             :  *      Each of the lists is a queue.
     907             :  *      The list at @list is reinitialised
     908             :  */
     909             : static inline void skb_queue_splice_tail_init(struct sk_buff_head *list,
     910             :                                               struct sk_buff_head *head)
     911             : {
     912             :         if (!skb_queue_empty(list)) {
     913             :                 __skb_queue_splice(list, head->prev, (struct sk_buff *) head);
     914             :                 head->qlen += list->qlen;
     915             :                 __skb_queue_head_init(list);
     916             :         }
     917             : }
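
/*
 * Illustrative sketch, not part of skbuff.h: draining a batch built on
 * a private (unlocked) list into a shared queue in one operation.  The
 * caller serialises access to @shared itself.
 */
static inline void my_flush_batch(struct sk_buff_head *batch,
                                  struct sk_buff_head *shared)
{
        /* moves everything to the tail of @shared and leaves @batch
         * empty and immediately reusable */
        skb_queue_splice_tail_init(batch, shared);
}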
     918             : 
     919             : /**
     920             :  *      __skb_queue_after - queue a buffer after a given buffer in the list
     921             :  *      @list: list to use
     922             :  *      @prev: place after this buffer
     923             :  *      @newsk: buffer to queue
     924             :  *
     925             :  *      Queue a buffer in the middle of a list. This function takes no locks
     926             :  *      and you must therefore hold required locks before calling it.
     927             :  *
     928             :  *      A buffer cannot be placed on two lists at the same time.
     929             :  */
     930             : static inline void __skb_queue_after(struct sk_buff_head *list,
     931             :                                      struct sk_buff *prev,
     932             :                                      struct sk_buff *newsk)
     933             : {
     934             :         __skb_insert(newsk, prev, prev->next, list);
     935             : }
     936             : 
     937             : extern void skb_append(struct sk_buff *old, struct sk_buff *newsk,
     938             :                        struct sk_buff_head *list);
     939             : 
     940             : static inline void __skb_queue_before(struct sk_buff_head *list,
     941             :                                       struct sk_buff *next,
     942             :                                       struct sk_buff *newsk)
     943             : {
     944             :         __skb_insert(newsk, next->prev, next, list);
     945             : }
     946             : 
     947             : /**
     948             :  *      __skb_queue_head - queue a buffer at the list head
     949             :  *      @list: list to use
     950             :  *      @newsk: buffer to queue
     951             :  *
     952             :  *      Queue a buffer at the start of a list. This function takes no locks
     953             :  *      and you must therefore hold required locks before calling it.
     954             :  *
     955             :  *      A buffer cannot be placed on two lists at the same time.
     956             :  */
     957             : extern void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk);
     958             : static inline void __skb_queue_head(struct sk_buff_head *list,
     959             :                                     struct sk_buff *newsk)
     960             : {
     961             :         __skb_queue_after(list, (struct sk_buff *)list, newsk);
     962             : }
     963             : 
     964             : /**
     965             :  *      __skb_queue_tail - queue a buffer at the list tail
     966             :  *      @list: list to use
     967             :  *      @newsk: buffer to queue
     968             :  *
     969             :  *      Queue a buffer at the end of a list. This function takes no locks
     970             :  *      and you must therefore hold required locks before calling it.
     971             :  *
     972             :  *      A buffer cannot be placed on two lists at the same time.
     973             :  */
     974             : extern void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk);
     975             : static inline void __skb_queue_tail(struct sk_buff_head *list,
     976             :                                    struct sk_buff *newsk)
     977             : {
     978             :         __skb_queue_before(list, (struct sk_buff *)list, newsk);
     979             : }
     980             : 
     981             : /*
     982             :  * remove sk_buff from list. _Must_ be called atomically, and with
     983             :  * the list known.
     984             :  */
     985             : extern void        skb_unlink(struct sk_buff *skb, struct sk_buff_head *list);
     986             : static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
     987             : {
     988             :         struct sk_buff *next, *prev;
     989             : 
     990             :         list->qlen--;
     991             :         next       = skb->next;
     992             :         prev       = skb->prev;
     993             :         skb->next  = skb->prev = NULL;
     994             :         next->prev = prev;
     995             :         prev->next = next;
     996             : }
     997             : 
     998             : /**
     999             :  *      __skb_dequeue - remove from the head of the queue
    1000             :  *      @list: list to dequeue from
    1001             :  *
    1002             :  *      Remove the head of the list. This function does not take any locks
    1003             :  *      so it must be called with the appropriate locks held. The head item is
    1004             :  *      returned or %NULL if the list is empty.
    1005             :  */
    1006             : extern struct sk_buff *skb_dequeue(struct sk_buff_head *list);
    1007             : static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
    1008             : {
    1009             :         struct sk_buff *skb = skb_peek(list);
    1010             :         if (skb)
    1011             :                 __skb_unlink(skb, list);
    1012             :         return skb;
    1013             : }
    1014             : 
    1015             : /**
    1016             :  *      __skb_dequeue_tail - remove from the tail of the queue
    1017             :  *      @list: list to dequeue from
    1018             :  *
    1019             :  *      Remove the tail of the list. This function does not take any locks
    1020             :  *      so it must be called with the appropriate locks held. The tail item is
    1021             :  *      returned or %NULL if the list is empty.
    1022             :  */
    1023             : extern struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list);
    1024             : static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
    1025             : {
    1026             :         struct sk_buff *skb = skb_peek_tail(list);
    1027             :         if (skb)
    1028             :                 __skb_unlink(skb, list);
    1029             :         return skb;
    1030             : }
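
/*
 * Illustrative sketch (not part of this header): draining a private
 * queue with the lock-free variants above.  The caller is assumed to
 * already hold whatever lock protects @pending.
 */
static void example_drain(struct sk_buff_head *pending)
{
        struct sk_buff *skb;

        /* __skb_dequeue() returns NULL once the list is empty */
        while ((skb = __skb_dequeue(pending)) != NULL)
                kfree_skb(skb);
}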
    1031             : 
    1032             : 
    1033             : static inline int skb_is_nonlinear(const struct sk_buff *skb)
    1034             : {
    1035             :         return skb->data_len;
    1036             : }
    1037             : 
    1038             : static inline unsigned int skb_headlen(const struct sk_buff *skb)
    1039             : {
    1040             :         return skb->len - skb->data_len;
    1041             : }
    1042             : 
    1043             : static inline int skb_pagelen(const struct sk_buff *skb)
    1044             : {
    1045             :         int i, len = 0;
    1046             : 
    1047             :         for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--)
    1048             :                 len += skb_shinfo(skb)->frags[i].size;
    1049             :         return len + skb_headlen(skb);
    1050             : }
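
/*
 * Sketch of the length invariant the helpers above rely on
 * (illustrative only): skb->len counts all data, skb->data_len only
 * the non-linear part, so the linear head always holds
 * skb->len - skb->data_len bytes.
 */
static void example_lengths(const struct sk_buff *skb)
{
        WARN_ON(skb_headlen(skb) + skb->data_len != skb->len);
        /* direct skb->data access past skb_headlen(skb) bytes is
         * invalid whenever skb_is_nonlinear(skb) */
}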
    1051             : 
    1052             : static inline void skb_fill_page_desc(struct sk_buff *skb, int i,
    1053             :                                       struct page *page, int off, int size)
    1054             : {
    1055             :         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
    1056             : 
    1057             :         frag->page             = page;
    1058             :         frag->page_offset      = off;
    1059             :         frag->size             = size;
    1060             :         skb_shinfo(skb)->nr_frags = i + 1;
    1061             : }
    1062             : 
    1063             : extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
    1064             :                             int off, int size);
    1065             : 
    1066             : #define SKB_PAGE_ASSERT(skb)    BUG_ON(skb_shinfo(skb)->nr_frags)
    1067             : #define SKB_FRAG_ASSERT(skb)    BUG_ON(skb_has_frags(skb))
    1068             : #define SKB_LINEAR_ASSERT(skb)  BUG_ON(skb_is_nonlinear(skb))
    1069             : 
    1070             : #ifdef NET_SKBUFF_DATA_USES_OFFSET
    1071             : static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
    1072             : {
    1073             :         return skb->head + skb->tail;
    1074             : }
    1075             : 
    1076             : static inline void skb_reset_tail_pointer(struct sk_buff *skb)
    1077             : {
    1078             :         skb->tail = skb->data - skb->head;
    1079             : }
    1080             : 
    1081             : static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
    1082             : {
    1083             :         skb_reset_tail_pointer(skb);
    1084             :         skb->tail += offset;
    1085             : }
    1086             : #else /* NET_SKBUFF_DATA_USES_OFFSET */
    1087             : static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
    1088             : {
    1089             :         return skb->tail;
    1090             : }
    1091             : 
    1092             : static inline void skb_reset_tail_pointer(struct sk_buff *skb)
    1093             : {
    1094             :         skb->tail = skb->data;
    1095             : }
    1096             : 
    1097             : static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
    1098             : {
    1099             :         skb->tail = skb->data + offset;
    1100             : }
    1101             : 
    1102             : #endif /* NET_SKBUFF_DATA_USES_OFFSET */
    1103             : 
    1104             : /*
    1105             :  *      Add data to an sk_buff
    1106             :  */
    1107             : extern unsigned char *skb_put(struct sk_buff *skb, unsigned int len);
    1108             : static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
    1109             : {
    1110             :         unsigned char *tmp = skb_tail_pointer(skb);
    1111             :         SKB_LINEAR_ASSERT(skb);
    1112             :         skb->tail += len;
    1113             :         skb->len  += len;
    1114             :         return tmp;
    1115             : }
    1116             : 
    1117             : extern unsigned char *skb_push(struct sk_buff *skb, unsigned int len);
    1118             : static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
    1119             : {
    1120             :         skb->data -= len;
    1121             :         skb->len  += len;
    1122             :         return skb->data;
    1123             : }
    1124             : 
    1125             : extern unsigned char *skb_pull(struct sk_buff *skb, unsigned int len);
    1126             : static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
    1127             : {
    1128             :         skb->len -= len;
    1129             :         BUG_ON(skb->len < skb->data_len);
    1130             :         return skb->data += len;
    1131             : }
    1132             : 
    1133             : extern unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta);
    1134             : 
    1135             : static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
    1136             : {
    1137             :         if (len > skb_headlen(skb) &&
    1138             :             !__pskb_pull_tail(skb, len - skb_headlen(skb)))
    1139             :                 return NULL;
    1140             :         skb->len -= len;
    1141             :         return skb->data += len;
    1142             : }
    1143             : 
    1144             : static inline unsigned char *pskb_pull(struct sk_buff *skb, unsigned int len)
    1145             : {
    1146             :         return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
    1147             : }
    1148             : 
    1149             : static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
    1150             : {
    1151             :         if (likely(len <= skb_headlen(skb)))
    1152             :                 return 1;
    1153             :         if (unlikely(len > skb->len))
    1154             :                 return 0;
    1155             :         return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
    1156             : }
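
/*
 * Illustrative sketch of the pull helpers: parsing a small header off
 * the front of a received buffer.  "struct myhdr" is hypothetical.
 */
struct myhdr {
        __be16 type;
        __be16 len;
};

static int example_parse(struct sk_buff *skb)
{
        const struct myhdr *hdr;

        /* ensure the first sizeof(*hdr) bytes sit in the linear head */
        if (!pskb_may_pull(skb, sizeof(struct myhdr)))
                return -EINVAL;
        hdr = (const struct myhdr *)skb->data;
        __skb_pull(skb, sizeof(struct myhdr));  /* consume the header */
        return ntohs(hdr->type);
}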
    1157             : 
    1158             : /**
    1159             :  *      skb_headroom - bytes at buffer head
    1160             :  *      @skb: buffer to check
    1161             :  *
    1162             :  *      Return the number of bytes of free space at the head of an &sk_buff.
    1163             :  */
    1164             : static inline unsigned int skb_headroom(const struct sk_buff *skb)
    1165             : {
    1166             :         return skb->data - skb->head;
    1167             : }
    1168             : 
    1169             : /**
    1170             :  *      skb_tailroom - bytes at buffer end
    1171             :  *      @skb: buffer to check
    1172             :  *
    1173             :  *      Return the number of bytes of free space at the tail of an &sk_buff.
    1174             :  */
    1175             : static inline int skb_tailroom(const struct sk_buff *skb)
    1176             : {
    1177             :         return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail;
    1178             : }
    1179             : 
    1180             : /**
    1181             :  *      skb_reserve - adjust headroom
    1182             :  *      @skb: buffer to alter
    1183             :  *      @len: bytes to move
    1184             :  *
    1185             :  *      Increase the headroom of an empty &sk_buff by reducing the tail
    1186             :  *      room. This is only allowed for an empty buffer.
    1187             :  */
    1188             : static inline void skb_reserve(struct sk_buff *skb, int len)
    1189             : {
    1190             :         skb->data += len;
    1191             :         skb->tail += len;
    1192             : }
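
/*
 * Sketch of the classic allocate/reserve/put pattern (sizes are
 * illustrative; MAX_HEADER comes from linux/netdevice.h).  Note that
 * skb_reserve() must run while the buffer is still empty.
 */
static struct sk_buff *example_build(unsigned int payload_len)
{
        struct sk_buff *skb = alloc_skb(MAX_HEADER + payload_len, GFP_ATOMIC);

        if (!skb)
                return NULL;
        skb_reserve(skb, MAX_HEADER);   /* headroom for headers pushed later */
        skb_put(skb, payload_len);      /* claim space for the payload */
        return skb;
}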
    1193             : 
    1194             : #ifdef NET_SKBUFF_DATA_USES_OFFSET
    1195             : static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
    1196             : {
    1197             :         return skb->head + skb->transport_header;
    1198             : }
    1199             : 
    1200             : static inline void skb_reset_transport_header(struct sk_buff *skb)
    1201             : {
    1202             :         skb->transport_header = skb->data - skb->head;
    1203             : }
    1204             : 
    1205             : static inline void skb_set_transport_header(struct sk_buff *skb,
    1206             :                                             const int offset)
    1207             : {
    1208             :         skb_reset_transport_header(skb);
    1209             :         skb->transport_header += offset;
    1210             : }
    1211             : 
    1212             : static inline unsigned char *skb_network_header(const struct sk_buff *skb)
    1213             : {
    1214             :         return skb->head + skb->network_header;
    1215             : }
    1216             : 
    1217             : static inline void skb_reset_network_header(struct sk_buff *skb)
    1218             : {
    1219             :         skb->network_header = skb->data - skb->head;
    1220             : }
    1221             : 
    1222             : static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
    1223             : {
    1224             :         skb_reset_network_header(skb);
    1225             :         skb->network_header += offset;
    1226             : }
    1227             : 
    1228             : static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
    1229             : {
    1230             :         return skb->head + skb->mac_header;
    1231             : }
    1232             : 
    1233             : static inline int skb_mac_header_was_set(const struct sk_buff *skb)
    1234             : {
    1235             :         return skb->mac_header != ~0U;
    1236             : }
    1237             : 
    1238             : static inline void skb_reset_mac_header(struct sk_buff *skb)
    1239             : {
    1240             :         skb->mac_header = skb->data - skb->head;
    1241             : }
    1242             : 
    1243             : static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
    1244             : {
    1245             :         skb_reset_mac_header(skb);
    1246             :         skb->mac_header += offset;
    1247             : }
    1248             : 
    1249             : #else /* NET_SKBUFF_DATA_USES_OFFSET */
    1250             : 
    1251             : static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
    1252             : {
    1253             :         return skb->transport_header;
    1254             : }
    1255             : 
    1256             : static inline void skb_reset_transport_header(struct sk_buff *skb)
    1257             : {
    1258             :         skb->transport_header = skb->data;
    1259             : }
    1260             : 
    1261             : static inline void skb_set_transport_header(struct sk_buff *skb,
    1262             :                                             const int offset)
    1263             : {
    1264             :         skb->transport_header = skb->data + offset;
    1265             : }
    1266             : 
    1267             : static inline unsigned char *skb_network_header(const struct sk_buff *skb)
    1268             : {
    1269             :         return skb->network_header;
    1270             : }
    1271             : 
    1272             : static inline void skb_reset_network_header(struct sk_buff *skb)
    1273             : {
    1274             :         skb->network_header = skb->data;
    1275             : }
    1276             : 
    1277             : static inline void skb_set_network_header(struct sk_buff *skb, const int offset)
    1278             : {
    1279             :         skb->network_header = skb->data + offset;
    1280             : }
    1281             : 
    1282             : static inline unsigned char *skb_mac_header(const struct sk_buff *skb)
    1283             : {
    1284             :         return skb->mac_header;
    1285             : }
    1286             : 
    1287             : static inline int skb_mac_header_was_set(const struct sk_buff *skb)
    1288             : {
    1289             :         return skb->mac_header != NULL;
    1290             : }
    1291             : 
    1292             : static inline void skb_reset_mac_header(struct sk_buff *skb)
    1293             : {
    1294             :         skb->mac_header = skb->data;
    1295             : }
    1296             : 
    1297             : static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
    1298             : {
    1299             :         skb->mac_header = skb->data + offset;
    1300             : }
    1301             : #endif /* NET_SKBUFF_DATA_USES_OFFSET */
    1302             : 
    1303             : static inline int skb_transport_offset(const struct sk_buff *skb)
    1304             : {
    1305             :         return skb_transport_header(skb) - skb->data;
    1306             : }
    1307             : 
    1308             : static inline u32 skb_network_header_len(const struct sk_buff *skb)
    1309             : {
    1310             :         return skb->transport_header - skb->network_header;
    1311             : }
    1312             : 
    1313             : static inline int skb_network_offset(const struct sk_buff *skb)
    1314             : {
    1315             :         return skb_network_header(skb) - skb->data;
    1316             : }
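
/*
 * Illustrative receive-path sketch: how the header pointers above are
 * typically established (ETH_HLEN comes from linux/if_ether.h).
 */
static void example_rx_mark_headers(struct sk_buff *skb)
{
        skb_reset_mac_header(skb);      /* MAC header starts at skb->data */
        skb_pull(skb, ETH_HLEN);        /* consume the Ethernet header */
        skb_reset_network_header(skb);  /* IP header now starts at skb->data */
        /* skb_network_offset() is now 0; skb_mac_header() still points
         * ETH_HLEN bytes before skb->data */
}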
    1317             : 
    1318             : /*
    1319             :  * CPUs often take a performance hit when accessing unaligned memory
    1320             :  * locations. The actual performance hit varies, it can be small if the
    1321             :  * hardware handles it or large if we have to take an exception and fix it
    1322             :  * in software.
    1323             :  *
    1324             :  * Since an ethernet header is 14 bytes network drivers often end up with
    1325             :  * the IP header at an unaligned offset. The IP header can be aligned by
    1326             :  * shifting the start of the packet by 2 bytes. Drivers should do this
    1327             :  * with:
    1328             :  *
    1329             :  * skb_reserve(skb, NET_IP_ALIGN);
    1330             :  *
    1331             :  * The downside to this alignment of the IP header is that the DMA is now
    1332             :  * unaligned. On some architectures the cost of an unaligned DMA is high
    1333             :  * and this cost outweighs the gains made by aligning the IP header.
    1334             :  *
    1335             :  * Since this trade-off varies between architectures, we allow NET_IP_ALIGN
    1336             :  * to be overridden.
    1337             :  */
    1338             : #ifndef NET_IP_ALIGN
    1339             : #define NET_IP_ALIGN    2
    1340             : #endif
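
/*
 * Sketch of the recipe described above (illustrative; rx_len is a
 * hypothetical receive buffer size).  Reserving NET_IP_ALIGN bytes
 * before DMA shifts the 14-byte Ethernet header so that the IP header
 * behind it lands on a 4-byte boundary.
 */
static struct sk_buff *example_rx_alloc(unsigned int rx_len)
{
        struct sk_buff *skb = dev_alloc_skb(rx_len + NET_IP_ALIGN);

        if (skb)
                skb_reserve(skb, NET_IP_ALIGN);
        return skb;
}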
    1341             : 
    1342             : /*
    1343             :  * The networking layer reserves some headroom in skb data (via
    1344             :  * dev_alloc_skb). This is used to avoid having to reallocate skb data when
    1345             :  * the header has to grow. In the default case, if the header has to grow
    1346             :  * 32 bytes or less we avoid the reallocation.
    1347             :  *
    1348             :  * Unfortunately this headroom changes the DMA alignment of the resulting
    1349             :  * network packet. As for NET_IP_ALIGN, this unaligned DMA is expensive
    1350             :  * on some architectures. An architecture can override this value,
    1351             :  * perhaps setting it to a cacheline in size (since that will maintain
    1352             :  * cacheline alignment of the DMA). It must be a power of 2.
    1353             :  *
    1354             :  * Various parts of the networking layer expect at least 32 bytes of
    1355             :  * headroom; you should not reduce this.
    1356             :  */
    1357             : #ifndef NET_SKB_PAD
    1358             : #define NET_SKB_PAD     32
    1359             : #endif
    1360             : 
    1361             : extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
    1362             : 
    1363             : static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
    1364             : {
    1365             :         if (unlikely(skb->data_len)) {
    1366             :                 WARN_ON(1);
    1367             :                 return;
    1368             :         }
    1369             :         skb->len = len;
    1370             :         skb_set_tail_pointer(skb, len);
    1371             : }
    1372             : 
    1373             : extern void skb_trim(struct sk_buff *skb, unsigned int len);
    1374             : 
    1375             : static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
    1376             : {
    1377             :         if (skb->data_len)
    1378             :                 return ___pskb_trim(skb, len);
    1379             :         __skb_trim(skb, len);
    1380             :         return 0;
    1381             : }
    1382             : 
    1383             : static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
    1384             : {
    1385             :         return (len < skb->len) ? __pskb_trim(skb, len) : 0;
    1386             : }
    1387             : 
    1388             : /**
    1389             :  *      pskb_trim_unique - remove end from a paged unique (not cloned) buffer
    1390             :  *      @skb: buffer to alter
    1391             :  *      @len: new length
    1392             :  *
    1393             :  *      This is identical to pskb_trim except that the caller knows that
    1394             :  *      the skb is not cloned so we should never get an error due to out-
    1395             :  *      of-memory.
    1396             :  */
    1397             : static inline void pskb_trim_unique(struct sk_buff *skb, unsigned int len)
    1398             : {
    1399             :         int err = pskb_trim(skb, len);
    1400             :         BUG_ON(err);
    1401             : }
    1402             : 
    1403             : /**
    1404             :  *      skb_orphan - orphan a buffer
    1405             :  *      @skb: buffer to orphan
    1406             :  *
    1407             :  *      If a buffer currently has an owner then we call the owner's
    1408             :  *      destructor function and make the @skb unowned. The buffer continues
    1409             :  *      to exist but is no longer charged to its former owner.
    1410             :  */
    1411             : static inline void skb_orphan(struct sk_buff *skb)
    1412             : {
    1413             :         if (skb->destructor)
    1414             :                 skb->destructor(skb);
    1415             :         skb->destructor = NULL;
    1416             :         skb->sk              = NULL;
    1417             : }
    1418             : 
    1419             : /**
    1420             :  *      __skb_queue_purge - empty a list
    1421             :  *      @list: list to empty
    1422             :  *
    1423             :  *      Delete all buffers on an &sk_buff list. Each buffer is removed from
    1424             :  *      the list and one reference dropped. This function does not take the
    1425             :  *      list lock and the caller must hold the relevant locks to use it.
    1426             :  */
    1427             : extern void skb_queue_purge(struct sk_buff_head *list);
    1428             : static inline void __skb_queue_purge(struct sk_buff_head *list)
    1429             : {
    1430             :         struct sk_buff *skb;
    1431             :         while ((skb = __skb_dequeue(list)) != NULL)
    1432             :                 kfree_skb(skb);
    1433             : }
    1434             : 
    1435             : /**
    1436             :  *      __dev_alloc_skb - allocate an skbuff for receiving
    1437             :  *      @length: length to allocate
    1438             :  *      @gfp_mask: get_free_pages mask, passed to alloc_skb
    1439             :  *
    1440             :  *      Allocate a new &sk_buff and assign it a usage count of one. The
    1441             :  *      buffer has unspecified headroom built in. Users should allocate
    1442             :  *      the headroom they think they need without accounting for the
    1443             :  *      built-in space. The built-in space is used for optimisations.
    1444             :  *
    1445             :  *      %NULL is returned if there is no free memory.
    1446             :  */
    1447             : static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
    1448             :                                               gfp_t gfp_mask)
    1449             : {
    1450             :         struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
    1451             :         if (likely(skb))
    1452             :                 skb_reserve(skb, NET_SKB_PAD);
    1453             :         return skb;
    1454             : }
    1455             : 
    1456             : extern struct sk_buff *dev_alloc_skb(unsigned int length);
    1457             : 
    1458             : extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
    1459             :                 unsigned int length, gfp_t gfp_mask);
    1460             : 
    1461             : /**
    1462             :  *      netdev_alloc_skb - allocate an skbuff for rx on a specific device
    1463             :  *      @dev: network device to receive on
    1464             :  *      @length: length to allocate
    1465             :  *
    1466             :  *      Allocate a new &sk_buff and assign it a usage count of one. The
    1467             :  *      buffer has unspecified headroom built in. Users should allocate
    1468             :  *      the headroom they think they need without accounting for the
    1469             :  *      built-in space. The built-in space is used for optimisations.
    1470             :  *
    1471             :  *      %NULL is returned if there is no free memory. Although this function
    1472             :  *      allocates memory it can be called from an interrupt.
    1473             :  */
    1474             : static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
    1475             :                 unsigned int length)
    1476             : {
    1477             :         return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
    1478             : }
    1479             : 
    1480             : static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
    1481             :                 unsigned int length)
    1482             : {
    1483             :         struct sk_buff *skb = netdev_alloc_skb(dev, length + NET_IP_ALIGN);
    1484             : 
    1485             :         if (NET_IP_ALIGN && skb)
    1486             :                 skb_reserve(skb, NET_IP_ALIGN);
    1487             :         return skb;
    1488             : }
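
/*
 * Illustrative driver refill sketch: netdev_alloc_skb_ip_align() above
 * bundles the allocate + NET_IP_ALIGN reserve into one call.
 * RX_BUF_LEN is a hypothetical driver constant.
 */
#define RX_BUF_LEN 1536

static int example_refill(struct net_device *dev, struct sk_buff **slot)
{
        struct sk_buff *skb = netdev_alloc_skb_ip_align(dev, RX_BUF_LEN);

        if (!skb)
                return -ENOMEM;
        *slot = skb;    /* a real driver would also DMA-map it here */
        return 0;
}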
    1489             : 
    1490             : extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask);
    1491             : 
    1492             : /**
    1493             :  *      netdev_alloc_page - allocate a page for ps-rx on a specific device
    1494             :  *      @dev: network device to receive on
    1495             :  *
    1496             :  *      Allocate a new page node local to the specified device.
    1497             :  *
    1498             :  *      %NULL is returned if there is no free memory.
    1499             :  */
    1500             : static inline struct page *netdev_alloc_page(struct net_device *dev)
    1501             : {
    1502             :         return __netdev_alloc_page(dev, GFP_ATOMIC);
    1503             : }
    1504             : 
    1505             : static inline void netdev_free_page(struct net_device *dev, struct page *page)
    1506             : {
    1507             :         __free_page(page);
    1508             : }
    1509             : 
    1510             : /**
    1511             :  *      skb_clone_writable - is the header of a clone writable
    1512             :  *      @skb: buffer to check
    1513             :  *      @len: length up to which to write
    1514             :  *
    1515             :  *      Returns true if modifying the header part of the cloned buffer
    1516             :  *      does not require the data to be copied.
    1517             :  */
    1518             : static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len)
    1519             : {
    1520             :         return !skb_header_cloned(skb) &&
    1521             :                skb_headroom(skb) + len <= skb->hdr_len;
    1522             : }
    1523             : 
    1524             : static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
    1525             :                             int cloned)
    1526             : {
    1527             :         int delta = 0;
    1528             : 
    1529             :         if (headroom < NET_SKB_PAD)
    1530             :                 headroom = NET_SKB_PAD;
    1531             :         if (headroom > skb_headroom(skb))
    1532             :                 delta = headroom - skb_headroom(skb);
    1533             : 
    1534             :         if (delta || cloned)
    1535             :                 return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
    1536             :                                         GFP_ATOMIC);
    1537             :         return 0;
    1538             : }
    1539             : 
    1540             : /**
    1541             :  *      skb_cow - copy header of skb when it is required
    1542             :  *      @skb: buffer to cow
    1543             :  *      @headroom: needed headroom
    1544             :  *
    1545             :  *      If the skb passed lacks sufficient headroom or its data part
    1546             :  *      is shared, data is reallocated. If reallocation fails, an error
    1547             :  *      is returned and original skb is not changed.
    1548             :  *
    1549             :  *      The result is skb with writable area skb->head...skb->tail
    1550             :  *      and at least @headroom of space at head.
    1551             :  */
    1552             : static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
    1553             : {
    1554             :         return __skb_cow(skb, headroom, skb_cloned(skb));
    1555             : }
    1556             : 
    1557             : /**
    1558             :  *      skb_cow_head - skb_cow but only making the head writable
    1559             :  *      @skb: buffer to cow
    1560             :  *      @headroom: needed headroom
    1561             :  *
    1562             :  *      This function is identical to skb_cow except that we replace the
    1563             :  *      skb_cloned check by skb_header_cloned.  It should be used when
    1564             :  *      you only need to push on some header and do not need to modify
    1565             :  *      the data.
    1566             :  */
    1567             : static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
    1568             : {
    1569             :         return __skb_cow(skb, headroom, skb_header_cloned(skb));
    1570             : }
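
/*
 * Sketch: guaranteeing private, sufficient headroom before prepending
 * a header (VLAN_HLEN, from linux/if_vlan.h, is just an example size).
 */
static int example_push_header(struct sk_buff *skb)
{
        int err = skb_cow_head(skb, VLAN_HLEN);

        if (err)
                return err;             /* reallocation failed; skb unchanged */
        skb_push(skb, VLAN_HLEN);       /* safe: head is writable with room */
        return 0;
}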
    1571             : 
    1572             : /**
    1573             :  *      skb_padto       - pad an skbuff up to a minimal size
    1574             :  *      @skb: buffer to pad
    1575             :  *      @len: minimal length
    1576             :  *
    1577             :  *      Pads up a buffer to ensure the trailing bytes exist and are
    1578             :  *      blanked. If the buffer already contains sufficient data it
    1579             :  *      is untouched. Otherwise it is extended. Returns zero on
    1580             :  *      success. The skb is freed on error.
    1581             :  */
    1582             :  
    1583             : static inline int skb_padto(struct sk_buff *skb, unsigned int len)
    1584             : {
    1585             :         unsigned int size = skb->len;
    1586             :         if (likely(size >= len))
    1587             :                 return 0;
    1588             :         return skb_pad(skb, len - size);
    1589             : }
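
/*
 * Sketch of the usual transmit-side use: runt Ethernet frames must be
 * padded to ETH_ZLEN (60) bytes before reaching the wire (ETH_ZLEN and
 * NETDEV_TX_OK come from other headers).  Note the skb has already
 * been freed whenever skb_padto() fails.
 */
static int example_xmit_pad(struct sk_buff *skb)
{
        if (skb_padto(skb, ETH_ZLEN))
                return NETDEV_TX_OK;    /* skb freed by skb_padto() */
        /* ... hand the now >= ETH_ZLEN byte frame to the hardware ... */
        return NETDEV_TX_OK;
}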
    1590             : 
    1591             : static inline int skb_add_data(struct sk_buff *skb,
    1592             :                                char __user *from, int copy)
    1593             : {
    1594             :         const int off = skb->len;
    1595             : 
    1596             :         if (skb->ip_summed == CHECKSUM_NONE) {
    1597             :                 int err = 0;
    1598             :                 __wsum csum = csum_and_copy_from_user(from, skb_put(skb, copy),
    1599             :                                                             copy, 0, &err);
    1600             :                 if (!err) {
    1601             :                         skb->csum = csum_block_add(skb->csum, csum, off);
    1602             :                         return 0;
    1603             :                 }
    1604             :         } else if (!copy_from_user(skb_put(skb, copy), from, copy))
    1605             :                 return 0;
    1606             : 
    1607             :         __skb_trim(skb, off);
    1608             :         return -EFAULT;
    1609             : }
    1610             : 
    1611             : static inline int skb_can_coalesce(struct sk_buff *skb, int i,
    1612             :                                    struct page *page, int off)
    1613             : {
    1614             :         if (i) {
    1615             :                 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
    1616             : 
    1617             :                 return page == frag->page &&
    1618             :                        off == frag->page_offset + frag->size;
    1619             :         }
    1620             :         return 0;
    1621             : }
    1622             : 
    1623             : static inline int __skb_linearize(struct sk_buff *skb)
    1624             : {
    1625             :         return __pskb_pull_tail(skb, skb->data_len) ? 0 : -ENOMEM;
    1626             : }
    1627             : 
    1628             : /**
    1629             :  *      skb_linearize - convert paged skb to linear one
    1630             :  *      @skb: buffer to linearize
    1631             :  *
    1632             :  *      If there is no free memory -ENOMEM is returned, otherwise zero
    1633             :  *      is returned and the old skb data released.
    1634             :  */
    1635             : static inline int skb_linearize(struct sk_buff *skb)
    1636             : {
    1637             :         return skb_is_nonlinear(skb) ? __skb_linearize(skb) : 0;
    1638             : }
    1639             : 
    1640             : /**
    1641             :  *      skb_linearize_cow - make sure skb is linear and writable
    1642             :  *      @skb: buffer to process
    1643             :  *
    1644             :  *      If there is no free memory -ENOMEM is returned, otherwise zero
    1645             :  *      is returned and the old skb data released.
    1646             :  */
    1647             : static inline int skb_linearize_cow(struct sk_buff *skb)
    1648             : {
    1649             :         return skb_is_nonlinear(skb) || skb_cloned(skb) ?
    1650             :                __skb_linearize(skb) : 0;
    1651             : }
    1652             : 
    1653             : /**
    1654             :  *      skb_postpull_rcsum - update checksum for received skb after pull
    1655             :  *      @skb: buffer to update
    1656             :  *      @start: start of data before pull
    1657             :  *      @len: length of data pulled
    1658             :  *
    1659             :  *      After doing a pull on a received packet, you need to call this to
    1660             :  *      update the CHECKSUM_COMPLETE checksum, or set ip_summed to
    1661             :  *      CHECKSUM_NONE so that it can be recomputed from scratch.
    1662             :  */
    1663             : 
    1664             : static inline void skb_postpull_rcsum(struct sk_buff *skb,
    1665             :                                       const void *start, unsigned int len)
    1666             : {
    1667             :         if (skb->ip_summed == CHECKSUM_COMPLETE)
    1668             :                 skb->csum = csum_sub(skb->csum, csum_partial(start, len, 0));
    1669             : }
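
/*
 * Sketch of the pull-and-fix pattern described above; hdr_len is
 * hypothetical.  The skb_pull_rcsum() helper declared below combines
 * both steps.
 */
static void example_strip_header(struct sk_buff *skb, unsigned int hdr_len)
{
        const void *start = skb->data;

        skb_pull(skb, hdr_len);
        /* keep a CHECKSUM_COMPLETE value valid for the remaining bytes */
        skb_postpull_rcsum(skb, start, hdr_len);
}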
    1670             : 
    1671             : unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
    1672             : 
    1673             : /**
    1674             :  *      pskb_trim_rcsum - trim received skb and update checksum
    1675             :  *      @skb: buffer to trim
    1676             :  *      @len: new length
    1677             :  *
    1678             :  *      This is exactly the same as pskb_trim except that it ensures the
    1679             :  *      checksums of received packets are still valid after the operation.
    1680             :  */
    1681             : 
    1682             : static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
    1683             : {
    1684             :         if (likely(len >= skb->len))
    1685             :                 return 0;
    1686             :         if (skb->ip_summed == CHECKSUM_COMPLETE)
    1687             :                 skb->ip_summed = CHECKSUM_NONE;
    1688             :         return __pskb_trim(skb, len);
    1689             : }
    1690             : 
    1691             : #define skb_queue_walk(queue, skb) \
    1692             :                 for (skb = (queue)->next;                                    \
    1693             :                      prefetch(skb->next), (skb != (struct sk_buff *)(queue));        \
    1694             :                      skb = skb->next)
    1695             : 
    1696             : #define skb_queue_walk_safe(queue, skb, tmp)                                    \
    1697             :                 for (skb = (queue)->next, tmp = skb->next;                        \
    1698             :                      skb != (struct sk_buff *)(queue);                          \
    1699             :                      skb = tmp, tmp = skb->next)
    1700             : 
    1701             : #define skb_queue_walk_from(queue, skb)                                         \
    1702             :                 for (; prefetch(skb->next), (skb != (struct sk_buff *)(queue));      \
    1703             :                      skb = skb->next)
    1704             : 
    1705             : #define skb_queue_walk_from_safe(queue, skb, tmp)                               \
    1706             :                 for (tmp = skb->next;                                                \
    1707             :                      skb != (struct sk_buff *)(queue);                          \
    1708             :                      skb = tmp, tmp = skb->next)
    1709             : 
    1710             : #define skb_queue_reverse_walk(queue, skb) \
    1711             :                 for (skb = (queue)->prev;                                    \
    1712             :                      prefetch(skb->prev), (skb != (struct sk_buff *)(queue));        \
    1713             :                      skb = skb->prev)
    1714             : 
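
/*
 * Illustrative walk: counting the buffers on a queue.  Plain
 * skb_queue_walk() must not unlink the cursor; use the _safe variants
 * (with a spare cursor) when entries may be removed inside the loop.
 */
static unsigned int example_count(struct sk_buff_head *list)
{
        struct sk_buff *skb;
        unsigned int n = 0;

        skb_queue_walk(list, skb)
                n++;
        return n;
}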
    1715             : 
    1716             : static inline bool skb_has_frags(const struct sk_buff *skb)
    1717             : {
    1718             :         return skb_shinfo(skb)->frag_list != NULL;
    1719             : }
    1720             : 
    1721             : static inline void skb_frag_list_init(struct sk_buff *skb)
    1722             : {
    1723             :         skb_shinfo(skb)->frag_list = NULL;
    1724             : }
    1725             : 
    1726             : static inline void skb_frag_add_head(struct sk_buff *skb, struct sk_buff *frag)
    1727             : {
    1728             :         frag->next = skb_shinfo(skb)->frag_list;
    1729             :         skb_shinfo(skb)->frag_list = frag;
    1730             : }
    1731             : 
    1732             : #define skb_walk_frags(skb, iter)       \
    1733             :         for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
    1734             : 
    1735             : extern struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
    1736             :                                            int *peeked, int *err);
    1737             : extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
    1738             :                                          int noblock, int *err);
    1739             : extern unsigned int    datagram_poll(struct file *file, struct socket *sock,
    1740             :                                      struct poll_table_struct *wait);
    1741             : extern int             skb_copy_datagram_iovec(const struct sk_buff *from,
    1742             :                                                int offset, struct iovec *to,
    1743             :                                                int size);
    1744             : extern int             skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
    1745             :                                                         int hlen,
    1746             :                                                         struct iovec *iov);
    1747             : extern int             skb_copy_datagram_from_iovec(struct sk_buff *skb,
    1748             :                                                     int offset,
    1749             :                                                     const struct iovec *from,
    1750             :                                                     int from_offset,
    1751             :                                                     int len);
    1752             : extern int             skb_copy_datagram_const_iovec(const struct sk_buff *from,
    1753             :                                                      int offset,
    1754             :                                                      const struct iovec *to,
    1755             :                                                      int to_offset,
    1756             :                                                      int size);
    1757             : extern void            skb_free_datagram(struct sock *sk, struct sk_buff *skb);
    1758             : extern void            skb_free_datagram_locked(struct sock *sk,
    1759             :                                                 struct sk_buff *skb);
    1760             : extern int             skb_kill_datagram(struct sock *sk, struct sk_buff *skb,
    1761             :                                          unsigned int flags);
    1762             : extern __wsum          skb_checksum(const struct sk_buff *skb, int offset,
    1763             :                                     int len, __wsum csum);
    1764             : extern int             skb_copy_bits(const struct sk_buff *skb, int offset,
    1765             :                                      void *to, int len);
    1766             : extern int             skb_store_bits(struct sk_buff *skb, int offset,
    1767             :                                       const void *from, int len);
    1768             : extern __wsum          skb_copy_and_csum_bits(const struct sk_buff *skb,
    1769             :                                               int offset, u8 *to, int len,
    1770             :                                               __wsum csum);
    1771             : extern int             skb_splice_bits(struct sk_buff *skb,
    1772             :                                                 unsigned int offset,
    1773             :                                                 struct pipe_inode_info *pipe,
    1774             :                                                 unsigned int len,
    1775             :                                                 unsigned int flags);
    1776             : extern void            skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
    1777             : extern void            skb_split(struct sk_buff *skb,
    1778             :                                  struct sk_buff *skb1, const u32 len);
    1779             : extern int             skb_shift(struct sk_buff *tgt, struct sk_buff *skb,
    1780             :                                  int shiftlen);
    1781             : 
    1782             : extern struct sk_buff *skb_segment(struct sk_buff *skb, int features);
    1783             : 
    1784             : static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
    1785             :                                        int len, void *buffer)
    1786             : {
    1787             :         int hlen = skb_headlen(skb);
    1788             : 
    1789             :         if (hlen - offset >= len)
    1790             :                 return skb->data + offset;
    1791             : 
    1792             :         if (skb_copy_bits(skb, offset, buffer, len) < 0)
    1793             :                 return NULL;
    1794             : 
    1795             :         return buffer;
    1796             : }
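
/*
 * Sketch: reading a TCP header regardless of whether it is linear
 * (struct tcphdr comes from linux/tcp.h; thoff would come from the IP
 * header in real code).
 */
static int example_peek_tcp(const struct sk_buff *skb, int thoff)
{
        struct tcphdr _th;
        const struct tcphdr *th;

        th = skb_header_pointer(skb, thoff, sizeof(_th), &_th);
        if (!th)
                return -EINVAL;         /* packet too short for a TCP header */
        return ntohs(th->dest);         /* destination port, host order */
}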
    1797             : 
    1798             : static inline void skb_copy_from_linear_data(const struct sk_buff *skb,
    1799             :                                              void *to,
    1800             :                                              const unsigned int len)
    1801             : {
    1802             :         memcpy(to, skb->data, len);
    1803             : }
    1804             : 
    1805             : static inline void skb_copy_from_linear_data_offset(const struct sk_buff *skb,
    1806             :                                                     const int offset, void *to,
    1807             :                                                     const unsigned int len)
    1808             : {
    1809             :         memcpy(to, skb->data + offset, len);
    1810             : }
    1811             : 
    1812             : static inline void skb_copy_to_linear_data(struct sk_buff *skb,
    1813             :                                            const void *from,
    1814             :                                            const unsigned int len)
    1815             : {
    1816             :         memcpy(skb->data, from, len);
    1817             : }
    1818             : 
    1819             : static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
    1820             :                                                   const int offset,
    1821             :                                                   const void *from,
    1822             :                                                   const unsigned int len)
    1823             : {
    1824             :         memcpy(skb->data + offset, from, len);
    1825             : }
    1826             : 
    1827             : extern void skb_init(void);
    1828             : 
    1829             : static inline ktime_t skb_get_ktime(const struct sk_buff *skb)
    1830             : {
    1831             :         return skb->tstamp;
    1832             : }
    1833             : 
    1834             : /**
    1835             :  *      skb_get_timestamp - get timestamp from a skb
    1836             :  *      @skb: skb to get stamp from
    1837             :  *      @stamp: pointer to struct timeval to store stamp in
    1838             :  *
    1839             :  *      Timestamps are stored in the skb as offsets to a base timestamp.
    1840             :  *      This function converts the offset back to a struct timeval and stores
    1841             :  *      it in stamp.
    1842             :  */
    1843             : static inline void skb_get_timestamp(const struct sk_buff *skb,
    1844             :                                      struct timeval *stamp)
    1845             : {
    1846             :         *stamp = ktime_to_timeval(skb->tstamp);
    1847             : }
    1848             : 
    1849             : static inline void skb_get_timestampns(const struct sk_buff *skb,
    1850             :                                        struct timespec *stamp)
    1851             : {
    1852             :         *stamp = ktime_to_timespec(skb->tstamp);
    1853             : }
    1854             : 
    1855             : static inline void __net_timestamp(struct sk_buff *skb)
    1856             : {
    1857             :         skb->tstamp = ktime_get_real();
    1858             : }
    1859             : 
    1860             : static inline ktime_t net_timedelta(ktime_t t)
    1861             : {
    1862             :         return ktime_sub(ktime_get_real(), t);
    1863             : }
    1864             : 
    1865             : static inline ktime_t net_invalid_timestamp(void)
    1866             : {
    1867             :         return ktime_set(0, 0);
    1868             : }
    1869             : 
    1870             : /**
    1871             :  * skb_tstamp_tx - queue clone of skb with send time stamps
    1872             :  * @orig_skb:   the original outgoing packet
    1873             :  * @hwtstamps:  hardware time stamps, may be NULL if not available
    1874             :  *
    1875             :  * If the skb has a socket associated, then this function clones the
    1876             :  * skb (thus sharing the actual data and optional structures), stores
    1877             :  * the optional hardware time stamping information (if non NULL) or
    1878             :  * generates a software time stamp (otherwise), then queues the clone
    1879             :  * to the error queue of the socket.  Errors are silently ignored.
    1880             :  */
    1881             : extern void skb_tstamp_tx(struct sk_buff *orig_skb,
    1882             :                         struct skb_shared_hwtstamps *hwtstamps);
    1883             : 
    1884             : extern __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len);
    1885             : extern __sum16 __skb_checksum_complete(struct sk_buff *skb);
    1886             : 
    1887             : static inline int skb_csum_unnecessary(const struct sk_buff *skb)
    1888             : {
    1889             :         return skb->ip_summed & CHECKSUM_UNNECESSARY;
    1890             : }
    1891             : 
    1892             : /**
    1893             :  *      skb_checksum_complete - Calculate checksum of an entire packet
    1894             :  *      @skb: packet to process
    1895             :  *
    1896             :  *      This function calculates the checksum over the entire packet plus
    1897             :  *      the value of skb->csum.  The latter can be used to supply the
    1898             :  *      checksum of a pseudo header as used by TCP/UDP.  It returns the
    1899             :  *      checksum.
    1900             :  *
    1901             :  *      For protocols that contain complete checksums such as ICMP/TCP/UDP,
    1902             :  *      this function can be used to verify the checksum on received
    1903             :  *      packets.  In that case the function should return zero if the
    1904             :  *      checksum is correct.  In particular, this function will return zero
    1905             :  *      if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
    1906             :  *      hardware has already verified the correctness of the checksum.
    1907             :  */
    1908             : static inline __sum16 skb_checksum_complete(struct sk_buff *skb)
    1909             : {
    1910             :         return skb_csum_unnecessary(skb) ?
    1911             :                0 : __skb_checksum_complete(skb);
    1912             : }
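
/*
 * Receive-side verification sketch, per the comment above: for TCP/UDP
 * the caller must first seed skb->csum with the pseudo-header checksum;
 * a zero result then means the packet checksum is good (or hardware
 * already verified it via CHECKSUM_UNNECESSARY).
 */
static int example_csum_check(struct sk_buff *skb)
{
        if (skb_checksum_complete(skb))
                return -EINVAL;         /* corrupt packet: drop it */
        return 0;
}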
    1913             : 
    1914             : #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
    1915             : extern void nf_conntrack_destroy(struct nf_conntrack *nfct);
    1916             : static inline void nf_conntrack_put(struct nf_conntrack *nfct)
    1917             : {
    1918             :         if (nfct && atomic_dec_and_test(&nfct->use))
    1919             :                 nf_conntrack_destroy(nfct);
    1920             : }
    1921             : static inline void nf_conntrack_get(struct nf_conntrack *nfct)
    1922             : {
    1923             :         if (nfct)
    1924             :                 atomic_inc(&nfct->use);
    1925             : }
    1926             : static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
    1927             : {
    1928             :         if (skb)
    1929             :                 atomic_inc(&skb->users);
    1930             : }
    1931             : static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
    1932             : {
    1933             :         if (skb)
    1934             :                 kfree_skb(skb);
    1935             : }
    1936             : #endif
    1937             : #ifdef CONFIG_BRIDGE_NETFILTER
    1938             : static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
    1939             : {
    1940             :         if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
    1941             :                 kfree(nf_bridge);
    1942             : }
    1943             : static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
    1944             : {
    1945             :         if (nf_bridge)
    1946             :                 atomic_inc(&nf_bridge->use);
    1947             : }
    1948             : #endif /* CONFIG_BRIDGE_NETFILTER */
    1949             : static inline void nf_reset(struct sk_buff *skb)
    1950             : {
    1951             : #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
    1952             :         nf_conntrack_put(skb->nfct);
    1953             :         skb->nfct = NULL;
    1954             :         nf_conntrack_put_reasm(skb->nfct_reasm);
    1955             :         skb->nfct_reasm = NULL;
    1956             : #endif
    1957             : #ifdef CONFIG_BRIDGE_NETFILTER
    1958             :         nf_bridge_put(skb->nf_bridge);
    1959             :         skb->nf_bridge = NULL;
    1960             : #endif
    1961             : }
    1962             : 
    1963             : /* Note: This doesn't put any conntrack and bridge info in dst. */
    1964             : static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
    1965             : {
    1966             : #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
    1967             :         dst->nfct = src->nfct;
    1968             :         nf_conntrack_get(src->nfct);
    1969             :         dst->nfctinfo = src->nfctinfo;
    1970             :         dst->nfct_reasm = src->nfct_reasm;
    1971             :         nf_conntrack_get_reasm(src->nfct_reasm);
    1972             : #endif
    1973             : #ifdef CONFIG_BRIDGE_NETFILTER
    1974             :         dst->nf_bridge  = src->nf_bridge;
    1975             :         nf_bridge_get(src->nf_bridge);
    1976             : #endif
    1977             : }
    1978             : 
    1979             : static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
    1980             : {
    1981             : #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
    1982             :         nf_conntrack_put(dst->nfct);
    1983             :         nf_conntrack_put_reasm(dst->nfct_reasm);
    1984             : #endif
    1985             : #ifdef CONFIG_BRIDGE_NETFILTER
    1986             :         nf_bridge_put(dst->nf_bridge);
    1987             : #endif
    1988             :         __nf_copy(dst, src);
    1989             : }
    1990             : 
    1991             : #ifdef CONFIG_NETWORK_SECMARK
    1992             : static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
    1993             : {
    1994             :         to->secmark = from->secmark;
    1995             : }
    1996             : 
    1997             : static inline void skb_init_secmark(struct sk_buff *skb)
    1998             : {
    1999             :         skb->secmark = 0;
    2000             : }
    2001             : #else
    2002             : static inline void skb_copy_secmark(struct sk_buff *to, const struct sk_buff *from)
    2003             : { }
    2004             : 
    2005             : static inline void skb_init_secmark(struct sk_buff *skb)
    2006             : { }
    2007             : #endif
    2008             : 
    2009             : static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
    2010             : {
    2011             :         skb->queue_mapping = queue_mapping;
    2012             : }
    2013             : 
    2014             : static inline u16 skb_get_queue_mapping(const struct sk_buff *skb)
    2015             : {
    2016             :         return skb->queue_mapping;
    2017             : }
    2018             : 
    2019             : static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
    2020             : {
    2021             :         to->queue_mapping = from->queue_mapping;
    2022             : }
    2023             : 
    2024             : static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
    2025             : {
    2026             :         skb->queue_mapping = rx_queue + 1;
    2027             : }
    2028             : 
    2029             : static inline u16 skb_get_rx_queue(const struct sk_buff *skb)
    2030             : {
    2031             :         return skb->queue_mapping - 1;
    2032             : }
    2033             : 
    2034             : static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
    2035             : {
    2036             :         return (skb->queue_mapping != 0);
    2037             : }
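                     : 
                     : /* Illustrative note: queue_mapping stores the RX queue biased by one,
                     :  * so the value 0 can mean "no queue recorded".  skb_record_rx_queue()
                     :  * adds the bias and skb_get_rx_queue() removes it.  A hypothetical
                     :  * TX-queue picker built on these helpers:
                     :  */
                     : static inline u16 example_pick_tx_queue(const struct sk_buff *skb)
                     : {
                     :         if (skb_rx_queue_recorded(skb))
                     :                 return skb_get_rx_queue(skb);   /* undo the +1 bias */
                     :         return 0;                               /* fall back to queue 0 */
                     : }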
    2038             : 
    2039             : extern u16 skb_tx_hash(const struct net_device *dev,
    2040             :                        const struct sk_buff *skb);
    2041             : 
    2042             : #ifdef CONFIG_XFRM
    2043             : static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
    2044             : {
    2045             :         return skb->sp;
    2046             : }
    2047             : #else
    2048             : static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
    2049             : {
    2050             :         return NULL;
    2051             : }
    2052             : #endif
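                     : 
                     : /* Usage sketch (illustrative): the !CONFIG_XFRM stub returns NULL so
                     :  * callers can test for an IPsec security path without any #ifdefs.
                     :  * Hypothetical predicate:
                     :  */
                     : static inline int example_was_ipsec_protected(struct sk_buff *skb)
                     : {
                     :         return skb_sec_path(skb) != NULL;
                     : }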
    2053             : 
    2054             : static inline int skb_is_gso(const struct sk_buff *skb)
    2055             : {
    2056             :         return skb_shinfo(skb)->gso_size;
    2057             : }
    2058             : 
    2059             : static inline int skb_is_gso_v6(const struct sk_buff *skb)
    2060             : {
    2061             :         return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
    2062             : }
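                     : 
                     : /* Usage sketch (illustrative): a driver lacking IPv6 TSO support might
                     :  * reject TCPv6 GSO packets up front.  skb_is_gso() is nonzero whenever
                     :  * gso_size is set; the helper name below is hypothetical.
                     :  */
                     : static inline int example_can_offload(const struct sk_buff *skb)
                     : {
                     :         if (!skb_is_gso(skb))
                     :                 return 1;                       /* not a GSO packet at all */
                     :         return !skb_is_gso_v6(skb);             /* assume no TCPv6 TSO */
                     : }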
    2063             : 
    2064             : extern void __skb_warn_lro_forwarding(const struct sk_buff *skb);
    2065             : 
    2066             : static inline bool skb_warn_if_lro(const struct sk_buff *skb)
    2067             : {
    2068             :         /* LRO sets gso_size but not gso_type, whereas if GSO is really
    2069             :          * wanted then gso_type will be set. */
    2070             :         struct skb_shared_info *shinfo = skb_shinfo(skb);
    2071             :         if (shinfo->gso_size != 0 && unlikely(shinfo->gso_type == 0)) {
    2072             :                 __skb_warn_lro_forwarding(skb);
    2073             :                 return true;
    2074             :         }
    2075             :         return false;
    2076             : }
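                     : 
                     : /* Usage sketch (illustrative): forwarding paths must not emit
                     :  * LRO-aggregated skbs (they can exceed the egress MTU), so callers
                     :  * typically warn and drop.  The wrapper below is hypothetical;
                     :  * kfree_skb() is declared elsewhere in this header.
                     :  */
                     : static inline int example_forward_check(struct sk_buff *skb)
                     : {
                     :         if (skb_warn_if_lro(skb)) {
                     :                 kfree_skb(skb);         /* aggregated packet: drop it */
                     :                 return -EINVAL;
                     :         }
                     :         return 0;
                     : }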
    2077             : 
    2078             : static inline void skb_forward_csum(struct sk_buff *skb)
    2079             : {
    2080             :         /* Unfortunately we don't support this one.  Any brave souls? */
    2081             :         if (skb->ip_summed == CHECKSUM_COMPLETE)
    2082             :                 skb->ip_summed = CHECKSUM_NONE;
    2083             : }
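                     : 
                     : /* Usage sketch (illustrative): a path about to rewrite packet headers
                     :  * calls skb_forward_csum() first, because the stored CHECKSUM_COMPLETE
                     :  * value in skb->csum would no longer match the modified data.
                     :  */
                     : static inline void example_prepare_rewrite(struct sk_buff *skb)
                     : {
                     :         skb_forward_csum(skb);  /* downgrade COMPLETE to NONE */
                     :         /* ... now safe to modify headers and retransmit ... */
                     : }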
    2084             : 
    2085             : extern bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off);
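                     : 
                     : /* Usage sketch (illustrative): drivers receiving CHECKSUM_PARTIAL
                     :  * metadata from an untrusted source (e.g. a paravirtual backend)
                     :  * validate it with skb_partial_csum_set() rather than writing the
                     :  * checksum fields directly; it fails if the offsets don't fit in the
                     :  * packet.  'start' is relative to skb->data, 'off' to 'start'.
                     :  */
                     : static inline int example_set_partial_csum(struct sk_buff *skb,
                     :                                            u16 start, u16 off)
                     : {
                     :         if (!skb_partial_csum_set(skb, start, off))
                     :                 return -EINVAL;         /* malformed offsets */
                     :         return 0;
                     : }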
    2086           2 : #endif  /* __KERNEL__ */
    2087             : #endif  /* _LINUX_SKBUFF_H */

Generated by: LCOV version 1.10