X-Git-Url: http://pilppa.org/gitweb/gitweb.cgi?a=blobdiff_plain;f=include%2Flinux%2Fskbuff.h;h=91140fe8c119a317b28d62ce1255f00d8719dba8;hb=92e21e79a85924ddda00f4678d60bbd8f891a553;hp=93c27f71122a8d88139cdb8ae251b5e953404485;hpb=440fdb53b4ae58602711b5b8c3a139ace2404dbb;p=linux-2.6-omap-h63xx.git

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 93c27f71122..91140fe8c11 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -41,8 +41,7 @@
 #define SKB_DATA_ALIGN(X)	(((X) + (SMP_CACHE_BYTES - 1)) & \
 				 ~(SMP_CACHE_BYTES - 1))
 #define SKB_WITH_OVERHEAD(X)	\
-	(((X) - sizeof(struct skb_shared_info)) & \
-	 ~(SMP_CACHE_BYTES - 1))
+	((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 #define SKB_MAX_ORDER(X, ORDER) \
 	SKB_WITH_OVERHEAD((PAGE_SIZE << (ORDER)) - (X))
 #define SKB_MAX_HEAD(X)		(SKB_MAX_ORDER((X), 0))
@@ -301,8 +300,9 @@ struct sk_buff {
 #endif
 
 	int			iif;
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
 	__u16			queue_mapping;
-
+#endif
 #ifdef CONFIG_NET_SCHED
 	__u16			tc_index;	/* traffic control index */
 #ifdef CONFIG_NET_CLS_ACT
@@ -357,6 +357,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
 }
 
 extern void	       kfree_skbmem(struct sk_buff *skb);
+extern struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
 extern struct sk_buff *skb_clone(struct sk_buff *skb,
 				 gfp_t priority);
 extern struct sk_buff *skb_copy(const struct sk_buff *skb,
@@ -386,7 +387,9 @@ extern void skb_truesize_bug(struct sk_buff *skb);
 
 static inline void skb_truesize_check(struct sk_buff *skb)
 {
-	if (unlikely((int)skb->truesize < sizeof(struct sk_buff) + skb->len))
+	int len = sizeof(struct sk_buff) + skb->len;
+
+	if (unlikely((int)skb->truesize < len))
 		skb_truesize_bug(skb);
 }
 
@@ -993,7 +996,7 @@ static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
  *
  *	Return the number of bytes of free space at the head of an &sk_buff.
  */
-static inline int skb_headroom(const struct sk_buff *skb)
+static inline unsigned int skb_headroom(const struct sk_buff *skb)
 {
 	return skb->data - skb->head;
 }
@@ -1346,12 +1349,28 @@ static inline struct sk_buff *netdev_alloc_skb(struct net_device *dev,
  *	Returns true if modifying the header part of the cloned buffer
  *	does not requires the data to be copied.
  */
-static inline int skb_clone_writable(struct sk_buff *skb, int len)
+static inline int skb_clone_writable(struct sk_buff *skb, unsigned int len)
 {
 	return !skb_header_cloned(skb) &&
 	       skb_headroom(skb) + len <= skb->hdr_len;
 }
 
+static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
+			    int cloned)
+{
+	int delta = 0;
+
+	if (headroom < NET_SKB_PAD)
+		headroom = NET_SKB_PAD;
+	if (headroom > skb_headroom(skb))
+		delta = headroom - skb_headroom(skb);
+
+	if (delta || cloned)
+		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
+					GFP_ATOMIC);
+	return 0;
+}
+
 /**
  *	skb_cow - copy header of skb when it is required
  *	@skb: buffer to cow
@@ -1366,16 +1385,22 @@ static inline int skb_clone_writable(struct sk_buff *skb, int len)
  */
 static inline int skb_cow(struct sk_buff *skb, unsigned int headroom)
 {
-	int delta = (headroom > NET_SKB_PAD ? headroom : NET_SKB_PAD) -
-			skb_headroom(skb);
-
-	if (delta < 0)
-		delta = 0;
+	return __skb_cow(skb, headroom, skb_cloned(skb));
+}
 
-	if (delta || skb_cloned(skb))
-		return pskb_expand_head(skb, (delta + (NET_SKB_PAD-1)) &
-				~(NET_SKB_PAD-1), 0, GFP_ATOMIC);
-	return 0;
+/**
+ *	skb_cow_head - skb_cow but only making the head writable
+ *	@skb: buffer to cow
+ *	@headroom: needed headroom
+ *
+ *	This function is identical to skb_cow except that we replace the
+ *	skb_cloned check by skb_header_cloned.  It should be used when
+ *	you only need to push on some header and do not need to modify
+ *	the data.
+ */
+static inline int skb_cow_head(struct sk_buff *skb, unsigned int headroom)
+{
+	return __skb_cow(skb, headroom, skb_header_cloned(skb));
 }
 
 /**
@@ -1747,6 +1772,15 @@ static inline void skb_set_queue_mapping(struct sk_buff *skb, u16 queue_mapping)
 #endif
 }
 
+static inline u16 skb_get_queue_mapping(struct sk_buff *skb)
+{
+#ifdef CONFIG_NETDEVICES_MULTIQUEUE
+	return skb->queue_mapping;
+#else
+	return 0;
+#endif
+}
+
 static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_buff *from)
 {
 #ifdef CONFIG_NETDEVICES_MULTIQUEUE
@@ -1759,6 +1793,11 @@ static inline int skb_is_gso(const struct sk_buff *skb)
 	return skb_shinfo(skb)->gso_size;
 }
 
+static inline int skb_is_gso_v6(const struct sk_buff *skb)
+{
+	return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6;
+}
+
 static inline void skb_forward_csum(struct sk_buff *skb)
 {
 	/* Unfortunately we don't support this one.  Any brave souls? */
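As a usage illustration for the skb_cow_head() addition above: the helper targets paths that only prepend a header and leave the packet data untouched. The sketch below is illustrative only; the function name, the hdr_len parameter and the error handling are assumptions, not part of this patch.

#include <linux/skbuff.h>
#include <linux/string.h>

/* Illustrative sketch only; not taken from this patch. */
static int example_push_header(struct sk_buff *skb, unsigned int hdr_len)
{
	int err;

	/*
	 * Only the header area has to be private and writable here, so
	 * skb_cow_head() is enough; a full skb_cow() would also reallocate
	 * when just the data area is shared (e.g. a TCP retransmit clone).
	 */
	err = skb_cow_head(skb, hdr_len);
	if (err)
		return err;

	/* Sufficient headroom is now guaranteed; prepend and clear the header. */
	memset(skb_push(skb, hdr_len), 0, hdr_len);
	return 0;
}

skb_cow() remains the right call when the payload itself will be modified, since it passes skb_cloned() rather than skb_header_cloned() to __skb_cow().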