*/
batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
- skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD,
+ skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
GFP_ATOMIC | __GFP_NOWARN);
if (unlikely(!skb))
goto no_skb;
+ /* Align the IP header to a 16-byte boundary */
+ skb_reserve(skb, NET_IP_ALIGN);
+
page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
if (!page) {
kfree_skb(skb);
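The NET_IP_ALIGN bytes requested above only pay off because skb_reserve() consumes them as headroom before any data lands in the skb: shifting skb->data by 2 bytes makes the 14-byte Ethernet header end at offset 16, so the IP header behind it starts on a 16-byte boundary (the patch's comment assumes the allocator hands back 16-byte-aligned data). A minimal sketch of that allocation pattern, with a hypothetical helper name and caller-supplied length that are not part of the driver:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *rx_alloc_aligned(struct net_device *dev,
                                        unsigned int len)
{
        struct sk_buff *skb;

        /* Ask for NET_IP_ALIGN extra bytes (typically 2; 0 on arches
         * where unaligned loads are cheap) beyond the payload. */
        skb = __netdev_alloc_skb(dev, len + NET_IP_ALIGN,
                                 GFP_ATOMIC | __GFP_NOWARN);
        if (unlikely(!skb))
                return NULL;

        /* Consume the extra bytes as headroom so the Ethernet header
         * starts 2 bytes in and the IP header that follows it is
         * 16-byte aligned. */
        skb_reserve(skb, NET_IP_ALIGN);
        return skb;
}

The second hunk below is from the driver's transmit path, where the number of ring slots needed for a packet's linear area is counted.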
[...]

unsigned int offset = offset_in_page(data);
unsigned int len = skb_headlen(skb);
- frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;
+ frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
frags);
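DIV_ROUND_UP(n, d) is defined in <linux/kernel.h> as ((n) + (d) - 1) / (d), so the new line computes exactly the same value as the open-coded expression it replaces; the change is purely for readability. Because offset comes from offset_in_page() it is always smaller than PAGE_SIZE, so DIV_ROUND_UP(offset + len, PAGE_SIZE) is the number of page-sized ring slots the linear area touches. A self-contained sketch of that calculation, with a hypothetical pages_spanned() helper used only for illustration:

#include <linux/kernel.h>       /* DIV_ROUND_UP() */
#include <linux/mm.h>           /* PAGE_SIZE, offset_in_page() */

/* How many pages does a buffer of len bytes starting at data touch? */
static unsigned int pages_spanned(const void *data, unsigned int len)
{
        unsigned int offset = offset_in_page(data);     /* always < PAGE_SIZE */

        return DIV_ROUND_UP(offset + len, PAGE_SIZE);
}

frags already counts the skb's page fragments at this point, so the check above warns when the total would exceed MAX_SKB_FRAGS + 1 slots, which is more than the driver can describe on the ring for a single packet.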