        /* Fill in the Rx buffers.  Handle allocation failure gracefully. */
        for (i = 0; i < RX_RING_SIZE; i++) {
-               struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz);
+               struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
                rp->rx_skbuff[i] = skb;
                if (skb == NULL)
                        break;

[...]

                        /* Check if the packet is long enough to accept without
                           copying to a minimally-sized skbuff. */
                        if (pkt_len < rx_copybreak &&
-                               (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
-                               skb_reserve(skb, 2);    /* 16 byte align the IP header */
+                               (skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN)) != NULL) {
+                               skb_reserve(skb, NET_IP_ALIGN); /* 16 byte align the IP header */
                                pci_dma_sync_single_for_cpu(rp->pdev,
                                                            rp->rx_skbuff_dma[entry],
                                                            rp->rx_buf_sz,

[...]

                struct sk_buff *skb;
                entry = rp->dirty_rx % RX_RING_SIZE;
                if (rp->rx_skbuff[entry] == NULL) {
-                       skb = dev_alloc_skb(rp->rx_buf_sz);
+                       skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
                        rp->rx_skbuff[entry] = skb;
                        if (skb == NULL)
                                break;  /* Better luck next round. */
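
For reference, all three hunks make the same change: dev_alloc_skb(), which takes only a length, becomes netdev_alloc_skb(), which also takes the net_device and records it in the new skb (skb->dev). The copybreak hunk additionally replaces the hard-coded 2-byte pad with NET_IP_ALIGN (typically 2), so the IP header behind the 14-byte Ethernet header still lands on an aligned boundary. The sketch below only condenses the ring-refill pattern of the first and third hunks under assumed names (my_rx_ring, my_refill_one, MY_RX_RING_SIZE); it is illustrative, not code from the driver.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define MY_RX_RING_SIZE	64

struct my_rx_ring {			/* stand-in for the driver's private struct */
	struct sk_buff *rx_skbuff[MY_RX_RING_SIZE];
	unsigned int rx_buf_sz;
};

static int my_refill_one(struct net_device *dev, struct my_rx_ring *ring,
			 unsigned int entry)
{
	/*
	 * netdev_alloc_skb() allocates the same buffer dev_alloc_skb() did,
	 * but also sets skb->dev to the owning device, which is why the
	 * conversion passes dev through at every call site.
	 */
	struct sk_buff *skb = netdev_alloc_skb(dev, ring->rx_buf_sz);

	if (skb == NULL)
		return -ENOMEM;		/* leave the slot empty; better luck next round */

	ring->rx_skbuff[entry] = skb;
	return 0;
}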