rp->rx_bytes += skb->len;
skb->protocol = eth_type_trans(skb, np->dev);
+ skb_record_rx_queue(skb, rp->rx_channel);
netif_receive_skb(skb);
return num_rcr;
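Note on the hunk above: the added skb_record_rx_queue() call tags the skb with the hardware channel it arrived on, so later queue selection (e.g. on a forwarding path) can reuse that information. A minimal sketch of the same receive-completion pattern, where example_rx_deliver(), "chan" and "rx_bytes" are illustrative stand-ins for niu's per-ring state:

	/* Sketch only (assumes <linux/netdevice.h>): per-skb rx completion. */
	static void example_rx_deliver(struct net_device *dev, struct sk_buff *skb,
				       u16 chan, u64 *rx_bytes)
	{
		*rx_bytes += skb->len;			  /* byte accounting */
		skb->protocol = eth_type_trans(skb, dev); /* sets protocol and skb->dev */
		skb_record_rx_queue(skb, chan);		  /* remember the arriving hw queue */
		netif_receive_skb(skb);			  /* hand the frame to the stack */
	}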
work_done = niu_poll_core(np, lp, budget);
if (work_done < budget) {
- netif_rx_complete(napi);
+ napi_complete(napi);
niu_ldg_rearm(np, lp, 1);
}
return work_done;
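This hunk is part of the old-to-new NAPI API rename: netif_rx_complete() became napi_complete(), which takes only the napi_struct. A hedged sketch of the resulting poll-handler shape; example_priv, poll_rx_tx() and reenable_irq() are illustrative stand-ins for the driver's own state, work loop and interrupt re-arm:

	/* Sketch only (assumes <linux/netdevice.h>). */
	struct example_priv {
		struct napi_struct napi;
		void __iomem *regs;
	};

	static int poll_rx_tx(struct example_priv *priv, int budget);
	static void reenable_irq(struct example_priv *priv);

	static int example_napi_poll(struct napi_struct *napi, int budget)
	{
		struct example_priv *priv = container_of(napi, struct example_priv, napi);
		int work_done = poll_rx_tx(priv, budget);

		if (work_done < budget) {
			napi_complete(napi);	/* all pending work handled */
			reenable_irq(priv);	/* re-enable device interrupts */
		}
		return work_done;		/* must never exceed budget */
	}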
static void niu_schedule_napi(struct niu *np, struct niu_ldg *lp,
u64 v0, u64 v1, u64 v2)
{
- if (likely(netif_rx_schedule_prep(&lp->napi))) {
+ if (likely(napi_schedule_prep(&lp->napi))) {
lp->v0 = v0;
lp->v1 = v1;
lp->v2 = v2;
__niu_fastpath_interrupt(np, lp->ldg_num, v0);
- __netif_rx_schedule(&lp->napi);
+ __napi_schedule(&lp->napi);
}
}
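Likewise, netif_rx_schedule_prep()/__netif_rx_schedule() were renamed to napi_schedule_prep()/__napi_schedule(); the prep call atomically claims the NAPI instance so the interrupt handler masks the device and queues the poll at most once. A sketch of that interrupt-side pattern, reusing the example_priv stand-in above; mask_device_irqs() is illustrative:

	/* Sketch only (assumes <linux/interrupt.h> and <linux/netdevice.h>). */
	static void mask_device_irqs(struct example_priv *priv);

	static irqreturn_t example_interrupt(int irq, void *dev_id)
	{
		struct example_priv *priv = dev_id;

		if (napi_schedule_prep(&priv->napi)) {	/* claim the NAPI instance */
			mask_device_irqs(priv);		/* quiesce the device */
			__napi_schedule(&priv->napi);	/* queue the poll */
		}
		return IRQ_HANDLED;
	}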
ipv6 = ihl = 0;
switch (skb->protocol) {
- case __constant_htons(ETH_P_IP):
+ case cpu_to_be16(ETH_P_IP):
ip_proto = ip_hdr(skb)->protocol;
ihl = ip_hdr(skb)->ihl;
break;
- case __constant_htons(ETH_P_IPV6):
+ case cpu_to_be16(ETH_P_IPV6):
ip_proto = ipv6_hdr(skb)->nexthdr;
ihl = (40 >> 2);
ipv6 = 1;
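The __constant_htons() to cpu_to_be16() change is behavior-neutral for constants: skb->protocol is a __be16, and cpu_to_be16() on a compile-time constant folds to the same big-endian value while keeping sparse endianness checking clean. For illustration, the same idiom in a standalone helper (is_ip_packet() is a hypothetical name):

	/* Sketch only: matching a __be16 protocol field against constant EtherTypes. */
	static bool is_ip_packet(const struct sk_buff *skb)
	{
		switch (skb->protocol) {
		case cpu_to_be16(ETH_P_IP):	/* constant-folds to big-endian 0x0800 */
		case cpu_to_be16(ETH_P_IPV6):	/* 0x86DD */
			return true;
		default:
			return false;
		}
	}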
static void niu_pci_unmap_page(struct device *dev, u64 dma_address,
size_t size, enum dma_data_direction direction)
{
- return dma_unmap_page(dev, dma_address, size, direction);
+ dma_unmap_page(dev, dma_address, size, direction);
}
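dma_unmap_page() returns void, so the stray "return" in the void wrapper is dropped. For symmetry, the mapping side returns a dma_addr_t that should be checked with dma_mapping_error() before use; a hedged sketch of such a wrapper pair, with example_* names and the 0-on-failure convention purely illustrative:

	/* Sketch only: thin wrappers over the generic DMA API. */
	static u64 example_map_page(struct device *dev, struct page *page,
				    unsigned long offset, size_t size,
				    enum dma_data_direction dir)
	{
		dma_addr_t addr = dma_map_page(dev, page, offset, size, dir);

		if (dma_mapping_error(dev, addr))
			return 0;	/* caller treats 0 as mapping failure (illustrative) */
		return addr;
	}

	static void example_unmap_page(struct device *dev, u64 addr,
				       size_t size, enum dma_data_direction dir)
	{
		dma_unmap_page(dev, addr, size, dir);	/* void: nothing to return */
	}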
static u64 niu_pci_map_single(struct device *dev, void *cpu_addr,