RTL_CFG_2
};
+static void rtl_hw_start_8169(struct net_device *);
+static void rtl_hw_start_8168(struct net_device *);
+static void rtl_hw_start_8101(struct net_device *);
+
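+/* Per-config data: chip bring-up handler, MMIO PCI BAR and RX alignment. */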
static const struct {
+ void (*hw_start)(struct net_device *);
unsigned int region;
unsigned int align;
} rtl_cfg_info[] = {
- [RTL_CFG_0] = { 1, NET_IP_ALIGN },
- [RTL_CFG_1] = { 2, NET_IP_ALIGN },
- [RTL_CFG_2] = { 2, 8 }
+ [RTL_CFG_0] = { rtl_hw_start_8169, 1, NET_IP_ALIGN },
+ [RTL_CFG_1] = { rtl_hw_start_8168, 2, 8 },
+ [RTL_CFG_2] = { rtl_hw_start_8101, 2, 8 }
};
static struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
- { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_2 },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(0x1259, 0xc107), 0, 0, RTL_CFG_0 },
int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
void (*get_settings)(struct net_device *, struct ethtool_cmd *);
void (*phy_reset_enable)(void __iomem *);
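+	/* chip-specific hardware start, set from rtl_cfg_info at probe time */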
+ void (*hw_start)(struct net_device *);
unsigned int (*phy_reset_pending)(void __iomem *);
unsigned int (*link_ok)(void __iomem *);
struct delayed_work task;
static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance);
static int rtl8169_init_ring(struct net_device *dev);
-static void rtl8169_hw_start(struct net_device *dev);
+static void rtl_hw_start(struct net_device *dev);
static int rtl8169_close(struct net_device *dev);
-static void rtl8169_set_rx_mode(struct net_device *dev);
+static void rtl_set_rx_mode(struct net_device *dev);
static void rtl8169_tx_timeout(struct net_device *dev);
static struct net_device_stats *rtl8169_get_stats(struct net_device *dev);
static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *,
spin_unlock_irqrestore(&tp->lock, flags);
}
-static void rtl8169_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&tp->lock, flags);
- vlan_group_set_device(tp->vlgrp, vid, NULL);
- spin_unlock_irqrestore(&tp->lock, flags);
-}
-
static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
struct sk_buff *skb)
{
SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
dev->stop = rtl8169_close;
dev->tx_timeout = rtl8169_tx_timeout;
- dev->set_multicast_list = rtl8169_set_rx_mode;
+ dev->set_multicast_list = rtl_set_rx_mode;
dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
dev->irq = pdev->irq;
dev->base_addr = (unsigned long) ioaddr;
#ifdef CONFIG_R8169_VLAN
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
dev->vlan_rx_register = rtl8169_vlan_rx_register;
- dev->vlan_rx_kill_vid = rtl8169_vlan_rx_kill_vid;
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
tp->timer.data = (unsigned long) dev;
tp->timer.function = rtl8169_phy_timer;
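+	/* Select the bring-up routine matching this device's RTL_CFG_* entry. */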
+ tp->hw_start = rtl_cfg_info[ent->driver_data].hw_start;
+
spin_lock_init(&tp->lock);
rc = register_netdev(dev);
if (retval < 0)
goto err_release_ring_2;
- rtl8169_hw_start(dev);
+ rtl_hw_start(dev);
rtl8169_request_timer(dev);
(InterFrameGap << TxInterFrameGapShift));
}
-static void rtl8169_hw_start(struct net_device *dev)
+static void rtl_hw_start(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
- struct pci_dev *pdev = tp->pci_dev;
- u16 cmd;
u32 i;
/* Soft reset the chip. */
msleep_interruptible(1);
}
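+	/* Chip-specific part of the hardware start (8169, 8168 or 8101). */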
+ tp->hw_start(dev);
+
+ /* Enable all known interrupts by setting the interrupt mask. */
+ RTL_W16(IntrMask, rtl8169_intr_mask);
+
+ netif_start_queue(dev);
+}
+
+static void rtl_hw_start_8169(struct net_device *dev)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ void __iomem *ioaddr = tp->mmio_addr;
+ struct pci_dev *pdev = tp->pci_dev;
+ u16 cmd;
+
if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
RTL_W32(RxMissed, 0);
- rtl8169_set_rx_mode(dev);
+ rtl_set_rx_mode(dev);
/* no early-rx interrupts */
RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
+}
- /* Enable all known interrupts by setting the interrupt mask. */
- RTL_W16(IntrMask, rtl8169_intr_mask);
+static void rtl_hw_start_8168(struct net_device *dev)
+{
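+	/* No 8168-specific bring-up yet: reuse the 8169 sequence. */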
+ rtl_hw_start_8169(dev);
+}
- netif_start_queue(dev);
+static void rtl_hw_start_8101(struct net_device *dev)
+{
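+	/* No 8101-specific bring-up yet: reuse the 8169 sequence. */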
+ rtl_hw_start_8169(dev);
}
static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
netif_poll_enable(dev);
- rtl8169_hw_start(dev);
+ rtl_hw_start(dev);
rtl8169_request_timer(dev);
rtl8169_mark_to_asic(desc, rx_buf_sz);
}
-static int rtl8169_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
- struct RxDesc *desc, int rx_buf_sz,
- unsigned int align)
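+/* Returns the freshly mapped skb on success, NULL on allocation failure. */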
+static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
+ struct net_device *dev,
+ struct RxDesc *desc, int rx_buf_sz,
+ unsigned int align)
{
struct sk_buff *skb;
dma_addr_t mapping;
- int ret = 0;
- skb = dev_alloc_skb(rx_buf_sz + align);
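+	/* netdev_alloc_skb binds the skb to dev and reserves NET_SKB_PAD headroom. */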
+ skb = netdev_alloc_skb(dev, rx_buf_sz + align);
if (!skb)
goto err_out;
skb_reserve(skb, (align - 1) & (unsigned long)skb->data);
- *sk_buff = skb;
mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
PCI_DMA_FROMDEVICE);
rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
-
out:
- return ret;
+ return skb;
err_out:
- ret = -ENOMEM;
rtl8169_make_unusable_by_asic(desc);
goto out;
}
{
u32 cur;
- for (cur = start; end - cur > 0; cur++) {
- int ret, i = cur % NUM_RX_DESC;
+ for (cur = start; end - cur != 0; cur++) {
+ struct sk_buff *skb;
+ unsigned int i = cur % NUM_RX_DESC;
+
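+		/* The ring indexes are free-running u32 counters: cur must not pass end. */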
+ WARN_ON((s32)(end - cur) < 0);
if (tp->Rx_skbuff[i])
continue;
- ret = rtl8169_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
- tp->RxDescArray + i, tp->rx_buf_sz, tp->align);
- if (ret < 0)
+ skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev,
+ tp->RxDescArray + i,
+ tp->rx_buf_sz, tp->align);
+ if (!skb)
break;
+
+ tp->Rx_skbuff[i] = skb;
}
return cur - start;
}
if (tp->dirty_rx == tp->cur_rx) {
rtl8169_init_ring_indexes(tp);
- rtl8169_hw_start(dev);
+ rtl_hw_start(dev);
netif_wake_queue(dev);
} else {
if (net_ratelimit()) {
return LargeSend | ((mss & MSSMask) << MSSShift);
}
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- const struct iphdr *ip = skb->nh.iph;
+ const struct iphdr *ip = ip_hdr(skb);
if (ip->protocol == IPPROTO_TCP)
return IPCS | TCPCS;
skb->ip_summed = CHECKSUM_NONE;
}
-static inline int rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
- struct RxDesc *desc, int rx_buf_sz,
- unsigned int align)
+static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
+ struct pci_dev *pdev, dma_addr_t addr,
+ unsigned int align)
{
- int ret = -1;
+ struct sk_buff *skb;
+ bool done = false;
- if (pkt_size < rx_copybreak) {
- struct sk_buff *skb;
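+	/* Copy only small packets; bigger ones keep their DMA-mapped buffer. */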
+ if (pkt_size >= rx_copybreak)
+ goto out;
- skb = dev_alloc_skb(pkt_size + align);
- if (skb) {
- skb_reserve(skb, (align - 1) & (unsigned long)skb->data);
- eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
- *sk_buff = skb;
- rtl8169_mark_to_asic(desc, rx_buf_sz);
- ret = 0;
- }
- }
- return ret;
+ skb = dev_alloc_skb(pkt_size + align);
+ if (!skb)
+ goto out;
+
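+	/* Make the received bytes visible to the CPU before copying them. */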
+ pci_dma_sync_single_for_cpu(pdev, addr, pkt_size, PCI_DMA_FROMDEVICE);
+ skb_reserve(skb, (align - 1) & (unsigned long)skb->data);
+ skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size);
+ *sk_buff = skb;
+ done = true;
+out:
+ return done;
}
static int
rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
} else {
struct sk_buff *skb = tp->Rx_skbuff[entry];
+ dma_addr_t addr = le64_to_cpu(desc->addr);
int pkt_size = (status & 0x00001FFF) - 4;
- void (*pci_action)(struct pci_dev *, dma_addr_t,
- size_t, int) = pci_dma_sync_single_for_device;
+ struct pci_dev *pdev = tp->pci_dev;
/*
* The driver does not support incoming fragmented
rtl8169_rx_csum(skb, desc);
- pci_dma_sync_single_for_cpu(tp->pci_dev,
- le64_to_cpu(desc->addr), tp->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
-
- if (rtl8169_try_rx_copy(&skb, pkt_size, desc,
- tp->rx_buf_sz, tp->align)) {
- pci_action = pci_unmap_single;
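+			/*
+			 * Copied: resync the buffer for the NIC and recycle the descriptor.
+			 * Not copied: unmap the full rx_buf_sz mapping and pass the skb up.
+			 */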
+ if (rtl8169_try_rx_copy(&skb, pkt_size, pdev, addr,
+ tp->align)) {
+ pci_dma_sync_single_for_device(pdev, addr,
+ pkt_size, PCI_DMA_FROMDEVICE);
+ rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
+ } else {
+ pci_unmap_single(pdev, addr, tp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
tp->Rx_skbuff[entry] = NULL;
}
- pci_action(tp->pci_dev, le64_to_cpu(desc->addr),
- tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
-
skb_put(skb, pkt_size);
skb->protocol = eth_type_trans(skb, dev);
return 0;
}
-static void
-rtl8169_set_rx_mode(struct net_device *dev)
+static void rtl_set_rx_mode(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;