#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
+#include <linux/prefetch.h>
#include <linux/mii.h>
#include <asm/irq.h>
#include "sky2.h"
#define DRV_NAME "sky2"
-#define DRV_VERSION "0.9"
+#define DRV_VERSION "0.11"
#define PFX DRV_NAME " "
/*
gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
}
+/* Force a renegotiation.
+ * Re-runs PHY initialization for this port while holding phy_sema,
+ * which serializes PHY register access (the same semaphore guards the
+ * deferred phy_task — confirm against sky2_phy_task).  Callers use this
+ * instead of a full sky2_down()/sky2_up() cycle when only link
+ * parameters (autoneg, pause, advertising) changed.
+ */
+static void sky2_phy_reinit(struct sky2_port *sky2)
+{
+ down(&sky2->phy_sema);
+ sky2_phy_init(sky2->hw, sky2->port);
+ up(&sky2->phy_sema);
+}
+
static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
{
struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
/* Setup prefetch unit registers. This is the interface between
* hardware and driver list elements
*/
-static inline void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
+static void sky2_prefetch_init(struct sky2_hw *hw, u32 qaddr,
u64 addr, u32 last)
{
sky2_write32(hw, Y2_QADDR(qaddr, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
return 0;
err_out:
- if (sky2->rx_le)
+ if (sky2->rx_le) {
pci_free_consistent(hw->pdev, RX_LE_BYTES,
sky2->rx_le, sky2->rx_le_map);
- if (sky2->tx_le)
+ sky2->rx_le = NULL;
+ }
+ if (sky2->tx_le) {
pci_free_consistent(hw->pdev,
TX_RING_SIZE * sizeof(struct sky2_tx_le),
sky2->tx_le, sky2->tx_le_map);
- if (sky2->tx_ring)
- kfree(sky2->tx_ring);
- if (sky2->rx_ring)
- kfree(sky2->rx_ring);
+ sky2->tx_le = NULL;
+ }
+ kfree(sky2->tx_ring);
+ kfree(sky2->rx_ring);
+ sky2->tx_ring = NULL;
+ sky2->rx_ring = NULL;
return err;
}
if (skb_shinfo(skb)->tso_size)
++count;
- if (skb->ip_summed)
+ if (skb->ip_summed == CHECKSUM_HW)
++count;
return count;
return NETDEV_TX_LOCKED;
if (unlikely(tx_avail(sky2) < tx_le_req(skb))) {
- netif_stop_queue(dev);
+ /* There is a known but harmless race with lockless tx
+ * and netif_stop_queue.
+ */
+ if (!netif_queue_stopped(dev)) {
+ netif_stop_queue(dev);
+ printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
+ dev->name);
+ }
spin_unlock(&sky2->tx_lock);
- printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
- dev->name);
return NETDEV_TX_BUSY;
}
sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod,
&sky2->tx_last_put, TX_RING_SIZE);
- if (tx_avail(sky2) < MAX_SKB_TX_LE + 1)
+ if (tx_avail(sky2) <= MAX_SKB_TX_LE)
netif_stop_queue(dev);
out_unlock:
+/* Reclaim transmitted buffers.
+ * Walks the tx ring from tx_cons up to (exclusive) 'done' — the index
+ * reported by hardware — unmapping the DMA for each packet's head and
+ * fragments and freeing the skb.  tx_cons is updated under tx_lock at
+ * the end, and the queue is woken if enough slots were reclaimed.
+ */
static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
{
struct net_device *dev = sky2->netdev;
+ struct pci_dev *pdev = sky2->hw->pdev;
+ u16 nxt, put;
unsigned i;
- if (done == sky2->tx_cons)
- return;
+ BUG_ON(done >= TX_RING_SIZE);
if (unlikely(netif_msg_tx_done(sky2)))
printk(KERN_DEBUG "%s: tx done, up to %u\n",
dev->name, done);
+ /* Each head slot's re->idx records the ring index just past that
+ * packet (head + fragments), so it doubles as the loop increment.
+ */
+ for (put = sky2->tx_cons; put != done; put = nxt) {
+ struct tx_ring_info *re = sky2->tx_ring + put;
+ struct sk_buff *skb = re->skb;
- while (sky2->tx_cons != done) {
- struct tx_ring_info *re = sky2->tx_ring + sky2->tx_cons;
- struct sk_buff *skb;
+ nxt = re->idx;
+ BUG_ON(nxt >= TX_RING_SIZE);
+ prefetch(sky2->tx_ring + nxt);
/* Check for partial status */
- if (tx_dist(sky2->tx_cons, done)
- < tx_dist(sky2->tx_cons, re->idx))
- goto out;
+ if (tx_dist(put, done) < tx_dist(put, nxt))
+ break;
skb = re->skb;
- pci_unmap_single(sky2->hw->pdev,
- pci_unmap_addr(re, mapaddr),
+ pci_unmap_single(pdev, pci_unmap_addr(re, mapaddr),
skb_headlen(skb), PCI_DMA_TODEVICE);
+ /* Fragment mappings live in the slots following the head. */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
struct tx_ring_info *fre;
- fre =
- sky2->tx_ring + (sky2->tx_cons + i +
- 1) % TX_RING_SIZE;
- pci_unmap_page(sky2->hw->pdev,
- pci_unmap_addr(fre, mapaddr),
- skb_shinfo(skb)->frags[i].size,
+ fre = sky2->tx_ring + (put + i + 1) % TX_RING_SIZE;
+ pci_unmap_page(pdev, pci_unmap_addr(fre, mapaddr),
+ skb_shinfo(skb)->frags[i].size,
PCI_DMA_TODEVICE);
}
dev_kfree_skb_any(skb);
-
- sky2->tx_cons = re->idx;
}
-out:
+ /* tx_lock only guards the tx_cons update and the queue wake. */
+ spin_lock(&sky2->tx_lock);
+ sky2->tx_cons = put;
if (netif_queue_stopped(dev) && tx_avail(sky2) > MAX_SKB_TX_LE)
netif_wake_queue(dev);
spin_unlock(&sky2->tx_lock);
}
/* Cleanup all untransmitted buffers, assume transmitter not running */
-static inline void sky2_tx_clean(struct sky2_port *sky2)
+static void sky2_tx_clean(struct sky2_port *sky2)
{
+ /* Completing up to tx_prod unmaps and frees every queued skb. */
sky2_tx_complete(sky2, sky2->tx_prod);
}
unsigned port = sky2->port;
u16 ctrl;
+ /* Never really got started! */
+ if (!sky2->tx_le)
+ return 0;
+
if (netif_msg_ifdown(sky2))
printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
sky2->tx_le, sky2->tx_le_map);
kfree(sky2->tx_ring);
+ sky2->tx_le = NULL;
+ sky2->rx_le = NULL;
+
+ sky2->rx_ring = NULL;
+ sky2->tx_ring = NULL;
+
return 0;
}
| PHY_M_AN_ASP);
}
- sky2_phy_reset(hw, port);
-
netif_carrier_off(sky2->netdev);
netif_stop_queue(sky2->netdev);
+/* Netdev tx watchdog handler: recover a stuck transmitter.
+ * Stops the queue and the tx BMU, resets the prefetch unit, frees all
+ * pending tx buffers, then reinitializes the queue and prefetch unit
+ * before waking the queue again.
+ */
static void sky2_tx_timeout(struct net_device *dev)
{
struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ unsigned txq = txqaddr[sky2->port];
if (netif_msg_timer(sky2))
printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
- sky2_write32(sky2->hw, Q_ADDR(txqaddr[sky2->port], Q_CSR), BMU_STOP);
- sky2_read32(sky2->hw, Q_ADDR(txqaddr[sky2->port], Q_CSR));
+ netif_stop_queue(dev);
+
+ /* Stop the transmit BMU; the read back presumably flushes the
+ * posted PCI write — confirm against chip documentation.
+ */
+ sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP);
+ sky2_read32(hw, Q_ADDR(txq, Q_CSR));
+
+ sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
+ /* Transmitter is stopped, so it is safe to free everything queued. */
sky2_tx_clean(sky2);
+
+ sky2_qset(hw, txq);
+ sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1);
+
+ netif_wake_queue(dev);
}
sky2_write8(hw, RB_ADDR(rxqaddr[sky2->port], RB_CTRL), RB_ENA_OP_MD);
err = sky2_rx_start(sky2);
- gma_write16(hw, sky2->port, GM_GP_CTRL, ctl);
-
- netif_poll_disable(hw->dev[0]);
- netif_wake_queue(dev);
sky2_write32(hw, B0_IMSK, hw->intr_mask);
+ if (err)
+ dev_close(dev);
+ else {
+ gma_write16(hw, sky2->port, GM_GP_CTRL, ctl);
+
+ netif_poll_enable(hw->dev[0]);
+ netif_wake_queue(dev);
+ }
+
return err;
}
sky2->netdev->name, sky2->rx_next, status, length);
sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
+ prefetch(sky2->rx_ring + sky2->rx_next);
if (status & GMR_FS_ANY_ERR)
goto error;
if (!(status & GMR_FS_RX_OK))
goto resubmit;
+ if ((status >> 16) != length || length > sky2->rx_bufsize)
+ goto oversize;
+
if (length < copybreak) {
skb = alloc_skb(length + 2, GFP_ATOMIC);
if (!skb)
return skb;
+oversize:
+ ++sky2->net_stats.rx_over_errors;
+ goto resubmit;
+
error:
+ ++sky2->net_stats.rx_errors;
+
if (netif_msg_rx_err(sky2))
printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
sky2->netdev->name, status, length);
/*
* Check for transmit complete
*/
-static inline void sky2_tx_check(struct sky2_hw *hw, int port)
-{
- struct net_device *dev = hw->dev[port];
+/* Sentinel meaning "no OP_TXINDEXLE status seen for this port". */
+#define TX_NO_STATUS 0xffff
- if (dev && netif_running(dev)) {
- sky2_tx_complete(netdev_priv(dev),
- sky2_read16(hw, port == 0
- ? STAT_TXA1_RIDX : STAT_TXA2_RIDX));
+/* Reclaim tx buffers for 'port' up to index 'last'.
+ * 'last' now comes from the OP_TXINDEXLE entry in the status ring
+ * rather than a register read, so a TX_NO_STATUS value means there was
+ * nothing to do this poll.
+ */
+static inline void sky2_tx_check(struct sky2_hw *hw, int port, u16 last)
+{
+ if (last != TX_NO_STATUS) {
+ struct net_device *dev = hw->dev[port];
+ if (dev && netif_running(dev)) {
+ struct sky2_port *sky2 = netdev_priv(dev);
+ sky2_tx_complete(sky2, last);
+ }
+ }
}
unsigned int to_do = min(dev0->quota, *budget);
unsigned int work_done = 0;
u16 hwidx;
+ u16 tx_done[2] = { TX_NO_STATUS, TX_NO_STATUS };
- sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
hwidx = sky2_read16(hw, STAT_PUT_IDX);
BUG_ON(hwidx >= STATUS_RING_SIZE);
- rmb();
+ rmb();
while (hwidx != hw->st_idx) {
struct sky2_status_le *le = hw->st_le + hw->st_idx;
hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE;
prefetch(hw->st_le + hw->st_idx);
- BUG_ON(le->link >= hw->ports || !hw->dev[le->link]);
-
BUG_ON(le->link >= 2);
dev = hw->dev[le->link];
if (dev == NULL || !netif_running(dev))
break;
case OP_TXINDEXLE:
- /* pick up transmit status later */
+ /* TX index reports status for both ports */
+ tx_done[0] = status & 0xffff;
+ tx_done[1] = ((status >> 24) & 0xff)
+ | (u16)(length & 0xf) << 8;
break;
default:
}
exit_loop:
- sky2_tx_check(hw, 0);
- sky2_tx_check(hw, 1);
-
+ sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
mmiowb();
- if (work_done < to_do) {
- /*
- * Another chip workaround, need to restart TX timer if status
- * LE was handled. WA_DEV_43_418
- */
+ sky2_tx_check(hw, 0, tx_done[0]);
+ sky2_tx_check(hw, 1, tx_done[1]);
+
+ if (sky2_read16(hw, STAT_PUT_IDX) == hw->st_idx) {
+ /* need to restart TX timer */
if (is_ec_a1(hw)) {
sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_STOP);
sky2_write8(hw, STAT_TX_TIMER_CTRL, TIM_START);
sky2_write8(hw, B0_Y2LED, LED_STAT_ON);
- /* Turn on descriptor polling (every 75us) */
- sky2_write32(hw, B28_DPT_INI, sky2_us2clk(hw, 75));
- sky2_write8(hw, B28_DPT_CTRL, DPT_START);
+ /* Turn off descriptor polling */
+ sky2_write32(hw, B28_DPT_CTRL, DPT_STOP);
/* Turn off receive timestamp */
sky2_write8(hw, GMAC_TI_ST_CTRL, GMT_ST_STOP);
/* Set the list last index */
sky2_write16(hw, STAT_LAST_IDX, STATUS_RING_SIZE - 1);
- sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
-
/* These status setup values are copied from SysKonnect's driver */
if (is_ec_a1(hw)) {
/* WA for dev. #4.3 */
/* set Status-FIFO ISR watermark */
sky2_write8(hw, STAT_FIFO_ISR_WM, 0x07); /* WA for dev. #4.18 */
-
+ sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 10000));
} else {
- sky2_write16(hw, STAT_TX_IDX_TH, 0x000a);
-
- /* set Status-FIFO watermark */
- sky2_write8(hw, STAT_FIFO_WM, 0x10);
+ sky2_write16(hw, STAT_TX_IDX_TH, 10);
+ sky2_write8(hw, STAT_FIFO_WM, 16);
/* set Status-FIFO ISR watermark */
if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0)
- sky2_write8(hw, STAT_FIFO_ISR_WM, 0x10);
-
- else /* WA dev 4.109 */
- sky2_write8(hw, STAT_FIFO_ISR_WM, 0x04);
+ sky2_write8(hw, STAT_FIFO_ISR_WM, 4);
+ else
+ sky2_write8(hw, STAT_FIFO_ISR_WM, 16);
- sky2_write32(hw, STAT_ISR_TIMER_INI, 0x0190);
+ sky2_write32(hw, STAT_TX_TIMER_INI, sky2_us2clk(hw, 1000));
+ sky2_write32(hw, STAT_LEV_TIMER_INI, sky2_us2clk(hw, 100));
+ sky2_write32(hw, STAT_ISR_TIMER_INI, sky2_us2clk(hw, 20));
}
/* enable status unit */
sky2->autoneg = ecmd->autoneg;
sky2->advertising = ecmd->advertising;
- if (netif_running(dev)) {
- sky2_down(dev);
- sky2_up(dev);
- }
+ if (netif_running(dev))
+ sky2_phy_reinit(sky2);
return 0;
}
+/* Restart autonegotiation (ethtool nway_reset hook, by convention —
+ * confirm against the ethtool_ops table).  Rejects the request with
+ * -EINVAL unless autonegotiation is enabled.
+ */
static int sky2_nway_reset(struct net_device *dev)
{
struct sky2_port *sky2 = netdev_priv(dev);
- struct sky2_hw *hw = sky2->hw;
if (sky2->autoneg != AUTONEG_ENABLE)
return -EINVAL;
- netif_stop_queue(dev);
-
- down(&sky2->phy_sema);
- sky2_phy_reset(hw, sky2->port);
- sky2_phy_init(hw, sky2->port);
- up(&sky2->phy_sema);
+ /* phy_sema locking is handled inside sky2_phy_reinit. */
+ sky2_phy_reinit(sky2);
return 0;
}
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sockaddr *addr = p;
- int err = 0;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- sky2_down(dev);
memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
memcpy_toio(sky2->hw->regs + B2_MAC_1 + sky2->port * 8,
dev->dev_addr, ETH_ALEN);
memcpy_toio(sky2->hw->regs + B2_MAC_2 + sky2->port * 8,
dev->dev_addr, ETH_ALEN);
- if (dev->flags & IFF_UP)
- err = sky2_up(dev);
- return err;
+
+ if (netif_running(dev))
+ sky2_phy_reinit(sky2);
+
+ return 0;
}
static void sky2_set_multicast(struct net_device *dev)
sky2->tx_pause = ecmd->tx_pause != 0;
sky2->rx_pause = ecmd->rx_pause != 0;
- if (netif_running(dev)) {
- sky2_down(dev);
- err = sky2_up(dev);
- }
+ sky2_phy_reinit(sky2);
return err;
}
sky2->rx_pending = ering->rx_pending;
sky2->tx_pending = ering->tx_pending;
- if (netif_running(dev))
+ if (netif_running(dev)) {
err = sky2_up(dev);
+ if (err)
+ dev_close(dev);
+ else
+ sky2_set_multicast(dev);
+ }
return err;
}
spin_lock_init(&sky2->tx_lock);
/* Auto speed and flow control */
sky2->autoneg = AUTONEG_ENABLE;
- sky2->tx_pause = 0;
+ sky2->tx_pause = 1;
sky2->rx_pause = 1;
sky2->duplex = -1;
sky2->speed = -1;
sky2->advertising = sky2_supported_modes(hw);
- sky2->rx_csum = 1;
+
+ /* Receive checksum disabled for Yukon XL
+ * because of observed problems with incorrect
+ * values when multiple packets are received in one interrupt
+ */
+ sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL);
+
INIT_WORK(&sky2->phy_task, sky2_phy_task, sky2);
init_MUTEX(&sky2->phy_sema);
sky2->tx_pending = TX_DEF_PENDING;
if (dev) {
if (netif_running(dev)) {
netif_device_attach(dev);
- sky2_up(dev);
+ if (sky2_up(dev))
+ dev_close(dev);
}
}
}