#include <asm/io.h>
#define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0045"
+#define DRV_VERSION "EHEA_0046"
#define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
| NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
int x;
unsigned char *deb = adr;
for (x = 0; x < len; x += 16) {
- printk(DRV_NAME "%s adr=%p ofs=%04x %016lx %016lx\n", msg,
+ printk(DRV_NAME " %s adr=%p ofs=%04x %016lx %016lx\n", msg,
deb, x, *((u64*)&deb[0]), *((u64*)&deb[8]));
deb += 16;
}
{
struct ehea_port *port = param;
struct ehea_eqe *eqe;
+ struct ehea_qp *qp;
u32 qp_token;
eqe = ehea_poll_eq(port->qp_eq);
qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
ehea_error("QP aff_err: entry=0x%lx, token=0x%x",
eqe->entry, qp_token);
+
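+ /* look up the QP for this token and dump its firmware error data */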
+ qp = port->port_res[qp_token].qp;
+ ehea_error_data(port->adapter, qp->fw_handle);
eqe = ehea_poll_eq(port->qp_eq);
}
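+ /* schedule a port reset to recover from the QP error */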
+ queue_work(port->adapter->ehea_wq, &port->reset_task);
+
return IRQ_HANDLED;
}
event_mask, /* R6 */
0, 0, 0, 0); /* R7-R12 */
}
+
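+/* Retrieve the error data block for a resource from the hypervisor.
+ * The caller passes a page-sized buffer, handed over by absolute address.
+ */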
+u64 ehea_h_error_data(const u64 adapter_handle, const u64 ressource_handle,
+ void *rblock)
+{
+ return ehea_plpar_hcall_norets(H_ERROR_DATA,
+ adapter_handle, /* R4 */
+ ressource_handle, /* R5 */
+ virt_to_abs(rblock), /* R6 */
+ 0, 0, 0, 0); /* R7-R12 */
+}
u64 ehea_h_reset_events(const u64 adapter_handle, const u64 neq_handle,
const u64 event_mask);
+u64 ehea_h_error_data(const u64 adapter_handle, const u64 ressource_handle,
+ void *rblock);
+
#endif /* __EHEA_PHYP_H__ */
if (!qp)
return 0;
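+ /* disable the QP before freeing its firmware resource */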
+ ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle);
if (hret != H_SUCCESS) {
ehea_error("destroy_qp failed");
return ret;
}
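+/* Decode the error data block: data[0] carries the length, data[2] the
+ * resource type (0x8 = Queue Pair). The whole block is dumped below.
+ */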
+void print_error_data(u64 *data)
+{
+ int length;
+ u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]);
+ u64 resource = data[1];
+
+ length = EHEA_BMASK_GET(ERROR_DATA_LENGTH, data[0]);
+
+ if (length > EHEA_PAGESIZE)
+ length = EHEA_PAGESIZE;
+
+ if (type == 0x8) /* Queue Pair */
+ ehea_error("QP (resource=%lX) state: AER=0x%lX, AERR=0x%lX, "
+ "port=%lX", resource, data[6], data[12], data[22]);
+
+ ehea_dump(data, length, "error data");
+}
+
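+/* Fetch and log the error data for a resource. H_R_STATE means the
+ * resource currently has no error data available.
+ */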
+void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle)
+{
+ unsigned long ret;
+ u64 *rblock;
+
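+ /* called from interrupt context (QP affiliated error handler),
+ * so the allocation must not sleep */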
+ rblock = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+ if (!rblock) {
+ ehea_error("Cannot allocate rblock memory.");
+ return;
+ }
+ ret = ehea_h_error_data(adapter->handle,
+ res_handle,
+ rblock);
+
+ if (ret == H_R_STATE)
+ ehea_error("No error data is available: %lX.", res_handle);
+ else if (ret == H_SUCCESS)
+ print_error_data(rblock);
+ else
+ ehea_error("Error data could not be fetched: %lX", res_handle);
+
+ kfree(rblock);
+}
u64 entry;
};
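+/* fields of the error data block returned by ehea_h_error_data() */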
+#define ERROR_DATA_LENGTH EHEA_BMASK_IBM(52,63)
+#define ERROR_DATA_TYPE EHEA_BMASK_IBM(0,7)
+
static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)
{
struct ehea_page *current_page;
int ehea_reg_mr_adapter(struct ehea_adapter *adapter);
+void ehea_error_data(struct ehea_adapter *adapter, u64 res_handle);
+
#endif /* __EHEA_QMR_H__ */
#include "gianfar.h"
#define GFAR_ATTR(_name) \
-static ssize_t gfar_show_##_name(struct class_device *cdev, char *buf); \
-static ssize_t gfar_set_##_name(struct class_device *cdev, \
+static ssize_t gfar_show_##_name(struct device *dev, \
+ struct device_attribute *attr, char *buf); \
+static ssize_t gfar_set_##_name(struct device *dev, \
+ struct device_attribute *attr, \
const char *buf, size_t count); \
-static CLASS_DEVICE_ATTR(_name, 0644, gfar_show_##_name, gfar_set_##_name)
+static DEVICE_ATTR(_name, 0644, gfar_show_##_name, gfar_set_##_name)
#define GFAR_CREATE_FILE(_dev, _name) \
- class_device_create_file(&_dev->class_dev, &class_device_attr_##_name)
+ device_create_file(&_dev->dev, &dev_attr_##_name)
GFAR_ATTR(bd_stash);
GFAR_ATTR(rx_stash_size);
GFAR_ATTR(fifo_starve);
GFAR_ATTR(fifo_starve_off);
-#define to_net_dev(cd) container_of(cd, struct net_device, class_dev)
-
-static ssize_t gfar_show_bd_stash(struct class_device *cdev, char *buf)
+static ssize_t gfar_show_bd_stash(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct net_device *dev = to_net_dev(cdev);
- struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_private *priv = netdev_priv(to_net_dev(dev));
- return sprintf(buf, "%s\n", priv->bd_stash_en? "on" : "off");
+ return sprintf(buf, "%s\n", priv->bd_stash_en ? "on" : "off");
}
-static ssize_t gfar_set_bd_stash(struct class_device *cdev,
- const char *buf, size_t count)
+static ssize_t gfar_set_bd_stash(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct net_device *dev = to_net_dev(cdev);
- struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_private *priv = netdev_priv(to_net_dev(dev));
int new_setting = 0;
u32 temp;
unsigned long flags;
/* Find out the new setting */
- if (!strncmp("on", buf, count-1) || !strncmp("1", buf, count-1))
+ if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
new_setting = 1;
- else if (!strncmp("off", buf, count-1) || !strncmp("0", buf, count-1))
+ else if (!strncmp("off", buf, count - 1)
+ || !strncmp("0", buf, count - 1))
new_setting = 0;
else
return count;
return count;
}
-static ssize_t gfar_show_rx_stash_size(struct class_device *cdev, char *buf)
+static ssize_t gfar_show_rx_stash_size(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct net_device *dev = to_net_dev(cdev);
- struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_private *priv = netdev_priv(to_net_dev(dev));
return sprintf(buf, "%d\n", priv->rx_stash_size);
}
-static ssize_t gfar_set_rx_stash_size(struct class_device *cdev,
- const char *buf, size_t count)
+static ssize_t gfar_set_rx_stash_size(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct net_device *dev = to_net_dev(cdev);
- struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_private *priv = netdev_priv(to_net_dev(dev));
unsigned int length = simple_strtoul(buf, NULL, 0);
u32 temp;
unsigned long flags;
return count;
}
-
/* Stashing will only be enabled when rx_stash_size != 0 */
-static ssize_t gfar_show_rx_stash_index(struct class_device *cdev, char *buf)
+static ssize_t gfar_show_rx_stash_index(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- struct net_device *dev = to_net_dev(cdev);
- struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_private *priv = netdev_priv(to_net_dev(dev));
return sprintf(buf, "%d\n", priv->rx_stash_index);
}
-static ssize_t gfar_set_rx_stash_index(struct class_device *cdev,
- const char *buf, size_t count)
+static ssize_t gfar_set_rx_stash_index(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct net_device *dev = to_net_dev(cdev);
- struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_private *priv = netdev_priv(to_net_dev(dev));
unsigned short index = simple_strtoul(buf, NULL, 0);
u32 temp;
unsigned long flags;
return count;
}
-static ssize_t gfar_show_fifo_threshold(struct class_device *cdev, char *buf)
+static ssize_t gfar_show_fifo_threshold(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- struct net_device *dev = to_net_dev(cdev);
- struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_private *priv = netdev_priv(to_net_dev(dev));
return sprintf(buf, "%d\n", priv->fifo_threshold);
}
-static ssize_t gfar_set_fifo_threshold(struct class_device *cdev,
- const char *buf, size_t count)
+static ssize_t gfar_set_fifo_threshold(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct net_device *dev = to_net_dev(cdev);
- struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_private *priv = netdev_priv(to_net_dev(dev));
unsigned int length = simple_strtoul(buf, NULL, 0);
u32 temp;
unsigned long flags;
return count;
}
-static ssize_t gfar_show_fifo_starve(struct class_device *cdev, char *buf)
+static ssize_t gfar_show_fifo_starve(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- struct net_device *dev = to_net_dev(cdev);
- struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_private *priv = netdev_priv(to_net_dev(dev));
return sprintf(buf, "%d\n", priv->fifo_starve);
}
-
-static ssize_t gfar_set_fifo_starve(struct class_device *cdev,
- const char *buf, size_t count)
+static ssize_t gfar_set_fifo_starve(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct net_device *dev = to_net_dev(cdev);
- struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_private *priv = netdev_priv(to_net_dev(dev));
unsigned int num = simple_strtoul(buf, NULL, 0);
u32 temp;
unsigned long flags;
return count;
}
-static ssize_t gfar_show_fifo_starve_off(struct class_device *cdev, char *buf)
+static ssize_t gfar_show_fifo_starve_off(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- struct net_device *dev = to_net_dev(cdev);
- struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_private *priv = netdev_priv(to_net_dev(dev));
return sprintf(buf, "%d\n", priv->fifo_starve_off);
}
-static ssize_t gfar_set_fifo_starve_off(struct class_device *cdev,
- const char *buf, size_t count)
+static ssize_t gfar_set_fifo_starve_off(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct net_device *dev = to_net_dev(cdev);
- struct gfar_private *priv = netdev_priv(dev);
+ struct gfar_private *priv = netdev_priv(to_net_dev(dev));
unsigned int num = simple_strtoul(buf, NULL, 0);
u32 temp;
unsigned long flags;
int netxen_backup_crbinit(struct netxen_adapter *adapter);
int netxen_flash_erase_secondary(struct netxen_adapter *adapter);
int netxen_flash_erase_primary(struct netxen_adapter *adapter);
+void netxen_halt_pegs(struct netxen_adapter *adapter);
int netxen_rom_fast_write(struct netxen_adapter *adapter, int addr, int data);
int netxen_rom_se(struct netxen_adapter *adapter, int addr);
wol->wolopts = 0;
}
-static u32 netxen_nic_get_link(struct net_device *dev)
+static u32 netxen_nic_test_link(struct net_device *dev)
{
struct netxen_port *port = netdev_priv(dev);
struct netxen_adapter *adapter = port->adapter;
int ret;
if (flash_start == 0) {
+ netxen_halt_pegs(adapter);
ret = netxen_flash_unlock(adapter);
if (ret < 0) {
printk(KERN_ERR "%s: Flash unlock failed.\n",
{
if (eth_test->flags == ETH_TEST_FL_OFFLINE) { /* offline tests */
/* link test */
- if (!(data[4] = (u64) netxen_nic_get_link(dev)))
+ if (!(data[4] = (u64) netxen_nic_test_link(dev)))
eth_test->flags |= ETH_TEST_FL_FAILED;
if (netif_running(dev))
dev->open(dev);
} else { /* online tests */
/* link test */
- if (!(data[4] = (u64) netxen_nic_get_link(dev)))
+ if (!(data[4] = (u64) netxen_nic_test_link(dev)))
eth_test->flags |= ETH_TEST_FL_FAILED;
/* other tests pass by default */
.get_regs_len = netxen_nic_get_regs_len,
.get_regs = netxen_nic_get_regs,
.get_wol = netxen_nic_get_wol,
- .get_link = netxen_nic_get_link,
+ .get_link = ethtool_op_get_link,
.get_eeprom_len = netxen_nic_get_eeprom_len,
.get_eeprom = netxen_nic_get_eeprom,
.set_eeprom = netxen_nic_set_eeprom,
for (i = 0; i < size / sizeof(u32); i++) {
if (netxen_rom_fast_read(adapter, addr, ptr32) == -1)
return -1;
+ *ptr32 = cpu_to_le32(*ptr32);
ptr32++;
addr += sizeof(u32);
}
if (netxen_rom_fast_read(adapter, addr, &local) == -1)
return -1;
+ local = cpu_to_le32(local);
memcpy(ptr32, &local, (char *)buf + size - (char *)ptr32);
}
return ret;
}
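+/* Halt the four network protocol engines before touching the flash. */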
+void netxen_halt_pegs(struct netxen_adapter *adapter)
+{
+ netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_0 + 0x3c, 1);
+ netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_1 + 0x3c, 1);
+ netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_2 + 0x3c, 1);
+ netxen_crb_writelit_adapter(adapter, NETXEN_CRB_PEG_NET_3 + 0x3c, 1);
+}
+
int netxen_flash_unlock(struct netxen_adapter *adapter)
{
int ret = 0;
* the netdev which is associated with that device.
*/
- consumer = *(adapter->cmd_consumer);
+ consumer = le32_to_cpu(*(adapter->cmd_consumer));
if (last_consumer == consumer) { /* Ring is empty */
DPRINTK(INFO, "last_consumer %d == consumer %d\n",
last_consumer, consumer);
if (adapter->last_cmd_consumer == consumer &&
(((adapter->cmd_producer + 1) %
adapter->max_tx_desc_count) == adapter->last_cmd_consumer)) {
- consumer = *(adapter->cmd_consumer);
+ consumer = le32_to_cpu(*(adapter->cmd_consumer));
}
done = (adapter->last_cmd_consumer == consumer);
adapter->port_count++;
adapter->port[i] = port;
}
-
+#ifndef CONFIG_PPC64
writel(0, NETXEN_CRB_NORMALIZE(adapter, CRB_CMDPEG_STATE));
netxen_pinit_from_rom(adapter, 0);
udelay(500);
netxen_load_firmware(adapter);
netxen_phantom_init(adapter, NETXEN_NIC_PEG_TUNE);
+#endif
/*
* delay a while to ensure that the Pegs are up & running.
* Otherwise, we might see some flaky behaviour.
int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port)
{
- long reg = 0, ret = 0;
+ u32 reg = 0, ret = 0;
if (adapter->ahw.boardcfg.board_type == NETXEN_BRDTYPE_P2_SB31_10G_IMEZ) {
netxen_crb_writelit_adapter(adapter,
*/
struct phy_device * phy_connect(struct net_device *dev, const char *phy_id,
void (*handler)(struct net_device *), u32 flags,
- u32 interface)
+ phy_interface_t interface)
{
struct phy_device *phydev;
}
struct phy_device *phy_attach(struct net_device *dev,
- const char *phy_id, u32 flags, u32 interface)
+ const char *phy_id, u32 flags, phy_interface_t interface)
{
struct bus_type *bus = &mdio_bus_type;
struct phy_device *phydev;
* This is currently always BCM43xx_BUSTYPE_PCI
*/
u8 bustype;
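+ /* DMA mask supported by the device (30, 32 or 64 bit) */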
+ u64 dma_mask;
u16 board_vendor;
u16 board_type;
int tx)
{
dma_addr_t dmaaddr;
+ int direction = PCI_DMA_FROMDEVICE;
- if (tx) {
- dmaaddr = dma_map_single(&ring->bcm->pci_dev->dev,
- buf, len,
- DMA_TO_DEVICE);
- } else {
- dmaaddr = dma_map_single(&ring->bcm->pci_dev->dev,
+ if (tx)
+ direction = PCI_DMA_TODEVICE;
+
+ dmaaddr = pci_map_single(ring->bcm->pci_dev,
buf, len,
- DMA_FROM_DEVICE);
- }
+ direction);
return dmaaddr;
}
int tx)
{
if (tx) {
- dma_unmap_single(&ring->bcm->pci_dev->dev,
+ pci_unmap_single(ring->bcm->pci_dev,
addr, len,
- DMA_TO_DEVICE);
+ PCI_DMA_TODEVICE);
} else {
- dma_unmap_single(&ring->bcm->pci_dev->dev,
+ pci_unmap_single(ring->bcm->pci_dev,
addr, len,
- DMA_FROM_DEVICE);
+ PCI_DMA_FROMDEVICE);
}
}
{
assert(!ring->tx);
- dma_sync_single_for_cpu(&ring->bcm->pci_dev->dev,
- addr, len, DMA_FROM_DEVICE);
+ pci_dma_sync_single_for_cpu(ring->bcm->pci_dev,
+ addr, len, PCI_DMA_FROMDEVICE);
}
static inline
{
assert(!ring->tx);
- dma_sync_single_for_device(&ring->bcm->pci_dev->dev,
- addr, len, DMA_FROM_DEVICE);
+ pci_dma_sync_single_for_device(ring->bcm->pci_dev,
+ addr, len, PCI_DMA_FROMDEVICE);
}
/* Unmap and free a descriptor buffer. */
static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
{
- struct device *dev = &(ring->bcm->pci_dev->dev);
-
- ring->descbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
- &(ring->dmabase), GFP_KERNEL);
+ ring->descbase = pci_alloc_consistent(ring->bcm->pci_dev, BCM43xx_DMA_RINGMEMSIZE,
+ &(ring->dmabase));
if (!ring->descbase) {
- printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
- return -ENOMEM;
+ /* Allocation may have failed due to pci_alloc_consistent
+ insisting on use of GFP_DMA, which is more restrictive
+ than necessary... */
+ struct dma_desc *rx_ring;
+ dma_addr_t rx_ring_dma;
+
+ rx_ring = kzalloc(BCM43xx_DMA_RINGMEMSIZE, GFP_KERNEL);
+ if (!rx_ring)
+ goto out_err;
+
+ rx_ring_dma = pci_map_single(ring->bcm->pci_dev, rx_ring,
+ BCM43xx_DMA_RINGMEMSIZE,
+ PCI_DMA_BIDIRECTIONAL);
+
+ if (pci_dma_mapping_error(rx_ring_dma) ||
+ rx_ring_dma + BCM43xx_DMA_RINGMEMSIZE > ring->bcm->dma_mask) {
+ /* Sigh... */
+ if (!pci_dma_mapping_error(rx_ring_dma))
+ pci_unmap_single(ring->bcm->pci_dev,
+ rx_ring_dma, BCM43xx_DMA_RINGMEMSIZE,
+ PCI_DMA_BIDIRECTIONAL);
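+ /* retry the mapping once; a second attempt may land
+ * below the device's DMA mask */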
+ rx_ring_dma = pci_map_single(ring->bcm->pci_dev,
+ rx_ring, BCM43xx_DMA_RINGMEMSIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(rx_ring_dma) ||
+ rx_ring_dma + BCM43xx_DMA_RINGMEMSIZE > ring->bcm->dma_mask) {
+ assert(0);
+ if (!pci_dma_mapping_error(rx_ring_dma))
+ pci_unmap_single(ring->bcm->pci_dev,
+ rx_ring_dma, BCM43xx_DMA_RINGMEMSIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ goto out_err;
+ }
+ }
+
+ ring->descbase = rx_ring;
+ ring->dmabase = rx_ring_dma;
}
memset(ring->descbase, 0, BCM43xx_DMA_RINGMEMSIZE);
return 0;
+out_err:
+ printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
+ return -ENOMEM;
}
static void free_ringmemory(struct bcm43xx_dmaring *ring)
if (unlikely(!skb))
return -ENOMEM;
dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
+ /* This hardware bug work-around adapted from the b44 driver.
+ The chip may be unable to do PCI DMA to/from anything above 1GB */
+ if (pci_dma_mapping_error(dmaaddr) ||
+ dmaaddr + ring->rx_buffersize > ring->bcm->dma_mask) {
+ /* This one has 30-bit addressing... */
+ if (!pci_dma_mapping_error(dmaaddr))
+ pci_unmap_single(ring->bcm->pci_dev,
+ dmaaddr, ring->rx_buffersize,
+ PCI_DMA_FROMDEVICE);
+ dev_kfree_skb_any(skb);
+ skb = __dev_alloc_skb(ring->rx_buffersize, GFP_DMA);
+ if (skb == NULL)
+ return -ENOMEM;
+ dmaaddr = pci_map_single(ring->bcm->pci_dev,
+ skb->data, ring->rx_buffersize,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(dmaaddr) ||
+ dmaaddr + ring->rx_buffersize > ring->bcm->dma_mask) {
+ assert(0);
+ dev_kfree_skb_any(skb);
+ return -ENOMEM;
+ }
+ }
meta->skb = skb;
meta->dmaaddr = dmaaddr;
skb->dev = ring->bcm->net_dev;
err = dmacontroller_setup(ring);
if (err)
goto err_free_ringmemory;
+ return ring;
out:
+ printk(KERN_ERR PFX "Error in bcm43xx_setup_dmaring\n");
return ring;
err_free_ringmemory:
struct bcm43xx_dmaring *ring;
int err = -ENOMEM;
int dma64 = 0;
- u64 mask = bcm43xx_get_supported_dma_mask(bcm);
- int nobits;
- if (mask == DMA_64BIT_MASK) {
+ bcm->dma_mask = bcm43xx_get_supported_dma_mask(bcm);
+ if (bcm->dma_mask == DMA_64BIT_MASK)
dma64 = 1;
- nobits = 64;
- } else if (mask == DMA_32BIT_MASK)
- nobits = 32;
- else
- nobits = 30;
- err = pci_set_dma_mask(bcm->pci_dev, mask);
- err |= pci_set_consistent_dma_mask(bcm->pci_dev, mask);
- if (err) {
-#ifdef CONFIG_BCM43XX_PIO
- printk(KERN_WARNING PFX "DMA not supported on this device."
- " Falling back to PIO.\n");
- bcm->__using_pio = 1;
- return -ENOSYS;
-#else
- printk(KERN_ERR PFX "FATAL: DMA not supported and PIO not configured. "
- "Please recompile the driver with PIO support.\n");
- return -ENODEV;
-#endif /* CONFIG_BCM43XX_PIO */
- }
+ err = pci_set_dma_mask(bcm->pci_dev, bcm->dma_mask);
+ if (err)
+ goto no_dma;
+ err = pci_set_consistent_dma_mask(bcm->pci_dev, bcm->dma_mask);
+ if (err)
+ goto no_dma;
/* setup TX DMA channels. */
ring = bcm43xx_setup_dmaring(bcm, 0, 1, dma64);
dma->rx_ring3 = ring;
}
- dprintk(KERN_INFO PFX "%d-bit DMA initialized\n", nobits);
+ dprintk(KERN_INFO PFX "%d-bit DMA initialized\n",
+ (bcm->dma_mask == DMA_64BIT_MASK) ? 64 :
+ (bcm->dma_mask == DMA_32BIT_MASK) ? 32 : 30);
err = 0;
out:
return err;
err_destroy_tx0:
bcm43xx_destroy_dmaring(dma->tx_ring0);
dma->tx_ring0 = NULL;
- goto out;
+no_dma:
+#ifdef CONFIG_BCM43XX_PIO
+ printk(KERN_WARNING PFX "DMA not supported on this device."
+ " Falling back to PIO.\n");
+ bcm->__using_pio = 1;
+ return -ENOSYS;
+#else
+ printk(KERN_ERR PFX "FATAL: DMA not supported and PIO not configured. "
+ "Please recompile the driver with PIO support.\n");
+ return -ENODEV;
+#endif /* CONFIG_BCM43XX_PIO */
}
/* Generate a cookie for the TX header. */
struct bcm43xx_dmadesc_generic *desc;
struct bcm43xx_dmadesc_meta *meta;
dma_addr_t dmaaddr;
+ struct sk_buff *bounce_skb;
assert(skb_shinfo(skb)->nr_frags == 0);
skb->len - sizeof(struct bcm43xx_txhdr),
(cur_frag == 0),
generate_cookie(ring, slot));
+ dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
+ if (dma_mapping_error(dmaaddr) || dmaaddr + skb->len > ring->bcm->dma_mask) {
+ /* chip cannot handle DMA to/from > 1GB, use bounce buffer (copied from b44 driver) */
+ if (!dma_mapping_error(dmaaddr))
+ unmap_descbuffer(ring, dmaaddr, skb->len, 1);
+ bounce_skb = __dev_alloc_skb(skb->len, GFP_ATOMIC|GFP_DMA);
+ if (!bounce_skb)
+ return;
+ dmaaddr = map_descbuffer(ring, bounce_skb->data, bounce_skb->len, 1);
+ if (dma_mapping_error(dmaaddr) || dmaaddr + skb->len > ring->bcm->dma_mask) {
+ if (!dma_mapping_error(dmaaddr))
+ unmap_descbuffer(ring, dmaaddr, skb->len, 1);
+ dev_kfree_skb_any(bounce_skb);
+ assert(0);
+ return;
+ }
+ memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
+ dev_kfree_skb_any(skb);
+ skb = bounce_skb;
+ }
meta->skb = skb;
- dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
meta->dmaaddr = dmaaddr;
fill_descriptor(ring, desc, dmaaddr,
module_param_named(noleds, modparam_noleds, int, 0444);
MODULE_PARM_DESC(noleds, "Turn off all LED activity");
-#ifdef CONFIG_BCM43XX_DEBUG
static char modparam_fwpostfix[64];
module_param_string(fwpostfix, modparam_fwpostfix, 64, 0444);
-MODULE_PARM_DESC(fwpostfix, "Postfix for .fw files. Useful for debugging.");
-#else
-# define modparam_fwpostfix ""
-#endif /* CONFIG_BCM43XX_DEBUG*/
+MODULE_PARM_DESC(fwpostfix, "Postfix for .fw files. Useful for using multiple firmware image versions.");
/* If you want to debug with just a single device, enable this,
err = bcm43xx_pctl_set_crystal(bcm, 1);
if (err)
goto out;
- bcm43xx_pci_read_config16(bcm, PCI_STATUS, &pci_status);
- bcm43xx_pci_write_config16(bcm, PCI_STATUS, pci_status & ~PCI_STATUS_SIG_TARGET_ABORT);
+ err = bcm43xx_pci_read_config16(bcm, PCI_STATUS, &pci_status);
+ if (err)
+ goto out;
+ err = bcm43xx_pci_write_config16(bcm, PCI_STATUS, pci_status & ~PCI_STATUS_SIG_TARGET_ABORT);
out:
return err;
}
net_dev->base_addr = (unsigned long)bcm->mmio_addr;
- bcm43xx_pci_read_config16(bcm, PCI_SUBSYSTEM_VENDOR_ID,
+ err = bcm43xx_pci_read_config16(bcm, PCI_SUBSYSTEM_VENDOR_ID,
&bcm->board_vendor);
- bcm43xx_pci_read_config16(bcm, PCI_SUBSYSTEM_ID,
+ if (err)
+ goto err_iounmap;
+ err = bcm43xx_pci_read_config16(bcm, PCI_SUBSYSTEM_ID,
&bcm->board_type);
- bcm43xx_pci_read_config16(bcm, PCI_REVISION_ID,
+ if (err)
+ goto err_iounmap;
+ err = bcm43xx_pci_read_config16(bcm, PCI_REVISION_ID,
&bcm->board_revision);
+ if (err)
+ goto err_iounmap;
err = bcm43xx_chipset_attach(bcm);
if (err)
pci_release_regions(pci_dev);
err_pci_disable:
pci_disable_device(pci_dev);
+ printk(KERN_ERR PFX "Unable to attach board\n");
goto out;
}
if (phy->type == BCM43xx_PHYTYPE_A ||
phy->type == BCM43xx_PHYTYPE_G) {
range->num_bitrates = 8;
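+ /* rate constants are in units of 500kb/s,
+ * wireless extensions expect bit/s */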
- range->bitrate[i++] = IEEE80211_OFDM_RATE_6MB;
- range->bitrate[i++] = IEEE80211_OFDM_RATE_9MB;
- range->bitrate[i++] = IEEE80211_OFDM_RATE_12MB;
- range->bitrate[i++] = IEEE80211_OFDM_RATE_18MB;
- range->bitrate[i++] = IEEE80211_OFDM_RATE_24MB;
- range->bitrate[i++] = IEEE80211_OFDM_RATE_36MB;
- range->bitrate[i++] = IEEE80211_OFDM_RATE_48MB;
- range->bitrate[i++] = IEEE80211_OFDM_RATE_54MB;
+ range->bitrate[i++] = IEEE80211_OFDM_RATE_6MB * 500000;
+ range->bitrate[i++] = IEEE80211_OFDM_RATE_9MB * 500000;
+ range->bitrate[i++] = IEEE80211_OFDM_RATE_12MB * 500000;
+ range->bitrate[i++] = IEEE80211_OFDM_RATE_18MB * 500000;
+ range->bitrate[i++] = IEEE80211_OFDM_RATE_24MB * 500000;
+ range->bitrate[i++] = IEEE80211_OFDM_RATE_36MB * 500000;
+ range->bitrate[i++] = IEEE80211_OFDM_RATE_48MB * 500000;
+ range->bitrate[i++] = IEEE80211_OFDM_RATE_54MB * 500000;
}
if (phy->type == BCM43xx_PHYTYPE_B ||
phy->type == BCM43xx_PHYTYPE_G) {
range->num_bitrates += 4;
- range->bitrate[i++] = IEEE80211_CCK_RATE_1MB;
- range->bitrate[i++] = IEEE80211_CCK_RATE_2MB;
- range->bitrate[i++] = IEEE80211_CCK_RATE_5MB;
- range->bitrate[i++] = IEEE80211_CCK_RATE_11MB;
+ range->bitrate[i++] = IEEE80211_CCK_RATE_1MB * 500000;
+ range->bitrate[i++] = IEEE80211_CCK_RATE_2MB * 500000;
+ range->bitrate[i++] = IEEE80211_CCK_RATE_5MB * 500000;
+ range->bitrate[i++] = IEEE80211_CCK_RATE_11MB * 500000;
}
geo = ieee80211_get_geo(bcm->ieee);
if (j == IW_MAX_FREQUENCIES)
break;
range->freq[j].i = j + 1;
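+ /* geo frequencies are in MHz; report them as m * 10^e Hz */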
- range->freq[j].m = geo->a[i].freq;//FIXME?
+ range->freq[j].m = geo->a[i].freq * 100000;
range->freq[j].e = 1;
j++;
}
if (j == IW_MAX_FREQUENCIES)
break;
range->freq[j].i = j + 1;
- range->freq[j].m = geo->bg[i].freq;//FIXME?
+ range->freq[j].m = geo->bg[i].freq * 100000;
range->freq[j].e = 1;
j++;
}
/* Debugging stuff */
#ifdef CONFIG_IPW2100_DEBUG
-#define CONFIG_IPW2100_RX_DEBUG /* Reception debugging */
+#define IPW2100_RX_DEBUG /* Reception debugging */
#endif
MODULE_DESCRIPTION(DRV_DESCRIPTION);
priv->snapshot[0] = NULL;
}
-#ifdef CONFIG_IPW2100_DEBUG_C3
+#ifdef IPW2100_DEBUG_C3
static int ipw2100_snapshot_alloc(struct ipw2100_priv *priv)
{
int i;
* The size of the constructed ethernet
*
*/
-#ifdef CONFIG_IPW2100_RX_DEBUG
+#ifdef IPW2100_RX_DEBUG
static u8 packet_data[IPW_RX_NIC_BUFFER_LENGTH];
#endif
static void ipw2100_corruption_detected(struct ipw2100_priv *priv, int i)
{
-#ifdef CONFIG_IPW2100_DEBUG_C3
+#ifdef IPW2100_DEBUG_C3
struct ipw2100_status *status = &priv->status_queue.drv[i];
u32 match, reg;
int j;
}
#endif
-#ifdef CONFIG_IPW2100_DEBUG_C3
+#ifdef IPW2100_DEBUG_C3
/* Halt the firmware so we can get a good image */
write_register(priv->net_dev, IPW_REG_RESET_REG,
IPW_AUX_HOST_RESET_REG_STOP_MASTER);
skb_put(packet->skb, status->frame_size);
-#ifdef CONFIG_IPW2100_RX_DEBUG
+#ifdef IPW2100_RX_DEBUG
/* Make a copy of the frame so we can dump it to the logs if
* ieee80211_rx fails */
memcpy(packet_data, packet->skb->data,
#endif
if (!ieee80211_rx(priv->ieee, packet->skb, stats)) {
-#ifdef CONFIG_IPW2100_RX_DEBUG
+#ifdef IPW2100_RX_DEBUG
IPW_DEBUG_DROP("%s: Non consumed packet:\n",
priv->net_dev->name);
printk_buf(IPW_DL_DROP, packet_data, status->frame_size);
else
priv->power_mode = IPW_POWER_ENABLED | power_level;
-#ifdef CONFIG_IPW2100_TX_POWER
+#ifdef IPW2100_TX_POWER
if (priv->port_type == IBSS && priv->adhoc_power != DFTL_IBSS_TX_POWER) {
/* Set beacon interval */
cmd.host_command = TX_POWER_INDEX;
static int reset_mode(struct zd_mac *mac)
{
struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
- struct zd_ioreq32 ioreqs[3] = {
+ struct zd_ioreq32 ioreqs[] = {
{ CR_RX_FILTER, STA_RX_FILTER },
{ CR_SNIFFER_ON, 0U },
};
if (ieee->iw_mode == IW_MODE_MONITOR) {
ioreqs[0].value = 0xffffffff;
ioreqs[1].value = 0x1;
- ioreqs[2].value = ENC_SNIFFER;
}
- return zd_iowrite32a(&mac->chip, ioreqs, 3);
+ return zd_iowrite32a(&mac->chip, ioreqs, ARRAY_SIZE(ioreqs));
}
int zd_mac_open(struct net_device *netdev)
static int zd_mac_tx(struct zd_mac *mac, struct ieee80211_txb *txb, int pri)
{
int i, r;
+ struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
for (i = 0; i < txb->nr_frags; i++) {
struct sk_buff *skb = txb->fragments[i];
r = fill_ctrlset(mac, txb, i);
- if (r)
+ if (r) {
+ ieee->stats.tx_dropped++;
return r;
+ }
r = zd_usb_tx(&mac->chip.usb, skb->data, skb->len);
- if (r)
+ if (r) {
+ ieee->stats.tx_dropped++;
return r;
+ }
}
/* FIXME: shouldn't this be handled by the upper layers? */
*pstatus = status = zd_tail(buffer, length, sizeof(struct rx_status));
if (status->frame_status & ZD_RX_ERROR) {
- /* FIXME: update? */
+ struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
+ ieee->stats.rx_errors++;
+ if (status->frame_status & ZD_RX_TIMEOUT_ERROR)
+ ieee->stats.rx_missed_errors++;
+ else if (status->frame_status & ZD_RX_FIFO_OVERRUN_ERROR)
+ ieee->stats.rx_fifo_errors++;
+ else if (status->frame_status & ZD_RX_DECRYPTION_ERROR)
+ ieee->ieee_stats.rx_discards_undecryptable++;
+ else if (status->frame_status & ZD_RX_CRC32_ERROR) {
+ ieee->stats.rx_crc_errors++;
+ ieee->ieee_stats.rx_fcs_errors++;
+ }
+ else if (status->frame_status & ZD_RX_CRC16_ERROR)
+ ieee->stats.rx_crc_errors++;
return -EINVAL;
}
+
memset(stats, 0, sizeof(struct ieee80211_rx_stats));
stats->len = length - (ZD_PLCP_HEADER_SIZE + IEEE80211_FCS_LEN +
+ sizeof(struct rx_status));
if (skb->len < ZD_PLCP_HEADER_SIZE + IEEE80211_1ADDR_LEN +
IEEE80211_FCS_LEN + sizeof(struct rx_status))
{
- dev_dbg_f(zd_mac_dev(mac), "Packet with length %u to small.\n",
- skb->len);
+ ieee->stats.rx_errors++;
+ ieee->stats.rx_length_errors++;
goto free_skb;
}
r = fill_rx_stats(&stats, &status, mac, skb->data, skb->len);
if (r) {
- /* Only packets with rx errors are included here. */
+ /* Only packets with rx errors are included here.
+ * The error stats have already been set in fill_rx_stats.
+ */
goto free_skb;
}
r = filter_rx(ieee, skb->data, skb->len, &stats);
if (r <= 0) {
- if (r < 0)
+ if (r < 0) {
+ ieee->stats.rx_errors++;
dev_dbg_f(zd_mac_dev(mac), "Error in packet.\n");
+ }
goto free_skb;
}
skb = dev_alloc_skb(sizeof(struct zd_rt_hdr) + length);
if (!skb) {
+ struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
dev_warn(zd_mac_dev(mac), "Could not allocate skb.\n");
+ ieee->stats.rx_dropped++;
return -ENOMEM;
}
skb_reserve(skb, sizeof(struct zd_rt_hdr));
static inline void handle_retry_failed_int(struct urb *urb)
{
+ struct zd_usb *usb = urb->context;
+ struct zd_mac *mac = zd_usb_to_mac(usb);
+ struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
+
+ ieee->stats.tx_errors++;
+ ieee->ieee_stats.tx_retry_limit_exceeded++;
dev_dbg_f(urb_dev(urb), "retry failed interrupt\n");
}
if (length < sizeof(struct rx_length_info)) {
/* It's not a complete packet anyhow. */
+ struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
+ ieee->stats.rx_errors++;
+ ieee->stats.rx_length_errors++;
return;
}
length_info = (struct rx_length_info *)
goto error;
}
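+ /* make sure the device is in a known state before initializing it */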
+ usb_reset_device(interface_to_usbdev(intf));
+
netdev = zd_netdev_alloc(intf);
if (netdev == NULL) {
r = -ENOMEM;
r = usb_register(&driver);
if (r) {
+ destroy_workqueue(zd_workqueue);
printk(KERN_ERR "%s usb_register() failed. Error number %d\n",
driver.name, r);
return r;
.attrs = netstat_attrs,
};
-#ifdef WIRELESS_EXT
+#ifdef CONFIG_WIRELESS_EXT
/* helper function that does all the locking etc for wireless stats */
static ssize_t wireless_show(struct device *d, char *buf,
ssize_t (*format)(const struct iw_statistics *,
if (net->get_stats)
*groups++ = &netstat_group;
-#ifdef WIRELESS_EXT
+#ifdef CONFIG_WIRELESS_EXT
if (net->wireless_handlers && net->wireless_handlers->get_wireless_stats)
*groups++ = &wireless_group;
#endif
if (host_encrypt)
ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
else if (host_build_iv) {
- struct ieee80211_crypt_data *crypt;
-
- crypt = ieee->crypt[ieee->tx_keyidx];
atomic_inc(&crypt->refcnt);
if (crypt->ops->build_iv)
crypt->ops->build_iv(skb_frag, hdr_len,