1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used
31 * rx_ring_len: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8.
33 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
35 * Tx descriptors that can be associated with each corresponding FIFO.
36 ************************************************************************/
38 #include <linux/config.h>
39 #include <linux/module.h>
40 #include <linux/types.h>
41 #include <linux/errno.h>
42 #include <linux/ioport.h>
43 #include <linux/pci.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/kernel.h>
46 #include <linux/netdevice.h>
47 #include <linux/etherdevice.h>
48 #include <linux/skbuff.h>
49 #include <linux/init.h>
50 #include <linux/delay.h>
51 #include <linux/stddef.h>
52 #include <linux/ioctl.h>
53 #include <linux/timex.h>
54 #include <linux/sched.h>
55 #include <linux/ethtool.h>
56 #include <linux/version.h>
57 #include <linux/workqueue.h>
59 #include <asm/system.h>
60 #include <asm/uaccess.h>
65 #include "s2io-regs.h"
67 /* S2io Driver name & version. */
68 static char s2io_driver_name[] = "Neterion";
69 static char s2io_driver_version[] = "Version 1.7.7";
/*
 * RXD_IS_UP2DT - non-zero when the adapter no longer owns the RxD
 * (RXD_OWN_XENA clear in Control_1) and the marker field extracted from
 * Control_2 differs from THE_RXD_MARK.
 * NOTE(review): this listing drops lines — the function's braces and
 * return statement are not visible here.
 */
71 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
75 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
76 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
82 * Cards with following subsystem_id have a link state indication
83 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
84 * macro below identifies these cards given the subsystem_id.
86 #define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
87 (((subid >= 0x600B) && (subid <= 0x600D)) || \
88 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0
/* Link is considered up when neither the remote-fault nor the
 * local-fault bit is set in the adapter-status value. */
90 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
91 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* Atomically claim the Rx tasklet; a non-zero result means it was
 * already in use (relies on `sp` being in scope at the expansion site). */
92 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/*
 * rx_buffer_level - classify how depleted ring `ring`'s Rx buffers are,
 * given `rxb_size`, the current count used in the comparison below.
 * NOTE(review): listing gaps — the values returned for each fill level
 * are not visible here; confirm against the full source.
 */
95 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
98 mac_info_t *mac_control;
100 mac_control = &sp->mac_control;
101 if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
103 if (rxb_size <= MAX_RXDS_PER_BLOCK) {
111 /* Ethtool related variables and Macros. */
/* Names of the ethtool self-tests (reported via get_strings).
 * NOTE(review): the closing "};" lines of both arrays are among the
 * lines missing from this listing. */
112 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
113 "Register test\t(offline)",
114 "Eeprom test\t(offline)",
115 "Link test\t(online)",
116 "RLDRAM test\t(offline)",
117 "BIST Test\t(offline)"
/* Key strings for the ethtool statistics dump; order must match the
 * order in which the counter values are reported. */
120 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
122 {"tmac_data_octets"},
126 {"tmac_pause_ctrl_frms"},
127 {"tmac_any_err_frms"},
128 {"tmac_vld_ip_octets"},
136 {"rmac_data_octets"},
137 {"rmac_fcs_err_frms"},
139 {"rmac_vld_mcst_frms"},
140 {"rmac_vld_bcst_frms"},
141 {"rmac_in_rng_len_err_frms"},
143 {"rmac_pause_ctrl_frms"},
144 {"rmac_discarded_frms"},
145 {"rmac_usized_frms"},
146 {"rmac_osized_frms"},
148 {"rmac_jabber_frms"},
156 {"rmac_err_drp_udp"},
158 {"rmac_accepted_ip"},
160 {"\n DRIVER STATISTICS"},
161 {"single_bit_ecc_errs"},
162 {"double_bit_ecc_errs"},
/* Number of entries in the ethtool statistics key table. */
#define S2IO_STAT_LEN (sizeof(ethtool_stats_keys) / ETH_GSTRING_LEN)
/* Total byte length of all statistics key strings. */
#define S2IO_STAT_STRINGS_LEN (S2IO_STAT_LEN * ETH_GSTRING_LEN)
/* Number of self-test names in s2io_gstrings. */
#define S2IO_TEST_LEN (sizeof(s2io_gstrings) / ETH_GSTRING_LEN)
/* Total byte length of all self-test name strings.
 * All four macros are fully parenthesized so they expand safely inside
 * larger expressions (the originals were bare `a / b` and `a * b`). */
#define S2IO_STRINGS_LEN (S2IO_TEST_LEN * ETH_GSTRING_LEN)
/*
 * S2IO_TIMER_CONF - initialize `timer` to invoke `handle(arg)` and arm
 * it to fire `exp` jiffies from now.
 * NOTE(review): this multi-statement macro is not wrapped in
 * do { } while (0), so it is unsafe inside an unbraced if/else; the
 * line following the trailing backslash is missing from this listing,
 * so confirm the full expansion before changing it.
 */
171 #define S2IO_TIMER_CONF(timer, handle, arg, exp) \
172 init_timer(&timer); \
173 timer.function = handle; \
174 timer.data = (unsigned long) arg; \
175 mod_timer(&timer, (jiffies + exp)) \
178 * Constants to be programmed into the Xena's registers, to configure
/* Sentinel value placed in the config arrays below to switch writing
 * from the dtx_control register to the mdio_control register (see the
 * programming loop in init_nic). */
182 #define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
/* MDIO configuration write sequence.
 * NOTE(review): array terminators (END_SIGN entries and closing "};")
 * are among the lines missing from this listing. */
185 static u64 default_mdio_cfg[] = {
187 0xC001010000000000ULL, 0xC0010100000000E0ULL,
188 0xC0010100008000E4ULL,
189 /* Remove Reset from PMA PLL */
190 0xC001010000000000ULL, 0xC0010100000000E0ULL,
191 0xC0010100000000E4ULL,
/* DTX (XAUI serdes) configuration write sequence; values are written to
 * dtx_control one after another by init_nic. */
195 static u64 default_dtx_cfg[] = {
196 0x8000051500000000ULL, 0x80000515000000E0ULL,
197 0x80000515D93500E4ULL, 0x8001051500000000ULL,
198 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
199 0x8002051500000000ULL, 0x80020515000000E0ULL,
200 0x80020515F21000E4ULL,
201 /* Set PADLOOPBACKN */
202 0x8002051500000000ULL, 0x80020515000000E0ULL,
203 0x80020515B20000E4ULL, 0x8003051500000000ULL,
204 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
205 0x8004051500000000ULL, 0x80040515000000E0ULL,
206 0x80040515B20000E4ULL, 0x8005051500000000ULL,
207 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
209 /* Remove PADLOOPBACKN */
210 0x8002051500000000ULL, 0x80020515000000E0ULL,
211 0x80020515F20000E4ULL, 0x8003051500000000ULL,
212 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
213 0x8004051500000000ULL, 0x80040515000000E0ULL,
214 0x80040515F20000E4ULL, 0x8005051500000000ULL,
215 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
220 * Constants for Fixing the MacAddress problem seen mostly on
/* Register write sequence used to work around the MAC-address problem
 * referenced in the (partially missing) comment above. */
223 static u64 fix_mac[] = {
224 0x0060000000000000ULL, 0x0060600000000000ULL,
225 0x0040600000000000ULL, 0x0000600000000000ULL,
226 0x0020600000000000ULL, 0x0060600000000000ULL,
227 0x0020600000000000ULL, 0x0060600000000000ULL,
228 0x0020600000000000ULL, 0x0060600000000000ULL,
229 0x0020600000000000ULL, 0x0060600000000000ULL,
230 0x0020600000000000ULL, 0x0060600000000000ULL,
231 0x0020600000000000ULL, 0x0060600000000000ULL,
232 0x0020600000000000ULL, 0x0060600000000000ULL,
233 0x0020600000000000ULL, 0x0060600000000000ULL,
234 0x0020600000000000ULL, 0x0060600000000000ULL,
235 0x0020600000000000ULL, 0x0060600000000000ULL,
236 0x0020600000000000ULL, 0x0000600000000000ULL,
237 0x0040600000000000ULL, 0x0060600000000000ULL,
241 /* Module Loadable parameters. */
/* Number of Tx FIFOs used (default 1). */
242 static unsigned int tx_fifo_num = 1;
/* Per-FIFO Tx descriptor counts; 0 means "use the driver default". */
243 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
244 {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
/* Number of Rx rings used (default 1). */
245 static unsigned int rx_ring_num = 1;
/* Per-ring Rx descriptor counts; 0 means "use the driver default". */
246 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
247 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
/* Per-ring frame-length steering values; 0 disables steering for that
 * ring (see the rts_frm_len programming in init_nic). */
248 static unsigned int rts_frm_len[MAX_RX_RINGS] =
249 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
/* Non-zero enables continuous Tx interrupts (TTI timer CI bit). */
250 static unsigned int use_continuous_tx_intrs = 1;
/* Pause-time value inserted into generated pause frames. */
251 static unsigned int rmac_pause_time = 65535;
/* Queue-fill thresholds (out of 256) above which pause frames are
 * generated, for queues 0-3 and 4-7 respectively. */
252 static unsigned int mc_pause_threshold_q0q3 = 187;
253 static unsigned int mc_pause_threshold_q4q7 = 187;
/* TxDMA read-split limit programmed into pic_control (default 0). */
254 static unsigned int shared_splits;
/* Sampling periods for the Tx/Rx link-utilization counters. */
255 static unsigned int tmac_util_period = 5;
256 static unsigned int rmac_util_period = 5;
257 #ifndef CONFIG_S2IO_NAPI
/* Max packets processed per Rx interrupt when NAPI is disabled
 * (0 = unlimited). */
258 static unsigned int indicate_max_pkts;
263 * This table lists all the devices that this driver supports.
/* PCI IDs claimed by this driver: S2IO/Neterion Xena (WIN/UNI) and
 * Herc (WIN/UNI) adapters, any subsystem IDs.
 * NOTE(review): the terminating {0,} entry is among the lines missing
 * from this listing. */
265 static struct pci_device_id s2io_tbl[] __devinitdata = {
266 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
267 PCI_ANY_ID, PCI_ANY_ID},
268 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
269 PCI_ANY_ID, PCI_ANY_ID},
270 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
271 PCI_ANY_ID, PCI_ANY_ID},
272 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
273 PCI_ANY_ID, PCI_ANY_ID},
277 MODULE_DEVICE_TABLE(pci, s2io_tbl);
/* PCI driver registration: probe/remove entry points for the devices
 * listed in s2io_tbl. */
279 static struct pci_driver s2io_driver = {
281 .id_table = s2io_tbl,
282 .probe = s2io_init_nic,
283 .remove = __devexit_p(s2io_rem_nic),
/* A simplifier macro used both by init and free shared_mem Fns(). */
/* Ceiling division: number of pages needed to hold `len` items at
 * `per_each` items per page. Arguments are fully parenthesized so the
 * macro stays correct when called with expression arguments (the
 * original `len+per_each - 1` / bare `per_each` mis-associated for
 * inputs like TXD_MEM_PAGE_CNT(a, b + c)). */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
290 * init_shared_mem - Allocation and Initialization of Memory
291 * @nic: Device private variable.
292 * Description: The function allocates all the memory areas shared
293 * between the NIC and the driver. This includes Tx descriptors,
294 * Rx descriptors and the statistics block.
/*
 * NOTE(review): this listing drops many original lines (closing braces,
 * error returns, kmalloc flags, etc. are not visible). Comments below
 * describe only what the visible code shows; do not rewrite this
 * function from the listing alone.
 */
297 static int init_shared_mem(struct s2io_nic *nic)
300 void *tmp_v_addr, *tmp_v_addr_next;
301 dma_addr_t tmp_p_addr, tmp_p_addr_next;
302 RxD_block_t *pre_rxd_blk = NULL;
303 int i, j, blk_cnt, rx_sz, tx_sz;
304 int lst_size, lst_per_page;
305 struct net_device *dev = nic->dev;
306 #ifdef CONFIG_2BUFF_MODE
311 mac_info_t *mac_control;
312 struct config_param *config;
314 mac_control = &nic->mac_control;
315 config = &nic->config;
318 /* Allocation and initialization of TXDLs in FIFOs */
/* Sum the requested descriptors across all Tx FIFOs and reject
 * configurations exceeding the hardware limit. */
320 for (i = 0; i < config->tx_fifo_num; i++) {
321 size += config->tx_cfg[i].fifo_len;
323 if (size > MAX_AVAILABLE_TXDS) {
324 DBG_PRINT(ERR_DBG, "%s: Total number of Tx FIFOs ",
326 DBG_PRINT(ERR_DBG, "exceeds the maximum value ");
327 DBG_PRINT(ERR_DBG, "that can be used\n");
/* One TxD list is max_txds descriptors; lst_per_page lists fit in a
 * page (assumes lst_size <= PAGE_SIZE). */
331 lst_size = (sizeof(TxD_t) * config->max_txds);
332 tx_sz = lst_size * size;
333 lst_per_page = PAGE_SIZE / lst_size;
/* Allocate and zero the per-FIFO list_info bookkeeping arrays. */
335 for (i = 0; i < config->tx_fifo_num; i++) {
336 int fifo_len = config->tx_cfg[i].fifo_len;
337 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
338 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
340 if (!mac_control->fifos[i].list_info) {
342 "Malloc failed for list_info\n");
345 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
/* Initialize per-FIFO put/get state and carve page-sized DMA regions
 * into TxD lists, recording each list's virtual/physical address. */
347 for (i = 0; i < config->tx_fifo_num; i++) {
348 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
350 mac_control->fifos[i].tx_curr_put_info.offset = 0;
351 mac_control->fifos[i].tx_curr_put_info.fifo_len =
352 config->tx_cfg[i].fifo_len - 1;
353 mac_control->fifos[i].tx_curr_get_info.offset = 0;
354 mac_control->fifos[i].tx_curr_get_info.fifo_len =
355 config->tx_cfg[i].fifo_len - 1;
356 mac_control->fifos[i].fifo_no = i;
357 mac_control->fifos[i].nic = nic;
358 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS;
360 for (j = 0; j < page_num; j++) {
364 tmp_v = pci_alloc_consistent(nic->pdev,
368 "pci_alloc_consistent ");
369 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
372 while (k < lst_per_page) {
373 int l = (j * lst_per_page) + k;
374 if (l == config->tx_cfg[i].fifo_len)
376 mac_control->fifos[i].list_info[l].list_virt_addr =
377 tmp_v + (k * lst_size);
378 mac_control->fifos[i].list_info[l].list_phy_addr =
379 tmp_p + (k * lst_size);
385 /* Allocation and initialization of RXDs in Rings */
/* Each Rx block holds MAX_RXDS_PER_BLOCK usable RxDs plus one link
 * RxD; ring sizes must be an exact multiple of the block size. */
387 for (i = 0; i < config->rx_ring_num; i++) {
388 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
389 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
390 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
392 DBG_PRINT(ERR_DBG, "RxDs per Block");
395 size += config->rx_cfg[i].num_rxd;
396 mac_control->rings[i].block_count =
397 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
/* pkt_cnt excludes the one link RxD per block. */
398 mac_control->rings[i].pkt_cnt =
399 config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count;
401 size = (size * (sizeof(RxD_t)));
/* Per-ring put/get state, then allocate and zero each Rx block. */
404 for (i = 0; i < config->rx_ring_num; i++) {
405 mac_control->rings[i].rx_curr_get_info.block_index = 0;
406 mac_control->rings[i].rx_curr_get_info.offset = 0;
407 mac_control->rings[i].rx_curr_get_info.ring_len =
408 config->rx_cfg[i].num_rxd - 1;
409 mac_control->rings[i].rx_curr_put_info.block_index = 0;
410 mac_control->rings[i].rx_curr_put_info.offset = 0;
411 mac_control->rings[i].rx_curr_put_info.ring_len =
412 config->rx_cfg[i].num_rxd - 1;
413 mac_control->rings[i].nic = nic;
414 mac_control->rings[i].ring_no = i;
417 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
418 /* Allocating all the Rx blocks */
419 for (j = 0; j < blk_cnt; j++) {
420 #ifndef CONFIG_2BUFF_MODE
421 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
423 size = SIZE_OF_BLOCK;
425 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
427 if (tmp_v_addr == NULL) {
429 * In case of failure, free_shared_mem()
430 * is called, which should free any
431 * memory that was alloced till the
434 mac_control->rings[i].rx_blocks[j].block_virt_addr =
438 memset(tmp_v_addr, 0, size);
439 mac_control->rings[i].rx_blocks[j].block_virt_addr =
441 mac_control->rings[i].rx_blocks[j].block_dma_addr =
444 /* Interlinking all Rx Blocks */
/* Link each block to the next, wrapping the last block back to the
 * first ((j + 1) % blk_cnt), forming a circular chain. */
445 for (j = 0; j < blk_cnt; j++) {
447 mac_control->rings[i].rx_blocks[j].block_virt_addr;
449 mac_control->rings[i].rx_blocks[(j + 1) %
450 blk_cnt].block_virt_addr;
452 mac_control->rings[i].rx_blocks[j].block_dma_addr;
454 mac_control->rings[i].rx_blocks[(j + 1) %
455 blk_cnt].block_dma_addr;
457 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
458 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
461 #ifndef CONFIG_2BUFF_MODE
462 pre_rxd_blk->reserved_2_pNext_RxD_block =
463 (unsigned long) tmp_v_addr_next;
465 pre_rxd_blk->pNext_RxD_Blk_physical =
466 (u64) tmp_p_addr_next;
470 #ifdef CONFIG_2BUFF_MODE
472 * Allocation of Storages for buffer addresses in 2BUFF mode
473 * and the buffers as well.
475 for (i = 0; i < config->rx_ring_num; i++) {
477 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
478 mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
480 if (!mac_control->rings[i].ba)
482 for (j = 0; j < blk_cnt; j++) {
484 mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) *
485 (MAX_RXDS_PER_BLOCK + 1)),
487 if (!mac_control->rings[i].ba[j])
489 while (k != MAX_RXDS_PER_BLOCK) {
490 ba = &mac_control->rings[i].ba[j][k];
/* Over-allocate by ALIGN_SIZE, then mask the address down so ba_0/ba_1
 * are aligned copies of the _org pointers (the _org pointer is what
 * must be passed to kfree). */
492 ba->ba_0_org = (void *) kmalloc
493 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
496 tmp = (u64) ba->ba_0_org;
498 tmp &= ~((u64) ALIGN_SIZE);
499 ba->ba_0 = (void *) tmp;
501 ba->ba_1_org = (void *) kmalloc
502 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
505 tmp = (u64) ba->ba_1_org;
507 tmp &= ~((u64) ALIGN_SIZE);
508 ba->ba_1 = (void *) tmp;
515 /* Allocation and initialization of Statistics block */
516 size = sizeof(StatInfo_t);
517 mac_control->stats_mem = pci_alloc_consistent
518 (nic->pdev, size, &mac_control->stats_mem_phy);
520 if (!mac_control->stats_mem) {
522 * In case of failure, free_shared_mem() is called, which
523 * should free any memory that was alloced till the
528 mac_control->stats_mem_sz = size;
530 tmp_v_addr = mac_control->stats_mem;
531 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
532 memset(tmp_v_addr, 0, size);
533 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
534 (unsigned long long) tmp_p_addr);
540 * free_shared_mem - Free the allocated Memory
541 * @nic: Device private variable.
542 * Description: This function is to free all memory locations allocated by
543 * the init_shared_mem() function and return it to the kernel.
/*
 * NOTE(review): this listing drops many original lines; comments below
 * describe only the visible code. Frees everything allocated by
 * init_shared_mem in reverse category order: TxD list pages, per-FIFO
 * list_info, Rx blocks, 2BUFF storage, statistics block.
 */
546 static void free_shared_mem(struct s2io_nic *nic)
548 int i, j, blk_cnt, size;
550 dma_addr_t tmp_p_addr;
551 mac_info_t *mac_control;
552 struct config_param *config;
553 int lst_size, lst_per_page;
559 mac_control = &nic->mac_control;
560 config = &nic->config;
/* Must mirror the list sizing computed in init_shared_mem. */
562 lst_size = (sizeof(TxD_t) * config->max_txds);
563 lst_per_page = PAGE_SIZE / lst_size;
/* Free each page-sized TxD DMA region; a NULL list_info entry marks a
 * page that was never allocated. */
565 for (i = 0; i < config->tx_fifo_num; i++) {
566 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
568 for (j = 0; j < page_num; j++) {
569 int mem_blks = (j * lst_per_page);
570 if (!mac_control->fifos[i].list_info[mem_blks].
573 pci_free_consistent(nic->pdev, PAGE_SIZE,
574 mac_control->fifos[i].
577 mac_control->fifos[i].
581 kfree(mac_control->fifos[i].list_info);
584 #ifndef CONFIG_2BUFF_MODE
585 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
587 size = SIZE_OF_BLOCK;
/* Free each Rx block's DMA region, skipping blocks whose allocation
 * failed (NULL virt addr). */
589 for (i = 0; i < config->rx_ring_num; i++) {
590 blk_cnt = mac_control->rings[i].block_count;
591 for (j = 0; j < blk_cnt; j++) {
592 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
594 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
596 if (tmp_v_addr == NULL)
598 pci_free_consistent(nic->pdev, size,
599 tmp_v_addr, tmp_p_addr);
603 #ifdef CONFIG_2BUFF_MODE
604 /* Freeing buffer storage addresses in 2BUFF mode. */
/* Free via the _org pointers (the aligned ba_0/ba_1 pointers are
 * derived and must not be passed to kfree). */
605 for (i = 0; i < config->rx_ring_num; i++) {
607 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
608 for (j = 0; j < blk_cnt; j++) {
610 if (!mac_control->rings[i].ba[j])
612 while (k != MAX_RXDS_PER_BLOCK) {
613 buffAdd_t *ba = &mac_control->rings[i].ba[j][k];
618 kfree(mac_control->rings[i].ba[j]);
/* NOTE(review): the NULL guard is redundant — kfree(NULL) is a no-op. */
620 if (mac_control->rings[i].ba)
621 kfree(mac_control->rings[i].ba);
625 if (mac_control->stats_mem) {
626 pci_free_consistent(nic->pdev,
627 mac_control->stats_mem_sz,
628 mac_control->stats_mem,
629 mac_control->stats_mem_phy);
634 * init_nic - Initialization of hardware
635 * @nic: device private variable
636 * Description: The function sequentially configures every block
637 * of the H/W from their reset values.
638 * Return Value: SUCCESS on success and
639 * '-1' on failure (endian settings incorrect).
642 static int init_nic(struct s2io_nic *nic)
644 XENA_dev_config_t __iomem *bar0 = nic->bar0;
645 struct net_device *dev = nic->dev;
646 register u64 val64 = 0;
650 mac_info_t *mac_control;
651 struct config_param *config;
652 int mdio_cnt = 0, dtx_cnt = 0;
653 unsigned long long mem_share;
656 mac_control = &nic->mac_control;
657 config = &nic->config;
659 /* to set the swapper control on the card */
660 if(s2io_set_swapper(nic)) {
661 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
665 /* Remove XGXS from reset state */
667 writeq(val64, &bar0->sw_reset);
669 val64 = readq(&bar0->sw_reset);
671 /* Enable Receiving broadcasts */
672 add = &bar0->mac_cfg;
673 val64 = readq(&bar0->mac_cfg);
674 val64 |= MAC_RMAC_BCAST_ENABLE;
675 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
676 writel((u32) val64, add);
677 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
678 writel((u32) (val64 >> 32), (add + 4));
680 /* Read registers in all blocks */
681 val64 = readq(&bar0->mac_int_mask);
682 val64 = readq(&bar0->mc_int_mask);
683 val64 = readq(&bar0->xgxs_int_mask);
687 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
690 * Configuring the XAUI Interface of Xena.
691 * ***************************************
692 * To Configure the Xena's XAUI, one has to write a series
693 * of 64 bit values into two registers in a particular
694 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
695 * which will be defined in the array of configuration values
696 * (default_dtx_cfg & default_mdio_cfg) at appropriate places
697 * to switch writing from one register to another. We continue
698 * writing these values until we encounter the 'END_SIGN' macro.
699 * For example, After making a series of 21 writes into
700 * dtx_control register the 'SWITCH_SIGN' appears and hence we
701 * start writing into mdio_control until we encounter END_SIGN.
705 while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
706 if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
710 SPECIAL_REG_WRITE(default_dtx_cfg[dtx_cnt],
711 &bar0->dtx_control, UF);
712 val64 = readq(&bar0->dtx_control);
716 while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
717 if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
721 SPECIAL_REG_WRITE(default_mdio_cfg[mdio_cnt],
722 &bar0->mdio_control, UF);
723 val64 = readq(&bar0->mdio_control);
726 if ((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
727 (default_mdio_cfg[mdio_cnt] == END_SIGN)) {
734 /* Tx DMA Initialization */
736 writeq(val64, &bar0->tx_fifo_partition_0);
737 writeq(val64, &bar0->tx_fifo_partition_1);
738 writeq(val64, &bar0->tx_fifo_partition_2);
739 writeq(val64, &bar0->tx_fifo_partition_3);
742 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
744 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
745 13) | vBIT(config->tx_cfg[i].fifo_priority,
748 if (i == (config->tx_fifo_num - 1)) {
755 writeq(val64, &bar0->tx_fifo_partition_0);
759 writeq(val64, &bar0->tx_fifo_partition_1);
763 writeq(val64, &bar0->tx_fifo_partition_2);
767 writeq(val64, &bar0->tx_fifo_partition_3);
772 /* Enable Tx FIFO partition 0. */
773 val64 = readq(&bar0->tx_fifo_partition_0);
774 val64 |= BIT(0); /* To enable the FIFO partition. */
775 writeq(val64, &bar0->tx_fifo_partition_0);
778 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
779 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
781 if (get_xena_rev_id(nic->pdev) < 4)
782 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
784 val64 = readq(&bar0->tx_fifo_partition_0);
785 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
786 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
789 * Initialization of Tx_PA_CONFIG register to ignore packet
790 * integrity checking.
792 val64 = readq(&bar0->tx_pa_cfg);
793 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
794 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
795 writeq(val64, &bar0->tx_pa_cfg);
797 /* Rx DMA initialization. */
799 for (i = 0; i < config->rx_ring_num; i++) {
801 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
804 writeq(val64, &bar0->rx_queue_priority);
807 * Allocating equal share of memory to all the
812 for (i = 0; i < config->rx_ring_num; i++) {
815 mem_share = (mem_size / config->rx_ring_num +
816 mem_size % config->rx_ring_num);
817 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
820 mem_share = (mem_size / config->rx_ring_num);
821 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
824 mem_share = (mem_size / config->rx_ring_num);
825 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
828 mem_share = (mem_size / config->rx_ring_num);
829 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
832 mem_share = (mem_size / config->rx_ring_num);
833 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
836 mem_share = (mem_size / config->rx_ring_num);
837 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
840 mem_share = (mem_size / config->rx_ring_num);
841 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
844 mem_share = (mem_size / config->rx_ring_num);
845 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
849 writeq(val64, &bar0->rx_queue_cfg);
852 * Filling Tx round robin registers
853 * as per the number of FIFOs
855 switch (config->tx_fifo_num) {
857 val64 = 0x0000000000000000ULL;
858 writeq(val64, &bar0->tx_w_round_robin_0);
859 writeq(val64, &bar0->tx_w_round_robin_1);
860 writeq(val64, &bar0->tx_w_round_robin_2);
861 writeq(val64, &bar0->tx_w_round_robin_3);
862 writeq(val64, &bar0->tx_w_round_robin_4);
865 val64 = 0x0000010000010000ULL;
866 writeq(val64, &bar0->tx_w_round_robin_0);
867 val64 = 0x0100000100000100ULL;
868 writeq(val64, &bar0->tx_w_round_robin_1);
869 val64 = 0x0001000001000001ULL;
870 writeq(val64, &bar0->tx_w_round_robin_2);
871 val64 = 0x0000010000010000ULL;
872 writeq(val64, &bar0->tx_w_round_robin_3);
873 val64 = 0x0100000000000000ULL;
874 writeq(val64, &bar0->tx_w_round_robin_4);
877 val64 = 0x0001000102000001ULL;
878 writeq(val64, &bar0->tx_w_round_robin_0);
879 val64 = 0x0001020000010001ULL;
880 writeq(val64, &bar0->tx_w_round_robin_1);
881 val64 = 0x0200000100010200ULL;
882 writeq(val64, &bar0->tx_w_round_robin_2);
883 val64 = 0x0001000102000001ULL;
884 writeq(val64, &bar0->tx_w_round_robin_3);
885 val64 = 0x0001020000000000ULL;
886 writeq(val64, &bar0->tx_w_round_robin_4);
889 val64 = 0x0001020300010200ULL;
890 writeq(val64, &bar0->tx_w_round_robin_0);
891 val64 = 0x0100000102030001ULL;
892 writeq(val64, &bar0->tx_w_round_robin_1);
893 val64 = 0x0200010000010203ULL;
894 writeq(val64, &bar0->tx_w_round_robin_2);
895 val64 = 0x0001020001000001ULL;
896 writeq(val64, &bar0->tx_w_round_robin_3);
897 val64 = 0x0203000100000000ULL;
898 writeq(val64, &bar0->tx_w_round_robin_4);
901 val64 = 0x0001000203000102ULL;
902 writeq(val64, &bar0->tx_w_round_robin_0);
903 val64 = 0x0001020001030004ULL;
904 writeq(val64, &bar0->tx_w_round_robin_1);
905 val64 = 0x0001000203000102ULL;
906 writeq(val64, &bar0->tx_w_round_robin_2);
907 val64 = 0x0001020001030004ULL;
908 writeq(val64, &bar0->tx_w_round_robin_3);
909 val64 = 0x0001000000000000ULL;
910 writeq(val64, &bar0->tx_w_round_robin_4);
913 val64 = 0x0001020304000102ULL;
914 writeq(val64, &bar0->tx_w_round_robin_0);
915 val64 = 0x0304050001020001ULL;
916 writeq(val64, &bar0->tx_w_round_robin_1);
917 val64 = 0x0203000100000102ULL;
918 writeq(val64, &bar0->tx_w_round_robin_2);
919 val64 = 0x0304000102030405ULL;
920 writeq(val64, &bar0->tx_w_round_robin_3);
921 val64 = 0x0001000200000000ULL;
922 writeq(val64, &bar0->tx_w_round_robin_4);
925 val64 = 0x0001020001020300ULL;
926 writeq(val64, &bar0->tx_w_round_robin_0);
927 val64 = 0x0102030400010203ULL;
928 writeq(val64, &bar0->tx_w_round_robin_1);
929 val64 = 0x0405060001020001ULL;
930 writeq(val64, &bar0->tx_w_round_robin_2);
931 val64 = 0x0304050000010200ULL;
932 writeq(val64, &bar0->tx_w_round_robin_3);
933 val64 = 0x0102030000000000ULL;
934 writeq(val64, &bar0->tx_w_round_robin_4);
937 val64 = 0x0001020300040105ULL;
938 writeq(val64, &bar0->tx_w_round_robin_0);
939 val64 = 0x0200030106000204ULL;
940 writeq(val64, &bar0->tx_w_round_robin_1);
941 val64 = 0x0103000502010007ULL;
942 writeq(val64, &bar0->tx_w_round_robin_2);
943 val64 = 0x0304010002060500ULL;
944 writeq(val64, &bar0->tx_w_round_robin_3);
945 val64 = 0x0103020400000000ULL;
946 writeq(val64, &bar0->tx_w_round_robin_4);
950 /* Filling the Rx round robin registers as per the
951 * number of Rings and steering based on QoS.
953 switch (config->rx_ring_num) {
955 val64 = 0x8080808080808080ULL;
956 writeq(val64, &bar0->rts_qos_steering);
959 val64 = 0x0000010000010000ULL;
960 writeq(val64, &bar0->rx_w_round_robin_0);
961 val64 = 0x0100000100000100ULL;
962 writeq(val64, &bar0->rx_w_round_robin_1);
963 val64 = 0x0001000001000001ULL;
964 writeq(val64, &bar0->rx_w_round_robin_2);
965 val64 = 0x0000010000010000ULL;
966 writeq(val64, &bar0->rx_w_round_robin_3);
967 val64 = 0x0100000000000000ULL;
968 writeq(val64, &bar0->rx_w_round_robin_4);
970 val64 = 0x8080808040404040ULL;
971 writeq(val64, &bar0->rts_qos_steering);
974 val64 = 0x0001000102000001ULL;
975 writeq(val64, &bar0->rx_w_round_robin_0);
976 val64 = 0x0001020000010001ULL;
977 writeq(val64, &bar0->rx_w_round_robin_1);
978 val64 = 0x0200000100010200ULL;
979 writeq(val64, &bar0->rx_w_round_robin_2);
980 val64 = 0x0001000102000001ULL;
981 writeq(val64, &bar0->rx_w_round_robin_3);
982 val64 = 0x0001020000000000ULL;
983 writeq(val64, &bar0->rx_w_round_robin_4);
985 val64 = 0x8080804040402020ULL;
986 writeq(val64, &bar0->rts_qos_steering);
989 val64 = 0x0001020300010200ULL;
990 writeq(val64, &bar0->rx_w_round_robin_0);
991 val64 = 0x0100000102030001ULL;
992 writeq(val64, &bar0->rx_w_round_robin_1);
993 val64 = 0x0200010000010203ULL;
994 writeq(val64, &bar0->rx_w_round_robin_2);
995 val64 = 0x0001020001000001ULL;
996 writeq(val64, &bar0->rx_w_round_robin_3);
997 val64 = 0x0203000100000000ULL;
998 writeq(val64, &bar0->rx_w_round_robin_4);
1000 val64 = 0x8080404020201010ULL;
1001 writeq(val64, &bar0->rts_qos_steering);
1004 val64 = 0x0001000203000102ULL;
1005 writeq(val64, &bar0->rx_w_round_robin_0);
1006 val64 = 0x0001020001030004ULL;
1007 writeq(val64, &bar0->rx_w_round_robin_1);
1008 val64 = 0x0001000203000102ULL;
1009 writeq(val64, &bar0->rx_w_round_robin_2);
1010 val64 = 0x0001020001030004ULL;
1011 writeq(val64, &bar0->rx_w_round_robin_3);
1012 val64 = 0x0001000000000000ULL;
1013 writeq(val64, &bar0->rx_w_round_robin_4);
1015 val64 = 0x8080404020201008ULL;
1016 writeq(val64, &bar0->rts_qos_steering);
1019 val64 = 0x0001020304000102ULL;
1020 writeq(val64, &bar0->rx_w_round_robin_0);
1021 val64 = 0x0304050001020001ULL;
1022 writeq(val64, &bar0->rx_w_round_robin_1);
1023 val64 = 0x0203000100000102ULL;
1024 writeq(val64, &bar0->rx_w_round_robin_2);
1025 val64 = 0x0304000102030405ULL;
1026 writeq(val64, &bar0->rx_w_round_robin_3);
1027 val64 = 0x0001000200000000ULL;
1028 writeq(val64, &bar0->rx_w_round_robin_4);
1030 val64 = 0x8080404020100804ULL;
1031 writeq(val64, &bar0->rts_qos_steering);
1034 val64 = 0x0001020001020300ULL;
1035 writeq(val64, &bar0->rx_w_round_robin_0);
1036 val64 = 0x0102030400010203ULL;
1037 writeq(val64, &bar0->rx_w_round_robin_1);
1038 val64 = 0x0405060001020001ULL;
1039 writeq(val64, &bar0->rx_w_round_robin_2);
1040 val64 = 0x0304050000010200ULL;
1041 writeq(val64, &bar0->rx_w_round_robin_3);
1042 val64 = 0x0102030000000000ULL;
1043 writeq(val64, &bar0->rx_w_round_robin_4);
1045 val64 = 0x8080402010080402ULL;
1046 writeq(val64, &bar0->rts_qos_steering);
1049 val64 = 0x0001020300040105ULL;
1050 writeq(val64, &bar0->rx_w_round_robin_0);
1051 val64 = 0x0200030106000204ULL;
1052 writeq(val64, &bar0->rx_w_round_robin_1);
1053 val64 = 0x0103000502010007ULL;
1054 writeq(val64, &bar0->rx_w_round_robin_2);
1055 val64 = 0x0304010002060500ULL;
1056 writeq(val64, &bar0->rx_w_round_robin_3);
1057 val64 = 0x0103020400000000ULL;
1058 writeq(val64, &bar0->rx_w_round_robin_4);
1060 val64 = 0x8040201008040201ULL;
1061 writeq(val64, &bar0->rts_qos_steering);
1067 for (i = 0; i < 8; i++)
1068 writeq(val64, &bar0->rts_frm_len_n[i]);
1070 /* Set the default rts frame length for the rings configured */
1071 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1072 for (i = 0 ; i < config->rx_ring_num ; i++)
1073 writeq(val64, &bar0->rts_frm_len_n[i]);
1075 /* Set the frame length for the configured rings
1076 * desired by the user
1078 for (i = 0; i < config->rx_ring_num; i++) {
1079 /* If rts_frm_len[i] == 0 then it is assumed that user not
1080 * specified frame length steering.
1081 * If the user provides the frame length then program
1082 * the rts_frm_len register for those values or else
1083 * leave it as it is.
1085 if (rts_frm_len[i] != 0) {
1086 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1087 &bar0->rts_frm_len_n[i]);
1091 /* Program statistics memory */
1092 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1095 * Initializing the sampling rate for the device to calculate the
1096 * bandwidth utilization.
1098 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1099 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1100 writeq(val64, &bar0->mac_link_util);
1104 * Initializing the Transmit and Receive Traffic Interrupt
1108 * TTI Initialization. Default Tx timer gets us about
1109 * 250 interrupts per sec. Continuous interrupts are enabled
1112 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078) |
1113 TTI_DATA1_MEM_TX_URNG_A(0xA) |
1114 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1115 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1116 if (use_continuous_tx_intrs)
1117 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1118 writeq(val64, &bar0->tti_data1_mem);
1120 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1121 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1122 TTI_DATA2_MEM_TX_UFC_C(0x70) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1123 writeq(val64, &bar0->tti_data2_mem);
1125 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1126 writeq(val64, &bar0->tti_command_mem);
1129 * Once the operation completes, the Strobe bit of the command
1130 * register will be reset. We poll for this particular condition
1131 * We wait for a maximum of 500ms for the operation to complete,
1132 * if it's not complete by then we return error.
1136 val64 = readq(&bar0->tti_command_mem);
1137 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1141 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1149 /* RTI Initialization */
1150 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF) |
1151 RTI_DATA1_MEM_RX_URNG_A(0xA) |
1152 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1153 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1155 writeq(val64, &bar0->rti_data1_mem);
1157 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1158 RTI_DATA2_MEM_RX_UFC_B(0x2) |
1159 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
1160 writeq(val64, &bar0->rti_data2_mem);
1162 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD;
1163 writeq(val64, &bar0->rti_command_mem);
1166 * Once the operation completes, the Strobe bit of the
1167 * command register will be reset. We poll for this
1168 * particular condition. We wait for a maximum of 500ms
1169 * for the operation to complete, if it's not complete
1170 * by then we return error.
1174 val64 = readq(&bar0->rti_command_mem);
1175 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1179 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1188 * Initializing proper values as Pause threshold into all
1189 * the 8 Queues on Rx side.
1191 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1192 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1194 /* Disable RMAC PAD STRIPPING */
1195 add = (void *) &bar0->mac_cfg;
1196 val64 = readq(&bar0->mac_cfg);
1197 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1198 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1199 writel((u32) (val64), add);
1200 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1201 writel((u32) (val64 >> 32), (add + 4));
1202 val64 = readq(&bar0->mac_cfg);
1205 * Set the time value to be inserted in the pause frame
1206 * generated by xena.
1208 val64 = readq(&bar0->rmac_pause_cfg);
1209 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1210 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1211 writeq(val64, &bar0->rmac_pause_cfg);
1214 * Set the Threshold Limit for Generating the pause frame
1215 * If the amount of data in any Queue exceeds ratio of
1216 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1217 * pause frame is generated
1220 for (i = 0; i < 4; i++) {
1222 (((u64) 0xFF00 | nic->mac_control.
1223 mc_pause_threshold_q0q3)
1226 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1229 for (i = 0; i < 4; i++) {
1231 (((u64) 0xFF00 | nic->mac_control.
1232 mc_pause_threshold_q4q7)
1235 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1238 * TxDMA will stop Read request if the number of read split has
1239 * exceeded the limit pointed by shared_splits
1241 val64 = readq(&bar0->pic_control);
1242 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1243 writeq(val64, &bar0->pic_control);
1249 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1250 * @nic: device private variable,
1251 * @mask: A mask indicating which Intr block must be modified and,
1252 * @flag: A flag indicating whether to enable or disable the Intrs.
1253 * Description: This function will either disable or enable the interrupts
1254 * depending on the flag argument. The mask argument can be used to
1255 * enable/disable any Intr block.
1256 * Return Value: NONE.
1259 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1261 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1262 register u64 val64 = 0, temp64 = 0;
1264 /* Top level interrupt classification */
1265 /* PIC Interrupts */
1266 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1267 /* Enable PIC Intrs in the general intr mask register */
1268 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1269 if (flag == ENABLE_INTRS) {
1270 temp64 = readq(&bar0->general_int_mask);
1271 temp64 &= ~((u64) val64);
1272 writeq(temp64, &bar0->general_int_mask);
1274 * Disabled all PCIX, Flash, MDIO, IIC and GPIO
1275 * interrupts for now.
1278 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1280 * No MSI Support is available presently, so TTI and
1281 * RTI interrupts are also disabled.
1283 } else if (flag == DISABLE_INTRS) {
1285 * Disable PIC Intrs in the general
1286 * intr mask register
1288 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1289 temp64 = readq(&bar0->general_int_mask);
1291 writeq(val64, &bar0->general_int_mask);
1295 /* DMA Interrupts */
1296 /* Enabling/Disabling Tx DMA interrupts */
1297 if (mask & TX_DMA_INTR) {
1298 /* Enable TxDMA Intrs in the general intr mask register */
1299 val64 = TXDMA_INT_M;
1300 if (flag == ENABLE_INTRS) {
1301 temp64 = readq(&bar0->general_int_mask);
1302 temp64 &= ~((u64) val64);
1303 writeq(temp64, &bar0->general_int_mask);
1305 * Keep all interrupts other than PFC interrupt
1306 * and PCC interrupt disabled in DMA level.
1308 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1310 writeq(val64, &bar0->txdma_int_mask);
1312 * Enable only the MISC error 1 interrupt in PFC block
1314 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1315 writeq(val64, &bar0->pfc_err_mask);
1317 * Enable only the FB_ECC error interrupt in PCC block
1319 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1320 writeq(val64, &bar0->pcc_err_mask);
1321 } else if (flag == DISABLE_INTRS) {
1323 * Disable TxDMA Intrs in the general intr mask
1326 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1327 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1328 temp64 = readq(&bar0->general_int_mask);
1330 writeq(val64, &bar0->general_int_mask);
1334 /* Enabling/Disabling Rx DMA interrupts */
1335 if (mask & RX_DMA_INTR) {
1336 /* Enable RxDMA Intrs in the general intr mask register */
1337 val64 = RXDMA_INT_M;
1338 if (flag == ENABLE_INTRS) {
1339 temp64 = readq(&bar0->general_int_mask);
1340 temp64 &= ~((u64) val64);
1341 writeq(temp64, &bar0->general_int_mask);
1343 * All RxDMA block interrupts are disabled for now
1346 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1347 } else if (flag == DISABLE_INTRS) {
1349 * Disable RxDMA Intrs in the general intr mask
1352 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1353 temp64 = readq(&bar0->general_int_mask);
1355 writeq(val64, &bar0->general_int_mask);
1359 /* MAC Interrupts */
1360 /* Enabling/Disabling MAC interrupts */
1361 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1362 val64 = TXMAC_INT_M | RXMAC_INT_M;
1363 if (flag == ENABLE_INTRS) {
1364 temp64 = readq(&bar0->general_int_mask);
1365 temp64 &= ~((u64) val64);
1366 writeq(temp64, &bar0->general_int_mask);
1368 * All MAC block error interrupts are disabled for now
1369 * except the link status change interrupt.
1372 val64 = MAC_INT_STATUS_RMAC_INT;
1373 temp64 = readq(&bar0->mac_int_mask);
1374 temp64 &= ~((u64) val64);
1375 writeq(temp64, &bar0->mac_int_mask);
1377 val64 = readq(&bar0->mac_rmac_err_mask);
1378 val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
1379 writeq(val64, &bar0->mac_rmac_err_mask);
1380 } else if (flag == DISABLE_INTRS) {
1382 * Disable MAC Intrs in the general intr mask register
1384 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1385 writeq(DISABLE_ALL_INTRS,
1386 &bar0->mac_rmac_err_mask);
1388 temp64 = readq(&bar0->general_int_mask);
1390 writeq(val64, &bar0->general_int_mask);
1394 /* XGXS Interrupts */
1395 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1396 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1397 if (flag == ENABLE_INTRS) {
1398 temp64 = readq(&bar0->general_int_mask);
1399 temp64 &= ~((u64) val64);
1400 writeq(temp64, &bar0->general_int_mask);
1402 * All XGXS block error interrupts are disabled for now
1405 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1406 } else if (flag == DISABLE_INTRS) {
1408 * Disable MC Intrs in the general intr mask register
1410 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1411 temp64 = readq(&bar0->general_int_mask);
1413 writeq(val64, &bar0->general_int_mask);
1417 /* Memory Controller(MC) interrupts */
1418 if (mask & MC_INTR) {
1420 if (flag == ENABLE_INTRS) {
1421 temp64 = readq(&bar0->general_int_mask);
1422 temp64 &= ~((u64) val64);
1423 writeq(temp64, &bar0->general_int_mask);
1425 * Enable all MC Intrs.
1427 writeq(0x0, &bar0->mc_int_mask);
1428 writeq(0x0, &bar0->mc_err_mask);
1429 } else if (flag == DISABLE_INTRS) {
1431 * Disable MC Intrs in the general intr mask register
1433 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1434 temp64 = readq(&bar0->general_int_mask);
1436 writeq(val64, &bar0->general_int_mask);
1441 /* Tx traffic interrupts */
1442 if (mask & TX_TRAFFIC_INTR) {
1443 val64 = TXTRAFFIC_INT_M;
1444 if (flag == ENABLE_INTRS) {
1445 temp64 = readq(&bar0->general_int_mask);
1446 temp64 &= ~((u64) val64);
1447 writeq(temp64, &bar0->general_int_mask);
1449 * Enable all the Tx side interrupts
1450 * writing 0 Enables all 64 TX interrupt levels
1452 writeq(0x0, &bar0->tx_traffic_mask);
1453 } else if (flag == DISABLE_INTRS) {
1455 * Disable Tx Traffic Intrs in the general intr mask
1458 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1459 temp64 = readq(&bar0->general_int_mask);
1461 writeq(val64, &bar0->general_int_mask);
1465 /* Rx traffic interrupts */
1466 if (mask & RX_TRAFFIC_INTR) {
1467 val64 = RXTRAFFIC_INT_M;
1468 if (flag == ENABLE_INTRS) {
1469 temp64 = readq(&bar0->general_int_mask);
1470 temp64 &= ~((u64) val64);
1471 writeq(temp64, &bar0->general_int_mask);
1472 /* writing 0 Enables all 8 RX interrupt levels */
1473 writeq(0x0, &bar0->rx_traffic_mask);
1474 } else if (flag == DISABLE_INTRS) {
1476 * Disable Rx Traffic Intrs in the general intr mask
1479 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1480 temp64 = readq(&bar0->general_int_mask);
1482 writeq(val64, &bar0->general_int_mask);
1487 static int check_prc_pcc_state(u64 val64, int flag, int rev_id)
1491 if (flag == FALSE) {
1493 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1494 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1495 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1499 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1500 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1501 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1507 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1508 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1509 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1510 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1511 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1515 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1516 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) &&
1517 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1518 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1519 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1528 * verify_xena_quiescence - Checks whether the H/W is ready
1529 * @val64 : Value read from adapter status register.
1530 * @flag : indicates if the adapter enable bit was ever written once
1532 * Description: Returns whether the H/W is ready to go or not. Depending
1533 * on whether adapter enable bit was written or not the comparison
1534 * differs and the calling function passes the input argument flag to
1536 * Return: 1 if Xena is quiescent
1537 * 0 if Xena is not quiescent
1540 static int verify_xena_quiescence(nic_t *sp, u64 val64, int flag)
1543 u64 tmp64 = ~((u64) val64);
1544 int rev_id = get_xena_rev_id(sp->pdev);
1548 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1549 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1550 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1551 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1552 ADAPTER_STATUS_P_PLL_LOCK))) {
1553 ret = check_prc_pcc_state(val64, flag, rev_id);
1560 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1561 * @sp: Pointer to device specific structure
1563 * New procedure to clear mac address reading problems on Alpha platforms
1567 void fix_mac_address(nic_t * sp)
1569 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1573 while (fix_mac[i] != END_SIGN) {
1574 writeq(fix_mac[i++], &bar0->gpio_control);
1576 val64 = readq(&bar0->gpio_control);
1581 * start_nic - Turns the device on
1582 * @nic : device private variable.
1584 * This function actually turns the device on. Before this function is
1585 * called,all Registers are configured from their reset states
1586 * and shared memory is allocated but the NIC is still quiescent. On
1587 * calling this function, the device interrupts are cleared and the NIC is
1588 * literally switched on by writing into the adapter control register.
1590 * SUCCESS on success and -1 on failure.
1593 static int start_nic(struct s2io_nic *nic)
1595 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1596 struct net_device *dev = nic->dev;
1597 register u64 val64 = 0;
1600 mac_info_t *mac_control;
1601 struct config_param *config;
1603 mac_control = &nic->mac_control;
1604 config = &nic->config;
1606 /* PRC Initialization and configuration */
1607 for (i = 0; i < config->rx_ring_num; i++) {
1608 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1609 &bar0->prc_rxd0_n[i]);
1611 val64 = readq(&bar0->prc_ctrl_n[i]);
1612 #ifndef CONFIG_2BUFF_MODE
1613 val64 |= PRC_CTRL_RC_ENABLED;
1615 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1617 writeq(val64, &bar0->prc_ctrl_n[i]);
1620 #ifdef CONFIG_2BUFF_MODE
1621 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1622 val64 = readq(&bar0->rx_pa_cfg);
1623 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1624 writeq(val64, &bar0->rx_pa_cfg);
1628 * Enabling MC-RLDRAM. After enabling the device, we timeout
1629 * for around 100ms, which is approximately the time required
1630 * for the device to be ready for operation.
1632 val64 = readq(&bar0->mc_rldram_mrs);
1633 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1634 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1635 val64 = readq(&bar0->mc_rldram_mrs);
1637 msleep(100); /* Delay by around 100 ms. */
1639 /* Enabling ECC Protection. */
1640 val64 = readq(&bar0->adapter_control);
1641 val64 &= ~ADAPTER_ECC_EN;
1642 writeq(val64, &bar0->adapter_control);
1645 * Clearing any possible Link state change interrupts that
1646 * could have popped up just before Enabling the card.
1648 val64 = readq(&bar0->mac_rmac_err_reg);
1650 writeq(val64, &bar0->mac_rmac_err_reg);
1653 * Verify if the device is ready to be enabled, if so enable
1656 val64 = readq(&bar0->adapter_status);
1657 if (!verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
1658 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1659 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1660 (unsigned long long) val64);
1664 /* Enable select interrupts */
1665 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1666 RX_MAC_INTR | MC_INTR;
1667 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1670 * With some switches, link might be already up at this point.
1671 * Because of this weird behavior, when we enable laser,
1672 * we may not get link. We need to handle this. We cannot
1673 * figure out which switch is misbehaving. So we are forced to
1674 * make a global change.
1677 /* Enabling Laser. */
1678 val64 = readq(&bar0->adapter_control);
1679 val64 |= ADAPTER_EOI_TX_ON;
1680 writeq(val64, &bar0->adapter_control);
1682 /* SXE-002: Initialize link and activity LED */
1683 subid = nic->pdev->subsystem_device;
1684 if ((subid & 0xFF) >= 0x07) {
1685 val64 = readq(&bar0->gpio_control);
1686 val64 |= 0x0000800000000000ULL;
1687 writeq(val64, &bar0->gpio_control);
1688 val64 = 0x0411040400000000ULL;
1689 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
1693 * Don't see link state interrupts on certain switches, so
1694 * directly scheduling a link state task from here.
1696 schedule_work(&nic->set_link_task);
1702 * free_tx_buffers - Free all queued Tx buffers
1703 * @nic : device private variable.
1705 * Free all queued Tx buffers.
1706 * Return Value: void
1709 static void free_tx_buffers(struct s2io_nic *nic)
1711 struct net_device *dev = nic->dev;
1712 struct sk_buff *skb;
1715 mac_info_t *mac_control;
1716 struct config_param *config;
1717 int cnt = 0, frg_cnt;
1719 mac_control = &nic->mac_control;
1720 config = &nic->config;
1722 for (i = 0; i < config->tx_fifo_num; i++) {
1723 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1724 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
1727 (struct sk_buff *) ((unsigned long) txdp->
1730 memset(txdp, 0, sizeof(TxD_t) *
1734 frg_cnt = skb_shinfo(skb)->nr_frags;
1735 pci_unmap_single(nic->pdev, (dma_addr_t)
1736 txdp->Buffer_Pointer,
1737 skb->len - skb->data_len,
1743 for (j = 0; j < frg_cnt; j++, txdp++) {
1745 &skb_shinfo(skb)->frags[j];
1746 pci_unmap_page(nic->pdev,
1756 memset(txdp, 0, sizeof(TxD_t) * config->max_txds);
1760 "%s:forcibly freeing %d skbs on FIFO%d\n",
1762 mac_control->fifos[i].tx_curr_get_info.offset = 0;
1763 mac_control->fifos[i].tx_curr_put_info.offset = 0;
1768 * stop_nic - To stop the nic
1769 * @nic : device private variable.
1771 * This function does exactly the opposite of what the start_nic()
1772 * function does. This function is called to stop the device.
1777 static void stop_nic(struct s2io_nic *nic)
1779 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1780 register u64 val64 = 0;
1781 u16 interruptible, i;
1782 mac_info_t *mac_control;
1783 struct config_param *config;
1785 mac_control = &nic->mac_control;
1786 config = &nic->config;
1788 /* Disable all interrupts */
1789 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1790 RX_MAC_INTR | MC_INTR;
1791 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
1794 for (i = 0; i < config->rx_ring_num; i++) {
1795 val64 = readq(&bar0->prc_ctrl_n[i]);
1796 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
1797 writeq(val64, &bar0->prc_ctrl_n[i]);
1802 * fill_rx_buffers - Allocates the Rx side skbs
1803 * @nic: device private variable
1804 * @ring_no: ring number
1806 * The function allocates Rx side skbs and puts the physical
1807 * address of these buffers into the RxD buffer pointers, so that the NIC
1808 * can DMA the received frame into these locations.
1809 * The NIC supports 3 receive modes, viz
1811 * 2. three buffer and
1812 * 3. Five buffer modes.
1813 * Each mode defines how many fragments the received frame will be split
1814 * up into by the NIC. The frame is split into L3 header, L4 Header,
1815 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
1816 * is split into 3 fragments. As of now only single buffer mode is
1819 * SUCCESS on success or an appropriate -ve value on failure.
1822 int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1824 struct net_device *dev = nic->dev;
1825 struct sk_buff *skb;
1827 int off, off1, size, block_no, block_no1;
1828 int offset, offset1;
1831 mac_info_t *mac_control;
1832 struct config_param *config;
1833 #ifdef CONFIG_2BUFF_MODE
1838 dma_addr_t rxdpphys;
1840 #ifndef CONFIG_S2IO_NAPI
1841 unsigned long flags;
1844 mac_control = &nic->mac_control;
1845 config = &nic->config;
1846 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
1847 atomic_read(&nic->rx_bufs_left[ring_no]);
1848 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
1849 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
1851 while (alloc_tab < alloc_cnt) {
1852 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1854 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
1856 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
1857 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
1858 #ifndef CONFIG_2BUFF_MODE
1859 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
1860 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
1862 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
1863 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
1866 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1867 block_virt_addr + off;
1868 if ((offset == offset1) && (rxdp->Host_Control)) {
1869 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
1870 DBG_PRINT(INTR_DBG, " info equated\n");
1873 #ifndef CONFIG_2BUFF_MODE
1874 if (rxdp->Control_1 == END_OF_BLOCK) {
1875 mac_control->rings[ring_no].rx_curr_put_info.
1877 mac_control->rings[ring_no].rx_curr_put_info.
1878 block_index %= mac_control->rings[ring_no].block_count;
1879 block_no = mac_control->rings[ring_no].rx_curr_put_info.
1882 off %= (MAX_RXDS_PER_BLOCK + 1);
1883 mac_control->rings[ring_no].rx_curr_put_info.offset =
1885 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
1886 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
1889 #ifndef CONFIG_S2IO_NAPI
1890 spin_lock_irqsave(&nic->put_lock, flags);
1891 mac_control->rings[ring_no].put_pos =
1892 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
1893 spin_unlock_irqrestore(&nic->put_lock, flags);
1896 if (rxdp->Host_Control == END_OF_BLOCK) {
1897 mac_control->rings[ring_no].rx_curr_put_info.
1899 mac_control->rings[ring_no].rx_curr_put_info.block_index
1900 %= mac_control->rings[ring_no].block_count;
1901 block_no = mac_control->rings[ring_no].rx_curr_put_info
1904 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
1905 dev->name, block_no,
1906 (unsigned long long) rxdp->Control_1);
1907 mac_control->rings[ring_no].rx_curr_put_info.offset =
1909 rxdp = mac_control->rings[ring_no].rx_blocks[block_no].
1912 #ifndef CONFIG_S2IO_NAPI
1913 spin_lock_irqsave(&nic->put_lock, flags);
1914 mac_control->rings[ring_no].put_pos = (block_no *
1915 (MAX_RXDS_PER_BLOCK + 1)) + off;
1916 spin_unlock_irqrestore(&nic->put_lock, flags);
1920 #ifndef CONFIG_2BUFF_MODE
1921 if (rxdp->Control_1 & RXD_OWN_XENA)
1923 if (rxdp->Control_2 & BIT(0))
1926 mac_control->rings[ring_no].rx_curr_put_info.
1930 #ifdef CONFIG_2BUFF_MODE
1932 * RxDs Spanning cache lines will be replenished only
1933 * if the succeeding RxD is also owned by Host. It
1934 * will always be the ((8*i)+3) and ((8*i)+6)
1935 * descriptors for the 48 byte descriptor. The offending
1936 * decsriptor is of-course the 3rd descriptor.
1938 rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no].
1939 block_dma_addr + (off * sizeof(RxD_t));
1940 if (((u64) (rxdpphys)) % 128 > 80) {
1941 rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no].
1942 block_virt_addr + (off + 1);
1943 if (rxdpnext->Host_Control == END_OF_BLOCK) {
1944 nextblk = (block_no + 1) %
1945 (mac_control->rings[ring_no].block_count);
1946 rxdpnext = mac_control->rings[ring_no].rx_blocks
1947 [nextblk].block_virt_addr;
1949 if (rxdpnext->Control_2 & BIT(0))
1954 #ifndef CONFIG_2BUFF_MODE
1955 skb = dev_alloc_skb(size + NET_IP_ALIGN);
1957 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
1960 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
1961 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
1964 #ifndef CONFIG_2BUFF_MODE
1965 skb_reserve(skb, NET_IP_ALIGN);
1966 memset(rxdp, 0, sizeof(RxD_t));
1967 rxdp->Buffer0_ptr = pci_map_single
1968 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
1969 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
1970 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
1971 rxdp->Host_Control = (unsigned long) (skb);
1972 rxdp->Control_1 |= RXD_OWN_XENA;
1974 off %= (MAX_RXDS_PER_BLOCK + 1);
1975 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
1977 ba = &mac_control->rings[ring_no].ba[block_no][off];
1978 skb_reserve(skb, BUF0_LEN);
1979 tmp = ((unsigned long) skb->data & ALIGN_SIZE);
1981 skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
1983 memset(rxdp, 0, sizeof(RxD_t));
1984 rxdp->Buffer2_ptr = pci_map_single
1985 (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
1986 PCI_DMA_FROMDEVICE);
1988 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
1989 PCI_DMA_FROMDEVICE);
1991 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
1992 PCI_DMA_FROMDEVICE);
1994 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
1995 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
1996 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
1997 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
1998 rxdp->Host_Control = (u64) ((unsigned long) (skb));
1999 rxdp->Control_1 |= RXD_OWN_XENA;
2001 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2003 rxdp->Control_2 |= SET_RXD_MARKER;
2005 atomic_inc(&nic->rx_bufs_left[ring_no]);
2014 * free_rx_buffers - Frees all Rx buffers
2015 * @sp: device private variable.
2017 * This function will free all Rx buffers allocated by host.
2022 static void free_rx_buffers(struct s2io_nic *sp)
2024 struct net_device *dev = sp->dev;
2025 int i, j, blk = 0, off, buf_cnt = 0;
2027 struct sk_buff *skb;
2028 mac_info_t *mac_control;
2029 struct config_param *config;
2030 #ifdef CONFIG_2BUFF_MODE
2034 mac_control = &sp->mac_control;
2035 config = &sp->config;
2037 for (i = 0; i < config->rx_ring_num; i++) {
2038 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
2039 off = j % (MAX_RXDS_PER_BLOCK + 1);
2040 rxdp = mac_control->rings[i].rx_blocks[blk].
2041 block_virt_addr + off;
2043 #ifndef CONFIG_2BUFF_MODE
2044 if (rxdp->Control_1 == END_OF_BLOCK) {
2046 (RxD_t *) ((unsigned long) rxdp->
2052 if (rxdp->Host_Control == END_OF_BLOCK) {
2058 if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
2059 memset(rxdp, 0, sizeof(RxD_t));
2064 (struct sk_buff *) ((unsigned long) rxdp->
2067 #ifndef CONFIG_2BUFF_MODE
2068 pci_unmap_single(sp->pdev, (dma_addr_t)
2071 HEADER_ETHERNET_II_802_3_SIZE
2072 + HEADER_802_2_SIZE +
2074 PCI_DMA_FROMDEVICE);
2076 ba = &mac_control->rings[i].ba[blk][off];
2077 pci_unmap_single(sp->pdev, (dma_addr_t)
2080 PCI_DMA_FROMDEVICE);
2081 pci_unmap_single(sp->pdev, (dma_addr_t)
2084 PCI_DMA_FROMDEVICE);
2085 pci_unmap_single(sp->pdev, (dma_addr_t)
2087 dev->mtu + BUF0_LEN + 4,
2088 PCI_DMA_FROMDEVICE);
2091 atomic_dec(&sp->rx_bufs_left[i]);
2094 memset(rxdp, 0, sizeof(RxD_t));
2096 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2097 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2098 mac_control->rings[i].rx_curr_put_info.offset = 0;
2099 mac_control->rings[i].rx_curr_get_info.offset = 0;
2100 atomic_set(&sp->rx_bufs_left[i], 0);
2101 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2102 dev->name, buf_cnt, i);
2107 * s2io_poll - Rx interrupt handler for NAPI support
2108 * @dev : pointer to the device structure.
2109 * @budget : The number of packets that were budgeted to be processed
2110 * during one pass through the 'Poll" function.
2112 * Comes into picture only if NAPI support has been incorporated. It does
2113 * the same thing that rx_intr_handler does, but not in an interrupt context
2114 * also It will process only a given number of packets.
2116 * 0 on success and 1 if there are No Rx packets to be processed.
2119 #if defined(CONFIG_S2IO_NAPI)
2120 static int s2io_poll(struct net_device *dev, int *budget)
2122 nic_t *nic = dev->priv;
2123 int pkt_cnt = 0, org_pkts_to_process;
2124 mac_info_t *mac_control;
2125 struct config_param *config;
2126 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
2130 atomic_inc(&nic->isr_cnt);
2131 mac_control = &nic->mac_control;
2132 config = &nic->config;
2134 nic->pkts_to_process = *budget;
2135 if (nic->pkts_to_process > dev->quota)
2136 nic->pkts_to_process = dev->quota;
2137 org_pkts_to_process = nic->pkts_to_process;
2139 val64 = readq(&bar0->rx_traffic_int);
2140 writeq(val64, &bar0->rx_traffic_int);
2142 for (i = 0; i < config->rx_ring_num; i++) {
2143 rx_intr_handler(&mac_control->rings[i]);
2144 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2145 if (!nic->pkts_to_process) {
2146 /* Quota for the current iteration has been met */
2153 dev->quota -= pkt_cnt;
2155 netif_rx_complete(dev);
2157 for (i = 0; i < config->rx_ring_num; i++) {
2158 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2159 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2160 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2164 /* Re enable the Rx interrupts. */
2165 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
2166 atomic_dec(&nic->isr_cnt);
2170 dev->quota -= pkt_cnt;
2173 for (i = 0; i < config->rx_ring_num; i++) {
2174 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2175 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2176 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2180 atomic_dec(&nic->isr_cnt);
2186 * rx_intr_handler - Rx interrupt handler
2187 * @nic: device private variable.
2189 * If the interrupt is because of a received frame or if the
2190 * receive ring contains fresh as yet un-processed frames,this function is
2191 * called. It picks out the RxD at which place the last Rx processing had
2192 * stopped and sends the skb to the OSM's Rx handler and then increments
2197 static void rx_intr_handler(ring_info_t *ring_data)
2199 nic_t *nic = ring_data->nic;
2200 struct net_device *dev = (struct net_device *) nic->dev;
2201 int get_block, get_offset, put_block, put_offset, ring_bufs;
2202 rx_curr_get_info_t get_info, put_info;
2204 struct sk_buff *skb;
2205 #ifndef CONFIG_S2IO_NAPI
2208 spin_lock(&nic->rx_lock);
2209 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2210 DBG_PRINT(ERR_DBG, "%s: %s going down for reset\n",
2211 __FUNCTION__, dev->name);
2212 spin_unlock(&nic->rx_lock);
2215 get_info = ring_data->rx_curr_get_info;
2216 get_block = get_info.block_index;
2217 put_info = ring_data->rx_curr_put_info;
2218 put_block = put_info.block_index;
2219 ring_bufs = get_info.ring_len+1;
2220 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2222 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2224 #ifndef CONFIG_S2IO_NAPI
2225 spin_lock(&nic->put_lock);
2226 put_offset = ring_data->put_pos;
2227 spin_unlock(&nic->put_lock);
2229 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
2232 while (RXD_IS_UP2DT(rxdp) &&
2233 (((get_offset + 1) % ring_bufs) != put_offset)) {
2234 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2236 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2238 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2239 spin_unlock(&nic->rx_lock);
2242 #ifndef CONFIG_2BUFF_MODE
2243 pci_unmap_single(nic->pdev, (dma_addr_t)
2246 HEADER_ETHERNET_II_802_3_SIZE +
2249 PCI_DMA_FROMDEVICE);
2251 pci_unmap_single(nic->pdev, (dma_addr_t)
2253 BUF0_LEN, PCI_DMA_FROMDEVICE);
2254 pci_unmap_single(nic->pdev, (dma_addr_t)
2256 BUF1_LEN, PCI_DMA_FROMDEVICE);
2257 pci_unmap_single(nic->pdev, (dma_addr_t)
2259 dev->mtu + BUF0_LEN + 4,
2260 PCI_DMA_FROMDEVICE);
2262 rx_osm_handler(ring_data, rxdp);
2264 ring_data->rx_curr_get_info.offset =
2266 rxdp = ring_data->rx_blocks[get_block].block_virt_addr +
2268 if (get_info.offset &&
2269 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2270 get_info.offset = 0;
2271 ring_data->rx_curr_get_info.offset
2274 get_block %= ring_data->block_count;
2275 ring_data->rx_curr_get_info.block_index
2277 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
2280 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2282 #ifdef CONFIG_S2IO_NAPI
2283 nic->pkts_to_process -= 1;
2284 if (!nic->pkts_to_process)
2288 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2292 spin_unlock(&nic->rx_lock);
2296 * tx_intr_handler - Transmit interrupt handler
2297 * @nic : device private variable
2299 * If an interrupt was raised to indicate DMA complete of the
2300 * Tx packet, this function is called. It identifies the last TxD
2301 * whose buffer was freed and frees all skbs whose data have already
2302 * DMA'ed into the NICs internal memory.
2307 static void tx_intr_handler(fifo_info_t *fifo_data)
2309 nic_t *nic = fifo_data->nic;
2310 struct net_device *dev = (struct net_device *) nic->dev;
2311 tx_curr_get_info_t get_info, put_info;
2312 struct sk_buff *skb;
2316 get_info = fifo_data->tx_curr_get_info;
2317 put_info = fifo_data->tx_curr_put_info;
2318 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
2320 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2321 (get_info.offset != put_info.offset) &&
2322 (txdlp->Host_Control)) {
2323 /* Check for TxD errors */
2324 if (txdlp->Control_1 & TXD_T_CODE) {
2325 unsigned long long err;
2326 err = txdlp->Control_1 & TXD_T_CODE;
2327 DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2331 skb = (struct sk_buff *) ((unsigned long)
2332 txdlp->Host_Control);
2334 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2336 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2340 frg_cnt = skb_shinfo(skb)->nr_frags;
2341 nic->tx_pkt_count++;
2343 pci_unmap_single(nic->pdev, (dma_addr_t)
2344 txdlp->Buffer_Pointer,
2345 skb->len - skb->data_len,
2351 for (j = 0; j < frg_cnt; j++, txdlp++) {
2353 &skb_shinfo(skb)->frags[j];
2354 pci_unmap_page(nic->pdev,
2364 (sizeof(TxD_t) * fifo_data->max_txds));
2366 /* Updating the statistics block */
2367 nic->stats.tx_bytes += skb->len;
2368 dev_kfree_skb_irq(skb);
2371 get_info.offset %= get_info.fifo_len + 1;
2372 txdlp = (TxD_t *) fifo_data->list_info
2373 [get_info.offset].list_virt_addr;
2374 fifo_data->tx_curr_get_info.offset =
2378 spin_lock(&nic->tx_lock);
2379 if (netif_queue_stopped(dev))
2380 netif_wake_queue(dev);
2381 spin_unlock(&nic->tx_lock);
2385 * alarm_intr_handler - Alarm Interrupt handler
2386 * @nic: device private variable
2387 * Description: If the interrupt was neither because of Rx packet or Tx
2388 * complete, this function is called. If the interrupt was to indicate
2389 * a loss of link, the OSM link status handler is invoked for any other
2390 * alarm interrupt the block that raised the interrupt is displayed
2391 * and a H/W reset is issued.
2396 static void alarm_intr_handler(struct s2io_nic *nic)
/*
 * Services alarm (non-traffic) interrupts: link-state change, ECC
 * memory errors, "serious error" sources and PCC_FB_ECC double errors.
 * Each cause register is read and written back to acknowledge the
 * condition before the bits are tested.
 * NOTE(review): several lines (closing braces, some statements) are
 * elided in this extract.
 */
2398 struct net_device *dev = (struct net_device *) nic->dev;
2399 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2400 register u64 val64 = 0, err_reg = 0;
2402 /* Handling link status change error Intr */
2403 err_reg = readq(&bar0->mac_rmac_err_reg);
2404 writeq(err_reg, &bar0->mac_rmac_err_reg);
2405 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
/* Defer link handling to process context via the set_link work item. */
2406 schedule_work(&nic->set_link_task);
2409 /* Handling Ecc errors */
2410 val64 = readq(&bar0->mc_err_reg);
2411 writeq(val64, &bar0->mc_err_reg);
2412 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
2413 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
2414 nic->mac_control.stats_info->sw_stat.
2416 DBG_PRINT(ERR_DBG, "%s: Device indicates ",
2418 DBG_PRINT(ERR_DBG, "double ECC error!!\n");
/* Double ECC errors are fatal: stop Tx and schedule a device reset. */
2419 netif_stop_queue(dev);
2420 schedule_work(&nic->rst_timer_task);
2422 nic->mac_control.stats_info->sw_stat.
2427 /* In case of a serious error, the device will be Reset. */
2428 val64 = readq(&bar0->serr_source);
2429 if (val64 & SERR_SOURCE_ANY) {
2430 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2431 DBG_PRINT(ERR_DBG, "serious error!!\n");
2432 netif_stop_queue(dev);
2433 schedule_work(&nic->rst_timer_task);
2437 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
2438 * Error occurs, the adapter will be recycled by disabling the
2439 * adapter enable bit and enabling it again after the device
2440 * becomes Quiescent.
2442 val64 = readq(&bar0->pcc_err_reg);
2443 writeq(val64, &bar0->pcc_err_reg);
2444 if (val64 & PCC_FB_ECC_DB_ERR) {
2445 u64 ac = readq(&bar0->adapter_control);
2446 ac &= ~(ADAPTER_CNTL_EN);
2447 writeq(ac, &bar0->adapter_control);
/* Read back to flush the posted write before scheduling re-enable. */
2448 ac = readq(&bar0->adapter_control);
2449 schedule_work(&nic->set_link_task);
2452 /* Other type of interrupts are not being handled now, TODO */
2456 * wait_for_cmd_complete - waits for a command to complete.
2457 * @sp : private member of the device structure, which is a pointer to the
2458 * s2io_nic structure.
2459 * Description: Function that waits for a command to Write into RMAC
2460 * ADDR DATA registers to be completed and returns either success or
2461 * error depending on whether the command was complete or not.
2463 * SUCCESS on success and FAILURE on failure.
2466 int wait_for_cmd_complete(nic_t * sp)
/*
 * Polls rmac_addr_cmd_mem until the STROBE "executing" bit clears and
 * returns SUCCESS, or FAILURE on timeout.  The retry/delay loop using
 * cnt is elided in this extract.
 */
2468 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2469 int ret = FAILURE, cnt = 0;
2473 val64 = readq(&bar0->rmac_addr_cmd_mem);
2474 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2487 * s2io_reset - Resets the card.
2488 * @sp : private member of the device structure.
2489 * Description: Function to Reset the card. This function then also
2490 * restores the previously saved PCI configuration space registers as
2491 * the card reset also resets the configuration space.
2496 void s2io_reset(nic_t * sp)
/*
 * Issues a software reset (SW_RESET_ALL), restores the previously saved
 * PCI configuration space (the reset wipes it), re-programs the byte
 * swapper, clears parity/ECC status bits and zeroes the OS-maintained
 * statistics.  The 250ms settle delay mentioned below is on an elided
 * line of this extract.
 */
2498 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2502 val64 = SW_RESET_ALL;
2503 writeq(val64, &bar0->sw_reset);
2506 * At this stage, if the PCI write is indeed completed, the
2507 * card is reset and so is the PCI Config space of the device.
2508 * So a read cannot be issued at this stage on any of the
2509 * registers to ensure the write into "sw_reset" register
2511 * Question: Is there any system call that will explicitly force
2512 * all the write commands still pending on the bus to be pushed
2514 * As of now I am just giving a 250ms delay and hoping that the
2515 * PCI write to sw_reset register is done by this time.
2519 /* Restore the PCI state saved during initialization. */
2520 pci_restore_state(sp->pdev);
2526 /* Set swapper to enable I/O register access */
2527 s2io_set_swapper(sp);
2529 /* Clear certain PCI/PCI-X fields after reset */
2530 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
2531 pci_cmd &= 0x7FFF; /* Clear parity err detect bit */
2532 pci_write_config_word(sp->pdev, PCI_COMMAND, pci_cmd);
2534 val64 = readq(&bar0->txpic_int_reg);
2535 val64 &= ~BIT(62); /* Clearing PCI_STATUS error reflected here */
2536 writeq(val64, &bar0->txpic_int_reg);
2538 /* Clearing PCIX Ecc status register */
2539 pci_write_config_dword(sp->pdev, 0x68, 0);
2541 /* Reset device statistics maintained by OS */
2542 memset(&sp->stats, 0, sizeof (struct net_device_stats));
2544 /* SXE-002: Configure link and activity LED to turn it off */
2545 subid = sp->pdev->subsystem_device;
2546 if ((subid & 0xFF) >= 0x07) {
2547 val64 = readq(&bar0->gpio_control);
2548 val64 |= 0x0000800000000000ULL;
2549 writeq(val64, &bar0->gpio_control);
/* Magic LED workaround value written at raw offset 0x2700 (SXE-002). */
2550 val64 = 0x0411040400000000ULL;
2551 writeq(val64, (void __iomem *) ((u8 *) bar0 + 0x2700));
2554 sp->device_enabled_once = FALSE;
2558 * s2io_set_swapper - to set the swapper control on the card
2559 * @sp : private member of the device structure,
2560 * pointer to the s2io_nic structure.
2561 * Description: Function to set the swapper control on the card
2562 * correctly depending on the 'endianness' of the system.
2564 * SUCCESS on success and FAILURE on failure.
2567 int s2io_set_swapper(nic_t * sp)
/*
 * Programs the byte-swapper so that register/descriptor accesses have
 * the correct endianness for this host, verifying each attempt against
 * the PIF feed-back register (which must read the known pattern
 * 0x0123456789ABCDEF).  A second candidate table is tried against the
 * xmsi_address read-back.  Returns SUCCESS or FAILURE (return
 * statements are on elided lines of this extract).
 */
2569 struct net_device *dev = sp->dev;
2570 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2571 u64 val64, valt, valr;
2574 * Set proper endian settings and verify the same by reading
2575 * the PIF Feed-back register.
2578 val64 = readq(&bar0->pif_rd_swapper_fb);
2579 if (val64 != 0x0123456789ABCDEFULL) {
/* Candidate swapper settings, tried in order until feedback matches. */
2581 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
2582 0x8100008181000081ULL, /* FE=1, SE=0 */
2583 0x4200004242000042ULL, /* FE=0, SE=1 */
2584 0}; /* FE=0, SE=0 */
2587 writeq(value[i], &bar0->swapper_ctrl);
2588 val64 = readq(&bar0->pif_rd_swapper_fb);
2589 if (val64 == 0x0123456789ABCDEFULL)
2594 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2596 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2597 (unsigned long long) val64);
2602 valr = readq(&bar0->swapper_ctrl);
/* Verify the write path too: write a known pattern to xmsi_address. */
2605 valt = 0x0123456789ABCDEFULL;
2606 writeq(valt, &bar0->xmsi_address);
2607 val64 = readq(&bar0->xmsi_address);
2611 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
2612 0x0081810000818100ULL, /* FE=1, SE=0 */
2613 0x0042420000424200ULL, /* FE=0, SE=1 */
2614 0}; /* FE=0, SE=0 */
2617 writeq((value[i] | valr), &bar0->swapper_ctrl);
2618 writeq(valt, &bar0->xmsi_address);
2619 val64 = readq(&bar0->xmsi_address);
2625 unsigned long long x = val64;
2626 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
2627 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
/* Keep only the high 16 bits, then OR in the per-path enable flags. */
2631 val64 = readq(&bar0->swapper_ctrl);
2632 val64 &= 0xFFFF000000000000ULL;
2636 * The device by default set to a big endian format, so a
2637 * big endian driver need not set anything.
2639 val64 |= (SWAPPER_CTRL_TXP_FE |
2640 SWAPPER_CTRL_TXP_SE |
2641 SWAPPER_CTRL_TXD_R_FE |
2642 SWAPPER_CTRL_TXD_W_FE |
2643 SWAPPER_CTRL_TXF_R_FE |
2644 SWAPPER_CTRL_RXD_R_FE |
2645 SWAPPER_CTRL_RXD_W_FE |
2646 SWAPPER_CTRL_RXF_W_FE |
2647 SWAPPER_CTRL_XMSI_FE |
2648 SWAPPER_CTRL_XMSI_SE |
2649 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2650 writeq(val64, &bar0->swapper_ctrl);
2653 * Initially we enable all bits to make it accessible by the
2654 * driver, then we selectively enable only those bits that
/* Little-endian variant: also enable the SE (swap-enable) bits. */
2657 val64 |= (SWAPPER_CTRL_TXP_FE |
2658 SWAPPER_CTRL_TXP_SE |
2659 SWAPPER_CTRL_TXD_R_FE |
2660 SWAPPER_CTRL_TXD_R_SE |
2661 SWAPPER_CTRL_TXD_W_FE |
2662 SWAPPER_CTRL_TXD_W_SE |
2663 SWAPPER_CTRL_TXF_R_FE |
2664 SWAPPER_CTRL_RXD_R_FE |
2665 SWAPPER_CTRL_RXD_R_SE |
2666 SWAPPER_CTRL_RXD_W_FE |
2667 SWAPPER_CTRL_RXD_W_SE |
2668 SWAPPER_CTRL_RXF_W_FE |
2669 SWAPPER_CTRL_XMSI_FE |
2670 SWAPPER_CTRL_XMSI_SE |
2671 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2672 writeq(val64, &bar0->swapper_ctrl);
2674 val64 = readq(&bar0->swapper_ctrl);
2677 * Verifying if endian settings are accurate by reading a
2678 * feedback register.
2680 val64 = readq(&bar0->pif_rd_swapper_fb);
2681 if (val64 != 0x0123456789ABCDEFULL) {
2682 /* Endian settings are incorrect, calls for another dekko. */
2683 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2685 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2686 (unsigned long long) val64);
2693 /* ********************************************************* *
2694 * Functions defined below concern the OS part of the driver *
2695 * ********************************************************* */
2698 * s2io_open - open entry point of the driver
2699 * @dev : pointer to the device structure.
2701 * This function is the open entry point of the driver. It mainly calls a
2702 * function to allocate Rx buffers and inserts them into the buffer
2703 * descriptors and then enables the Rx part of the NIC.
2705 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2709 int s2io_open(struct net_device *dev)
/*
 * ndo open entry point: brings the hardware up (s2io_card_up), then
 * registers the shared interrupt handler, programs the MAC address and
 * starts the Tx queue.  Error paths unwind in reverse order via the
 * goto labels at the bottom.  Returns 0 on success or a negative errno
 * (the err assignments are on elided lines of this extract).
 */
2711 nic_t *sp = dev->priv;
2715 * Make sure you have link off by default every time
2716 * Nic is initialized
2718 netif_carrier_off(dev);
2719 sp->last_link_state = 0; /* Unknown link state */
2721 /* Initialize H/W and enable interrupts */
2722 if (s2io_card_up(sp)) {
2723 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2726 goto hw_init_failed;
2729 /* After proper initialization of H/W, register ISR */
2730 err = request_irq((int) sp->pdev->irq, s2io_isr, SA_SHIRQ,
2733 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2735 goto isr_registration_failed;
2738 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2739 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
2741 goto setting_mac_address_failed;
2744 netif_start_queue(dev);
/* Unwind labels: free the IRQ, then stop the alarm timer. */
2747 setting_mac_address_failed:
2748 free_irq(sp->pdev->irq, dev);
2749 isr_registration_failed:
2750 del_timer_sync(&sp->alarm_timer);
2757 * s2io_close -close entry point of the driver
2758 * @dev : device pointer.
2760 * This is the stop entry point of the driver. It needs to undo exactly
2761 * whatever was done by the open entry point,thus it's usually referred to
2762 * as the close function.Among other things this function mainly stops the
2763 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
2765 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2769 int s2io_close(struct net_device *dev)
/*
 * ndo stop entry point: flushes pending work items, stops the Tx
 * queue, shuts the card down (the s2io_card_down call is on an elided
 * line), frees the IRQ and marks the device closed.
 */
2771 nic_t *sp = dev->priv;
2772 flush_scheduled_work();
2773 netif_stop_queue(dev);
2774 /* Reset card, kill tasklet and free Tx and Rx buffers. */
2777 free_irq(sp->pdev->irq, dev);
2778 sp->device_close_flag = TRUE; /* Device is shut down. */
2783 * s2io_xmit - Tx entry point of the driver
2784 * @skb : the socket buffer containing the Tx data.
2785 * @dev : device pointer.
2787 * This function is the Tx entry point of the driver. S2IO NIC supports
2788 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
2789 * NOTE: when device cant queue the pkt,just the trans_start variable will
2792 * 0 on success & 1 on failure.
2795 int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
/*
 * hard_start_xmit: builds a TxD list for the skb (linear part plus page
 * fragments), sets LSO/checksum-offload bits, hands ownership to the
 * NIC and kicks the FIFO doorbell.  Runs under sp->tx_lock.  The queue
 * selection and return statements are on elided lines of this extract.
 */
2797 nic_t *sp = dev->priv;
2798 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
2801 TxFIFO_element_t __iomem *tx_fifo;
2802 unsigned long flags;
2806 mac_info_t *mac_control;
2807 struct config_param *config;
2809 mac_control = &sp->mac_control;
2810 config = &sp->config;
2812 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
2813 spin_lock_irqsave(&sp->tx_lock, flags);
/* Refuse to queue while the adapter is being reset. */
2814 if (atomic_read(&sp->card_state) == CARD_DOWN) {
2815 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
2817 spin_unlock_irqrestore(&sp->tx_lock, flags);
2824 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
2825 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
2826 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
2829 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2830 /* Avoid "put" pointer going beyond "get" pointer */
2831 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
2832 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
2833 netif_stop_queue(dev);
2835 spin_unlock_irqrestore(&sp->tx_lock, flags);
/* TSO: program the MSS if the stack requested segmentation offload. */
2839 mss = skb_shinfo(skb)->tso_size;
2841 txdp->Control_1 |= TXD_TCP_LSO_EN;
2842 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
2846 frg_cnt = skb_shinfo(skb)->nr_frags;
2847 frg_len = skb->len - skb->data_len;
/* Map the linear part; stash the skb pointer for tx_intr_handler. */
2849 txdp->Buffer_Pointer = pci_map_single
2850 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
2851 txdp->Host_Control = (unsigned long) skb;
2852 if (skb->ip_summed == CHECKSUM_HW) {
2854 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
2858 txdp->Control_2 |= config->tx_intr_type;
2860 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
2861 TXD_GATHER_CODE_FIRST);
2862 txdp->Control_1 |= TXD_LIST_OWN_XENA;
2864 /* For fragmented SKB. */
2865 for (i = 0; i < frg_cnt; i++) {
2866 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2868 txdp->Buffer_Pointer = (u64) pci_map_page
2869 (sp->pdev, frag->page, frag->page_offset,
2870 frag->size, PCI_DMA_TODEVICE);
2871 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
2873 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
/* Doorbell: give the NIC the physical address of the TxD list. */
2875 tx_fifo = mac_control->tx_FIFO_start[queue];
2876 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
2877 writeq(val64, &tx_fifo->TxDL_Pointer);
2881 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
2886 val64 |= TX_FIFO_SPECIAL_FUNC;
2888 writeq(val64, &tx_fifo->List_Control);
/* Advance "put" with wrap-around and publish the new offset. */
2891 put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
2892 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
2894 /* Avoid "put" pointer going beyond "get" pointer */
2895 if (((put_off + 1) % queue_len) == get_off) {
2897 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
2899 netif_stop_queue(dev);
2902 dev->trans_start = jiffies;
2903 spin_unlock_irqrestore(&sp->tx_lock, flags);
2909 s2io_alarm_handle(unsigned long data)
/*
 * Timer callback: polls the alarm sources via alarm_intr_handler and
 * re-arms itself to fire again in half a second.  (The return type and
 * opening brace are on elided lines of this extract.)
 */
2911 nic_t *sp = (nic_t *)data;
2913 alarm_intr_handler(sp);
2914 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
2918 * s2io_isr - ISR handler of the device .
2919 * @irq: the irq of the device.
2920 * @dev_id: a void pointer to the dev structure of the NIC.
2921 * @pt_regs: pointer to the registers pushed on the stack.
2922 * Description: This function is the ISR handler of the device. It
2923 * identifies the reason for the interrupt and calls the relevant
2924 * service routines. As a contingency measure, this ISR allocates the
2925 * recv buffers, if their numbers are below the panic value which is
2926 * presently set to 25% of the original number of rcv buffers allocated.
2928 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
2929 * IRQ_NONE: will be returned if interrupt is not from our device
2931 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
/*
 * Shared interrupt handler: reads general_int_status to find the cause,
 * dispatches Rx (NAPI or direct), Tx-completion and, in non-NAPI
 * builds, refills Rx buffers when they run low.  isr_cnt brackets the
 * handler so teardown paths can wait for it to drain.  Several closing
 * braces and the IRQ_HANDLED/IRQ_NONE returns are on elided lines.
 */
2933 struct net_device *dev = (struct net_device *) dev_id;
2934 nic_t *sp = dev->priv;
2935 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2937 u64 reason = 0, val64;
2938 mac_info_t *mac_control;
2939 struct config_param *config;
2941 atomic_inc(&sp->isr_cnt);
2942 mac_control = &sp->mac_control;
2943 config = &sp->config;
2946 * Identify the cause for interrupt and call the appropriate
2947 * interrupt handler. Causes for the interrupt could be;
2951 * 4. Error in any functional blocks of the NIC.
2953 reason = readq(&bar0->general_int_status);
2956 /* The interrupt was not raised by Xena. */
2957 atomic_dec(&sp->isr_cnt);
2961 #ifdef CONFIG_S2IO_NAPI
/* NAPI path: mask Rx interrupts and let the poll routine do the work. */
2962 if (reason & GEN_INTR_RXTRAFFIC) {
2963 if (netif_rx_schedule_prep(dev)) {
2964 en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
2966 __netif_rx_schedule(dev);
2970 /* If Intr is because of Rx Traffic */
2971 if (reason & GEN_INTR_RXTRAFFIC) {
2973 * rx_traffic_int reg is an R1 register, writing all 1's
2974 * will ensure that the actual interrupt causing bit get's
2975 * cleared and hence a read can be avoided.
2977 val64 = 0xFFFFFFFFFFFFFFFFULL;
2978 writeq(val64, &bar0->rx_traffic_int);
2979 for (i = 0; i < config->rx_ring_num; i++) {
2980 rx_intr_handler(&mac_control->rings[i]);
2985 /* If Intr is because of Tx Traffic */
2986 if (reason & GEN_INTR_TXTRAFFIC) {
2988 * tx_traffic_int reg is an R1 register, writing all 1's
2989 * will ensure that the actual interrupt causing bit get's
2990 * cleared and hence a read can be avoided.
2992 val64 = 0xFFFFFFFFFFFFFFFFULL;
2993 writeq(val64, &bar0->tx_traffic_int);
2995 for (i = 0; i < config->tx_fifo_num; i++)
2996 tx_intr_handler(&mac_control->fifos[i]);
3000 * If the Rx buffer count is below the panic threshold then
3001 * reallocate the buffers from the interrupt handler itself,
3002 * else schedule a tasklet to reallocate the buffers.
3004 #ifndef CONFIG_S2IO_NAPI
3005 for (i = 0; i < config->rx_ring_num; i++) {
3007 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
3008 int level = rx_buffer_level(sp, rxb_size, i);
/* PANIC level: refill inline, guarded by the tasklet-in-use bit. */
3010 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3011 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
3012 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3013 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
3014 DBG_PRINT(ERR_DBG, "%s:Out of memory",
3016 DBG_PRINT(ERR_DBG, " in ISR!!\n");
3017 clear_bit(0, (&sp->tasklet_status));
3018 atomic_dec(&sp->isr_cnt);
3021 clear_bit(0, (&sp->tasklet_status));
3022 } else if (level == LOW) {
3023 tasklet_schedule(&sp->task);
3028 atomic_dec(&sp->isr_cnt);
3035 static void s2io_updt_stats(nic_t *sp)
/*
 * Triggers a one-shot hardware statistics DMA into the stats block and
 * polls stat_cfg bit 0 until the update completes (the poll loop and
 * timeout counter are partly elided in this extract).  No-op unless
 * the card is up.
 */
3037 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3041 if (atomic_read(&sp->card_state) == CARD_UP) {
3042 /* Apprx 30us on a 133 MHz bus */
3043 val64 = SET_UPDT_CLICKS(10) |
3044 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
3045 writeq(val64, &bar0->stat_cfg);
3048 val64 = readq(&bar0->stat_cfg);
3049 if (!(val64 & BIT(0)))
3053 break; /* Updt failed */
3059 * s2io_get_stats - Updates the device statistics structure.
3060 * @dev : pointer to the device structure.
3062 * This function updates the device statistics structure in the s2io_nic
3063 * structure and returns a pointer to the same.
3065 * pointer to the updated net_device_stats structure.
3068 struct net_device_stats *s2io_get_stats(struct net_device *dev)
/*
 * ndo get_stats: forces an immediate hardware statistics update, copies
 * selected counters from the DMA'ed stats block into sp->stats and
 * returns a pointer to it.
 * NOTE(review): counters are converted with le32_to_cpu here; confirm
 * against the stats-block layout that these fields really are 32-bit.
 */
3070 nic_t *sp = dev->priv;
3071 mac_info_t *mac_control;
3072 struct config_param *config;
3075 mac_control = &sp->mac_control;
3076 config = &sp->config;
3078 /* Configure Stats for immediate updt */
3079 s2io_updt_stats(sp);
3081 sp->stats.tx_packets =
3082 le32_to_cpu(mac_control->stats_info->tmac_frms);
3083 sp->stats.tx_errors =
3084 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
3085 sp->stats.rx_errors =
3086 le32_to_cpu(mac_control->stats_info->rmac_drop_frms);
3087 sp->stats.multicast =
3088 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
3089 sp->stats.rx_length_errors =
3090 le32_to_cpu(mac_control->stats_info->rmac_long_frms);
3092 return (&sp->stats);
3096 * s2io_set_multicast - entry point for multicast address enable/disable.
3097 * @dev : pointer to the device structure
3099 * This function is a driver entry point which gets called by the kernel
3100 * whenever multicast addresses must be enabled/disabled. This also gets
3101 * called to set/reset promiscuous mode. Depending on the device flag, we
3102 * determine, if multicast address must be enabled or if promiscuous mode
3103 * is to be disabled etc.
3108 static void s2io_set_multicast(struct net_device *dev)
/*
 * ndo set_multicast_list: programs the RMAC address-filter CAM to match
 * dev->flags — enable/disable all-multicast, enter/leave promiscuous
 * mode, and mirror the per-device multicast list into the hardware
 * filter slots starting at MAC_MC_ADDR_START_OFFSET.  Each CAM write
 * goes through rmac_addr_data0/1_mem plus a strobed command and is
 * completed via wait_for_cmd_complete().  Some closing braces and loop
 * bodies are elided in this extract.
 */
3111 struct dev_mc_list *mclist;
3112 nic_t *sp = dev->priv;
3113 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3114 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
3116 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
3119 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
3120 /* Enable all Multicast addresses */
3121 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
3122 &bar0->rmac_addr_data0_mem);
3123 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
3124 &bar0->rmac_addr_data1_mem);
3125 val64 = RMAC_ADDR_CMD_MEM_WE |
3126 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3127 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
3128 writeq(val64, &bar0->rmac_addr_cmd_mem);
3129 /* Wait till command completes */
3130 wait_for_cmd_complete(sp);
3133 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
3134 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
3135 /* Disable all Multicast addresses */
3136 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3137 &bar0->rmac_addr_data0_mem);
3138 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
3139 &bar0->rmac_addr_data1_mem);
3140 val64 = RMAC_ADDR_CMD_MEM_WE |
3141 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3142 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
3143 writeq(val64, &bar0->rmac_addr_cmd_mem);
3144 /* Wait till command completes */
3145 wait_for_cmd_complete(sp);
3148 sp->all_multi_pos = 0;
3151 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
3152 /* Put the NIC into promiscuous mode */
3153 add = &bar0->mac_cfg;
3154 val64 = readq(&bar0->mac_cfg);
3155 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
/* mac_cfg is key-protected: write the cfg key before each 32-bit half. */
3157 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3158 writel((u32) val64, add);
3159 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3160 writel((u32) (val64 >> 32), (add + 4));
3162 val64 = readq(&bar0->mac_cfg);
3163 sp->promisc_flg = 1;
3164 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
3166 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
3167 /* Remove the NIC from promiscuous mode */
3168 add = &bar0->mac_cfg;
3169 val64 = readq(&bar0->mac_cfg);
3170 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
3172 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3173 writel((u32) val64, add);
3174 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3175 writel((u32) (val64 >> 32), (add + 4));
3177 val64 = readq(&bar0->mac_cfg);
3178 sp->promisc_flg = 0;
3179 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
3183 /* Update individual M_CAST address list */
3184 if ((!sp->m_cast_flg) && dev->mc_count) {
/* Hardware CAM is finite: refuse lists that exceed the filter slots. */
3186 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
3187 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3189 DBG_PRINT(ERR_DBG, "can be added, please enable ");
3190 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3194 prev_cnt = sp->mc_addr_count;
3195 sp->mc_addr_count = dev->mc_count;
3197 /* Clear out the previous list of Mc in the H/W. */
3198 for (i = 0; i < prev_cnt; i++) {
3199 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3200 &bar0->rmac_addr_data0_mem);
3201 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3202 &bar0->rmac_addr_data1_mem);
3203 val64 = RMAC_ADDR_CMD_MEM_WE |
3204 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3205 RMAC_ADDR_CMD_MEM_OFFSET
3206 (MAC_MC_ADDR_START_OFFSET + i);
3207 writeq(val64, &bar0->rmac_addr_cmd_mem);
3209 /* Wait for command completes */
3210 if (wait_for_cmd_complete(sp)) {
3211 DBG_PRINT(ERR_DBG, "%s: Adding ",
3213 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3218 /* Create the new Rx filter list and update the same in H/W. */
3219 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3220 i++, mclist = mclist->next) {
3221 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
/* Pack the 6 address bytes into a u64 (shift lines are elided). */
3223 for (j = 0; j < ETH_ALEN; j++) {
3224 mac_addr |= mclist->dmi_addr[j];
3228 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3229 &bar0->rmac_addr_data0_mem);
3230 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
3231 &bar0->rmac_addr_data1_mem);
3232 val64 = RMAC_ADDR_CMD_MEM_WE |
3233 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3234 RMAC_ADDR_CMD_MEM_OFFSET
3235 (i + MAC_MC_ADDR_START_OFFSET);
3236 writeq(val64, &bar0->rmac_addr_cmd_mem);
3238 /* Wait for command completes */
3239 if (wait_for_cmd_complete(sp)) {
3240 DBG_PRINT(ERR_DBG, "%s: Adding ",
3242 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3250 * s2io_set_mac_addr - Programs the Xframe mac address
3251 * @dev : pointer to the device structure.
3252 * @addr: a uchar pointer to the new mac address which is to be set.
3253 * Description : This procedure will program the Xframe to receive
3254 * frames with new Mac Address
3255 * Return value: SUCCESS on success and an appropriate (-)ve integer
3256 * as defined in errno.h file on failure.
3259 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
/*
 * Programs the given MAC address into unicast filter slot 0 of the
 * RMAC CAM and waits for the strobed command to complete.  Returns
 * SUCCESS/FAILURE (return statements are on elided lines).
 */
3261 nic_t *sp = dev->priv;
3262 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3263 register u64 val64, mac_addr = 0;
3267 * Set the new MAC address as the new unicast filter and reflect this
3268 * change on the device address registered with the OS. It will be
/* Pack the 6 address bytes into a u64 (shift line is elided). */
3271 for (i = 0; i < ETH_ALEN; i++) {
3273 mac_addr |= addr[i];
3276 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3277 &bar0->rmac_addr_data0_mem);
3280 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3281 RMAC_ADDR_CMD_MEM_OFFSET(0);
3282 writeq(val64, &bar0->rmac_addr_cmd_mem);
3283 /* Wait till command completes */
3284 if (wait_for_cmd_complete(sp)) {
3285 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
3293 * s2io_ethtool_sset - Sets different link parameters.
3294 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
3295 * @info: pointer to the structure with parameters given by ethtool to set
3298 * The function sets different link parameters provided by the user onto
3304 static int s2io_ethtool_sset(struct net_device *dev,
3305 struct ethtool_cmd *info)
/*
 * ethtool set_settings: the Xframe only runs 10Gb/full-duplex with
 * autoneg off, so any other request is rejected (the error return is
 * on an elided line); an accepted request bounces the interface via
 * close/open to apply it.
 */
3307 nic_t *sp = dev->priv;
3308 if ((info->autoneg == AUTONEG_ENABLE) ||
3309 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
3312 s2io_close(sp->dev);
3320 * s2io_ethtol_gset - Return link specific information.
3321 * @sp : private member of the device structure, pointer to the
3322 * s2io_nic structure.
3323 * @info : pointer to the structure with parameters given by ethtool
3324 * to return link information.
3326 * Returns link specific information like speed, duplex etc.. to ethtool.
3328 * return 0 on success.
3331 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
/*
 * ethtool get_settings: reports a fixed 10GbE fibre link; speed/duplex
 * are valid only while the carrier is up (the "link down" else branch
 * is on elided lines).  Autonegotiation is never supported.
 */
3333 nic_t *sp = dev->priv;
3334 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
/* NOTE(review): advertising is filled with SUPPORTED_* constants;
 * the ADVERTISED_* equivalents would be conventional — confirm the
 * constant values coincide before relying on this. */
3335 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3336 info->port = PORT_FIBRE;
3337 /* info->transceiver?? TODO */
3339 if (netif_carrier_ok(sp->dev)) {
3340 info->speed = 10000;
3341 info->duplex = DUPLEX_FULL;
3347 info->autoneg = AUTONEG_DISABLE;
3352 * s2io_ethtool_gdrvinfo - Returns driver specific information.
3353 * @sp : private member of the device structure, which is a pointer to the
3354 * s2io_nic structure.
3355 * @info : pointer to the structure with parameters given by ethtool to
3356 * return driver information.
3358 * Returns driver specific information like name, version etc.. to ethtool.
3363 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3364 struct ethtool_drvinfo *info)
/*
 * ethtool get_drvinfo: fills in driver name/version, bus id and the
 * register/EEPROM/test/stat dump sizes.
 * NOTE(review): strncpy does not guarantee NUL termination when the
 * source length equals the bound — verify the name/version strings are
 * shorter than the destination fields.
 */
3366 nic_t *sp = dev->priv;
3368 strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3369 strncpy(info->version, s2io_driver_version,
3370 sizeof(s2io_driver_version));
3371 strncpy(info->fw_version, "", 32);
3372 strncpy(info->bus_info, pci_name(sp->pdev), 32);
3373 info->regdump_len = XENA_REG_SPACE;
3374 info->eedump_len = XENA_EEPROM_SPACE;
3375 info->testinfo_len = S2IO_TEST_LEN;
3376 info->n_stats = S2IO_STAT_LEN;
3380 * s2io_ethtool_gregs - dumps the entire register space of Xframe into the buffer.
3381 * @sp: private member of the device structure, which is a pointer to the
3382 * s2io_nic structure.
3383 * @regs : pointer to the structure with parameters given by ethtool for
3384 * dumping the registers.
3385 * @reg_space: The input argument into which all the registers are dumped.
3387 * Dumps the entire register space of xFrame NIC into the user given
3393 static void s2io_ethtool_gregs(struct net_device *dev,
3394 struct ethtool_regs *regs, void *space)
/*
 * ethtool get_regs: copies the whole XENA_REG_SPACE out of BAR0 into
 * the caller's buffer, 8 bytes at a time.  (The declaration of the
 * local u64 "reg" is on an elided line of this extract.)
 */
3398 u8 *reg_space = (u8 *) space;
3399 nic_t *sp = dev->priv;
3401 regs->len = XENA_REG_SPACE;
3402 regs->version = sp->pdev->subsystem_device;
3404 for (i = 0; i < regs->len; i += 8) {
3405 reg = readq(sp->bar0 + i);
/* NOTE(review): "®" below is a mojibake of "&reg" from extraction —
 * restore it to "&reg" before compiling. */
3406 memcpy((reg_space + i), ®, 8);
3411 * s2io_phy_id - timer function that alternates adapter LED.
3412 * @data : address of the private member of the device structure, which
3413 * is a pointer to the s2io_nic structure, provided as an u32.
3414 * Description: This is actually the timer function that alternates the
3415 * adapter LED bit of the adapter control bit to set/reset every time on
3416 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
3417 * once every second.
3419 static void s2io_phy_id(unsigned long data)
/*
 * Timer callback used by the ethtool identify operation: toggles the
 * adapter LED every half second.  Newer subsystem revisions
 * ((subid & 0xFF) >= 0x07) drive the LED through gpio_control; older
 * ones toggle ADAPTER_LED_ON in adapter_control.
 */
3421 nic_t *sp = (nic_t *) data;
3422 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3426 subid = sp->pdev->subsystem_device;
3427 if ((subid & 0xFF) >= 0x07) {
3428 val64 = readq(&bar0->gpio_control);
3429 val64 ^= GPIO_CTRL_GPIO_0;
3430 writeq(val64, &bar0->gpio_control);
3432 val64 = readq(&bar0->adapter_control);
3433 val64 ^= ADAPTER_LED_ON;
3434 writeq(val64, &bar0->adapter_control);
/* Re-arm so the LED keeps blinking until the timer is deleted. */
3437 mod_timer(&sp->id_timer, jiffies + HZ / 2);
3441 * s2io_ethtool_idnic - To physically identify the nic on the system.
3442 * @sp : private member of the device structure, which is a pointer to the
3443 * s2io_nic structure.
3444 * @id : pointer to the structure with identification parameters given by
3446 * Description: Used to physically identify the NIC on the system.
3447 * The Link LED will blink for a time specified by the user for
3449 * NOTE: The Link has to be Up to be able to blink the LED. Hence
3450 * identification is possible only if it's link is up.
3452 * int , returns 0 on success
3455 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
/*
 * ethtool phys_id: blinks the adapter LED via the s2io_phy_id timer for
 * "data" seconds (or MAX_FLICKER_TIME when data is 0), then restores
 * the original gpio_control value on cards with faulty link LEDs.
 * Older revisions ((subid & 0xFF) < 0x07) can only blink while the
 * adapter is enabled.
 */
3457 u64 val64 = 0, last_gpio_ctrl_val;
3458 nic_t *sp = dev->priv;
3459 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3462 subid = sp->pdev->subsystem_device;
3463 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3464 if ((subid & 0xFF) < 0x07) {
3465 val64 = readq(&bar0->adapter_control);
3466 if (!(val64 & ADAPTER_CNTL_EN)) {
3468 "Adapter Link down, cannot blink LED\n");
/* Lazily initialize the blink timer on first use. */
3472 if (sp->id_timer.function == NULL) {
3473 init_timer(&sp->id_timer);
3474 sp->id_timer.function = s2io_phy_id;
3475 sp->id_timer.data = (unsigned long) sp;
3477 mod_timer(&sp->id_timer, jiffies);
3479 msleep_interruptible(data * HZ);
3481 msleep_interruptible(MAX_FLICKER_TIME);
3482 del_timer_sync(&sp->id_timer);
3484 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
3485 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3486 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3493 * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
3494 * @sp : private member of the device structure, which is a pointer to the
3495 * s2io_nic structure.
3496 * @ep : pointer to the structure with pause parameters given by ethtool.
3498 * Returns the Pause frame generation and reception capability of the NIC.
3502 static void s2io_ethtool_getpause_data(struct net_device *dev,
3503 struct ethtool_pauseparam *ep)
/*
 * ethtool get_pauseparam: reports pause-frame generation (tx_pause)
 * and reception (rx_pause) from rmac_pause_cfg.  Pause autoneg is
 * never supported on this hardware.
 */
3506 nic_t *sp = dev->priv;
3507 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3509 val64 = readq(&bar0->rmac_pause_cfg);
3510 if (val64 & RMAC_PAUSE_GEN_ENABLE)
3511 ep->tx_pause = TRUE;
3512 if (val64 & RMAC_PAUSE_RX_ENABLE)
3513 ep->rx_pause = TRUE;
3514 ep->autoneg = FALSE;
3518 * s2io_ethtool_setpause_data - set/reset pause frame generation.
3519 * @sp : private member of the device structure, which is a pointer to the
3520 * s2io_nic structure.
3521 * @ep : pointer to the structure with pause parameters given by ethtool.
3523 * It can be used to set or reset Pause frame generation or reception
3524 * support of the NIC.
3526 * int, returns 0 on Success
3529 static int s2io_ethtool_setpause_data(struct net_device *dev,
3530 struct ethtool_pauseparam *ep)
/*
 * ethtool set_pauseparam: read-modify-write of rmac_pause_cfg to
 * enable/disable pause-frame generation and reception as requested.
 * (The ep->tx_pause / ep->rx_pause tests are on elided lines.)
 */
3533 nic_t *sp = dev->priv;
3534 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3536 val64 = readq(&bar0->rmac_pause_cfg);
3538 val64 |= RMAC_PAUSE_GEN_ENABLE;
3540 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3542 val64 |= RMAC_PAUSE_RX_ENABLE;
3544 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3545 writeq(val64, &bar0->rmac_pause_cfg);
3550 * read_eeprom - reads 4 bytes of data from user given offset.
3551 * @sp : private member of the device structure, which is a pointer to the
3552 * s2io_nic structure.
3553 * @off : offset at which the data must be written
3554 * @data : Its an output parameter where the data read at the given
3557 * Will read 4 bytes of data from the user given offset and return the
3559 * NOTE: Will allow to read only part of the EEPROM visible through the
3562 * -1 on failure and 0 on success.
/* I2C device id of the on-board EEPROM as seen by the Xena I2C master. */
3565 #define S2IO_DEV_ID 5
/*
 * Read 4 bytes from the EEPROM at @off via the i2c_control register:
 * kick off the transaction, then poll (up to 5 iterations) for the
 * END flag and extract the data.  Returns 0 on success, -1 on failure
 * (per the kernel-doc above).
 */
3566 static int read_eeprom(nic_t * sp, int off, u32 * data)
3571 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3573 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3574 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3575 I2C_CONTROL_CNTL_START;
3576 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
/* Poll for transaction completion; loop body is truncated in this chunk. */
3578 while (exit_cnt < 5) {
3579 val64 = readq(&bar0->i2c_control);
3580 if (I2C_CONTROL_CNTL_END(val64)) {
3581 *data = I2C_CONTROL_GET_DATA(val64);
3593 * write_eeprom - actually writes the relevant part of the data value.
3594 * @sp : private member of the device structure, which is a pointer to the
3595 * s2io_nic structure.
3596 * @off : offset at which the data must be written
3597 * @data : The data that is to be written
3598 * @cnt : Number of bytes of the data that are actually to be written into
3599 * the Eeprom. (max of 3)
3601 * Actually writes the relevant part of the data value into the Eeprom
3602 * through the I2C bus.
3604 * 0 on success, -1 on failure.
/*
 * Write @cnt bytes (max 3, per the kernel-doc above) of @data to the
 * EEPROM at @off through the i2c_control register, then poll (up to 5
 * iterations) for completion and check the NACK bit.  Returns 0 on
 * success, -1 on failure.
 */
3607 static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3609 int exit_cnt = 0, ret = -1;
3611 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3613 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3614 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3615 I2C_CONTROL_CNTL_START;
3616 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
/* A NACK from the device indicates the write was rejected. */
3618 while (exit_cnt < 5) {
3619 val64 = readq(&bar0->i2c_control);
3620 if (I2C_CONTROL_CNTL_END(val64)) {
3621 if (!(val64 & I2C_CONTROL_NACK))
3633 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
3634 * @sp : private member of the device structure, which is a pointer to the
3634a * s2io_nic structure.
3635 * @eeprom : pointer to the user level structure provided by ethtool,
3636 * containing all relevant information.
3637 * @data_buf : user defined value to be written into Eeprom.
3638 * Description: Reads the values stored in the Eeprom at given offset
3639 * for a given length. Stores these values int the input argument data
3640 * buffer 'data_buf' and returns these to the caller (ethtool.)
/*
 * ethtool get_eeprom handler: clamp the request to the visible EEPROM
 * space, then read 4 bytes at a time into @data_buf.
 */
3645 static int s2io_ethtool_geeprom(struct net_device *dev,
3646 struct ethtool_eeprom *eeprom, u8 * data_buf)
3649 nic_t *sp = dev->priv;
/* Magic identifies the device: vendor id in the low 16 bits, device id high. */
3651 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
3653 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
3654 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
3656 for (i = 0; i < eeprom->len; i += 4) {
3657 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
3658 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
/* 'valid' presumably holds the byte-swapped 'data' — the conversion
 * lines are missing from this chunk; TODO confirm against full source. */
3662 memcpy((data_buf + i), &valid, 4);
3668 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
3669 * @sp : private member of the device structure, which is a pointer to the
3670 * s2io_nic structure.
3671 * @eeprom : pointer to the user level structure provided by ethtool,
3672 * containing all relevant information.
3673 * @data_buf : user defined value to be written into Eeprom.
3675 * Tries to write the user provided value in the Eeprom, at the offset
3676 * given by the user.
3678 * 0 on success, -EFAULT on failure.
/*
 * ethtool set_eeprom handler: verify the caller supplied the correct
 * magic (vendor|device) and then write the user buffer byte-by-byte
 * via write_eeprom().  Returns 0 on success, -EFAULT on failure (per
 * the kernel-doc above).
 */
3681 static int s2io_ethtool_seeprom(struct net_device *dev,
3682 struct ethtool_eeprom *eeprom,
3685 int len = eeprom->len, cnt = 0;
3686 u32 valid = 0, data;
3687 nic_t *sp = dev->priv;
/* Reject requests whose magic does not match this device. */
3689 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
3691 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
3692 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
/* Each user byte is positioned in the top byte of 'valid' before writing. */
3698 data = (u32) data_buf[cnt] & 0x000000FF;
3700 valid = (u32) (data << 24);
3704 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
3706 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
3708 "write into the specified offset\n");
3719 * s2io_register_test - reads and writes into all clock domains.
3720 * @sp : private member of the device structure, which is a pointer to the
3721 * s2io_nic structure.
3722 * @data : variable that returns the result of each of the test conducted b
3725 * Read and write into all clock domains. The NIC has 3 clock domains,
3726 * see that registers in all the three regions are accessible.
/*
 * Offline self-test: read four registers with known reset values (one
 * per clock domain, per the kernel-doc above) and verify two write/read
 * patterns on xmsi_data.  The fail-count updates and final return are
 * missing from this chunk.
 */
3731 static int s2io_register_test(nic_t * sp, uint64_t * data)
3733 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Known-value reads: each constant is the register's expected reset value. */
3737 val64 = readq(&bar0->pif_rd_swapper_fb);
3738 if (val64 != 0x123456789abcdefULL) {
3740 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
3743 val64 = readq(&bar0->rmac_pause_cfg);
3744 if (val64 != 0xc000ffff00000000ULL) {
3746 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
3749 val64 = readq(&bar0->rx_queue_cfg);
3750 if (val64 != 0x0808080808080808ULL) {
3752 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
3755 val64 = readq(&bar0->xgxs_efifo_cfg);
3756 if (val64 != 0x000000001923141EULL) {
3758 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
/* Write tests: alternating bit patterns must read back unchanged. */
3761 val64 = 0x5A5A5A5A5A5A5A5AULL;
3762 writeq(val64, &bar0->xmsi_data);
3763 val64 = readq(&bar0->xmsi_data);
3764 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
3766 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
3769 val64 = 0xA5A5A5A5A5A5A5A5ULL;
3770 writeq(val64, &bar0->xmsi_data);
3771 val64 = readq(&bar0->xmsi_data);
3772 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
3774 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
3782 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
3783 * @sp : private member of the device structure, which is a pointer to the
3784 * s2io_nic structure.
3785 * @data:variable that returns the result of each of the test conducted by
3788 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
/*
 * Offline self-test of the EEPROM: writes to offsets that must fail
 * (write-protected regions) are expected to return error, and writes to
 * writable offsets must read back the same value.  The failure-count
 * updates between checks are missing from this chunk.
 */
3794 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
3799 /* Write to a protected offset must fail -> !write_eeprom means bug. */
3800 if (!write_eeprom(sp, 0, 0, 3))
3803 /* Writable offset: write, read back, and compare. */
3804 if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
3806 if (read_eeprom(sp, 0x4F0, &ret_data))
3809 if (ret_data != 0x01234567)
3812 /* Reset the EEPROM data back to 0xFFFFFFFF */
3813 write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
3815 /* Test Write Request Error at offset 0x7c */
3816 if (!write_eeprom(sp, 0x07C, 0, 3))
3819 /* Test Write Request at offset 0x7fc */
3820 if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
3822 if (read_eeprom(sp, 0x7FC, &ret_data))
3825 if (ret_data != 0x01234567)
3828 /* Reset the EEPROM data back to 0xFFFFFFFF */
3829 write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
3831 /* Test Write Error at offset 0x80 */
3832 if (!write_eeprom(sp, 0x080, 0, 3))
3835 /* Test Write Error at offset 0xfc */
3836 if (!write_eeprom(sp, 0x0FC, 0, 3))
3839 /* Test Write Error at offset 0x100 */
3840 if (!write_eeprom(sp, 0x100, 0, 3))
3843 /* Test Write Error at offset 4ec */
3844 if (!write_eeprom(sp, 0x4EC, 0, 3))
3852 * s2io_bist_test - invokes the MemBist test of the card .
3853 * @sp : private member of the device structure, which is a pointer to the
3854 * s2io_nic structure.
3855 * @data:variable that returns the result of each of the test conducted by
3858 * This invokes the MemBist test of the card. We give around
3859 * 2 secs time for the Test to complete. If it's still not complete
3860 * within this period, we consider that the test failed.
3862 * 0 on success and -1 on failure.
/*
 * Offline self-test: start the card's PCI built-in self test (BIST) and
 * poll the BIST register until the START bit clears, returning the
 * completion code in *data.
 * NOTE(review): PCI_BIST is an 8-bit config register but the kick-off
 * uses pci_write_config_word() on a u8 value — likely should be
 * pci_write_config_byte(); confirm against the PCI spec before changing.
 */
3865 static int s2io_bist_test(nic_t * sp, uint64_t * data)
3868 int cnt = 0, ret = -1;
3870 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3871 bist |= PCI_BIST_START;
3872 pci_write_config_word(sp->pdev, PCI_BIST, bist);
/* Poll loop: hardware clears PCI_BIST_START when the test completes. */
3875 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3876 if (!(bist & PCI_BIST_START)) {
3877 *data = (bist & PCI_BIST_CODE_MASK);
3889 * s2io_link_test - verifies the link state of the nic
3890 * @sp : private member of the device structure, which is a pointer to the
3891 * s2io_nic structure.
3892 * @data: variable that returns the result of each of the test conducted by
3895 * The function verifies the link state of the NIC and updates the input
3896 * argument 'data' appropriately.
/*
 * Online self-test: sample adapter_status and report link state via
 * *data (the assignment lines are missing from this chunk).
 */
3901 static int s2io_link_test(nic_t * sp, uint64_t * data)
3903 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3906 val64 = readq(&bar0->adapter_status);
/* RMAC local-fault set => link is down. */
3907 if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
3914 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
3915 * @sp - private member of the device structure, which is a pointer to the
3916 * s2io_nic structure.
3917 * @data - variable that returns the result of each of the test
3918 * conducted by the driver.
3920 * This is one of the offline test that tests the read and write
3921 * access to the RldRam chip on the NIC.
/*
 * Offline self-test of the RLDRAM chip: disable ECC, enter RLDRAM test
 * mode, program three 64-bit test patterns plus an address mask, then
 * run a hardware write pass and a read(GO) pass, polling each for
 * TEST_DONE and finally checking TEST_PASS.  Two iterations are run,
 * the second with the pattern bits inverted (upper 48 bits XORed).
 */
3926 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
3928 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3930 int cnt, iteration = 0, test_pass = 0;
/* ECC must be off so test patterns are stored verbatim. */
3932 val64 = readq(&bar0->adapter_control);
3933 val64 &= ~ADAPTER_ECC_EN;
3934 writeq(val64, &bar0->adapter_control);
3936 val64 = readq(&bar0->mc_rldram_test_ctrl);
3937 val64 |= MC_RLDRAM_TEST_MODE;
3938 writeq(val64, &bar0->mc_rldram_test_ctrl);
3940 val64 = readq(&bar0->mc_rldram_mrs);
3941 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
3942 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3944 val64 |= MC_RLDRAM_MRS_ENABLE;
3945 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
/* Two passes: original patterns, then the complemented patterns. */
3947 while (iteration < 2) {
3948 val64 = 0x55555555aaaa0000ULL;
3949 if (iteration == 1) {
3950 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3952 writeq(val64, &bar0->mc_rldram_test_d0);
3954 val64 = 0xaaaa5a5555550000ULL;
3955 if (iteration == 1) {
3956 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3958 writeq(val64, &bar0->mc_rldram_test_d1);
3960 val64 = 0x55aaaaaaaa5a0000ULL;
3961 if (iteration == 1) {
3962 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3964 writeq(val64, &bar0->mc_rldram_test_d2);
3966 val64 = (u64) (0x0000003fffff0000ULL);
3967 writeq(val64, &bar0->mc_rldram_test_add);
/* Write pass: TEST_MODE first, then TEST_WRITE kicked off. */
3970 val64 = MC_RLDRAM_TEST_MODE;
3971 writeq(val64, &bar0->mc_rldram_test_ctrl);
3974 MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
3976 writeq(val64, &bar0->mc_rldram_test_ctrl);
3978 for (cnt = 0; cnt < 5; cnt++) {
3979 val64 = readq(&bar0->mc_rldram_test_ctrl);
3980 if (val64 & MC_RLDRAM_TEST_DONE)
/* Read pass: TEST_GO makes the hardware compare what it reads back. */
3988 val64 = MC_RLDRAM_TEST_MODE;
3989 writeq(val64, &bar0->mc_rldram_test_ctrl);
3991 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
3992 writeq(val64, &bar0->mc_rldram_test_ctrl);
3994 for (cnt = 0; cnt < 5; cnt++) {
3995 val64 = readq(&bar0->mc_rldram_test_ctrl);
3996 if (val64 & MC_RLDRAM_TEST_DONE)
4004 val64 = readq(&bar0->mc_rldram_test_ctrl);
4005 if (val64 & MC_RLDRAM_TEST_PASS)
4020 * s2io_ethtool_test - conducts 6 tests to determine the health of card.
4021 * @sp : private member of the device structure, which is a pointer to the
4022 * s2io_nic structure.
4023 * @ethtest : pointer to a ethtool command specific structure that will be
4024 * returned to the user.
4025 * @data : variable that returns the result of each of the test
4026 * conducted by the driver.
4028 * This function conducts 6 tests ( 4 offline and 2 online) to determine
4029 * the health of the card.
/*
 * ethtool self_test entry point: when ETH_TEST_FL_OFFLINE is requested
 * the interface is closed and the offline tests (register, RLDRAM,
 * EEPROM, BIST) are run; the link test runs online.  Results land in
 * data[0..4] in the order matching the driver's test-string table.
 */
4034 static void s2io_ethtool_test(struct net_device *dev,
4035 struct ethtool_test *ethtest,
4038 nic_t *sp = dev->priv;
/* Remember whether the interface was up so it can be restored later
 * (the restore path is missing from this chunk). */
4039 int orig_state = netif_running(sp->dev);
4041 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
4042 /* Offline Tests. */
4044 s2io_close(sp->dev);
4046 if (s2io_register_test(sp, &data[0]))
4047 ethtest->flags |= ETH_TEST_FL_FAILED;
4051 if (s2io_rldram_test(sp, &data[3]))
4052 ethtest->flags |= ETH_TEST_FL_FAILED;
4056 if (s2io_eeprom_test(sp, &data[1]))
4057 ethtest->flags |= ETH_TEST_FL_FAILED;
4059 if (s2io_bist_test(sp, &data[4]))
4060 ethtest->flags |= ETH_TEST_FL_FAILED;
4070 "%s: is not up, cannot run test\n",
/* Online portion: only the link test. */
4079 if (s2io_link_test(sp, &data[2]))
4080 ethtest->flags |= ETH_TEST_FL_FAILED;
/*
 * ethtool get_ethtool_stats handler: refresh hardware statistics via
 * s2io_updt_stats(), then copy the MAC counters into tmp_stats[] in the
 * exact order of the driver's ethtool_stats_keys table.  Counters are
 * stored little-endian by the hardware, hence the le32/le64_to_cpu
 * conversions; the final two entries are software-maintained ECC counts.
 */
4089 static void s2io_get_ethtool_stats(struct net_device *dev,
4090 struct ethtool_stats *estats,
4094 nic_t *sp = dev->priv;
4095 StatInfo_t *stat_info = sp->mac_control.stats_info;
4097 s2io_updt_stats(sp);
/* Tx MAC counters. */
4098 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_frms);
4099 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_data_octets);
4100 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
4101 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_mcst_frms);
4102 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_bcst_frms);
4103 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
4104 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_any_err_frms);
4105 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
4106 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_vld_ip);
4107 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_drop_ip);
4108 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_icmp);
4109 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_rst_tcp);
4110 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
4111 tmp_stats[i++] = le32_to_cpu(stat_info->tmac_udp);
/* Rx MAC counters. */
4112 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_frms);
4113 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_data_octets);
4114 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
4115 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
4116 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_mcst_frms);
4117 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_vld_bcst_frms);
4118 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
4119 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
4120 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
4121 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_discarded_frms);
4122 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_usized_frms);
4123 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_osized_frms);
4124 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_frag_frms);
4125 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_jabber_frms);
4126 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ip);
4127 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
4128 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
4129 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_drop_ip);
4130 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_icmp);
4131 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
4132 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_udp);
4133 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_drp_udp);
4134 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pause_cnt);
4135 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_accepted_ip);
4136 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
/* Software-maintained counters (native endianness, no conversion). */
4138 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
4139 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
/* ethtool get_regs_len: size of the Xena register space. */
4142 int s2io_ethtool_get_regs_len(struct net_device *dev)
4144 return (XENA_REG_SPACE);
/* ethtool get_rx_csum: report whether Rx checksum offload is enabled. */
4148 u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
4150 nic_t *sp = dev->priv;
4152 return (sp->rx_csum);
/* ethtool set_rx_csum: enable/disable Rx checksum offload (the body
 * assigning sp->rx_csum is missing from this chunk). */
4154 int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
4156 nic_t *sp = dev->priv;
/* ethtool get_eeprom_len: size of the visible EEPROM space. */
4165 int s2io_get_eeprom_len(struct net_device *dev)
4167 return (XENA_EEPROM_SPACE);
/* ethtool self_test_count: number of self-tests this driver runs. */
4170 int s2io_ethtool_self_test_count(struct net_device *dev)
4172 return (S2IO_TEST_LEN);
/*
 * ethtool get_strings handler: copy either the self-test names or the
 * statistics key names into @data depending on @stringset (the case
 * labels are missing from this chunk).
 * Fix: "&ethtool_stats_keys" had been mojibake-corrupted to
 * "ðtool_stats_keys" (the HTML entity "&eth" rendered as U+00F0),
 * which does not compile; restored the address-of expression.
 */
4174 void s2io_ethtool_get_strings(struct net_device *dev,
4175 u32 stringset, u8 * data)
4177 switch (stringset) {
4179 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
4182 memcpy(data, &ethtool_stats_keys,
4183 sizeof(ethtool_stats_keys));
/* ethtool get_stats_count: number of entries reported by
 * s2io_get_ethtool_stats(). */
4186 static int s2io_ethtool_get_stats_count(struct net_device *dev)
4188 return (S2IO_STAT_LEN);
/* ethtool set_tx_csum: toggle IP checksum offload on the net_device
 * feature flags (the if/else selecting the branch is missing here). */
4191 int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
4194 dev->features |= NETIF_F_IP_CSUM;
4196 dev->features &= ~NETIF_F_IP_CSUM;
/*
 * ethtool operations table wired into the net_device; generic
 * ethtool_op_* helpers are used where the kernel's defaults suffice.
 */
4202 static struct ethtool_ops netdev_ethtool_ops = {
4203 .get_settings = s2io_ethtool_gset,
4204 .set_settings = s2io_ethtool_sset,
4205 .get_drvinfo = s2io_ethtool_gdrvinfo,
4206 .get_regs_len = s2io_ethtool_get_regs_len,
4207 .get_regs = s2io_ethtool_gregs,
4208 .get_link = ethtool_op_get_link,
4209 .get_eeprom_len = s2io_get_eeprom_len,
4210 .get_eeprom = s2io_ethtool_geeprom,
4211 .set_eeprom = s2io_ethtool_seeprom,
4212 .get_pauseparam = s2io_ethtool_getpause_data,
4213 .set_pauseparam = s2io_ethtool_setpause_data,
4214 .get_rx_csum = s2io_ethtool_get_rx_csum,
4215 .set_rx_csum = s2io_ethtool_set_rx_csum,
4216 .get_tx_csum = ethtool_op_get_tx_csum,
4217 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
4218 .get_sg = ethtool_op_get_sg,
4219 .set_sg = ethtool_op_set_sg,
4221 .get_tso = ethtool_op_get_tso,
4222 .set_tso = ethtool_op_set_tso,
4224 .self_test_count = s2io_ethtool_self_test_count,
4225 .self_test = s2io_ethtool_test,
4226 .get_strings = s2io_ethtool_get_strings,
4227 .phys_id = s2io_ethtool_idnic,
4228 .get_stats_count = s2io_ethtool_get_stats_count,
4229 .get_ethtool_stats = s2io_get_ethtool_stats
4233 * s2io_ioctl - Entry point for the Ioctl
4234 * @dev : Device pointer.
4235 * @ifr : An IOCTL specefic structure, that can contain a pointer to
4236 * a proprietary structure used to pass information to the driver.
4237 * @cmd : This is used to distinguish between the different commands that
4238 * can be passed to the IOCTL functions.
4240 * Currently there is no special functionality supported in IOCTL, hence
4241 * this function always returns -EOPNOTSUPP.
/* No private ioctls are implemented; the body (missing from this chunk)
 * simply rejects every command. */
4244 int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4250 * s2io_change_mtu - entry point to change MTU size for the device.
4251 * @dev : device pointer.
4252 * @new_mtu : the new MTU size for the device.
4253 * Description: A driver entry point to change MTU size for the device.
4254 * Before changing the MTU the device must be stopped.
4256 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/*
 * ndo change_mtu: validate the requested size, then either bounce the
 * interface (down/up cycle, if running) or program the new maximum
 * payload length directly into rmac_max_pyld_len (if down).
 */
4260 int s2io_change_mtu(struct net_device *dev, int new_mtu)
4262 nic_t *sp = dev->priv;
4264 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4265 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
/* Running interface: stop the queue and re-initialise the card so the
 * new MTU takes effect (the s2io_card_down call is missing here). */
4271 if (netif_running(dev)) {
4273 netif_stop_queue(dev);
4274 if (s2io_card_up(sp)) {
4275 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4278 if (netif_queue_stopped(dev))
4279 netif_wake_queue(dev);
4280 } else { /* Device is down */
4281 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4282 u64 val64 = new_mtu;
/* Program the MTU into bits 2..15 of the register via vBIT(). */
4284 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4291 * s2io_tasklet - Bottom half of the ISR.
4292 * @dev_adr : address of the device structure in dma_addr_t format.
4294 * This is the tasklet or the bottom half of the ISR. This is
4295 * an extension of the ISR which is scheduled by the scheduler to be run
4296 * when the load on the CPU is low. All low priority tasks of the ISR can
4297 * be pushed into the tasklet. For now the tasklet is used only to
4298 * replenish the Rx buffers in the Rx buffer descriptors.
/*
 * Bottom half of the ISR (see the kernel-doc above): replenishes Rx
 * buffers on every ring.  TASKLET_IN_USE guards against re-entry and
 * the matching bit is cleared on exit.
 */
4303 static void s2io_tasklet(unsigned long dev_addr)
4305 struct net_device *dev = (struct net_device *) dev_addr;
4306 nic_t *sp = dev->priv;
4308 mac_info_t *mac_control;
4309 struct config_param *config;
4311 mac_control = &sp->mac_control;
4312 config = &sp->config;
/* Only one instance of the tasklet may refill at a time. */
4314 if (!TASKLET_IN_USE) {
4315 for (i = 0; i < config->rx_ring_num; i++) {
4316 ret = fill_rx_buffers(sp, i);
4317 if (ret == -ENOMEM) {
4318 DBG_PRINT(ERR_DBG, "%s: Out of ",
4320 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
/* -EFILL: ring already full, nothing to do for it. */
4322 } else if (ret == -EFILL) {
4324 "%s: Rx Ring %d is full\n",
4329 clear_bit(0, (&sp->tasklet_status));
4334 * s2io_set_link - Set the link status
4335 * @data: long pointer to device private structure
4336 * Description: Sets the link status for the adapter
/*
 * Deferred link-state handler: once the adapter is quiescent, enable the
 * adapter and LED on link-up (with a GPIO workaround for cards whose
 * link indicators are faulty) and propagate the state via s2io_link().
 * Bit 0 of nic->link_state serialises this against a concurrent reset.
 */
4339 static void s2io_set_link(unsigned long data)
4341 nic_t *nic = (nic_t *) data;
4342 struct net_device *dev = nic->dev;
4343 XENA_dev_config_t __iomem *bar0 = nic->bar0;
/* If the bit was already set, a reset is in progress - bail out. */
4347 if (test_and_set_bit(0, &(nic->link_state))) {
4348 /* The card is being reset, no point doing anything */
4352 subid = nic->pdev->subsystem_device;
4354 * Allow a small delay for the NICs self initiated
4355 * cleanup to complete.
4359 val64 = readq(&bar0->adapter_status);
4360 if (verify_xena_quiescence(nic, val64, nic->device_enabled_once)) {
4361 if (LINK_IS_UP(val64)) {
4362 val64 = readq(&bar0->adapter_control);
4363 val64 |= ADAPTER_CNTL_EN;
4364 writeq(val64, &bar0->adapter_control);
/* Faulty-LED cards: drive the link LED manually via GPIO 0. */
4365 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4366 val64 = readq(&bar0->gpio_control);
4367 val64 |= GPIO_CTRL_GPIO_0;
4368 writeq(val64, &bar0->gpio_control);
4369 val64 = readq(&bar0->gpio_control);
4371 val64 |= ADAPTER_LED_ON;
4372 writeq(val64, &bar0->adapter_control);
/* Re-check: the link may have dropped while we enabled the device. */
4374 val64 = readq(&bar0->adapter_status);
4375 if (!LINK_IS_UP(val64)) {
4376 DBG_PRINT(ERR_DBG, "%s:", dev->name);
4377 DBG_PRINT(ERR_DBG, " Link down");
4378 DBG_PRINT(ERR_DBG, "after ");
4379 DBG_PRINT(ERR_DBG, "enabling ");
4380 DBG_PRINT(ERR_DBG, "device \n");
4382 if (nic->device_enabled_once == FALSE) {
4383 nic->device_enabled_once = TRUE;
4385 s2io_link(nic, LINK_UP);
4387 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4388 val64 = readq(&bar0->gpio_control);
4389 val64 &= ~GPIO_CTRL_GPIO_0;
4390 writeq(val64, &bar0->gpio_control);
4391 val64 = readq(&bar0->gpio_control);
4393 s2io_link(nic, LINK_DOWN);
4395 } else { /* NIC is not Quiescent. */
4396 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4397 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4398 netif_stop_queue(dev);
4400 clear_bit(0, &(nic->link_state));
/*
 * Bring the card down: stop the alarm timer, serialise against
 * s2io_set_link via link_state bit 0, mark the card DOWN, kill the
 * tasklet, wait for quiescence and for in-flight interrupt handlers,
 * then free all Tx/Rx buffers under their respective locks.
 */
4403 static void s2io_card_down(nic_t * sp)
4406 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4407 unsigned long flags;
4408 register u64 val64 = 0;
4410 del_timer_sync(&sp->alarm_timer);
4411 /* If s2io_set_link task is executing, wait till it completes. */
4412 while (test_and_set_bit(0, &(sp->link_state))) {
4415 atomic_set(&sp->card_state, CARD_DOWN);
4417 /* disable Tx and Rx traffic on the NIC */
4421 tasklet_kill(&sp->task);
4423 /* Check if the device is Quiescent and then Reset the NIC */
4425 val64 = readq(&bar0->adapter_status);
4426 if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
4434 "s2io_close:Device not Quiescent ");
4435 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
4436 (unsigned long long) val64);
4442 /* Waiting till all Interrupt handlers are complete */
4446 if (!atomic_read(&sp->isr_cnt))
/* Buffers are torn down with the queue locks held to exclude the
 * datapath. */
4451 spin_lock_irqsave(&sp->tx_lock, flags);
4452 /* Free all Tx buffers */
4453 free_tx_buffers(sp);
4454 spin_unlock_irqrestore(&sp->tx_lock, flags);
4456 /* Free all Rx buffers */
4457 spin_lock_irqsave(&sp->rx_lock, flags);
4458 free_rx_buffers(sp);
4459 spin_unlock_irqrestore(&sp->rx_lock, flags);
/* Release the link_state bit taken at entry. */
4461 clear_bit(0, &(sp->link_state));
/*
 * Bring the card up: initialise the hardware registers, pre-fill every
 * Rx ring, set the receive mode, arm the replenish tasklet, start the
 * NIC, schedule the alarm timer, and mark the card UP.  Error paths
 * undo the allocations performed so far.
 */
4464 static int s2io_card_up(nic_t * sp)
4467 mac_info_t *mac_control;
4468 struct config_param *config;
4469 struct net_device *dev = (struct net_device *) sp->dev;
4471 /* Initialize the H/W I/O registers */
4472 if (init_nic(sp) != 0) {
4473 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4479 * Initializing the Rx buffers. For now we are considering only 1
4480 * Rx ring and initializing buffers into 30 Rx blocks
4482 mac_control = &sp->mac_control;
4483 config = &sp->config;
4485 for (i = 0; i < config->rx_ring_num; i++) {
4486 if ((ret = fill_rx_buffers(sp, i))) {
4487 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
/* Partial fill: release whatever was allocated before failing. */
4490 free_rx_buffers(sp);
4493 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4494 atomic_read(&sp->rx_bufs_left[i]));
4497 /* Setting its receive mode */
4498 s2io_set_multicast(dev);
4500 /* Enable tasklet for the device */
4501 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4503 /* Enable Rx Traffic and interrupts on the NIC */
4504 if (start_nic(sp)) {
4505 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4506 tasklet_kill(&sp->task);
4508 free_irq(dev->irq, dev);
4509 free_rx_buffers(sp);
/* Alarm handler polls the card every half second. */
4513 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
4515 atomic_set(&sp->card_state, CARD_UP);
4520 * s2io_restart_nic - Resets the NIC.
4521 * @data : long pointer to the device private structure
4523 * This function is scheduled to be run by the s2io_tx_watchdog
4524 * function after 0.5 secs to reset the NIC. The idea is to reduce
4525 * the run time of the watch dog routine which is run holding a
/*
 * Work item scheduled by s2io_tx_watchdog (see kernel-doc above):
 * re-initialise the card and wake the Tx queue.  The preceding
 * s2io_card_down call is missing from this chunk.
 */
4529 static void s2io_restart_nic(unsigned long data)
4531 struct net_device *dev = (struct net_device *) data;
4532 nic_t *sp = dev->priv;
4535 if (s2io_card_up(sp)) {
4536 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
4539 netif_wake_queue(dev);
4540 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4546 * s2io_tx_watchdog - Watchdog for transmit side.
4547 * @dev : Pointer to net device structure
4549 * This function is triggered if the Tx Queue is stopped
4550 * for a pre-defined amount of time when the Interface is still up.
4551 * If the Interface is jammed in such a situation, the hardware is
4552 * reset (by s2io_close) and restarted again (by s2io_open) to
4553 * overcome any problem that might have been caused in the hardware.
/*
 * ndo tx_timeout handler: if the carrier is still up, defer the heavy
 * reset work (s2io_restart_nic) to process context via schedule_work().
 */
4558 static void s2io_tx_watchdog(struct net_device *dev)
4560 nic_t *sp = dev->priv;
4562 if (netif_carrier_ok(dev)) {
4563 schedule_work(&sp->rst_timer_task);
4568 * rx_osm_handler - To perform some OS related operations on SKB.
4569 * @sp: private member of the device structure,pointer to s2io_nic structure.
4570 * @skb : the socket buffer pointer.
4571 * @len : length of the packet
4572 * @cksum : FCS checksum of the frame.
4573 * @ring_no : the ring from which this RxD was extracted.
4575 * This function is called by the Rx interrupt service routine to perform
4576 * some OS related operations on the SKB before passing it to the upper
4577 * layers. It mainly checks if the checksum is OK, if so adds it to the
4578 * SKBs cksum variable, increments the Rx packet count and passes the SKB
4579 * to the upper layer. If the checksum is wrong, it increments the Rx
4580 * packet error count, frees the SKB and returns error.
4582 * SUCCESS on success and -1 on failure.
/*
 * Hand a received frame (described by RxD @rxdp) to the network stack:
 * check the T_CODE error field, update stats, reassemble the two-buffer
 * layout when CONFIG_2BUFF_MODE is set, set ip_summed from the
 * hardware L3/L4 checksum flags, and deliver via netif_receive_skb()
 * (NAPI) or the non-NAPI path (missing from this chunk).
 * NOTE(review): the 'len' initialiser at 4600 ends in a stray double
 * semicolon - harmless, but should be cleaned up in the full source.
 */
4584 static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
4586 nic_t *sp = ring_data->nic;
4587 struct net_device *dev = (struct net_device *) sp->dev;
/* The skb pointer was stashed in the descriptor's Host_Control field. */
4588 struct sk_buff *skb = (struct sk_buff *)
4589 ((unsigned long) rxdp->Host_Control);
4590 int ring_no = ring_data->ring_no;
4591 u16 l3_csum, l4_csum;
4592 #ifdef CONFIG_2BUFF_MODE
4593 int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
4594 int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
4595 int get_block = ring_data->rx_curr_get_info.block_index;
4596 int get_off = ring_data->rx_curr_get_info.offset;
4597 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
4598 unsigned char *buff;
4600 u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);;
/* Non-zero T_CODE => hardware flagged an error on this frame. */
4603 if (rxdp->Control_1 & RXD_T_CODE) {
4604 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
4605 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
4608 sp->stats.rx_crc_errors++;
4609 atomic_dec(&sp->rx_bufs_left[ring_no]);
4610 rxdp->Host_Control = 0;
4614 /* Updating statistics */
4615 rxdp->Host_Control = 0;
4617 sp->stats.rx_packets++;
4618 #ifndef CONFIG_2BUFF_MODE
4619 sp->stats.rx_bytes += len;
4621 sp->stats.rx_bytes += buf0_len + buf2_len;
4624 #ifndef CONFIG_2BUFF_MODE
/* 2-buffer mode: prepend the header buffer, then extend over buffer 2. */
4627 buff = skb_push(skb, buf0_len);
4628 memcpy(buff, ba->ba_0, buf0_len);
4629 skb_put(skb, buf2_len);
4632 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
4634 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
4635 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
4636 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
4638 * NIC verifies if the Checksum of the received
4639 * frame is Ok or not and accordingly returns
4640 * a flag in the RxD.
4642 skb->ip_summed = CHECKSUM_UNNECESSARY;
4645 * Packet with erroneous checksum, let the
4646 * upper layers deal with it.
4648 skb->ip_summed = CHECKSUM_NONE;
4651 skb->ip_summed = CHECKSUM_NONE;
4654 skb->protocol = eth_type_trans(skb, dev);
4655 #ifdef CONFIG_S2IO_NAPI
4656 netif_receive_skb(skb);
4660 dev->last_rx = jiffies;
4661 atomic_dec(&sp->rx_bufs_left[ring_no]);
4666 * s2io_link - stops/starts the Tx queue.
4667 * @sp : private member of the device structure, which is a pointer to the
4668 * s2io_nic structure.
4669 * @link : indicates whether link is UP/DOWN.
4671 * This function stops/starts the Tx queue depending on whether the link
4672 * status of the NIC is down or up. This is called by the Alarm
4673 * interrupt handler whenever a link change interrupt comes up.
/*
 * Propagate a link state change: toggles the carrier flag (and, per the
 * kernel-doc above, the Tx queue) only when the state actually changed,
 * and remembers the new state in last_link_state.
 */
4678 void s2io_link(nic_t * sp, int link)
4680 struct net_device *dev = (struct net_device *) sp->dev;
/* Ignore redundant notifications for the current state. */
4682 if (link != sp->last_link_state) {
4683 if (link == LINK_DOWN) {
4684 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
4685 netif_carrier_off(dev);
4687 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
4688 netif_carrier_on(dev);
4691 sp->last_link_state = link;
4695 * get_xena_rev_id - to identify revision ID of xena.
4696 * @pdev : PCI Dev structure
4698 * Function to identify the Revision ID of xena.
4700 * returns the revision ID of the device.
/* Read the PCI revision ID from config space; the return of 'id' is
 * missing from this chunk. */
4703 int get_xena_rev_id(struct pci_dev *pdev)
4707 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
4712 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
4713 * @sp : private member of the device structure, which is a pointer to the
4714 * s2io_nic structure.
4716 * This function initializes a few of the PCI and PCI-X configuration registers
4717 * with recommended values.
/*
 * Program recommended PCI/PCI-X config-space settings: enable data
 * parity error recovery, set PERR response, and disable relaxed
 * ordering.  Each write is followed by a read-back of the register.
 */
4722 static void s2io_init_pci(nic_t * sp)
4724 u16 pci_cmd = 0, pcix_cmd = 0;
4726 /* Enable Data Parity Error Recovery in PCI-X command register. */
4727 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4729 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4731 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4734 /* Set the PErr Response bit in PCI command register. */
4735 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4736 pci_write_config_word(sp->pdev, PCI_COMMAND,
4737 (pci_cmd | PCI_COMMAND_PARITY));
4738 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4740 /* Forcibly disabling relaxed ordering capability of the card. */
4742 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4744 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
/* Module metadata and loadable parameters (see the header comment at the
 * top of the file for a description of each parameter).
 * NOTE(review): use_continuous_tx_intrs passes '1' as the sysfs
 * permissions argument, unlike every other parameter's '0' - confirm
 * intent against module_param() documentation. */
4748 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
4749 MODULE_LICENSE("GPL");
4750 module_param(tx_fifo_num, int, 0);
4751 module_param(rx_ring_num, int, 0);
4752 module_param_array(tx_fifo_len, uint, NULL, 0);
4753 module_param_array(rx_ring_sz, uint, NULL, 0);
4754 module_param_array(rts_frm_len, uint, NULL, 0);
4755 module_param(use_continuous_tx_intrs, int, 1);
4756 module_param(rmac_pause_time, int, 0);
4757 module_param(mc_pause_threshold_q0q3, int, 0);
4758 module_param(mc_pause_threshold_q4q7, int, 0);
4759 module_param(shared_splits, int, 0);
4760 module_param(tmac_util_period, int, 0);
4761 module_param(rmac_util_period, int, 0);
/* indicate_max_pkts only applies to the non-NAPI receive path. */
4762 #ifndef CONFIG_S2IO_NAPI
4763 module_param(indicate_max_pkts, int, 0);
4767 * s2io_init_nic - Initialization of the adapter .
4768 * @pdev : structure containing the PCI related information of the device.
4769 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
4771 * The function initializes an adapter identified by the pci_dev structure.
4772 * All OS related initialization including memory and device structure and
4773 * initialization of the device private variable is done. Also the swapper
4774 * control register is initialized to enable read and write into the I/O
4775 * registers of the device.
4777 * returns 0 on success and negative on failure.
4780 static int __devinit
4781 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
/* NOTE(review): this excerpt is missing lines from the original file
 * (braces, error-path labels and return statements are not all
 * visible), so the comments below annotate only what is shown. */
/* Locals: the net_device being created, the DMA-capability flag,
 * scratch values for MAC-address extraction, and shortcuts into the
 * private nic_t (mac_control/config). */
4784 struct net_device *dev;
4786 int dma_flag = FALSE;
4787 u32 mac_up, mac_down;
4788 u64 val64 = 0, tmp64 = 0;
4789 XENA_dev_config_t __iomem *bar0 = NULL;
4791 mac_info_t *mac_control;
4792 struct config_param *config;
4794 #ifdef CONFIG_S2IO_NAPI
4795 DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
/* Enable the PCI device; bail out if the PCI core fails. */
4798 if ((ret = pci_enable_device(pdev))) {
4800 "s2io_init_nic: pci_enable_device failed\n");
/* Prefer a 64-bit DMA mask (PCI-X card); fall back to 32-bit, and
 * disable the device again if neither mask can be set. */
4804 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
4805 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
4807 if (pci_set_consistent_dma_mask
4808 (pdev, DMA_64BIT_MASK)) {
/* NOTE(review): the backslash continuation below embeds the next
 * line's leading whitespace inside the message string. */
4810 "Unable to obtain 64bit DMA for \
4811 consistent allocations\n");
4812 pci_disable_device(pdev);
4815 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
4816 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
4818 pci_disable_device(pdev);
/* Claim the BAR ranges for this driver before mapping them. */
4822 if (pci_request_regions(pdev, s2io_driver_name)) {
/* NOTE(review): the trailing comma (comma operator) fuses this with
 * the next call into one statement; behavior matches a semicolon,
 * but it reads like a typo. */
4823 DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
4824 pci_disable_device(pdev);
/* Allocate the net_device with a nic_t-sized private area. */
4828 dev = alloc_etherdev(sizeof(nic_t));
4830 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
4831 pci_disable_device(pdev);
4832 pci_release_regions(pdev);
4836 pci_set_master(pdev);
4837 pci_set_drvdata(pdev, dev);
4838 SET_MODULE_OWNER(dev);
4839 SET_NETDEV_DEV(dev, &pdev->dev);
4841 /* Private member variable initialized to s2io NIC structure */
4843 memset(sp, 0, sizeof(nic_t));
4846 sp->high_dma_flag = dma_flag;
4847 sp->device_enabled_once = FALSE;
4849 /* Initialize some PCI/PCI-X fields of the NIC. */
4853 * Setting the device configuration parameters.
4854 * Most of these parameters can be specified by the user during
4855 * module insertion as they are module loadable parameters. If
4856 * these parameters are not specified during load time, they
4857 * are initialized with default values.
4859 mac_control = &sp->mac_control;
4860 config = &sp->config;
4862 /* Tx side parameters. */
4863 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
4864 config->tx_fifo_num = tx_fifo_num;
4865 for (i = 0; i < MAX_TX_FIFOS; i++) {
4866 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
4867 config->tx_cfg[i].fifo_priority = i;
4870 /* mapping the QoS priority to the configured fifos */
4871 for (i = 0; i < MAX_TX_FIFOS; i++)
4872 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
/* Utilization-based Tx interrupts by default; switch to per-list
 * interrupts when any configured FIFO is shorter than 65 TxDs. */
4874 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
4875 for (i = 0; i < config->tx_fifo_num; i++) {
4876 config->tx_cfg[i].f_no_snoop =
4877 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
4878 if (config->tx_cfg[i].fifo_len < 65) {
4879 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
4883 config->max_txds = MAX_SKB_FRAGS;
4885 /* Rx side parameters. */
4886 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
4887 config->rx_ring_num = rx_ring_num;
/* Ring sizes are given in blocks; each block carries
 * MAX_RXDS_PER_BLOCK + 1 descriptors. */
4888 for (i = 0; i < MAX_RX_RINGS; i++) {
4889 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
4890 (MAX_RXDS_PER_BLOCK + 1);
4891 config->rx_cfg[i].ring_priority = i;
4894 for (i = 0; i < rx_ring_num; i++) {
4895 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
4896 config->rx_cfg[i].f_no_snoop =
4897 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
4900 /* Setting Mac Control parameters */
4901 mac_control->rmac_pause_time = rmac_pause_time;
4902 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
4903 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
4906 /* Initialize Ring buffer parameters. */
4907 for (i = 0; i < config->rx_ring_num; i++)
4908 atomic_set(&sp->rx_bufs_left[i], 0);
4910 /* Initialize the number of ISRs currently running */
4911 atomic_set(&sp->isr_cnt, 0);
4913 /* initialize the shared memory used by the NIC and the host */
4914 if (init_shared_mem(sp)) {
4915 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
4918 goto mem_alloc_failed;
/* Map BAR0 (device register block) and BAR1 (Tx FIFO windows). */
4921 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
4922 pci_resource_len(pdev, 0));
4924 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
4927 goto bar0_remap_failed;
4930 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
4931 pci_resource_len(pdev, 2));
4933 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
4936 goto bar1_remap_failed;
4939 dev->irq = pdev->irq;
4940 dev->base_addr = (unsigned long) sp->bar0;
4942 /* Initializing the BAR1 address as the start of the FIFO pointer. */
/* Each FIFO's window is 0x20000 (128 KiB) apart within BAR1. */
4943 for (j = 0; j < MAX_TX_FIFOS; j++) {
4944 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
4945 (sp->bar1 + (j * 0x00020000));
4948 /* Driver entry points */
4949 dev->open = &s2io_open;
4950 dev->stop = &s2io_close;
4951 dev->hard_start_xmit = &s2io_xmit;
4952 dev->get_stats = &s2io_get_stats;
4953 dev->set_multicast_list = &s2io_set_multicast;
4954 dev->do_ioctl = &s2io_ioctl;
4955 dev->change_mtu = &s2io_change_mtu;
4956 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
4959 * will use eth_mac_addr() for dev->set_mac_address
4960 * mac address will be set every time dev->open() is called
4962 #if defined(CONFIG_S2IO_NAPI)
4963 dev->poll = s2io_poll;
/* Offload capabilities: scatter/gather and IP checksum always;
 * HIGHDMA only when the 64-bit DMA mask was granted; plus TSO. */
4967 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
4968 if (sp->high_dma_flag == TRUE)
4969 dev->features |= NETIF_F_HIGHDMA;
4971 dev->features |= NETIF_F_TSO;
4974 dev->tx_timeout = &s2io_tx_watchdog;
4975 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
/* Deferred work: NIC restart after a fault, and link-state updates,
 * both run later from process context. */
4976 INIT_WORK(&sp->rst_timer_task,
4977 (void (*)(void *)) s2io_restart_nic, dev);
4978 INIT_WORK(&sp->set_link_task,
4979 (void (*)(void *)) s2io_set_link, sp);
4981 pci_save_state(sp->pdev);
4983 /* Setting swapper control on the NIC, for proper reset operation */
4984 if (s2io_set_swapper(sp)) {
4985 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
4988 goto set_swap_failed;
4992 * Fix for all "FFs" MAC address problems observed on
4995 fix_mac_address(sp);
4999 * MAC address initialization.
5000 * For now only one mac address will be read and used.
/* Issue a MAC-address read command to the RMAC and wait for it to
 * complete before sampling the data register. */
5003 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
5004 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
5005 writeq(val64, &bar0->rmac_addr_cmd_mem);
5006 wait_for_cmd_complete(sp);
5008 tmp64 = readq(&bar0->rmac_addr_data0_mem);
5009 mac_down = (u32) tmp64;
5010 mac_up = (u32) (tmp64 >> 32);
/* NOTE(review): sizeof(ETH_ALEN) is sizeof(int), not ETH_ALEN (6),
 * so only 4 of the 6 address bytes are cleared; plain ETH_ALEN was
 * presumably intended.  Harmless here because all six bytes are
 * assigned immediately below. */
5012 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
/* Unscramble the 64-bit register value into wire byte order. */
5014 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
5015 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
5016 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
5017 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
5018 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
5019 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
5022 "DEFAULT MAC ADDR:0x%02x-%02x-%02x-%02x-%02x-%02x\n",
5023 sp->def_mac_addr[0].mac_addr[0],
5024 sp->def_mac_addr[0].mac_addr[1],
5025 sp->def_mac_addr[0].mac_addr[2],
5026 sp->def_mac_addr[0].mac_addr[3],
5027 sp->def_mac_addr[0].mac_addr[4],
5028 sp->def_mac_addr[0].mac_addr[5]);
5030 /* Set the factory defined MAC address initially */
5031 dev->addr_len = ETH_ALEN;
5032 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
5035 * Initialize the tasklet status and link state flags
5036 * and the card state parameter
5038 atomic_set(&(sp->card_state), 0);
5039 sp->tasklet_status = 0;
5042 /* Initialize spinlocks */
5043 spin_lock_init(&sp->tx_lock);
5044 #ifndef CONFIG_S2IO_NAPI
5045 spin_lock_init(&sp->put_lock);
5047 spin_lock_init(&sp->rx_lock);
5050 * SXE-002: Configure link and activity LED to init state
/* Errata workaround: applies only when the low byte of the PCI
 * subsystem ID is >= 0x07; pokes gpio_control and a raw register at
 * BAR0 + 0x2700. */
5053 subid = sp->pdev->subsystem_device;
5054 if ((subid & 0xFF) >= 0x07) {
5055 val64 = readq(&bar0->gpio_control);
5056 val64 |= 0x0000800000000000ULL;
5057 writeq(val64, &bar0->gpio_control);
5058 val64 = 0x0411040400000000ULL;
5059 writeq(val64, (void __iomem *) bar0 + 0x2700);
5060 val64 = readq(&bar0->gpio_control);
5063 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
5065 if (register_netdev(dev)) {
5066 DBG_PRINT(ERR_DBG, "Device registration failed\n");
5068 goto register_failed;
5071 /* Initialize device name */
5072 strcpy(sp->name, dev->name);
5073 strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
5076 * Make Link state as off at this point, when the Link change
5077 * interrupt comes the state will be automatically changed to
5080 netif_carrier_off(dev);
/* Error unwind: free shared memory, then release the PCI device,
 * its regions and the drvdata pointer.  (The goto labels themselves
 * are not visible in this excerpt.) */
5091 free_shared_mem(sp);
5092 pci_disable_device(pdev);
5093 pci_release_regions(pdev);
5094 pci_set_drvdata(pdev, NULL);
5101 * s2io_rem_nic - Free the PCI device
5102 * @pdev: structure containing the PCI related information of the device.
5103 * Description: This function is called by the PCI subsystem to release a
5104 * PCI device and free up all resource held up by the device. This could
5105 * be in response to a Hot plug event or when the driver is to be removed
5109 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
/* PCI remove callback (see kernel-doc above): unregisters the netdev
 * and releases the resources acquired in s2io_init_nic.  Logs an
 * error if the drvdata pointer is unexpectedly NULL.  NOTE(review):
 * several lines (braces, the NULL check, local declarations) are not
 * visible in this excerpt. */
5111 struct net_device *dev =
5112 (struct net_device *) pci_get_drvdata(pdev);
5116 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
/* Tear down in reverse order of initialization. */
5121 unregister_netdev(dev);
5123 free_shared_mem(sp);
5126 pci_disable_device(pdev);
5127 pci_release_regions(pdev);
5128 pci_set_drvdata(pdev, NULL);
5133 * s2io_starter - Entry point for the driver
5134 * Description: This function is the entry point for the driver. It verifies
5135 * the module loadable parameters and initializes PCI configuration space.
5138 int __init s2io_starter(void)
/* Module entry point: registers s2io_driver with the PCI core and
 * returns its status (0 on success, negative errno on failure). */
5140 return pci_module_init(&s2io_driver);
5144 * s2io_closer - Cleanup routine for the driver
5145 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
5148 void s2io_closer(void)
/* Module exit routine: unregisters the PCI driver and logs that the
 * cleanup finished. */
5150 pci_unregister_driver(&s2io_driver);
5151 DBG_PRINT(INIT_DBG, "cleanup done\n");
/* Wire the entry/exit routines into the module load/unload hooks. */
5154 module_init(s2io_starter);
5155 module_exit(s2io_closer);