1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
30 * rx_ring_num : This can be used to program the number of receive rings used
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35 * values are 1, 2 and 3.
36 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 1(MSI), 2(MSI_X). Default value is '0(INTA)'
41 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines maximum number of packets can be
44 * aggregated as a single large packet
45 ************************************************************************/
47 #include <linux/module.h>
48 #include <linux/types.h>
49 #include <linux/errno.h>
50 #include <linux/ioport.h>
51 #include <linux/pci.h>
52 #include <linux/dma-mapping.h>
53 #include <linux/kernel.h>
54 #include <linux/netdevice.h>
55 #include <linux/etherdevice.h>
56 #include <linux/skbuff.h>
57 #include <linux/init.h>
58 #include <linux/delay.h>
59 #include <linux/stddef.h>
60 #include <linux/ioctl.h>
61 #include <linux/timex.h>
62 #include <linux/sched.h>
63 #include <linux/ethtool.h>
64 #include <linux/workqueue.h>
65 #include <linux/if_vlan.h>
67 #include <linux/tcp.h>
70 #include <asm/system.h>
71 #include <asm/uaccess.h>
73 #include <asm/div64.h>
78 #include "s2io-regs.h"
80 #define DRV_VERSION "2.0.15.2"
82 /* S2io Driver name & version. */
83 static char s2io_driver_name[] = "Neterion";
84 static char s2io_driver_version[] = DRV_VERSION;
86 static int rxd_size[4] = {32,48,48,64};
87 static int rxd_count[4] = {127,85,85,63};
89 static inline int RXD_IS_UP2DT(RxD_t *rxdp)
93 ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
94 (GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
100 * Cards with following subsystem_id have a link state indication
101 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
102 * macro below identifies these cards given the subsystem_id.
104 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
105 (dev_type == XFRAME_I_DEVICE) ? \
106 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
107 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
109 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
110 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
111 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
114 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
116 mac_info_t *mac_control;
118 mac_control = &sp->mac_control;
119 if (rxb_size <= rxd_count[sp->rxd_mode])
121 else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
126 /* Ethtool related variables and Macros. */
127 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
128 "Register test\t(offline)",
129 "Eeprom test\t(offline)",
130 "Link test\t(online)",
131 "RLDRAM test\t(offline)",
132 "BIST Test\t(offline)"
135 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
137 {"tmac_data_octets"},
141 {"tmac_pause_ctrl_frms"},
145 {"tmac_any_err_frms"},
146 {"tmac_ttl_less_fb_octets"},
147 {"tmac_vld_ip_octets"},
155 {"rmac_data_octets"},
156 {"rmac_fcs_err_frms"},
158 {"rmac_vld_mcst_frms"},
159 {"rmac_vld_bcst_frms"},
160 {"rmac_in_rng_len_err_frms"},
161 {"rmac_out_rng_len_err_frms"},
163 {"rmac_pause_ctrl_frms"},
164 {"rmac_unsup_ctrl_frms"},
166 {"rmac_accepted_ucst_frms"},
167 {"rmac_accepted_nucst_frms"},
168 {"rmac_discarded_frms"},
169 {"rmac_drop_events"},
170 {"rmac_ttl_less_fb_octets"},
172 {"rmac_usized_frms"},
173 {"rmac_osized_frms"},
175 {"rmac_jabber_frms"},
176 {"rmac_ttl_64_frms"},
177 {"rmac_ttl_65_127_frms"},
178 {"rmac_ttl_128_255_frms"},
179 {"rmac_ttl_256_511_frms"},
180 {"rmac_ttl_512_1023_frms"},
181 {"rmac_ttl_1024_1518_frms"},
189 {"rmac_err_drp_udp"},
190 {"rmac_xgmii_err_sym"},
208 {"rmac_xgmii_data_err_cnt"},
209 {"rmac_xgmii_ctrl_err_cnt"},
210 {"rmac_accepted_ip"},
214 {"new_rd_req_rtry_cnt"},
216 {"wr_rtry_rd_ack_cnt"},
219 {"new_wr_req_rtry_cnt"},
222 {"rd_rtry_wr_ack_cnt"},
230 {"rmac_ttl_1519_4095_frms"},
231 {"rmac_ttl_4096_8191_frms"},
232 {"rmac_ttl_8192_max_frms"},
233 {"rmac_ttl_gt_max_frms"},
234 {"rmac_osized_alt_frms"},
235 {"rmac_jabber_alt_frms"},
236 {"rmac_gt_max_alt_frms"},
238 {"rmac_len_discard"},
239 {"rmac_fcs_discard"},
242 {"rmac_red_discard"},
243 {"rmac_rts_discard"},
244 {"rmac_ingm_full_discard"},
246 {"\n DRIVER STATISTICS"},
247 {"single_bit_ecc_errs"},
248 {"double_bit_ecc_errs"},
254 ("alarm_transceiver_temp_high"),
255 ("alarm_transceiver_temp_low"),
256 ("alarm_laser_bias_current_high"),
257 ("alarm_laser_bias_current_low"),
258 ("alarm_laser_output_power_high"),
259 ("alarm_laser_output_power_low"),
260 ("warn_transceiver_temp_high"),
261 ("warn_transceiver_temp_low"),
262 ("warn_laser_bias_current_high"),
263 ("warn_laser_bias_current_low"),
264 ("warn_laser_output_power_high"),
265 ("warn_laser_output_power_low"),
266 ("lro_aggregated_pkts"),
267 ("lro_flush_both_count"),
268 ("lro_out_of_sequence_pkts"),
269 ("lro_flush_due_to_max_pkts"),
270 ("lro_avg_aggr_pkts"),
273 #define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
274 #define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN
276 #define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
277 #define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
279 #define S2IO_TIMER_CONF(timer, handle, arg, exp) \
280 init_timer(&timer); \
281 timer.function = handle; \
282 timer.data = (unsigned long) arg; \
283 mod_timer(&timer, (jiffies + exp)) \
286 static void s2io_vlan_rx_register(struct net_device *dev,
287 struct vlan_group *grp)
289 nic_t *nic = dev->priv;
292 spin_lock_irqsave(&nic->tx_lock, flags);
294 spin_unlock_irqrestore(&nic->tx_lock, flags);
297 /* Unregister the vlan */
298 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
300 nic_t *nic = dev->priv;
303 spin_lock_irqsave(&nic->tx_lock, flags);
305 nic->vlgrp->vlan_devices[vid] = NULL;
306 spin_unlock_irqrestore(&nic->tx_lock, flags);
310 * Constants to be programmed into the Xena's registers, to configure
315 static const u64 herc_act_dtx_cfg[] = {
317 0x8000051536750000ULL, 0x80000515367500E0ULL,
319 0x8000051536750004ULL, 0x80000515367500E4ULL,
321 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
323 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
325 0x801205150D440000ULL, 0x801205150D4400E0ULL,
327 0x801205150D440004ULL, 0x801205150D4400E4ULL,
329 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
331 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
336 static const u64 xena_dtx_cfg[] = {
338 0x8000051500000000ULL, 0x80000515000000E0ULL,
340 0x80000515D9350004ULL, 0x80000515D93500E4ULL,
342 0x8001051500000000ULL, 0x80010515000000E0ULL,
344 0x80010515001E0004ULL, 0x80010515001E00E4ULL,
346 0x8002051500000000ULL, 0x80020515000000E0ULL,
348 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
353 * Constants for Fixing the MacAddress problem seen mostly on
356 static const u64 fix_mac[] = {
357 0x0060000000000000ULL, 0x0060600000000000ULL,
358 0x0040600000000000ULL, 0x0000600000000000ULL,
359 0x0020600000000000ULL, 0x0060600000000000ULL,
360 0x0020600000000000ULL, 0x0060600000000000ULL,
361 0x0020600000000000ULL, 0x0060600000000000ULL,
362 0x0020600000000000ULL, 0x0060600000000000ULL,
363 0x0020600000000000ULL, 0x0060600000000000ULL,
364 0x0020600000000000ULL, 0x0060600000000000ULL,
365 0x0020600000000000ULL, 0x0060600000000000ULL,
366 0x0020600000000000ULL, 0x0060600000000000ULL,
367 0x0020600000000000ULL, 0x0060600000000000ULL,
368 0x0020600000000000ULL, 0x0060600000000000ULL,
369 0x0020600000000000ULL, 0x0000600000000000ULL,
370 0x0040600000000000ULL, 0x0060600000000000ULL,
374 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
375 MODULE_LICENSE("GPL");
376 MODULE_VERSION(DRV_VERSION);
379 /* Module Loadable parameters. */
380 S2IO_PARM_INT(tx_fifo_num, 1);
381 S2IO_PARM_INT(rx_ring_num, 1);
384 S2IO_PARM_INT(rx_ring_mode, 1);
385 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
386 S2IO_PARM_INT(rmac_pause_time, 0x100);
387 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
388 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
389 S2IO_PARM_INT(shared_splits, 0);
390 S2IO_PARM_INT(tmac_util_period, 5);
391 S2IO_PARM_INT(rmac_util_period, 5);
392 S2IO_PARM_INT(bimodal, 0);
393 S2IO_PARM_INT(l3l4hdr_size, 128);
394 /* Frequency of Rx desc syncs expressed as power of 2 */
395 S2IO_PARM_INT(rxsync_frequency, 3);
396 /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
397 S2IO_PARM_INT(intr_type, 0);
398 /* Large receive offload feature */
399 S2IO_PARM_INT(lro, 0);
400 /* Max pkts to be aggregated by LRO at one time. If not specified,
401 * aggregation happens until we hit max IP pkt size(64K)
403 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
404 S2IO_PARM_INT(indicate_max_pkts, 0);
406 S2IO_PARM_INT(napi, 1);
407 S2IO_PARM_INT(ufo, 0);
409 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
410 {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
411 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
412 {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
413 static unsigned int rts_frm_len[MAX_RX_RINGS] =
414 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
416 module_param_array(tx_fifo_len, uint, NULL, 0);
417 module_param_array(rx_ring_sz, uint, NULL, 0);
418 module_param_array(rts_frm_len, uint, NULL, 0);
422 * This table lists all the devices that this driver supports.
424 static struct pci_device_id s2io_tbl[] __devinitdata = {
425 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
426 PCI_ANY_ID, PCI_ANY_ID},
427 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
428 PCI_ANY_ID, PCI_ANY_ID},
429 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
430 PCI_ANY_ID, PCI_ANY_ID},
431 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
432 PCI_ANY_ID, PCI_ANY_ID},
436 MODULE_DEVICE_TABLE(pci, s2io_tbl);
438 static struct pci_driver s2io_driver = {
440 .id_table = s2io_tbl,
441 .probe = s2io_init_nic,
442 .remove = __devexit_p(s2io_rem_nic),
445 /* A simplifier macro used both by init and free shared_mem Fns(). */
446 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
449 * init_shared_mem - Allocation and Initialization of Memory
450 * @nic: Device private variable.
451 * Description: The function allocates all the memory areas shared
452 * between the NIC and the driver. This includes Tx descriptors,
453 * Rx descriptors and the statistics block.
456 static int init_shared_mem(struct s2io_nic *nic)
459 void *tmp_v_addr, *tmp_v_addr_next;
460 dma_addr_t tmp_p_addr, tmp_p_addr_next;
461 RxD_block_t *pre_rxd_blk = NULL;
463 int lst_size, lst_per_page;
464 struct net_device *dev = nic->dev;
468 mac_info_t *mac_control;
469 struct config_param *config;
471 mac_control = &nic->mac_control;
472 config = &nic->config;
475 /* Allocation and initialization of TXDLs in FIFOs */
477 for (i = 0; i < config->tx_fifo_num; i++) {
478 size += config->tx_cfg[i].fifo_len;
480 if (size > MAX_AVAILABLE_TXDS) {
481 DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
482 DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
486 lst_size = (sizeof(TxD_t) * config->max_txds);
487 lst_per_page = PAGE_SIZE / lst_size;
489 for (i = 0; i < config->tx_fifo_num; i++) {
490 int fifo_len = config->tx_cfg[i].fifo_len;
491 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
492 mac_control->fifos[i].list_info = kmalloc(list_holder_size,
494 if (!mac_control->fifos[i].list_info) {
496 "Malloc failed for list_info\n");
499 memset(mac_control->fifos[i].list_info, 0, list_holder_size);
501 for (i = 0; i < config->tx_fifo_num; i++) {
502 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
504 mac_control->fifos[i].tx_curr_put_info.offset = 0;
505 mac_control->fifos[i].tx_curr_put_info.fifo_len =
506 config->tx_cfg[i].fifo_len - 1;
507 mac_control->fifos[i].tx_curr_get_info.offset = 0;
508 mac_control->fifos[i].tx_curr_get_info.fifo_len =
509 config->tx_cfg[i].fifo_len - 1;
510 mac_control->fifos[i].fifo_no = i;
511 mac_control->fifos[i].nic = nic;
512 mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
514 for (j = 0; j < page_num; j++) {
518 tmp_v = pci_alloc_consistent(nic->pdev,
522 "pci_alloc_consistent ");
523 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
526 /* If we got a zero DMA address(can happen on
527 * certain platforms like PPC), reallocate.
528 * Store virtual address of page we don't want,
532 mac_control->zerodma_virt_addr = tmp_v;
534 "%s: Zero DMA address for TxDL. ", dev->name);
536 "Virtual address %p\n", tmp_v);
537 tmp_v = pci_alloc_consistent(nic->pdev,
541 "pci_alloc_consistent ");
542 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
546 while (k < lst_per_page) {
547 int l = (j * lst_per_page) + k;
548 if (l == config->tx_cfg[i].fifo_len)
550 mac_control->fifos[i].list_info[l].list_virt_addr =
551 tmp_v + (k * lst_size);
552 mac_control->fifos[i].list_info[l].list_phy_addr =
553 tmp_p + (k * lst_size);
559 nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
560 if (!nic->ufo_in_band_v)
563 /* Allocation and initialization of RXDs in Rings */
565 for (i = 0; i < config->rx_ring_num; i++) {
566 if (config->rx_cfg[i].num_rxd %
567 (rxd_count[nic->rxd_mode] + 1)) {
568 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
569 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
571 DBG_PRINT(ERR_DBG, "RxDs per Block");
574 size += config->rx_cfg[i].num_rxd;
575 mac_control->rings[i].block_count =
576 config->rx_cfg[i].num_rxd /
577 (rxd_count[nic->rxd_mode] + 1 );
578 mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
579 mac_control->rings[i].block_count;
581 if (nic->rxd_mode == RXD_MODE_1)
582 size = (size * (sizeof(RxD1_t)));
584 size = (size * (sizeof(RxD3_t)));
586 for (i = 0; i < config->rx_ring_num; i++) {
587 mac_control->rings[i].rx_curr_get_info.block_index = 0;
588 mac_control->rings[i].rx_curr_get_info.offset = 0;
589 mac_control->rings[i].rx_curr_get_info.ring_len =
590 config->rx_cfg[i].num_rxd - 1;
591 mac_control->rings[i].rx_curr_put_info.block_index = 0;
592 mac_control->rings[i].rx_curr_put_info.offset = 0;
593 mac_control->rings[i].rx_curr_put_info.ring_len =
594 config->rx_cfg[i].num_rxd - 1;
595 mac_control->rings[i].nic = nic;
596 mac_control->rings[i].ring_no = i;
598 blk_cnt = config->rx_cfg[i].num_rxd /
599 (rxd_count[nic->rxd_mode] + 1);
600 /* Allocating all the Rx blocks */
601 for (j = 0; j < blk_cnt; j++) {
602 rx_block_info_t *rx_blocks;
605 rx_blocks = &mac_control->rings[i].rx_blocks[j];
606 size = SIZE_OF_BLOCK; //size is always page size
607 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
609 if (tmp_v_addr == NULL) {
611 * In case of failure, free_shared_mem()
612 * is called, which should free any
613 * memory that was alloced till the
616 rx_blocks->block_virt_addr = tmp_v_addr;
619 memset(tmp_v_addr, 0, size);
620 rx_blocks->block_virt_addr = tmp_v_addr;
621 rx_blocks->block_dma_addr = tmp_p_addr;
622 rx_blocks->rxds = kmalloc(sizeof(rxd_info_t)*
623 rxd_count[nic->rxd_mode],
625 if (!rx_blocks->rxds)
627 for (l=0; l<rxd_count[nic->rxd_mode];l++) {
628 rx_blocks->rxds[l].virt_addr =
629 rx_blocks->block_virt_addr +
630 (rxd_size[nic->rxd_mode] * l);
631 rx_blocks->rxds[l].dma_addr =
632 rx_blocks->block_dma_addr +
633 (rxd_size[nic->rxd_mode] * l);
636 /* Interlinking all Rx Blocks */
637 for (j = 0; j < blk_cnt; j++) {
639 mac_control->rings[i].rx_blocks[j].block_virt_addr;
641 mac_control->rings[i].rx_blocks[(j + 1) %
642 blk_cnt].block_virt_addr;
644 mac_control->rings[i].rx_blocks[j].block_dma_addr;
646 mac_control->rings[i].rx_blocks[(j + 1) %
647 blk_cnt].block_dma_addr;
649 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
650 pre_rxd_blk->reserved_2_pNext_RxD_block =
651 (unsigned long) tmp_v_addr_next;
652 pre_rxd_blk->pNext_RxD_Blk_physical =
653 (u64) tmp_p_addr_next;
656 if (nic->rxd_mode >= RXD_MODE_3A) {
658 * Allocation of Storages for buffer addresses in 2BUFF mode
659 * and the buffers as well.
661 for (i = 0; i < config->rx_ring_num; i++) {
662 blk_cnt = config->rx_cfg[i].num_rxd /
663 (rxd_count[nic->rxd_mode]+ 1);
664 mac_control->rings[i].ba =
665 kmalloc((sizeof(buffAdd_t *) * blk_cnt),
667 if (!mac_control->rings[i].ba)
669 for (j = 0; j < blk_cnt; j++) {
671 mac_control->rings[i].ba[j] =
672 kmalloc((sizeof(buffAdd_t) *
673 (rxd_count[nic->rxd_mode] + 1)),
675 if (!mac_control->rings[i].ba[j])
677 while (k != rxd_count[nic->rxd_mode]) {
678 ba = &mac_control->rings[i].ba[j][k];
680 ba->ba_0_org = (void *) kmalloc
681 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
684 tmp = (unsigned long)ba->ba_0_org;
686 tmp &= ~((unsigned long) ALIGN_SIZE);
687 ba->ba_0 = (void *) tmp;
689 ba->ba_1_org = (void *) kmalloc
690 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
693 tmp = (unsigned long) ba->ba_1_org;
695 tmp &= ~((unsigned long) ALIGN_SIZE);
696 ba->ba_1 = (void *) tmp;
703 /* Allocation and initialization of Statistics block */
704 size = sizeof(StatInfo_t);
705 mac_control->stats_mem = pci_alloc_consistent
706 (nic->pdev, size, &mac_control->stats_mem_phy);
708 if (!mac_control->stats_mem) {
710 * In case of failure, free_shared_mem() is called, which
711 * should free any memory that was alloced till the
716 mac_control->stats_mem_sz = size;
718 tmp_v_addr = mac_control->stats_mem;
719 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
720 memset(tmp_v_addr, 0, size);
721 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
722 (unsigned long long) tmp_p_addr);
728 * free_shared_mem - Free the allocated Memory
729 * @nic: Device private variable.
730 * Description: This function is to free all memory locations allocated by
731 * the init_shared_mem() function and return it to the kernel.
734 static void free_shared_mem(struct s2io_nic *nic)
736 int i, j, blk_cnt, size;
738 dma_addr_t tmp_p_addr;
739 mac_info_t *mac_control;
740 struct config_param *config;
741 int lst_size, lst_per_page;
742 struct net_device *dev = nic->dev;
747 mac_control = &nic->mac_control;
748 config = &nic->config;
750 lst_size = (sizeof(TxD_t) * config->max_txds);
751 lst_per_page = PAGE_SIZE / lst_size;
753 for (i = 0; i < config->tx_fifo_num; i++) {
754 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
756 for (j = 0; j < page_num; j++) {
757 int mem_blks = (j * lst_per_page);
758 if (!mac_control->fifos[i].list_info)
760 if (!mac_control->fifos[i].list_info[mem_blks].
763 pci_free_consistent(nic->pdev, PAGE_SIZE,
764 mac_control->fifos[i].
767 mac_control->fifos[i].
771 /* If we got a zero DMA address during allocation,
774 if (mac_control->zerodma_virt_addr) {
775 pci_free_consistent(nic->pdev, PAGE_SIZE,
776 mac_control->zerodma_virt_addr,
779 "%s: Freeing TxDL with zero DMA addr. ",
781 DBG_PRINT(INIT_DBG, "Virtual address %p\n",
782 mac_control->zerodma_virt_addr);
784 kfree(mac_control->fifos[i].list_info);
787 size = SIZE_OF_BLOCK;
788 for (i = 0; i < config->rx_ring_num; i++) {
789 blk_cnt = mac_control->rings[i].block_count;
790 for (j = 0; j < blk_cnt; j++) {
791 tmp_v_addr = mac_control->rings[i].rx_blocks[j].
793 tmp_p_addr = mac_control->rings[i].rx_blocks[j].
795 if (tmp_v_addr == NULL)
797 pci_free_consistent(nic->pdev, size,
798 tmp_v_addr, tmp_p_addr);
799 kfree(mac_control->rings[i].rx_blocks[j].rxds);
803 if (nic->rxd_mode >= RXD_MODE_3A) {
804 /* Freeing buffer storage addresses in 2BUFF mode. */
805 for (i = 0; i < config->rx_ring_num; i++) {
806 blk_cnt = config->rx_cfg[i].num_rxd /
807 (rxd_count[nic->rxd_mode] + 1);
808 for (j = 0; j < blk_cnt; j++) {
810 if (!mac_control->rings[i].ba[j])
812 while (k != rxd_count[nic->rxd_mode]) {
814 &mac_control->rings[i].ba[j][k];
819 kfree(mac_control->rings[i].ba[j]);
821 kfree(mac_control->rings[i].ba);
825 if (mac_control->stats_mem) {
826 pci_free_consistent(nic->pdev,
827 mac_control->stats_mem_sz,
828 mac_control->stats_mem,
829 mac_control->stats_mem_phy);
831 if (nic->ufo_in_band_v)
832 kfree(nic->ufo_in_band_v);
836 * s2io_verify_pci_mode -
839 static int s2io_verify_pci_mode(nic_t *nic)
841 XENA_dev_config_t __iomem *bar0 = nic->bar0;
842 register u64 val64 = 0;
845 val64 = readq(&bar0->pci_mode);
846 mode = (u8)GET_PCI_MODE(val64);
848 if ( val64 & PCI_MODE_UNKNOWN_MODE)
849 return -1; /* Unknown PCI mode */
853 #define NEC_VENID 0x1033
854 #define NEC_DEVID 0x0125
855 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
857 struct pci_dev *tdev = NULL;
858 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
859 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
860 if (tdev->bus == s2io_pdev->bus->parent)
868 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
870 * s2io_print_pci_mode -
872 static int s2io_print_pci_mode(nic_t *nic)
874 XENA_dev_config_t __iomem *bar0 = nic->bar0;
875 register u64 val64 = 0;
877 struct config_param *config = &nic->config;
879 val64 = readq(&bar0->pci_mode);
880 mode = (u8)GET_PCI_MODE(val64);
882 if ( val64 & PCI_MODE_UNKNOWN_MODE)
883 return -1; /* Unknown PCI mode */
885 config->bus_speed = bus_speed[mode];
887 if (s2io_on_nec_bridge(nic->pdev)) {
888 DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
893 if (val64 & PCI_MODE_32_BITS) {
894 DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
896 DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
900 case PCI_MODE_PCI_33:
901 DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
903 case PCI_MODE_PCI_66:
904 DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
906 case PCI_MODE_PCIX_M1_66:
907 DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
909 case PCI_MODE_PCIX_M1_100:
910 DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
912 case PCI_MODE_PCIX_M1_133:
913 DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
915 case PCI_MODE_PCIX_M2_66:
916 DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
918 case PCI_MODE_PCIX_M2_100:
919 DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
921 case PCI_MODE_PCIX_M2_133:
922 DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
925 return -1; /* Unsupported bus speed */
932 * init_nic - Initialization of hardware
933 * @nic: device private variable
934 * Description: The function sequentially configures every block
935 * of the H/W from their reset values.
936 * Return Value: SUCCESS on success and
937 * '-1' on failure (endian settings incorrect).
940 static int init_nic(struct s2io_nic *nic)
942 XENA_dev_config_t __iomem *bar0 = nic->bar0;
943 struct net_device *dev = nic->dev;
944 register u64 val64 = 0;
948 mac_info_t *mac_control;
949 struct config_param *config;
951 unsigned long long mem_share;
954 mac_control = &nic->mac_control;
955 config = &nic->config;
957 /* to set the swapper control on the card */
958 if(s2io_set_swapper(nic)) {
959 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
964 * Herc requires EOI to be removed from reset before XGXS, so..
966 if (nic->device_type & XFRAME_II_DEVICE) {
967 val64 = 0xA500000000ULL;
968 writeq(val64, &bar0->sw_reset);
970 val64 = readq(&bar0->sw_reset);
973 /* Remove XGXS from reset state */
975 writeq(val64, &bar0->sw_reset);
977 val64 = readq(&bar0->sw_reset);
979 /* Enable Receiving broadcasts */
980 add = &bar0->mac_cfg;
981 val64 = readq(&bar0->mac_cfg);
982 val64 |= MAC_RMAC_BCAST_ENABLE;
983 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
984 writel((u32) val64, add);
985 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
986 writel((u32) (val64 >> 32), (add + 4));
988 /* Read registers in all blocks */
989 val64 = readq(&bar0->mac_int_mask);
990 val64 = readq(&bar0->mc_int_mask);
991 val64 = readq(&bar0->xgxs_int_mask);
995 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
997 if (nic->device_type & XFRAME_II_DEVICE) {
998 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
999 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1000 &bar0->dtx_control, UF);
1002 msleep(1); /* Necessary!! */
1006 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1007 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1008 &bar0->dtx_control, UF);
1009 val64 = readq(&bar0->dtx_control);
1014 /* Tx DMA Initialization */
1016 writeq(val64, &bar0->tx_fifo_partition_0);
1017 writeq(val64, &bar0->tx_fifo_partition_1);
1018 writeq(val64, &bar0->tx_fifo_partition_2);
1019 writeq(val64, &bar0->tx_fifo_partition_3);
1022 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1024 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1025 13) | vBIT(config->tx_cfg[i].fifo_priority,
1028 if (i == (config->tx_fifo_num - 1)) {
1035 writeq(val64, &bar0->tx_fifo_partition_0);
1039 writeq(val64, &bar0->tx_fifo_partition_1);
1043 writeq(val64, &bar0->tx_fifo_partition_2);
1047 writeq(val64, &bar0->tx_fifo_partition_3);
1053 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1054 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1056 if ((nic->device_type == XFRAME_I_DEVICE) &&
1057 (get_xena_rev_id(nic->pdev) < 4))
1058 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1060 val64 = readq(&bar0->tx_fifo_partition_0);
1061 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1062 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1065 * Initialization of Tx_PA_CONFIG register to ignore packet
1066 * integrity checking.
1068 val64 = readq(&bar0->tx_pa_cfg);
1069 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1070 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1071 writeq(val64, &bar0->tx_pa_cfg);
1073 /* Rx DMA initialization. */
1075 for (i = 0; i < config->rx_ring_num; i++) {
1077 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1080 writeq(val64, &bar0->rx_queue_priority);
1083 * Allocating equal share of memory to all the
1087 if (nic->device_type & XFRAME_II_DEVICE)
1092 for (i = 0; i < config->rx_ring_num; i++) {
1095 mem_share = (mem_size / config->rx_ring_num +
1096 mem_size % config->rx_ring_num);
1097 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1100 mem_share = (mem_size / config->rx_ring_num);
1101 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1104 mem_share = (mem_size / config->rx_ring_num);
1105 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1108 mem_share = (mem_size / config->rx_ring_num);
1109 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1112 mem_share = (mem_size / config->rx_ring_num);
1113 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1116 mem_share = (mem_size / config->rx_ring_num);
1117 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1120 mem_share = (mem_size / config->rx_ring_num);
1121 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1124 mem_share = (mem_size / config->rx_ring_num);
1125 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1129 writeq(val64, &bar0->rx_queue_cfg);
1132 * Filling Tx round robin registers
1133 * as per the number of FIFOs
1135 switch (config->tx_fifo_num) {
1137 val64 = 0x0000000000000000ULL;
1138 writeq(val64, &bar0->tx_w_round_robin_0);
1139 writeq(val64, &bar0->tx_w_round_robin_1);
1140 writeq(val64, &bar0->tx_w_round_robin_2);
1141 writeq(val64, &bar0->tx_w_round_robin_3);
1142 writeq(val64, &bar0->tx_w_round_robin_4);
1145 val64 = 0x0000010000010000ULL;
1146 writeq(val64, &bar0->tx_w_round_robin_0);
1147 val64 = 0x0100000100000100ULL;
1148 writeq(val64, &bar0->tx_w_round_robin_1);
1149 val64 = 0x0001000001000001ULL;
1150 writeq(val64, &bar0->tx_w_round_robin_2);
1151 val64 = 0x0000010000010000ULL;
1152 writeq(val64, &bar0->tx_w_round_robin_3);
1153 val64 = 0x0100000000000000ULL;
1154 writeq(val64, &bar0->tx_w_round_robin_4);
1157 val64 = 0x0001000102000001ULL;
1158 writeq(val64, &bar0->tx_w_round_robin_0);
1159 val64 = 0x0001020000010001ULL;
1160 writeq(val64, &bar0->tx_w_round_robin_1);
1161 val64 = 0x0200000100010200ULL;
1162 writeq(val64, &bar0->tx_w_round_robin_2);
1163 val64 = 0x0001000102000001ULL;
1164 writeq(val64, &bar0->tx_w_round_robin_3);
1165 val64 = 0x0001020000000000ULL;
1166 writeq(val64, &bar0->tx_w_round_robin_4);
1169 val64 = 0x0001020300010200ULL;
1170 writeq(val64, &bar0->tx_w_round_robin_0);
1171 val64 = 0x0100000102030001ULL;
1172 writeq(val64, &bar0->tx_w_round_robin_1);
1173 val64 = 0x0200010000010203ULL;
1174 writeq(val64, &bar0->tx_w_round_robin_2);
1175 val64 = 0x0001020001000001ULL;
1176 writeq(val64, &bar0->tx_w_round_robin_3);
1177 val64 = 0x0203000100000000ULL;
1178 writeq(val64, &bar0->tx_w_round_robin_4);
1181 val64 = 0x0001000203000102ULL;
1182 writeq(val64, &bar0->tx_w_round_robin_0);
1183 val64 = 0x0001020001030004ULL;
1184 writeq(val64, &bar0->tx_w_round_robin_1);
1185 val64 = 0x0001000203000102ULL;
1186 writeq(val64, &bar0->tx_w_round_robin_2);
1187 val64 = 0x0001020001030004ULL;
1188 writeq(val64, &bar0->tx_w_round_robin_3);
1189 val64 = 0x0001000000000000ULL;
1190 writeq(val64, &bar0->tx_w_round_robin_4);
1193 val64 = 0x0001020304000102ULL;
1194 writeq(val64, &bar0->tx_w_round_robin_0);
1195 val64 = 0x0304050001020001ULL;
1196 writeq(val64, &bar0->tx_w_round_robin_1);
1197 val64 = 0x0203000100000102ULL;
1198 writeq(val64, &bar0->tx_w_round_robin_2);
1199 val64 = 0x0304000102030405ULL;
1200 writeq(val64, &bar0->tx_w_round_robin_3);
1201 val64 = 0x0001000200000000ULL;
1202 writeq(val64, &bar0->tx_w_round_robin_4);
1205 val64 = 0x0001020001020300ULL;
1206 writeq(val64, &bar0->tx_w_round_robin_0);
1207 val64 = 0x0102030400010203ULL;
1208 writeq(val64, &bar0->tx_w_round_robin_1);
1209 val64 = 0x0405060001020001ULL;
1210 writeq(val64, &bar0->tx_w_round_robin_2);
1211 val64 = 0x0304050000010200ULL;
1212 writeq(val64, &bar0->tx_w_round_robin_3);
1213 val64 = 0x0102030000000000ULL;
1214 writeq(val64, &bar0->tx_w_round_robin_4);
1217 val64 = 0x0001020300040105ULL;
1218 writeq(val64, &bar0->tx_w_round_robin_0);
1219 val64 = 0x0200030106000204ULL;
1220 writeq(val64, &bar0->tx_w_round_robin_1);
1221 val64 = 0x0103000502010007ULL;
1222 writeq(val64, &bar0->tx_w_round_robin_2);
1223 val64 = 0x0304010002060500ULL;
1224 writeq(val64, &bar0->tx_w_round_robin_3);
1225 val64 = 0x0103020400000000ULL;
1226 writeq(val64, &bar0->tx_w_round_robin_4);
1230 /* Enable all configured Tx FIFO partitions */
1231 val64 = readq(&bar0->tx_fifo_partition_0);
1232 val64 |= (TX_FIFO_PARTITION_EN);
1233 writeq(val64, &bar0->tx_fifo_partition_0);
1235 /* Filling the Rx round robin registers as per the
1236 * number of Rings and steering based on QoS.
1238 switch (config->rx_ring_num) {
1240 val64 = 0x8080808080808080ULL;
1241 writeq(val64, &bar0->rts_qos_steering);
1244 val64 = 0x0000010000010000ULL;
1245 writeq(val64, &bar0->rx_w_round_robin_0);
1246 val64 = 0x0100000100000100ULL;
1247 writeq(val64, &bar0->rx_w_round_robin_1);
1248 val64 = 0x0001000001000001ULL;
1249 writeq(val64, &bar0->rx_w_round_robin_2);
1250 val64 = 0x0000010000010000ULL;
1251 writeq(val64, &bar0->rx_w_round_robin_3);
1252 val64 = 0x0100000000000000ULL;
1253 writeq(val64, &bar0->rx_w_round_robin_4);
1255 val64 = 0x8080808040404040ULL;
1256 writeq(val64, &bar0->rts_qos_steering);
1259 val64 = 0x0001000102000001ULL;
1260 writeq(val64, &bar0->rx_w_round_robin_0);
1261 val64 = 0x0001020000010001ULL;
1262 writeq(val64, &bar0->rx_w_round_robin_1);
1263 val64 = 0x0200000100010200ULL;
1264 writeq(val64, &bar0->rx_w_round_robin_2);
1265 val64 = 0x0001000102000001ULL;
1266 writeq(val64, &bar0->rx_w_round_robin_3);
1267 val64 = 0x0001020000000000ULL;
1268 writeq(val64, &bar0->rx_w_round_robin_4);
1270 val64 = 0x8080804040402020ULL;
1271 writeq(val64, &bar0->rts_qos_steering);
1274 val64 = 0x0001020300010200ULL;
1275 writeq(val64, &bar0->rx_w_round_robin_0);
1276 val64 = 0x0100000102030001ULL;
1277 writeq(val64, &bar0->rx_w_round_robin_1);
1278 val64 = 0x0200010000010203ULL;
1279 writeq(val64, &bar0->rx_w_round_robin_2);
1280 val64 = 0x0001020001000001ULL;
1281 writeq(val64, &bar0->rx_w_round_robin_3);
1282 val64 = 0x0203000100000000ULL;
1283 writeq(val64, &bar0->rx_w_round_robin_4);
1285 val64 = 0x8080404020201010ULL;
1286 writeq(val64, &bar0->rts_qos_steering);
1289 val64 = 0x0001000203000102ULL;
1290 writeq(val64, &bar0->rx_w_round_robin_0);
1291 val64 = 0x0001020001030004ULL;
1292 writeq(val64, &bar0->rx_w_round_robin_1);
1293 val64 = 0x0001000203000102ULL;
1294 writeq(val64, &bar0->rx_w_round_robin_2);
1295 val64 = 0x0001020001030004ULL;
1296 writeq(val64, &bar0->rx_w_round_robin_3);
1297 val64 = 0x0001000000000000ULL;
1298 writeq(val64, &bar0->rx_w_round_robin_4);
1300 val64 = 0x8080404020201008ULL;
1301 writeq(val64, &bar0->rts_qos_steering);
1304 val64 = 0x0001020304000102ULL;
1305 writeq(val64, &bar0->rx_w_round_robin_0);
1306 val64 = 0x0304050001020001ULL;
1307 writeq(val64, &bar0->rx_w_round_robin_1);
1308 val64 = 0x0203000100000102ULL;
1309 writeq(val64, &bar0->rx_w_round_robin_2);
1310 val64 = 0x0304000102030405ULL;
1311 writeq(val64, &bar0->rx_w_round_robin_3);
1312 val64 = 0x0001000200000000ULL;
1313 writeq(val64, &bar0->rx_w_round_robin_4);
1315 val64 = 0x8080404020100804ULL;
1316 writeq(val64, &bar0->rts_qos_steering);
1319 val64 = 0x0001020001020300ULL;
1320 writeq(val64, &bar0->rx_w_round_robin_0);
1321 val64 = 0x0102030400010203ULL;
1322 writeq(val64, &bar0->rx_w_round_robin_1);
1323 val64 = 0x0405060001020001ULL;
1324 writeq(val64, &bar0->rx_w_round_robin_2);
1325 val64 = 0x0304050000010200ULL;
1326 writeq(val64, &bar0->rx_w_round_robin_3);
1327 val64 = 0x0102030000000000ULL;
1328 writeq(val64, &bar0->rx_w_round_robin_4);
1330 val64 = 0x8080402010080402ULL;
1331 writeq(val64, &bar0->rts_qos_steering);
1334 val64 = 0x0001020300040105ULL;
1335 writeq(val64, &bar0->rx_w_round_robin_0);
1336 val64 = 0x0200030106000204ULL;
1337 writeq(val64, &bar0->rx_w_round_robin_1);
1338 val64 = 0x0103000502010007ULL;
1339 writeq(val64, &bar0->rx_w_round_robin_2);
1340 val64 = 0x0304010002060500ULL;
1341 writeq(val64, &bar0->rx_w_round_robin_3);
1342 val64 = 0x0103020400000000ULL;
1343 writeq(val64, &bar0->rx_w_round_robin_4);
1345 val64 = 0x8040201008040201ULL;
1346 writeq(val64, &bar0->rts_qos_steering);
1352 for (i = 0; i < 8; i++)
1353 writeq(val64, &bar0->rts_frm_len_n[i]);
1355 /* Set the default rts frame length for the rings configured */
1356 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1357 for (i = 0 ; i < config->rx_ring_num ; i++)
1358 writeq(val64, &bar0->rts_frm_len_n[i]);
1360 /* Set the frame length for the configured rings
1361 * desired by the user
1363 for (i = 0; i < config->rx_ring_num; i++) {
2364 /* If rts_frm_len[i] == 0 then it is assumed that the user has not
1365 * specified frame length steering.
1366 * If the user provides the frame length then program
1367 * the rts_frm_len register for those values or else
1368 * leave it as it is.
1370 if (rts_frm_len[i] != 0) {
1371 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1372 &bar0->rts_frm_len_n[i]);
1376 /* Program statistics memory */
1377 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1379 if (nic->device_type == XFRAME_II_DEVICE) {
1380 val64 = STAT_BC(0x320);
1381 writeq(val64, &bar0->stat_byte_cnt);
1385 * Initializing the sampling rate for the device to calculate the
1386 * bandwidth utilization.
1388 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1389 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1390 writeq(val64, &bar0->mac_link_util);
1394 * Initializing the Transmit and Receive Traffic Interrupt
1398 * TTI Initialization. Default Tx timer gets us about
1399 * 250 interrupts per sec. Continuous interrupts are enabled
1402 if (nic->device_type == XFRAME_II_DEVICE) {
1403 int count = (nic->config.bus_speed * 125)/2;
1404 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1407 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1409 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1410 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1411 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1412 if (use_continuous_tx_intrs)
1413 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1414 writeq(val64, &bar0->tti_data1_mem);
1416 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1417 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1418 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1419 writeq(val64, &bar0->tti_data2_mem);
1421 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1422 writeq(val64, &bar0->tti_command_mem);
1425 * Once the operation completes, the Strobe bit of the command
1426 * register will be reset. We poll for this particular condition
1427 * We wait for a maximum of 500ms for the operation to complete,
1428 * if it's not complete by then we return error.
1432 val64 = readq(&bar0->tti_command_mem);
1433 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1437 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1445 if (nic->config.bimodal) {
1447 for (k = 0; k < config->rx_ring_num; k++) {
1448 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1449 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1450 writeq(val64, &bar0->tti_command_mem);
1453 * Once the operation completes, the Strobe bit of the command
1454 * register will be reset. We poll for this particular condition
1455 * We wait for a maximum of 500ms for the operation to complete,
1456 * if it's not complete by then we return error.
1460 val64 = readq(&bar0->tti_command_mem);
1461 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1466 "%s: TTI init Failed\n",
1476 /* RTI Initialization */
1477 if (nic->device_type == XFRAME_II_DEVICE) {
1479 * Programmed to generate approx. 500 Intrs per
1482 int count = (nic->config.bus_speed * 125)/4;
1483 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1485 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1487 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1488 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1489 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1491 writeq(val64, &bar0->rti_data1_mem);
1493 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1494 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1495 if (nic->intr_type == MSI_X)
1496 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1497 RTI_DATA2_MEM_RX_UFC_D(0x40));
1499 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1500 RTI_DATA2_MEM_RX_UFC_D(0x80));
1501 writeq(val64, &bar0->rti_data2_mem);
1503 for (i = 0; i < config->rx_ring_num; i++) {
1504 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1505 | RTI_CMD_MEM_OFFSET(i);
1506 writeq(val64, &bar0->rti_command_mem);
1509 * Once the operation completes, the Strobe bit of the
1510 * command register will be reset. We poll for this
1511 * particular condition. We wait for a maximum of 500ms
1512 * for the operation to complete, if it's not complete
1513 * by then we return error.
1517 val64 = readq(&bar0->rti_command_mem);
1518 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1522 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1533 * Initializing proper values as Pause threshold into all
1534 * the 8 Queues on Rx side.
1536 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1537 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1539 /* Disable RMAC PAD STRIPPING */
1540 add = &bar0->mac_cfg;
1541 val64 = readq(&bar0->mac_cfg);
1542 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1543 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1544 writel((u32) (val64), add);
1545 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1546 writel((u32) (val64 >> 32), (add + 4));
1547 val64 = readq(&bar0->mac_cfg);
1549 /* Enable FCS stripping by adapter */
1550 add = &bar0->mac_cfg;
1551 val64 = readq(&bar0->mac_cfg);
1552 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1553 if (nic->device_type == XFRAME_II_DEVICE)
1554 writeq(val64, &bar0->mac_cfg);
1556 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1557 writel((u32) (val64), add);
1558 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1559 writel((u32) (val64 >> 32), (add + 4));
1563 * Set the time value to be inserted in the pause frame
1564 * generated by xena.
1566 val64 = readq(&bar0->rmac_pause_cfg);
1567 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1568 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1569 writeq(val64, &bar0->rmac_pause_cfg);
1572 * Set the Threshold Limit for Generating the pause frame
1573 * If the amount of data in any Queue exceeds ratio of
1574 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1575 * pause frame is generated
1578 for (i = 0; i < 4; i++) {
1580 (((u64) 0xFF00 | nic->mac_control.
1581 mc_pause_threshold_q0q3)
1584 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1587 for (i = 0; i < 4; i++) {
1589 (((u64) 0xFF00 | nic->mac_control.
1590 mc_pause_threshold_q4q7)
1593 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1596 * TxDMA will stop Read request if the number of read split has
1597 * exceeded the limit pointed by shared_splits
1599 val64 = readq(&bar0->pic_control);
1600 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1601 writeq(val64, &bar0->pic_control);
1603 if (nic->config.bus_speed == 266) {
1604 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1605 writeq(0x0, &bar0->read_retry_delay);
1606 writeq(0x0, &bar0->write_retry_delay);
1610 * Programming the Herc to split every write transaction
1611 * that does not start on an ADB to reduce disconnects.
1613 if (nic->device_type == XFRAME_II_DEVICE) {
1614 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1615 MISC_LINK_STABILITY_PRD(3);
1616 writeq(val64, &bar0->misc_control);
1617 val64 = readq(&bar0->pic_control2);
1618 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1619 writeq(val64, &bar0->pic_control2);
1621 if (strstr(nic->product_name, "CX4")) {
1622 val64 = TMAC_AVG_IPG(0x17);
1623 writeq(val64, &bar0->tmac_avg_ipg);
1628 #define LINK_UP_DOWN_INTERRUPT 1
1629 #define MAC_RMAC_ERR_TIMER 2
1631 static int s2io_link_fault_indication(nic_t *nic)
1633 if (nic->intr_type != INTA)
1634 return MAC_RMAC_ERR_TIMER;
1635 if (nic->device_type == XFRAME_II_DEVICE)
1636 return LINK_UP_DOWN_INTERRUPT;
1638 return MAC_RMAC_ERR_TIMER;
1642 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1643 * @nic: device private variable,
1644 * @mask: A mask indicating which Intr block must be modified and,
1645 * @flag: A flag indicating whether to enable or disable the Intrs.
1646 * Description: This function will either disable or enable the interrupts
1647 * depending on the flag argument. The mask argument can be used to
1648 * enable/disable any Intr block.
1649 * Return Value: NONE.
 * Note: clearing a block's bit in general_int_mask ENABLES that block's
 * interrupts; setting it masks them. Each block additionally has its own
 * fine-grained mask register written below.
1652 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1654 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1655 register u64 val64 = 0, temp64 = 0;
1657 /* Top level interrupt classification */
1658 /* PIC Interrupts */
1659 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1660 /* Enable PIC Intrs in the general intr mask register */
1661 val64 = TXPIC_INT_M;
1662 if (flag == ENABLE_INTRS) {
/* Read-modify-write: clear the PIC bit to unmask it at top level. */
1663 temp64 = readq(&bar0->general_int_mask);
1664 temp64 &= ~((u64) val64);
1665 writeq(temp64, &bar0->general_int_mask);
1667 * If Hercules adapter enable GPIO otherwise
1668 * disable all PCIX, Flash, MDIO, IIC and GPIO
1669 * interrupts for now.
/* GPIO carries the link up/down interrupt on Hercules with INTA. */
1672 if (s2io_link_fault_indication(nic) ==
1673 LINK_UP_DOWN_INTERRUPT ) {
1674 temp64 = readq(&bar0->pic_int_mask);
1675 temp64 &= ~((u64) PIC_INT_GPIO);
1676 writeq(temp64, &bar0->pic_int_mask);
1677 temp64 = readq(&bar0->gpio_int_mask);
1678 temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
1679 writeq(temp64, &bar0->gpio_int_mask);
1681 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1684 * No MSI Support is available presently, so TTI and
1685 * RTI interrupts are also disabled.
1687 } else if (flag == DISABLE_INTRS) {
1689 * Disable PIC Intrs in the general
1690 * intr mask register
/* NOTE(review): the OR that folds temp64 into val64 sits on a line
 * elided from this listing — confirm against the full source. */
1692 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1693 temp64 = readq(&bar0->general_int_mask);
1695 writeq(val64, &bar0->general_int_mask);
1699 /* MAC Interrupts */
1700 /* Enabling/Disabling MAC interrupts */
1701 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1702 val64 = TXMAC_INT_M | RXMAC_INT_M;
1703 if (flag == ENABLE_INTRS) {
1704 temp64 = readq(&bar0->general_int_mask);
1705 temp64 &= ~((u64) val64);
1706 writeq(temp64, &bar0->general_int_mask);
1708 * All MAC block error interrupts are disabled for now
1711 } else if (flag == DISABLE_INTRS) {
1713 * Disable MAC Intrs in the general intr mask register
1715 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1716 writeq(DISABLE_ALL_INTRS,
1717 &bar0->mac_rmac_err_mask);
1719 temp64 = readq(&bar0->general_int_mask);
1721 writeq(val64, &bar0->general_int_mask);
1725 /* Tx traffic interrupts */
1726 if (mask & TX_TRAFFIC_INTR) {
1727 val64 = TXTRAFFIC_INT_M;
1728 if (flag == ENABLE_INTRS) {
1729 temp64 = readq(&bar0->general_int_mask);
1730 temp64 &= ~((u64) val64);
1731 writeq(temp64, &bar0->general_int_mask);
1733 * Enable all the Tx side interrupts
1734 * writing 0 Enables all 64 TX interrupt levels
1736 writeq(0x0, &bar0->tx_traffic_mask);
1737 } else if (flag == DISABLE_INTRS) {
1739 * Disable Tx Traffic Intrs in the general intr mask
1742 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1743 temp64 = readq(&bar0->general_int_mask);
1745 writeq(val64, &bar0->general_int_mask);
1749 /* Rx traffic interrupts */
1750 if (mask & RX_TRAFFIC_INTR) {
1751 val64 = RXTRAFFIC_INT_M;
1752 if (flag == ENABLE_INTRS) {
1753 temp64 = readq(&bar0->general_int_mask);
1754 temp64 &= ~((u64) val64);
1755 writeq(temp64, &bar0->general_int_mask);
1756 /* writing 0 Enables all 8 RX interrupt levels */
1757 writeq(0x0, &bar0->rx_traffic_mask);
1758 } else if (flag == DISABLE_INTRS) {
1760 * Disable Rx Traffic Intrs in the general intr mask
1763 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1764 temp64 = readq(&bar0->general_int_mask);
1766 writeq(val64, &bar0->general_int_mask);
1772 * verify_pcc_quiescent- Checks for PCC quiescent state
 * @sp: device private structure.
 * @flag: TRUE/FALSE — selects which polarity of the idle bits to test
 *        (presumably whether the adapter-enable bit was just set or
 *        cleared — TODO confirm against the caller).
1773 * Return: 1 if the PCC is quiescent
1774 * 0 if the PCC is not quiescent
1776 static int verify_pcc_quiescent(nic_t *sp, int flag)
1779 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1780 u64 val64 = readq(&bar0->adapter_status);
 /* Hercules (and Xena rev >= 4) report PCC idle in a different
  * adapter_status bit than older Xena silicon. */
1782 herc = (sp->device_type == XFRAME_II_DEVICE);
1784 if (flag == FALSE) {
1785 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
1786 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
1789 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1793 if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
1794 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1795 ADAPTER_STATUS_RMAC_PCC_IDLE))
1798 if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
1799 ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
1807 * verify_xena_quiescence - Checks whether the H/W is ready
1808 * Description: Returns whether the H/W is ready to go or not. Depending
1809 * on whether adapter enable bit was written or not the comparison
1810 * differs and the calling function passes the input argument flag to
1812 * Return: 1 if Xena is quiescent
1813 * 0 if Xena is not quiescent
1816 static int verify_xena_quiescence(nic_t *sp)
1819 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1820 u64 val64 = readq(&bar0->adapter_status);
1821 mode = s2io_verify_pci_mode(sp);
 /* Each check below tests one readiness bit of adapter_status and
  * logs which subsystem is still busy before bailing out. */
1823 if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
1824 DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
1827 if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
1828 DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
1831 if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
1832 DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
1835 if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
1836 DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
1839 if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
1840 DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
1843 if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
1844 DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
1847 if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
1848 DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
1851 if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
1852 DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");
1857 * In PCI 33 mode, the P_PLL is not used, and therefore,
1858 * the P_PLL_LOCK bit in the adapter_status register will
 /* P_PLL lock is only checked on XFRAME_II and only when the bus is
  * not in PCI-33 mode (the mode literal is on a line elided here). */
1861 if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
1862 sp->device_type == XFRAME_II_DEVICE && mode !=
1864 DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
 /* RC_PRC is a multi-bit field: all bits must be set for quiescence. */
1867 if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1868 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
1869 DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
1876 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1877 * @sp: Pointer to device specific structure
1879 * New procedure to clear mac address reading problems on Alpha platforms
 * Replays the fix_mac[] write sequence (terminated by END_SIGN) into
 * gpio_control, reading the register back after each write —
 * presumably to flush/serialize the posted PCI writes (TODO confirm).
1883 static void fix_mac_address(nic_t * sp)
1885 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1889 while (fix_mac[i] != END_SIGN) {
1890 writeq(fix_mac[i++], &bar0->gpio_control);
1892 val64 = readq(&bar0->gpio_control);
1897 * start_nic - Turns the device on
1898 * @nic : device private variable.
1900 * This function actually turns the device on. Before this function is
1901 * called, all Registers are configured from their reset states
1902 * and shared memory is allocated but the NIC is still quiescent. On
1903 * calling this function, the device interrupts are cleared and the NIC is
1904 * literally switched on by writing into the adapter control register.
1906 * SUCCESS on success and -1 on failure.
1909 static int start_nic(struct s2io_nic *nic)
1911 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1912 struct net_device *dev = nic->dev;
1913 register u64 val64 = 0;
1915 mac_info_t *mac_control;
1916 struct config_param *config;
1918 mac_control = &nic->mac_control;
1919 config = &nic->config;
1921 /* PRC Initialization and configuration */
1922 for (i = 0; i < config->rx_ring_num; i++) {
 /* Point each PRC at the DMA address of its ring's first Rx block. */
1923 writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
1924 &bar0->prc_rxd0_n[i]);
1926 val64 = readq(&bar0->prc_ctrl_n[i]);
1927 if (nic->config.bimodal)
1928 val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
1929 if (nic->rxd_mode == RXD_MODE_1)
1930 val64 |= PRC_CTRL_RC_ENABLED;
1932 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1933 if (nic->device_type == XFRAME_II_DEVICE)
1934 val64 |= PRC_CTRL_GROUP_READS;
1935 val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
1936 val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
1937 writeq(val64, &bar0->prc_ctrl_n[i]);
1940 if (nic->rxd_mode == RXD_MODE_3B) {
1941 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1942 val64 = readq(&bar0->rx_pa_cfg);
1943 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1944 writeq(val64, &bar0->rx_pa_cfg);
1948 * Enabling MC-RLDRAM. After enabling the device, we timeout
1949 * for around 100ms, which is approximately the time required
1950 * for the device to be ready for operation.
1952 val64 = readq(&bar0->mc_rldram_mrs);
1953 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
1954 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1955 val64 = readq(&bar0->mc_rldram_mrs);
1957 msleep(100); /* Delay by around 100 ms. */
1959 /* Enabling ECC Protection. */
 /* NOTE(review): the comment says "Enabling" but the code CLEARS
  * ADAPTER_ECC_EN — either the macro names the disable bit or the
  * comment is wrong; confirm against the Xframe register spec. */
1960 val64 = readq(&bar0->adapter_control);
1961 val64 &= ~ADAPTER_ECC_EN;
1962 writeq(val64, &bar0->adapter_control);
1965 * Clearing any possible Link state change interrupts that
1966 * could have popped up just before Enabling the card.
 /* mac_rmac_err_reg is write-1-to-clear: write back what was read. */
1968 val64 = readq(&bar0->mac_rmac_err_reg);
1970 writeq(val64, &bar0->mac_rmac_err_reg);
1973 * Verify if the device is ready to be enabled, if so enable
1976 val64 = readq(&bar0->adapter_status);
1977 if (!verify_xena_quiescence(nic)) {
1978 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1979 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1980 (unsigned long long) val64);
1985 * With some switches, link might be already up at this point.
1986 * Because of this weird behavior, when we enable laser,
1987 * we may not get link. We need to handle this. We cannot
1988 * figure out which switch is misbehaving. So we are forced to
1989 * make a global change.
1992 /* Enabling Laser. */
1993 val64 = readq(&bar0->adapter_control);
1994 val64 |= ADAPTER_EOI_TX_ON;
1995 writeq(val64, &bar0->adapter_control);
1997 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
1999 * Don't see link state interrupts initially on some switches,
2000 * so directly scheduling the link state task here.
2002 schedule_work(&nic->set_link_task);
2004 /* SXE-002: Initialize link and activity LED */
2005 subid = nic->pdev->subsystem_device;
2006 if (((subid & 0xFF) >= 0x07) &&
2007 (nic->device_type == XFRAME_I_DEVICE)) {
2008 val64 = readq(&bar0->gpio_control);
2009 val64 |= 0x0000800000000000ULL;
2010 writeq(val64, &bar0->gpio_control);
2011 val64 = 0x0411040400000000ULL;
 /* Magic LED setup written to an unnamed register at BAR0+0x2700. */
2012 writeq(val64, (void __iomem *)bar0 + 0x2700);
2018 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 * Walks the TxD list for one transmitted frame: unmaps the UFO
 * in-band descriptor (if present), the linear part, and every page
 * fragment, then zeroes the descriptors and returns the skb so the
 * caller can free it. Returns NULL for the UFO in-band case (the
 * early return sits on a line elided from this listing — confirm).
2020 static struct sk_buff *s2io_txdl_getskb(fifo_info_t *fifo_data, TxD_t *txdlp, int get_off)
2022 nic_t *nic = fifo_data->nic;
2023 struct sk_buff *skb;
 /* UFO: first descriptor carries an in-band control buffer, not an skb. */
2028 if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
2029 pci_unmap_single(nic->pdev, (dma_addr_t)
2030 txds->Buffer_Pointer, sizeof(u64),
2035 skb = (struct sk_buff *) ((unsigned long)
2036 txds->Host_Control);
2038 memset(txdlp, 0, (sizeof(TxD_t) * fifo_data->max_txds));
 /* Unmap the linear (non-paged) part of the skb. */
2041 pci_unmap_single(nic->pdev, (dma_addr_t)
2042 txds->Buffer_Pointer,
2043 skb->len - skb->data_len,
2045 frg_cnt = skb_shinfo(skb)->nr_frags;
2048 for (j = 0; j < frg_cnt; j++, txds++) {
2049 skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
2050 if (!txds->Buffer_Pointer)
2052 pci_unmap_page(nic->pdev, (dma_addr_t)
2053 txds->Buffer_Pointer,
2054 frag->size, PCI_DMA_TODEVICE);
 /* Clear the whole TxD list so it can be reused. */
2057 memset(txdlp,0, (sizeof(TxD_t) * fifo_data->max_txds));
2062 * free_tx_buffers - Free all queued Tx buffers
2063 * @nic : device private variable.
2065 * Free all queued Tx buffers.
2066 * Return Value: void
 * Walks every FIFO and every descriptor list, reclaims any skb still
 * owned by the driver via s2io_txdl_getskb(), and resets the get/put
 * offsets so the FIFOs start empty.
2069 static void free_tx_buffers(struct s2io_nic *nic)
2071 struct net_device *dev = nic->dev;
2072 struct sk_buff *skb;
2075 mac_info_t *mac_control;
2076 struct config_param *config;
2079 mac_control = &nic->mac_control;
2080 config = &nic->config;
2082 for (i = 0; i < config->tx_fifo_num; i++) {
 /* fifo_len - 1: the last slot is not a usable descriptor list. */
2083 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
2084 txdp = (TxD_t *) mac_control->fifos[i].list_info[j].
2086 skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
2093 "%s:forcibly freeing %d skbs on FIFO%d\n",
2095 mac_control->fifos[i].tx_curr_get_info.offset = 0;
2096 mac_control->fifos[i].tx_curr_put_info.offset = 0;
2101 * stop_nic - To stop the nic
2102 * @nic : device private variable.
2104 * This function does exactly the opposite of what the start_nic()
2105 * function does. This function is called to stop the device.
 * Masks every interrupt source and then clears the adapter-enable bit,
 * quiescing the hardware. Return Value: void.
2110 static void stop_nic(struct s2io_nic *nic)
2112 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2113 register u64 val64 = 0;
2115 mac_info_t *mac_control;
2116 struct config_param *config;
2118 mac_control = &nic->mac_control;
2119 config = &nic->config;
2121 /* Disable all interrupts */
2122 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2123 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2124 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2125 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2127 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2128 val64 = readq(&bar0->adapter_control);
2129 val64 &= ~(ADAPTER_CNTL_EN);
2130 writeq(val64, &bar0->adapter_control);
/*
 * fill_rxd_3buf - populate a 3-buffer-mode Rx descriptor
 * @nic: device private structure
 * @rxdp: descriptor to fill (treated as RxD3_t)
 * @skb: pre-allocated skb whose head receives the L3/L4 headers
 *
 * Maps Buffer-1 for the protocol headers and allocates + maps a
 * separate frag_list skb as Buffer-2 for the L4 payload.
 * Returns -ENOMEM on allocation failure (the return statement is on a
 * line elided from this listing — confirm), SUCCESS otherwise.
 */
2133 static int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb)
2135 struct net_device *dev = nic->dev;
2136 struct sk_buff *frag_list;
2139 /* Buffer-1 receives L3/L4 headers */
2140 ((RxD3_t*)rxdp)->Buffer1_ptr = pci_map_single
2141 (nic->pdev, skb->data, l3l4hdr_size + 4,
2142 PCI_DMA_FROMDEVICE);
2144 /* skb_shinfo(skb)->frag_list will have L4 data payload */
2145 skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
2146 if (skb_shinfo(skb)->frag_list == NULL) {
2147 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
2150 frag_list = skb_shinfo(skb)->frag_list;
 /* Account the payload skb's memory against the parent skb. */
2151 skb->truesize += frag_list->truesize;
2152 frag_list->next = NULL;
 /* NOTE(review): ALIGN() with ALIGN_SIZE + 1 looks odd — presumably
  * ALIGN_SIZE is (boundary - 1) so this rounds to the boundary;
  * confirm the macro definitions. */
2153 tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
2154 frag_list->data = tmp;
2155 frag_list->tail = tmp;
2157 /* Buffer-2 receives L4 data payload */
2158 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
2159 frag_list->data, dev->mtu,
2160 PCI_DMA_FROMDEVICE);
2161 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
2162 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
2168 * fill_rx_buffers - Allocates the Rx side skbs
2169 * @nic: device private variable
2170 * @ring_no: ring number
2172 * The function allocates Rx side skbs and puts the physical
2173 * address of these buffers into the RxD buffer pointers, so that the NIC
2174 * can DMA the received frame into these locations.
2175 * The NIC supports 3 receive modes, viz
2177 * 2. three buffer and
2178 * 3. Five buffer modes.
2179 * Each mode defines how many fragments the received frame will be split
2180 * up into by the NIC. The frame is split into L3 header, L4 Header,
2181 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
2182 * is split into 3 fragments. As of now only single buffer mode is
2185 * SUCCESS on success or an appropriate -ve value on failure.
2188 static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
2190 struct net_device *dev = nic->dev;
2191 struct sk_buff *skb;
2193 int off, off1, size, block_no, block_no1;
2196 mac_info_t *mac_control;
2197 struct config_param *config;
2200 unsigned long flags;
2201 RxD_t *first_rxdp = NULL;
2203 mac_control = &nic->mac_control;
2204 config = &nic->config;
 /* Number of buffers still needed = ring capacity minus buffers
  * currently posted to the hardware. */
2205 alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
2206 atomic_read(&nic->rx_bufs_left[ring_no]);
2208 block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
2209 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
2210 while (alloc_tab < alloc_cnt) {
2211 block_no = mac_control->rings[ring_no].rx_curr_put_info.
2213 off = mac_control->rings[ring_no].rx_curr_put_info.offset;
2215 rxdp = mac_control->rings[ring_no].
2216 rx_blocks[block_no].rxds[off].virt_addr;
 /* Producer caught up with consumer and the slot still holds an
  * skb: the ring is full, stop refilling. */
2218 if ((block_no == block_no1) && (off == off1) &&
2219 (rxdp->Host_Control)) {
2220 DBG_PRINT(INTR_DBG, "%s: Get and Put",
2222 DBG_PRINT(INTR_DBG, " info equated\n");
 /* End of a descriptor block: advance (and wrap) the put pointer
  * to the next block. */
2225 if (off && (off == rxd_count[nic->rxd_mode])) {
2226 mac_control->rings[ring_no].rx_curr_put_info.
2228 if (mac_control->rings[ring_no].rx_curr_put_info.
2229 block_index == mac_control->rings[ring_no].
2231 mac_control->rings[ring_no].rx_curr_put_info.
2233 block_no = mac_control->rings[ring_no].
2234 rx_curr_put_info.block_index;
2235 if (off == rxd_count[nic->rxd_mode])
2237 mac_control->rings[ring_no].rx_curr_put_info.
2239 rxdp = mac_control->rings[ring_no].
2240 rx_blocks[block_no].block_virt_addr;
2241 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
 /* put_pos is shared with the Rx interrupt path; the lock is only
  * taken on one branch (condition line elided from this listing). */
2245 spin_lock_irqsave(&nic->put_lock, flags);
2246 mac_control->rings[ring_no].put_pos =
2247 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
2248 spin_unlock_irqrestore(&nic->put_lock, flags);
2250 mac_control->rings[ring_no].put_pos =
2251 (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
 /* Descriptor still owned by the adapter — nothing to refill here. */
2253 if ((rxdp->Control_1 & RXD_OWN_XENA) &&
2254 ((nic->rxd_mode >= RXD_MODE_3A) &&
2255 (rxdp->Control_2 & BIT(0)))) {
2256 mac_control->rings[ring_no].rx_curr_put_info.
2260 /* calculate size of skb based on ring mode */
2261 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
2262 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
2263 if (nic->rxd_mode == RXD_MODE_1)
2264 size += NET_IP_ALIGN;
2265 else if (nic->rxd_mode == RXD_MODE_3B)
2266 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
2268 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
2271 skb = dev_alloc_skb(size);
2273 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
2274 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
 /* On allocation failure still hand the first refilled descriptor
  * to the adapter before returning. */
2277 first_rxdp->Control_1 |= RXD_OWN_XENA;
2281 if (nic->rxd_mode == RXD_MODE_1) {
2282 /* 1 buffer mode - normal operation mode */
2283 memset(rxdp, 0, sizeof(RxD1_t));
2284 skb_reserve(skb, NET_IP_ALIGN);
2285 ((RxD1_t*)rxdp)->Buffer0_ptr = pci_map_single
2286 (nic->pdev, skb->data, size - NET_IP_ALIGN,
2287 PCI_DMA_FROMDEVICE);
2288 rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
2290 } else if (nic->rxd_mode >= RXD_MODE_3A) {
2292 * 2 or 3 buffer mode -
2293 * Both 2 buffer mode and 3 buffer mode provides 128
2294 * byte aligned receive buffers.
2296 * 3 buffer mode provides header separation where in
2297 * skb->data will have L3/L4 headers where as
2298 * skb_shinfo(skb)->frag_list will have the L4 data
2302 memset(rxdp, 0, sizeof(RxD3_t));
2303 ba = &mac_control->rings[ring_no].ba[block_no][off];
2304 skb_reserve(skb, BUF0_LEN);
 /* Round skb->data up to the alignment boundary (the arithmetic
  * on tmp is on lines elided from this listing). */
2305 tmp = (u64)(unsigned long) skb->data;
2308 skb->data = (void *) (unsigned long)tmp;
2309 skb->tail = (void *) (unsigned long)tmp;
 /* Buffer-0 (the per-descriptor header area) is mapped once and
  * reused; only re-sync it for the device on subsequent passes. */
2311 if (!(((RxD3_t*)rxdp)->Buffer0_ptr))
2312 ((RxD3_t*)rxdp)->Buffer0_ptr =
2313 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
2314 PCI_DMA_FROMDEVICE);
2316 pci_dma_sync_single_for_device(nic->pdev,
2317 (dma_addr_t) ((RxD3_t*)rxdp)->Buffer0_ptr,
2318 BUF0_LEN, PCI_DMA_FROMDEVICE);
2319 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
2320 if (nic->rxd_mode == RXD_MODE_3B) {
2321 /* Two buffer mode */
2324 * Buffer2 will have L3/L4 header plus
2327 ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single
2328 (nic->pdev, skb->data, dev->mtu + 4,
2329 PCI_DMA_FROMDEVICE);
2331 /* Buffer-1 will be dummy buffer. Not used */
2332 if (!(((RxD3_t*)rxdp)->Buffer1_ptr)) {
2333 ((RxD3_t*)rxdp)->Buffer1_ptr =
2334 pci_map_single(nic->pdev,
2336 PCI_DMA_FROMDEVICE);
2338 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
2339 rxdp->Control_2 |= SET_BUFFER2_SIZE_3
2343 if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
2344 dev_kfree_skb_irq(skb);
2347 first_rxdp->Control_1 |=
2353 rxdp->Control_2 |= BIT(0);
2355 rxdp->Host_Control = (unsigned long) (skb);
 /* Hand ownership to the adapter immediately except every
  * 2^rxsync_frequency-th descriptor (batched below with a wmb). */
2356 if (alloc_tab & ((1 << rxsync_frequency) - 1))
2357 rxdp->Control_1 |= RXD_OWN_XENA;
2359 if (off == (rxd_count[nic->rxd_mode] + 1))
2361 mac_control->rings[ring_no].rx_curr_put_info.offset = off;
2363 rxdp->Control_2 |= SET_RXD_MARKER;
2364 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
2367 first_rxdp->Control_1 |= RXD_OWN_XENA;
2371 atomic_inc(&nic->rx_bufs_left[ring_no]);
2376 /* Transfer ownership of first descriptor to adapter just before
2377 * exiting. Before that, use memory barrier so that ownership
2378 * and other fields are seen by adapter correctly.
2382 first_rxdp->Control_1 |= RXD_OWN_XENA;
/*
 * free_rxd_blk - release every skb posted in one Rx descriptor block
 * @sp: device private structure
 * @ring_no: ring the block belongs to
 * @blk: block index within the ring
 *
 * For each descriptor holding an skb, unmaps the DMA buffers
 * according to the active Rx descriptor mode, zeroes the descriptor,
 * and decrements the posted-buffer count. (The skb free itself is on
 * a line elided from this listing — confirm against the full source.)
 */
2388 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2390 struct net_device *dev = sp->dev;
2392 struct sk_buff *skb;
2394 mac_info_t *mac_control;
2397 mac_control = &sp->mac_control;
2398 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2399 rxdp = mac_control->rings[ring_no].
2400 rx_blocks[blk].rxds[j].virt_addr;
2401 skb = (struct sk_buff *)
2402 ((unsigned long) rxdp->Host_Control);
2406 if (sp->rxd_mode == RXD_MODE_1) {
 /* 1-buffer mode: a single mapping covers MTU + all headers. */
2407 pci_unmap_single(sp->pdev, (dma_addr_t)
2408 ((RxD1_t*)rxdp)->Buffer0_ptr,
2410 HEADER_ETHERNET_II_802_3_SIZE
2411 + HEADER_802_2_SIZE +
2413 PCI_DMA_FROMDEVICE);
2414 memset(rxdp, 0, sizeof(RxD1_t));
2415 } else if(sp->rxd_mode == RXD_MODE_3B) {
 /* 2-buffer mode: header area, dummy buffer and payload. */
2416 ba = &mac_control->rings[ring_no].
2418 pci_unmap_single(sp->pdev, (dma_addr_t)
2419 ((RxD3_t*)rxdp)->Buffer0_ptr,
2421 PCI_DMA_FROMDEVICE);
2422 pci_unmap_single(sp->pdev, (dma_addr_t)
2423 ((RxD3_t*)rxdp)->Buffer1_ptr,
2425 PCI_DMA_FROMDEVICE);
2426 pci_unmap_single(sp->pdev, (dma_addr_t)
2427 ((RxD3_t*)rxdp)->Buffer2_ptr,
2429 PCI_DMA_FROMDEVICE);
2430 memset(rxdp, 0, sizeof(RxD3_t));
 /* 3-buffer mode: headers in Buffer0/1, MTU-sized payload in 2. */
2432 pci_unmap_single(sp->pdev, (dma_addr_t)
2433 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
2434 PCI_DMA_FROMDEVICE);
2435 pci_unmap_single(sp->pdev, (dma_addr_t)
2436 ((RxD3_t*)rxdp)->Buffer1_ptr,
2438 PCI_DMA_FROMDEVICE);
2439 pci_unmap_single(sp->pdev, (dma_addr_t)
2440 ((RxD3_t*)rxdp)->Buffer2_ptr, dev->mtu,
2441 PCI_DMA_FROMDEVICE);
2442 memset(rxdp, 0, sizeof(RxD3_t));
2445 atomic_dec(&sp->rx_bufs_left[ring_no]);
2450 * free_rx_buffers - Frees all Rx buffers
2451 * @sp: device private variable.
2453 * This function will free all Rx buffers allocated by host.
/*
 * free_rx_buffers - free every posted Rx buffer on all rings.
 * @sp: device private structure.
 *
 * Iterates over all configured Rx rings, frees each descriptor block via
 * free_rxd_blk(), then resets the per-ring get/put bookkeeping and the
 * posted-buffer counter.
 * NOTE(review): in the visible lines buf_cnt is initialized to 0 and never
 * incremented, so the "Freed 0x%x" debug print always reports 0 - confirm
 * against the full source whether the count is updated in omitted lines.
 */
2458 static void free_rx_buffers(struct s2io_nic *sp)
2460 struct net_device *dev = sp->dev;
2461 int i, blk = 0, buf_cnt = 0;
2462 mac_info_t *mac_control;
2463 struct config_param *config;
2465 mac_control = &sp->mac_control;
2466 config = &sp->config;
2468 for (i = 0; i < config->rx_ring_num; i++) {
2469 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2470 free_rxd_blk(sp,i,blk);
/* Reset ring cursors so the ring starts clean on next fill. */
2472 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2473 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2474 mac_control->rings[i].rx_curr_put_info.offset = 0;
2475 mac_control->rings[i].rx_curr_get_info.offset = 0;
2476 atomic_set(&sp->rx_bufs_left[i], 0);
2477 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2478 dev->name, buf_cnt, i);
2483 * s2io_poll - Rx interrupt handler for NAPI support
2484 * @dev : pointer to the device structure.
2485 * @budget : The number of packets that were budgeted to be processed
2486 * during one pass through the 'Poll" function.
2488 * Comes into picture only if NAPI support has been incorporated. It does
2489 * the same thing that rx_intr_handler does, but not in an interrupt
2490 * context. It also processes only a given number of packets.
2492 * 0 on success and 1 if there are No Rx packets to be processed.
/*
 * s2io_poll - NAPI poll routine (pre-2.6.24 netdev poll interface).
 * @dev: network device.
 * @budget: in/out packet budget for this poll pass.
 *
 * Clamps the budget to dev->quota, acknowledges pending Rx interrupt bits,
 * runs rx_intr_handler() on every ring, replenishes Rx buffers, and - when
 * the quota was not exhausted - completes NAPI and re-enables Rx interrupts.
 * NOTE(review): several lines (loop body braces, *budget/quota updates,
 * return statements, the 'napi'/no-quota split) are missing from this
 * extract; the early-exit vs. completion paths cannot be fully verified.
 */
2495 static int s2io_poll(struct net_device *dev, int *budget)
2497 nic_t *nic = dev->priv;
2498 int pkt_cnt = 0, org_pkts_to_process;
2499 mac_info_t *mac_control;
2500 struct config_param *config;
2501 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2504 atomic_inc(&nic->isr_cnt);
2505 mac_control = &nic->mac_control;
2506 config = &nic->config;
/* Never process more than the device quota for this pass. */
2508 nic->pkts_to_process = *budget;
2509 if (nic->pkts_to_process > dev->quota)
2510 nic->pkts_to_process = dev->quota;
2511 org_pkts_to_process = nic->pkts_to_process;
/* Acknowledge all Rx traffic interrupts; readl flushes the PCI write. */
2513 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2514 readl(&bar0->rx_traffic_int);
2516 for (i = 0; i < config->rx_ring_num; i++) {
2517 rx_intr_handler(&mac_control->rings[i]);
2518 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2519 if (!nic->pkts_to_process) {
2520 /* Quota for the current iteration has been met */
2527 dev->quota -= pkt_cnt;
/* Quota not exhausted: complete NAPI and refill the rings. */
2529 netif_rx_complete(dev);
2531 for (i = 0; i < config->rx_ring_num; i++) {
2532 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2533 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2534 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2538 /* Re enable the Rx interrupts. */
2539 writeq(0x0, &bar0->rx_traffic_mask);
2540 readl(&bar0->rx_traffic_mask);
2541 atomic_dec(&nic->isr_cnt);
/* Quota-exhausted path: refill but leave interrupts masked so the
 * kernel polls again. */
2545 dev->quota -= pkt_cnt;
2548 for (i = 0; i < config->rx_ring_num; i++) {
2549 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2550 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2551 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2555 atomic_dec(&nic->isr_cnt);
2559 #ifdef CONFIG_NET_POLL_CONTROLLER
2561 * s2io_netpoll - netpoll event handler entry point
2562 * @dev : pointer to the device structure.
2564 * This function will be called by upper layer to check for events on the
2565 * interface in situations where interrupts are disabled. It is used for
2566 * specific in-kernel networking tasks, such as remote consoles and kernel
2567 * debugging over the network (example netdump in RedHat).
/*
 * s2io_netpoll - polling entry used when interrupts are unavailable
 * (netconsole/netdump). Disables the device IRQ, acknowledges all Tx/Rx
 * traffic interrupt bits, reclaims transmitted skbs, delivers any received
 * frames, replenishes Rx buffers, then re-enables the IRQ.
 * NOTE(review): some lines (braces, an `int i;` declaration) are omitted
 * in this extract.
 */
2569 static void s2io_netpoll(struct net_device *dev)
2571 nic_t *nic = dev->priv;
2572 mac_info_t *mac_control;
2573 struct config_param *config;
2574 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2575 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2578 disable_irq(dev->irq);
2580 atomic_inc(&nic->isr_cnt);
2581 mac_control = &nic->mac_control;
2582 config = &nic->config;
/* Acknowledge every pending Rx and Tx traffic interrupt. */
2584 writeq(val64, &bar0->rx_traffic_int);
2585 writeq(val64, &bar0->tx_traffic_int);
2587 /* we need to free up the transmitted skbufs or else netpoll will
2588 * run out of skbs and will fail and eventually netpoll application such
2589 * as netdump will fail.
2591 for (i = 0; i < config->tx_fifo_num; i++)
2592 tx_intr_handler(&mac_control->fifos[i]);
2594 /* check for received packet and indicate up to network */
2595 for (i = 0; i < config->rx_ring_num; i++)
2596 rx_intr_handler(&mac_control->rings[i]);
/* Re-post Rx buffers consumed above; ENOMEM is only logged (best effort). */
2598 for (i = 0; i < config->rx_ring_num; i++) {
2599 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2600 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2601 DBG_PRINT(ERR_DBG, " in Rx Netpoll!!\n");
2605 atomic_dec(&nic->isr_cnt);
2606 enable_irq(dev->irq);
2612 * rx_intr_handler - Rx interrupt handler
2613 * @nic: device private variable.
2615 * If the interrupt is because of a received frame or if the
2616 * receive ring contains fresh as yet un-processed frames,this function is
2617 * called. It picks out the RxD at which place the last Rx processing had
2618 * stopped and sends the skb to the OSM's Rx handler and then increments
/*
 * rx_intr_handler - drain all DMA-complete receive descriptors of one ring.
 * @ring_data: per-ring state (descriptor blocks, get/put cursors).
 *
 * Starting at the last get position, walks descriptors whose ownership has
 * returned to the host (RXD_IS_UP2DT), unmaps their buffers per descriptor
 * mode, hands the skb to rx_osm_handler(), and advances the get cursor
 * across block boundaries. Honors the NAPI quota (nic->pkts_to_process)
 * and the indicate_max_pkts module limit. Finally flushes any open LRO
 * sessions. Runs under nic->rx_lock.
 * NOTE(review): many lines are missing from this extract (brace structure,
 * offset increments, the lro/pkt_cnt declarations, break statements), so
 * loop-exit ordering cannot be fully verified here.
 */
2623 static void rx_intr_handler(ring_info_t *ring_data)
2625 nic_t *nic = ring_data->nic;
2626 struct net_device *dev = (struct net_device *) nic->dev;
2627 int get_block, put_block, put_offset;
2628 rx_curr_get_info_t get_info, put_info;
2630 struct sk_buff *skb;
2634 spin_lock(&nic->rx_lock);
/* Bail out if the card is being reset/brought down. */
2635 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2636 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
2637 __FUNCTION__, dev->name);
2638 spin_unlock(&nic->rx_lock);
2642 get_info = ring_data->rx_curr_get_info;
2643 get_block = get_info.block_index;
2644 put_info = ring_data->rx_curr_put_info;
2645 put_block = put_info.block_index;
2646 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
/* Snapshot the producer position; put_lock is presumably only needed on
 * one of the two paths (the branch condition is omitted here). */
2648 spin_lock(&nic->put_lock);
2649 put_offset = ring_data->put_pos;
2650 spin_unlock(&nic->put_lock);
2652 put_offset = ring_data->put_pos;
2654 while (RXD_IS_UP2DT(rxdp)) {
2656 * If your are next to put index then it's
2657 * FIFO full condition
2659 if ((get_block == put_block) &&
2660 (get_info.offset + 1) == put_info.offset) {
2661 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
2664 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
/* A NULL skb here indicates descriptor-bookkeeping corruption. */
2666 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2668 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2669 spin_unlock(&nic->rx_lock);
/* Unmap the received buffers per descriptor mode before indication. */
2672 if (nic->rxd_mode == RXD_MODE_1) {
2673 pci_unmap_single(nic->pdev, (dma_addr_t)
2674 ((RxD1_t*)rxdp)->Buffer0_ptr,
2676 HEADER_ETHERNET_II_802_3_SIZE +
2679 PCI_DMA_FROMDEVICE);
2680 } else if (nic->rxd_mode == RXD_MODE_3B) {
2681 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2682 ((RxD3_t*)rxdp)->Buffer0_ptr,
2683 BUF0_LEN, PCI_DMA_FROMDEVICE);
2684 pci_unmap_single(nic->pdev, (dma_addr_t)
2685 ((RxD3_t*)rxdp)->Buffer2_ptr,
2687 PCI_DMA_FROMDEVICE);
2689 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2690 ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN,
2691 PCI_DMA_FROMDEVICE);
2692 pci_unmap_single(nic->pdev, (dma_addr_t)
2693 ((RxD3_t*)rxdp)->Buffer1_ptr,
2695 PCI_DMA_FROMDEVICE);
2696 pci_unmap_single(nic->pdev, (dma_addr_t)
2697 ((RxD3_t*)rxdp)->Buffer2_ptr,
2698 dev->mtu, PCI_DMA_FROMDEVICE);
2700 prefetch(skb->data);
2701 rx_osm_handler(ring_data, rxdp);
/* Advance the get cursor, wrapping offset within the block and the
 * block index within the ring. */
2703 ring_data->rx_curr_get_info.offset = get_info.offset;
2704 rxdp = ring_data->rx_blocks[get_block].
2705 rxds[get_info.offset].virt_addr;
2706 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2707 get_info.offset = 0;
2708 ring_data->rx_curr_get_info.offset = get_info.offset;
2710 if (get_block == ring_data->block_count)
2712 ring_data->rx_curr_get_info.block_index = get_block;
2713 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
/* Respect NAPI quota and the optional per-interrupt packet cap. */
2716 nic->pkts_to_process -= 1;
2717 if ((napi) && (!nic->pkts_to_process))
2720 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2724 /* Clear all LRO sessions before exiting */
2725 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2726 lro_t *lro = &nic->lro0_n[i];
2728 update_L3L4_header(nic, lro);
2729 queue_rx_frame(lro->parent);
2730 clear_lro_session(lro);
2735 spin_unlock(&nic->rx_lock);
2739 * tx_intr_handler - Transmit interrupt handler
2740 * @nic : device private variable
2742 * If an interrupt was raised to indicate DMA complete of the
2743 * Tx packet, this function is called. It identifies the last TxD
2744 * whose buffer was freed and frees all skbs whose data have already
2745 * DMA'ed into the NICs internal memory.
/*
 * tx_intr_handler - reclaim DMA-complete Tx descriptors of one FIFO.
 * @fifo_data: per-FIFO state (descriptor list, get/put cursors).
 *
 * Walks descriptors from the get cursor until it reaches the put cursor or
 * a descriptor still owned by the NIC, logs any T_CODE errors, frees the
 * completed skbs and updates tx byte statistics, then wakes the queue if
 * it was stopped.
 * NOTE(review): lines are missing from this extract (braces, skb NULL
 * early-return, pkt_cnt handling); comments reflect only visible lines.
 */
2750 static void tx_intr_handler(fifo_info_t *fifo_data)
2752 nic_t *nic = fifo_data->nic;
2753 struct net_device *dev = (struct net_device *) nic->dev;
2754 tx_curr_get_info_t get_info, put_info;
2755 struct sk_buff *skb;
2758 get_info = fifo_data->tx_curr_get_info;
2759 put_info = fifo_data->tx_curr_put_info;
2760 txdlp = (TxD_t *) fifo_data->list_info[get_info.offset].
/* Process until NIC-owned descriptor, ring empty, or no skb attached. */
2762 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2763 (get_info.offset != put_info.offset) &&
2764 (txdlp->Host_Control)) {
2765 /* Check for TxD errors */
2766 if (txdlp->Control_1 & TXD_T_CODE) {
2767 unsigned long long err;
2768 err = txdlp->Control_1 & TXD_T_CODE;
2770 nic->mac_control.stats_info->sw_stat.
/* T_CODE 0xA means the frame was dropped due to link loss - debug
 * level only; any other code is a hard TxD error.
 * NOTE(review): the string below contains a backslash line
 * continuation which embeds the source indentation into the
 * message text - cosmetic, left as-is in this doc pass. */
2773 if ((err >> 48) == 0xA) {
2774 DBG_PRINT(TX_DBG, "TxD returned due \
2775 to loss of link\n");
2778 DBG_PRINT(ERR_DBG, "***TxD error %llx\n", err);
2782 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
2784 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2786 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2790 /* Updating the statistics block */
2791 nic->stats.tx_bytes += skb->len;
2792 dev_kfree_skb_irq(skb);
/* Advance and wrap the get cursor. */
2795 if (get_info.offset == get_info.fifo_len + 1)
2796 get_info.offset = 0;
2797 txdlp = (TxD_t *) fifo_data->list_info
2798 [get_info.offset].list_virt_addr;
2799 fifo_data->tx_curr_get_info.offset =
/* Descriptors were freed: restart the queue if it had stalled. */
2803 spin_lock(&nic->tx_lock);
2804 if (netif_queue_stopped(dev))
2805 netif_wake_queue(dev);
2806 spin_unlock(&nic->tx_lock);
2810 * s2io_mdio_write - Function to write in to MDIO registers
2811 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2812 * @addr : address value
2813 * @value : data value
2814 * @dev : pointer to net_device structure
2816 * This function is used to write values to the MDIO registers
/*
 * s2io_mdio_write - write one value to an MDIO (Clause 45 style) register.
 * @mmd_type: MMD device address (PMA/PMD/WIS/PCS/PHYXS).
 * @addr: register address within the MMD.
 * @value: 16-bit data value to write.
 * @dev: net_device whose NIC registers are used.
 *
 * Performs the classic three-phase indirect sequence through the
 * mdio_control register: address transaction, write-data transaction,
 * then a read-back transaction.
 * NOTE(review): val64 declaration/reset lines and the completion-wait
 * calls between phases are missing from this extract - each phase
 * presumably re-initializes val64 and polls for completion; confirm
 * against the full source.
 */
2819 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
2822 nic_t *sp = dev->priv;
2823 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2825 //address transaction
2826 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2827 | MDIO_MMD_DEV_ADDR(mmd_type)
2828 | MDIO_MMS_PRT_ADDR(0x0);
2829 writeq(val64, &bar0->mdio_control);
2830 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2831 writeq(val64, &bar0->mdio_control);
/* Data transaction: latch the value with a write-op start. */
2836 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2837 | MDIO_MMD_DEV_ADDR(mmd_type)
2838 | MDIO_MMS_PRT_ADDR(0x0)
2839 | MDIO_MDIO_DATA(value)
2840 | MDIO_OP(MDIO_OP_WRITE_TRANS);
2841 writeq(val64, &bar0->mdio_control);
2842 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2843 writeq(val64, &bar0->mdio_control);
/* Read-back transaction (result is not consumed here). */
2847 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2848 | MDIO_MMD_DEV_ADDR(mmd_type)
2849 | MDIO_MMS_PRT_ADDR(0x0)
2850 | MDIO_OP(MDIO_OP_READ_TRANS);
2851 writeq(val64, &bar0->mdio_control);
2852 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2853 writeq(val64, &bar0->mdio_control);
2859 * s2io_mdio_read - Function to read from the MDIO registers
2860 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2861 * @addr : address value
2862 * @dev : pointer to net_device structure
2864 * This function is used to read values from the MDIO registers
/*
 * s2io_mdio_read - read one 16-bit value from an MDIO register.
 * @mmd_type: MMD device address (PMA/PMD/WIS/PCS/PHYXS).
 * @addr: register address within the MMD.
 * @dev: net_device whose NIC registers are used.
 *
 * Issues an address transaction followed by a read transaction through
 * mdio_control, then extracts the 16-bit data field (bits 16..31) from
 * the register read-back.
 * Returns: the 16-bit register value in a u64.
 * NOTE(review): the declarations of val64/rval64 and any completion
 * polling between phases are missing from this extract.
 */
2867 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
2871 nic_t *sp = dev->priv;
2872 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2874 /* address transaction */
2875 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2876 | MDIO_MMD_DEV_ADDR(mmd_type)
2877 | MDIO_MMS_PRT_ADDR(0x0);
2878 writeq(val64, &bar0->mdio_control);
2879 val64 = val64 | MDIO_CTRL_START_TRANS(0xE)
2880 writeq(val64, &bar0->mdio_control);
2883 /* Data transaction */
2885 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2886 | MDIO_MMD_DEV_ADDR(mmd_type)
2887 | MDIO_MMS_PRT_ADDR(0x0)
2888 | MDIO_OP(MDIO_OP_READ_TRANS);
2889 writeq(val64, &bar0->mdio_control);
2890 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2891 writeq(val64, &bar0->mdio_control);
2894 /* Read the value from regs */
/* Data field occupies bits 16..31 of mdio_control; mask then shift. */
2895 rval64 = readq(&bar0->mdio_control);
2896 rval64 = rval64 & 0xFFFF0000;
2897 rval64 = rval64 >> 16;
2901 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
2902 * @counter : counter value to be updated
2903 * @flag : flag to indicate the status
2904 * @type : counter type
2906 * This function is to check the status of the xpak counters value
/*
 * s2io_chk_xpak_counter - update one XPAK alarm counter and its sticky
 * 2-bit state kept in *regs_stat.
 * @counter: counter to increment while the alarm flag is raised.
 * @regs_stat: packed per-alarm state word (2 bits per alarm, selected
 *             by @index).
 * @index: which alarm slot within *regs_stat.
 * @flag: current alarm flag read from the transceiver.
 * @type: alarm type (1 = temperature, 2 = bias current, 3 = output power,
 *        inferred from the messages below - confirm against full source).
 *
 * When the 2-bit state reaches the "repeat" threshold a take-out-of-service
 * warning for the corresponding alarm type is printed.
 * NOTE(review): the mask computation, switch/if structure and the
 * flag-clear path are partially missing from this extract.
 */
2910 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
/* Build a 2-bit mask at position 'index' (mask-shift lines omitted). */
2915 for(i = 0; i <index; i++)
2920 *counter = *counter + 1;
2921 val64 = *regs_stat & mask;
2922 val64 = val64 >> (index * 0x2);
2929 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2930 "service. Excessive temperatures may "
2931 "result in premature transceiver "
2935 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2936 "service Excessive bias currents may "
2937 "indicate imminent laser diode "
2941 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2942 "service Excessive laser output "
2943 "power may saturate far-end "
2947 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
/* Write the updated 2-bit state back into its slot. */
2952 val64 = val64 << (index * 0x2);
2953 *regs_stat = (*regs_stat & (~mask)) | (val64);
/* Alarm not set: clear this alarm's state bits. */
2956 *regs_stat = *regs_stat & (~mask);
2961 * s2io_updt_xpak_counter - Function to update the xpak counters
2962 * @dev : pointer to net_device struct
2964 * This function is to update the status of the xpak counters value
/*
 * s2io_updt_xpak_counter - poll the XPAK transceiver DOM registers over
 * MDIO and update alarm/warning statistics.
 * @dev: net_device to poll.
 *
 * Verifies MDIO slave access (sanity-checks the PMA ID register), loads
 * the DOM page, then reads the alarm and warning flag registers and bumps
 * the matching counters. High-severity alarms go through
 * s2io_chk_xpak_counter() which also maintains sticky state.
 * NOTE(review): the addr/val16/flag/type declarations and the specific
 * register addresses assigned to 'addr' between phases are missing from
 * this extract.
 */
2967 static void s2io_updt_xpak_counter(struct net_device *dev)
2975 nic_t *sp = dev->priv;
2976 StatInfo_t *stat_info = sp->mac_control.stats_info;
2978 /* Check the communication with the MDIO slave */
2981 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
/* All-ones/all-zeros read-back means the MDIO slave did not respond. */
2982 if((val64 == 0xFFFF) || (val64 == 0x0000))
2984 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
2985 "Returned %llx\n", (unsigned long long)val64);
2989 /* Check for the expected value of 2040 at PMA address 0x0000 */
2992 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
2993 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
2994 (unsigned long long)val64);
2998 /* Loading the DOM register to MDIO register */
3000 s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3001 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3003 /* Reading the Alarm flags */
3006 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
/* Bit 7/6: transceiver temperature high/low alarms. */
3008 flag = CHECKBIT(val64, 0x7);
3010 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3011 &stat_info->xpak_stat.xpak_regs_stat,
3014 if(CHECKBIT(val64, 0x6))
3015 stat_info->xpak_stat.alarm_transceiver_temp_low++;
/* Bit 3/2: laser bias current high/low alarms. */
3017 flag = CHECKBIT(val64, 0x3);
3019 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3020 &stat_info->xpak_stat.xpak_regs_stat,
3023 if(CHECKBIT(val64, 0x2))
3024 stat_info->xpak_stat.alarm_laser_bias_current_low++;
/* Bit 1/0: laser output power high/low alarms. */
3026 flag = CHECKBIT(val64, 0x1);
3028 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3029 &stat_info->xpak_stat.xpak_regs_stat,
3032 if(CHECKBIT(val64, 0x0))
3033 stat_info->xpak_stat.alarm_laser_output_power_low++;
3035 /* Reading the Warning flags */
3038 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
/* Warning flags use the same bit layout; simple counters, no sticky state. */
3040 if(CHECKBIT(val64, 0x7))
3041 stat_info->xpak_stat.warn_transceiver_temp_high++;
3043 if(CHECKBIT(val64, 0x6))
3044 stat_info->xpak_stat.warn_transceiver_temp_low++;
3046 if(CHECKBIT(val64, 0x3))
3047 stat_info->xpak_stat.warn_laser_bias_current_high++;
3049 if(CHECKBIT(val64, 0x2))
3050 stat_info->xpak_stat.warn_laser_bias_current_low++;
3052 if(CHECKBIT(val64, 0x1))
3053 stat_info->xpak_stat.warn_laser_output_power_high++;
3055 if(CHECKBIT(val64, 0x0))
3056 stat_info->xpak_stat.warn_laser_output_power_low++;
3060 * alarm_intr_handler - Alarm Interrupt handler
3061 * @nic: device private variable
3062 * Description: If the interrupt was neither because of Rx packet or Tx
3063 * complete, this function is called. If the interrupt was to indicate
3064 * a loss of link, the OSM link status handler is invoked for any other
3065 * alarm interrupt the block that raised the interrupt is displayed
3066 * and a H/W reset is issued.
/*
 * alarm_intr_handler - service non-traffic ("alarm") interrupts.
 * @nic: device private structure.
 *
 * Handles, in order: hourly XPAK counter refresh, link state change,
 * single/double ECC errors (resetting Xframe-I only on critical double
 * errors), serious serr_source errors (full reset), PCC_FB_ECC double
 * errors (adapter recycle per errata), GPIO data parity errors, and
 * Xframe-II ring bump (ring full) counters.
 * NOTE(review): several lines are missing from this extract (braces,
 * u64 cnt / int i declarations, the += for ring_full_cnt), so the exact
 * statement grouping cannot be fully verified here.
 */
3071 static void alarm_intr_handler(struct s2io_nic *nic)
3073 struct net_device *dev = (struct net_device *) nic->dev;
3074 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3075 register u64 val64 = 0, err_reg = 0;
/* Nothing to do while the card is down. */
3078 if (atomic_read(&nic->card_state) == CARD_DOWN)
3080 nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
3081 /* Handling the XPAK counters update */
3082 if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
3083 /* waiting for an hour */
3084 nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
3086 s2io_updt_xpak_counter(dev);
3087 /* reset the count to zero */
3088 nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
3091 /* Handling link status change error Intr */
3092 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
3093 err_reg = readq(&bar0->mac_rmac_err_reg);
/* Write-1-to-clear the latched error bits. */
3094 writeq(err_reg, &bar0->mac_rmac_err_reg);
3095 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
3096 schedule_work(&nic->set_link_task);
3100 /* Handling Ecc errors */
3101 val64 = readq(&bar0->mc_err_reg);
3102 writeq(val64, &bar0->mc_err_reg);
3103 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
3104 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
3105 nic->mac_control.stats_info->sw_stat.
3107 DBG_PRINT(INIT_DBG, "%s: Device indicates ",
3109 DBG_PRINT(INIT_DBG, "double ECC error!!\n");
3110 if (nic->device_type != XFRAME_II_DEVICE) {
3111 /* Reset XframeI only if critical error */
3112 if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
3113 MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
3114 netif_stop_queue(dev);
3115 schedule_work(&nic->rst_timer_task);
3116 nic->mac_control.stats_info->sw_stat.
3121 nic->mac_control.stats_info->sw_stat.
3126 /* In case of a serious error, the device will be Reset. */
3127 val64 = readq(&bar0->serr_source);
3128 if (val64 & SERR_SOURCE_ANY) {
3129 nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
3130 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
3131 DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
3132 (unsigned long long)val64);
3133 netif_stop_queue(dev);
3134 schedule_work(&nic->rst_timer_task);
3135 nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3139 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
3140 * Error occurs, the adapter will be recycled by disabling the
3141 * adapter enable bit and enabling it again after the device
3142 * becomes Quiescent.
3144 val64 = readq(&bar0->pcc_err_reg);
3145 writeq(val64, &bar0->pcc_err_reg);
3146 if (val64 & PCC_FB_ECC_DB_ERR) {
3147 u64 ac = readq(&bar0->adapter_control);
3148 ac &= ~(ADAPTER_CNTL_EN);
3149 writeq(ac, &bar0->adapter_control);
/* Read-back flushes the disable before the link task re-enables. */
3150 ac = readq(&bar0->adapter_control);
3151 schedule_work(&nic->set_link_task);
3153 /* Check for data parity error */
3154 val64 = readq(&bar0->pic_int_status);
3155 if (val64 & PIC_INT_GPIO) {
3156 val64 = readq(&bar0->gpio_int_reg);
3157 if (val64 & GPIO_INT_REG_DP_ERR_INT) {
3158 nic->mac_control.stats_info->sw_stat.parity_err_cnt++;
3159 schedule_work(&nic->rst_timer_task);
3160 nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3164 /* Check for ring full counter */
3165 if (nic->device_type & XFRAME_II_DEVICE) {
/* Each bump counter register packs four 16-bit ring counters. */
3166 val64 = readq(&bar0->ring_bump_counter1);
3167 for (i=0; i<4; i++) {
3168 cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3169 cnt >>= 64 - ((i+1)*16);
3170 nic->mac_control.stats_info->sw_stat.ring_full_cnt
3174 val64 = readq(&bar0->ring_bump_counter2);
3175 for (i=0; i<4; i++) {
3176 cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3177 cnt >>= 64 - ((i+1)*16);
3178 nic->mac_control.stats_info->sw_stat.ring_full_cnt
3183 /* Other type of interrupts are not being handled now, TODO */
3187 * wait_for_cmd_complete - waits for a command to complete.
3188 * @sp : private member of the device structure, which is a pointer to the
3189 * s2io_nic structure.
3190 * Description: Function that waits for a command to Write into RMAC
3191 * ADDR DATA registers to be completed and returns either success or
3192 * error depending on whether the command was complete or not.
3194 * SUCCESS on success and FAILURE on failure.
/*
 * wait_for_cmd_complete - poll a register until @busy_bit clears.
 * @addr: iomapped register to poll.
 * @busy_bit: bit that indicates the command is still in flight.
 *
 * Returns SUCCESS once the bit clears, FAILURE on timeout.
 * NOTE(review): the polling loop (retry count, sleep/delay, the
 * 'ret = SUCCESS; break;' body and final return) is missing from this
 * extract; only the check itself is visible.
 */
3197 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit)
3199 int ret = FAILURE, cnt = 0;
3203 val64 = readq(addr);
3204 if (!(val64 & busy_bit)) {
3220 * check_pci_device_id - Checks if the device id is supported
3222 * Description: Function to check if the pci device id is supported by driver.
3223 * Return value: Actual device id if supported else PCI_ANY_ID
3225 static u16 check_pci_device_id(u16 id)
3228 case PCI_DEVICE_ID_HERC_WIN:
3229 case PCI_DEVICE_ID_HERC_UNI:
3230 return XFRAME_II_DEVICE;
3231 case PCI_DEVICE_ID_S2IO_UNI:
3232 case PCI_DEVICE_ID_S2IO_WIN:
3233 return XFRAME_I_DEVICE;
3240 * s2io_reset - Resets the card.
3241 * @sp : private member of the device structure.
3242 * Description: Function to Reset the card. This function then also
3243 * restores the previously saved PCI configuration space registers as
3244 * the card reset also resets the configuration space.
/*
 * s2io_reset - reset the adapter and restore PCI/config state.
 * @sp: device private structure.
 *
 * Saves the PCI-X command register, performs the reset (PME-based for
 * Xframe II, SW_RESET write otherwise visible), restores PCI config
 * space with retries until the device id reads back sane, re-runs the
 * swapper setup, restores MSI-X table entries, clears latched PCI/PCI-X
 * error status, zeroes the OS statistics, applies the SXE-002 LED
 * workaround, and clears spurious post-reset ECC interrupts.
 * NOTE(review): lines are missing from this extract (declarations of
 * val64/val16/pci_cmd/subid/i/ret, msleep/delay calls, some braces and
 * return paths), so exact control flow is partially inferred.
 */
3249 static void s2io_reset(nic_t * sp)
3251 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3256 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3257 __FUNCTION__, sp->dev->name);
3259 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3260 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
/* Xframe II: PME-cycle (D3hot -> D0) performs the reset. */
3262 if (sp->device_type == XFRAME_II_DEVICE) {
3264 ret = pci_set_power_state(sp->pdev, 3);
3266 ret = pci_set_power_state(sp->pdev, 0);
3268 DBG_PRINT(ERR_DBG,"%s PME based SW_Reset failed!\n",
/* Fallback/other devices: explicit software reset register write. */
3276 val64 = SW_RESET_ALL;
3277 writeq(val64, &bar0->sw_reset);
3279 if (strstr(sp->product_name, "CX4")) {
/* Retry config-space restore until the device id reads back valid. */
3283 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3285 /* Restore the PCI state saved during initialization. */
3286 pci_restore_state(sp->pdev);
3287 pci_read_config_word(sp->pdev, 0x2, &val16);
3288 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3293 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3294 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
3297 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3301 /* Set swapper to enable I/O register access */
3302 s2io_set_swapper(sp);
3304 /* Restore the MSIX table entries from local variables */
3305 restore_xmsi_data(sp);
3307 /* Clear certain PCI/PCI-X fields after reset */
3308 if (sp->device_type == XFRAME_II_DEVICE) {
3309 /* Clear "detected parity error" bit */
3310 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3312 /* Clearing PCIX Ecc status register */
3313 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3315 /* Clearing PCI_STATUS error reflected here */
3316 writeq(BIT(62), &bar0->txpic_int_reg);
3319 /* Reset device statistics maintained by OS */
3320 memset(&sp->stats, 0, sizeof (struct net_device_stats));
3322 /* SXE-002: Configure link and activity LED to turn it off */
3323 subid = sp->pdev->subsystem_device;
3324 if (((subid & 0xFF) >= 0x07) &&
3325 (sp->device_type == XFRAME_I_DEVICE)) {
3326 val64 = readq(&bar0->gpio_control);
3327 val64 |= 0x0000800000000000ULL;
3328 writeq(val64, &bar0->gpio_control);
/* Magic LED configuration write at offset 0x2700 (per errata SXE-002). */
3329 val64 = 0x0411040400000000ULL;
3330 writeq(val64, (void __iomem *)bar0 + 0x2700);
3334 * Clear spurious ECC interrupts that would have occured on
3335 * XFRAME II cards after reset.
3337 if (sp->device_type == XFRAME_II_DEVICE) {
3338 val64 = readq(&bar0->pcc_err_reg);
3339 writeq(val64, &bar0->pcc_err_reg);
3342 sp->device_enabled_once = FALSE;
3346 * s2io_set_swapper - to set the swapper control on the card
3347 * @sp : private member of the device structure,
3348 * pointer to the s2io_nic structure.
3349 * Description: Function to set the swapper control on the card
3350 * correctly depending on the 'endianness' of the system.
3352 * SUCCESS on success and FAILURE on failure.
/*
 * s2io_set_swapper - program the byte-swapper for host endianness.
 * @sp: device private structure.
 *
 * Verifies endian settings by reading the PIF feed-back register
 * (expected 0x0123456789ABCDEF); if wrong, tries the candidate
 * swapper_ctrl values until the feed-back reads correctly. Then verifies
 * read/write swapping through the xmsi_address scratch register, and
 * finally enables the per-path FE/SE swap bits (the two large bit-set
 * branches correspond to the big- vs little-endian host cases - the
 * #ifdef/#else lines are not visible in this extract).
 * Returns SUCCESS or FAILURE.
 * NOTE(review): declarations (int i), loop headers, #ifdef markers and
 * the return statements are partially missing from this extract.
 */
3355 static int s2io_set_swapper(nic_t * sp)
3357 struct net_device *dev = sp->dev;
3358 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3359 u64 val64, valt, valr;
3362 * Set proper endian settings and verify the same by reading
3363 * the PIF Feed-back register.
3366 val64 = readq(&bar0->pif_rd_swapper_fb);
3367 if (val64 != 0x0123456789ABCDEFULL) {
3369 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3370 0x8100008181000081ULL, /* FE=1, SE=0 */
3371 0x4200004242000042ULL, /* FE=0, SE=1 */
3372 0}; /* FE=0, SE=0 */
/* Try each candidate until the feed-back pattern matches. */
3375 writeq(value[i], &bar0->swapper_ctrl);
3376 val64 = readq(&bar0->pif_rd_swapper_fb);
3377 if (val64 == 0x0123456789ABCDEFULL)
3382 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3384 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3385 (unsigned long long) val64);
3390 valr = readq(&bar0->swapper_ctrl);
/* Verify write+read swapping via the xmsi_address scratch register. */
3393 valt = 0x0123456789ABCDEFULL;
3394 writeq(valt, &bar0->xmsi_address);
3395 val64 = readq(&bar0->xmsi_address);
3399 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3400 0x0081810000818100ULL, /* FE=1, SE=0 */
3401 0x0042420000424200ULL, /* FE=0, SE=1 */
3402 0}; /* FE=0, SE=0 */
3405 writeq((value[i] | valr), &bar0->swapper_ctrl);
3406 writeq(valt, &bar0->xmsi_address);
3407 val64 = readq(&bar0->xmsi_address);
3413 unsigned long long x = val64;
3414 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3415 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
3419 val64 = readq(&bar0->swapper_ctrl);
/* Keep only the PIF control bits; rebuild the swap enables below. */
3420 val64 &= 0xFFFF000000000000ULL;
3424 * The device by default set to a big endian format, so a
3425 * big endian driver need not set anything.
3427 val64 |= (SWAPPER_CTRL_TXP_FE |
3428 SWAPPER_CTRL_TXP_SE |
3429 SWAPPER_CTRL_TXD_R_FE |
3430 SWAPPER_CTRL_TXD_W_FE |
3431 SWAPPER_CTRL_TXF_R_FE |
3432 SWAPPER_CTRL_RXD_R_FE |
3433 SWAPPER_CTRL_RXD_W_FE |
3434 SWAPPER_CTRL_RXF_W_FE |
3435 SWAPPER_CTRL_XMSI_FE |
3436 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3437 if (sp->intr_type == INTA)
3438 val64 |= SWAPPER_CTRL_XMSI_SE;
3439 writeq(val64, &bar0->swapper_ctrl);
3442 * Initially we enable all bits to make it accessible by the
3443 * driver, then we selectively enable only those bits that
3446 val64 |= (SWAPPER_CTRL_TXP_FE |
3447 SWAPPER_CTRL_TXP_SE |
3448 SWAPPER_CTRL_TXD_R_FE |
3449 SWAPPER_CTRL_TXD_R_SE |
3450 SWAPPER_CTRL_TXD_W_FE |
3451 SWAPPER_CTRL_TXD_W_SE |
3452 SWAPPER_CTRL_TXF_R_FE |
3453 SWAPPER_CTRL_RXD_R_FE |
3454 SWAPPER_CTRL_RXD_R_SE |
3455 SWAPPER_CTRL_RXD_W_FE |
3456 SWAPPER_CTRL_RXD_W_SE |
3457 SWAPPER_CTRL_RXF_W_FE |
3458 SWAPPER_CTRL_XMSI_FE |
3459 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3460 if (sp->intr_type == INTA)
3461 val64 |= SWAPPER_CTRL_XMSI_SE;
3462 writeq(val64, &bar0->swapper_ctrl);
3464 val64 = readq(&bar0->swapper_ctrl);
3467 * Verifying if endian settings are accurate by reading a
3468 * feedback register.
3470 val64 = readq(&bar0->pif_rd_swapper_fb);
3471 if (val64 != 0x0123456789ABCDEFULL) {
3472 /* Endian settings are incorrect, calls for another dekko. */
3473 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3475 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3476 (unsigned long long) val64);
/*
 * wait_for_msix_trans - poll xmsi_access until the busy bit (BIT 15)
 * clears, indicating the indirect MSI-X table transaction finished.
 * @nic: device private structure.
 * @i: MSI-X vector index, used only in the failure message.
 *
 * Returns 0 on completion, non-zero (presumably 1) on timeout.
 * NOTE(review): the retry loop header, delay call, cnt++ and return
 * statements are missing from this extract.
 */
3483 static int wait_for_msix_trans(nic_t *nic, int i)
3485 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3487 int ret = 0, cnt = 0;
3490 val64 = readq(&bar0->xmsi_access);
3491 if (!(val64 & BIT(15)))
3497 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
/*
 * restore_xmsi_data - write the saved MSI-X address/data pairs back into
 * the NIC's MSI-X table (needed after a reset wipes the table).
 * @nic: device private structure.
 *
 * For each vector: program xmsi_address/xmsi_data, then trigger an
 * indirect table write (BIT(7) = write, BIT(15) = start/busy, vBIT(i,26,6)
 * selects the entry) and wait for completion.
 */
3504 static void restore_xmsi_data(nic_t *nic)
3506 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3510 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3511 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3512 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3513 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3514 writeq(val64, &bar0->xmsi_access);
3515 if (wait_for_msix_trans(nic, i)) {
3516 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
/*
 * store_xmsi_data - read the NIC's MSI-X table and cache each vector's
 * address/data pair in nic->msix_info[] so restore_xmsi_data() can
 * replay them after a reset.
 * @nic: device private structure.
 */
3522 static void store_xmsi_data(nic_t *nic)
3524 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3525 u64 val64, addr, data;
3528 /* Store and display */
3529 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
/* Indirect table read: BIT(15) starts the transaction, vBIT selects
 * entry i (no write bit, so this is a read). */
3530 val64 = (BIT(15) | vBIT(i, 26, 6));
3531 writeq(val64, &bar0->xmsi_access);
3532 if (wait_for_msix_trans(nic, i)) {
3533 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3536 addr = readq(&bar0->xmsi_address);
3537 data = readq(&bar0->xmsi_data);
3539 nic->msix_info[i].addr = addr;
3540 nic->msix_info[i].data = data;
/*
 * s2io_enable_msi - switch the adapter to MSI interrupts.
 * @nic: device private structure.
 *
 * Enables MSI via the PCI layer, then (working around the hardware's
 * default vector) switches to MSI-1, and routes every usable Tx FIFO and
 * Rx ring to vector 1 through the tx_mat/rx_mat steering registers.
 * Returns 0 on success (visible error path returns the pci_enable_msi
 * error - the return lines are missing from this extract).
 * NOTE(review): the raw config-space offsets (0x4c message data, 0x42
 * MSI control) and the val64 |= lines between reads are not all visible;
 * hedge on exact register semantics.
 */
3545 int s2io_enable_msi(nic_t *nic)
3547 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3548 u16 msi_ctrl, msg_val;
3549 struct config_param *config = &nic->config;
3550 struct net_device *dev = nic->dev;
3551 u64 val64, tx_mat, rx_mat;
3554 val64 = readq(&bar0->pic_control);
3556 writeq(val64, &bar0->pic_control);
3558 err = pci_enable_msi(nic->pdev);
3560 DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
3566 * Enable MSI and use MSI-1 in stead of the standard MSI-0
3567 * for interrupt handling.
3569 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3571 pci_write_config_word(nic->pdev, 0x4c, msg_val);
3572 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3574 pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
3576 pci_write_config_word(nic->pdev, 0x42, msi_ctrl);
3578 /* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
3579 tx_mat = readq(&bar0->tx_mat0_n[0]);
3580 for (i=0; i<config->tx_fifo_num; i++) {
3581 tx_mat |= TX_MAT_SET(i, 1);
3583 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3585 rx_mat = readq(&bar0->rx_mat);
3586 for (i=0; i<config->rx_ring_num; i++) {
3587 rx_mat |= RX_MAT_SET(i, 1);
3589 writeq(rx_mat, &bar0->rx_mat);
3591 dev->irq = nic->pdev->irq;
/*
 * s2io_enable_msi_x - allocate and program MSI-X vectors.
 * @nic: device private structure.
 *
 * Allocates the msix_entry and s2io_msix_entry tables, assigns vector 0
 * implicitly (entries start at msix_indx = 1), maps each Tx FIFO and each
 * Rx ring to its own vector via the tx_mat/rx_mat steering registers
 * (bimodal mode routes ring interrupts through tx_mat0_n[7] instead),
 * then enables MSI-X - retrying with however many vectors the PCI core
 * granted, as long as that covers fifo+ring+1. Finally force-enables MSI
 * in config space as a herc-NIC workaround.
 * NOTE(review): kmalloc+memset pairs here could be kzalloc - left as-is
 * in this doc-only pass since surrounding lines are missing.
 */
3595 static int s2io_enable_msi_x(nic_t *nic)
3597 XENA_dev_config_t __iomem *bar0 = nic->bar0;
3599 u16 msi_control; /* Temp variable */
3600 int ret, i, j, msix_indx = 1;
3602 nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3604 if (nic->entries == NULL) {
3605 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3608 memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3611 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
3613 if (nic->s2io_entries == NULL) {
3614 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
/* Unwind the first allocation on partial failure. */
3615 kfree(nic->entries);
3618 memset(nic->s2io_entries, 0,
3619 MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3621 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3622 nic->entries[i].entry = i;
3623 nic->s2io_entries[i].entry = i;
3624 nic->s2io_entries[i].arg = NULL;
3625 nic->s2io_entries[i].in_use = 0;
/* One vector per Tx FIFO, starting at vector 1. */
3628 tx_mat = readq(&bar0->tx_mat0_n[0]);
3629 for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3630 tx_mat |= TX_MAT_SET(i, msix_indx);
3631 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3632 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3633 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3635 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3637 if (!nic->config.bimodal) {
3638 rx_mat = readq(&bar0->rx_mat);
3639 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3640 rx_mat |= RX_MAT_SET(j, msix_indx);
3641 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3642 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3643 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3645 writeq(rx_mat, &bar0->rx_mat);
/* Bimodal: ring interrupts are steered through tx_mat0_n[7]. */
3647 tx_mat = readq(&bar0->tx_mat0_n[7]);
3648 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
/* NOTE(review): TX_MAT_SET(i, ...) uses loop index 'i' (left at
 * tx_fifo_num) while iterating over 'j' - looks like it should be
 * 'j'; confirm intended bimodal steering before changing. */
3649 tx_mat |= TX_MAT_SET(i, msix_indx);
3650 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3651 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3652 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3654 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3657 nic->avail_msix_vectors = 0;
3658 ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3659 /* We fail init if error or we get less vectors than min required */
3660 if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
/* Positive return = vectors available; retry with exactly that many. */
3661 nic->avail_msix_vectors = ret;
3662 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3665 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3666 kfree(nic->entries);
3667 kfree(nic->s2io_entries);
3668 nic->entries = NULL;
3669 nic->s2io_entries = NULL;
3670 nic->avail_msix_vectors = 0;
3673 if (!nic->avail_msix_vectors)
3674 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
3677 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3678 * in the herc NIC. (Temp change, needs to be removed later)
3680 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3681 msi_control |= 0x1; /* Enable MSI */
3682 pci_write_config_word(nic->pdev, 0x42, msi_control);
3687 /* ********************************************************* *
3688 * Functions defined below concern the OS part of the driver *
3689 * ********************************************************* */
3692 * s2io_open - open entry point of the driver
3693 * @dev : pointer to the device structure.
3695 * This function is the open entry point of the driver. It mainly calls a
3696 * function to allocate Rx buffers and inserts them into the buffer
3697 * descriptors and then enables the Rx part of the NIC.
3699 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3703 static int s2io_open(struct net_device *dev)
3705 nic_t *sp = dev->priv;
3709 * Make sure you have link off by default every time
3710 * Nic is initialized
3712 netif_carrier_off(dev);
3713 sp->last_link_state = 0;
3715 /* Initialize H/W and enable interrupts */
3716 err = s2io_card_up(sp);
3718 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3720 goto hw_init_failed;
/* Program the station MAC address; failure is treated like a H/W
 * init failure and unwinds through the same error path. */
3723 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3724 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3727 goto hw_init_failed;
3730 netif_start_queue(dev);
/* Error path: when MSI-X was in use, release the vector tables.
 * NOTE(review): kfree(NULL) is a no-op, so the NULL check before
 * kfree is redundant (kept as-is). */
3734 if (sp->intr_type == MSI_X) {
3737 if (sp->s2io_entries)
3738 kfree(sp->s2io_entries);
3744 * s2io_close -close entry point of the driver
3745 * @dev : device pointer.
3747 * This is the stop entry point of the driver. It needs to undo exactly
3748 * whatever was done by the open entry point,thus it's usually referred to
3749 * as the close function.Among other things this function mainly stops the
3750 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3752 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/*
 * s2io_close - stop entry point; undoes s2io_open.
 * Flushes deferred work, stops the Tx queue and marks the device as
 * closed.  The card reset / buffer teardown happens in elided lines.
 */
3756 static int s2io_close(struct net_device *dev)
3758 nic_t *sp = dev->priv;
3760 flush_scheduled_work();
3761 netif_stop_queue(dev);
3762 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3765 sp->device_close_flag = TRUE; /* Device is shut down. */
3770 * s2io_xmit - Tx entry point of the driver
3771 * @skb : the socket buffer containing the Tx data.
3772 * @dev : device pointer.
3774 * This function is the Tx entry point of the driver. S2IO NIC supports
3775 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
3776 * NOTE: when device cant queue the pkt,just the trans_start variable will
3779 * 0 on success & 1 on failure.
/*
 * s2io_xmit - hard_start_xmit handler.
 * Builds a Tx descriptor list for the skb (LSO, checksum offload, VLAN
 * tagging and UFO supported), DMA-maps the linear part and each page
 * fragment, then kicks the FIFO by writing the list address and control
 * word.  Serialised against reset via sp->tx_lock.
 */
3782 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3784 nic_t *sp = dev->priv;
3785 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3788 TxFIFO_element_t __iomem *tx_fifo;
3789 unsigned long flags;
3791 int vlan_priority = 0;
3792 mac_info_t *mac_control;
3793 struct config_param *config;
3796 mac_control = &sp->mac_control;
3797 config = &sp->config;
3799 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
3800 spin_lock_irqsave(&sp->tx_lock, flags);
/* Card is being reset; drop out without queueing. */
3801 if (atomic_read(&sp->card_state) == CARD_DOWN) {
3802 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
3804 spin_unlock_irqrestore(&sp->tx_lock, flags);
3811 /* Get Fifo number to Transmit based on vlan priority */
3812 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3813 vlan_tag = vlan_tx_tag_get(skb);
/* 802.1p priority lives in the top 3 bits of the VLAN tag. */
3814 vlan_priority = vlan_tag >> 13;
3815 queue = config->fifo_mapping[vlan_priority];
3818 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3819 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3820 txdp = (TxD_t *) mac_control->fifos[queue].list_info[put_off].
3823 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3824 /* Avoid "put" pointer going beyond "get" pointer */
/* Descriptor still owned (Host_Control set) or ring would wrap onto
 * the get pointer: no free TxDs, stop the queue. */
3825 if (txdp->Host_Control ||
3826 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
3827 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
3828 netif_stop_queue(dev);
3830 spin_unlock_irqrestore(&sp->tx_lock, flags);
3834 /* A buffer with no data will be dropped */
3836 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3838 spin_unlock_irqrestore(&sp->tx_lock, flags);
3842 offload_type = s2io_offload_type(skb);
3843 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3844 txdp->Control_1 |= TXD_TCP_LSO_EN;
3845 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
3847 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3849 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3852 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
3853 txdp->Control_1 |= TXD_LIST_OWN_XENA;
3854 txdp->Control_2 |= config->tx_intr_type;
3856 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3857 txdp->Control_2 |= TXD_VLAN_ENABLE;
3858 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
/* Length of the linear (non-paged) part of the skb. */
3861 frg_len = skb->len - skb->data_len;
3862 if (offload_type == SKB_GSO_UDP) {
/* UFO: descriptor 0 carries an 8-byte in-band header holding the
 * IPv6 fragment id; the shift selects byte order per endianness of
 * the two branches (one elided here). */
3865 ufo_size = s2io_udp_mss(skb);
3867 txdp->Control_1 |= TXD_UFO_EN;
3868 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
3869 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
3871 sp->ufo_in_band_v[put_off] =
3872 (u64)skb_shinfo(skb)->ip6_frag_id;
3874 sp->ufo_in_band_v[put_off] =
3875 (u64)skb_shinfo(skb)->ip6_frag_id << 32;
3877 txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
3878 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
3880 sizeof(u64), PCI_DMA_TODEVICE);
/* Map the linear part of the skb into the (next) descriptor. */
3884 txdp->Buffer_Pointer = pci_map_single
3885 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3886 txdp->Host_Control = (unsigned long) skb;
3887 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
3888 if (offload_type == SKB_GSO_UDP)
3889 txdp->Control_1 |= TXD_UFO_EN;
3891 frg_cnt = skb_shinfo(skb)->nr_frags;
3892 /* For fragmented SKB. */
3893 for (i = 0; i < frg_cnt; i++) {
3894 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3895 /* A '0' length fragment will be ignored */
3899 txdp->Buffer_Pointer = (u64) pci_map_page
3900 (sp->pdev, frag->page, frag->page_offset,
3901 frag->size, PCI_DMA_TODEVICE);
3902 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
3903 if (offload_type == SKB_GSO_UDP)
3904 txdp->Control_1 |= TXD_UFO_EN;
3906 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
3908 if (offload_type == SKB_GSO_UDP)
3909 frg_cnt++; /* as Txd0 was used for inband header */
/* Hand the completed TxD list to hardware: write its physical
 * address, then the control word that starts transmission. */
3911 tx_fifo = mac_control->tx_FIFO_start[queue];
3912 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
3913 writeq(val64, &tx_fifo->TxDL_Pointer);
3915 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
3918 val64 |= TX_FIFO_SPECIAL_FUNC;
3920 writeq(val64, &tx_fifo->List_Control);
/* Advance the put pointer with wraparound. */
3925 if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
3927 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
3929 /* Avoid "put" pointer going beyond "get" pointer */
3930 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
3931 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
3933 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
3935 netif_stop_queue(dev);
3938 dev->trans_start = jiffies;
3939 spin_unlock_irqrestore(&sp->tx_lock, flags);
/*
 * s2io_alarm_handle - periodic timer callback.
 * Runs the alarm interrupt handler and re-arms itself every half second.
 * @data: the nic_t pointer, cast through the timer's unsigned long arg.
 */
3945 s2io_alarm_handle(unsigned long data)
3947 nic_t *sp = (nic_t *)data;
3949 alarm_intr_handler(sp);
3950 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
/*
 * s2io_chk_rx_buffers - replenish Rx buffers for one ring.
 * At PANIC level the refill is done inline (guarded by the tasklet
 * bit so it never races the tasklet); at LOW level the refill is
 * deferred to the tasklet; otherwise a plain refill is attempted.
 */
3953 static int s2io_chk_rx_buffers(nic_t *sp, int rng_n)
3955 int rxb_size, level;
3958 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
3959 level = rx_buffer_level(sp, rxb_size, rng_n);
/* PANIC: critically low — refill right here unless the tasklet
 * already owns the ring (TASKLET_IN_USE test-and-set). */
3961 if ((level == PANIC) && (!TASKLET_IN_USE)) {
3963 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
3964 DBG_PRINT(INTR_DBG, "PANIC levels\n");
3965 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
3966 DBG_PRINT(ERR_DBG, "Out of memory in %s",
3968 clear_bit(0, (&sp->tasklet_status));
3971 clear_bit(0, (&sp->tasklet_status));
3972 } else if (level == LOW)
/* LOW: not urgent, let the tasklet top the ring up later. */
3973 tasklet_schedule(&sp->task);
3975 } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
3976 DBG_PRINT(ERR_DBG, "%s:Out of memory", sp->dev->name);
3977 DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
/*
 * s2io_msi_handle - interrupt handler for plain MSI mode.
 * One vector covers everything, so service all Rx rings and all Tx
 * FIFOs, then replenish Rx buffers.  isr_cnt brackets the handler so
 * teardown code can wait for in-flight ISRs.
 */
3982 static irqreturn_t s2io_msi_handle(int irq, void *dev_id)
3984 struct net_device *dev = (struct net_device *) dev_id;
3985 nic_t *sp = dev->priv;
3987 mac_info_t *mac_control;
3988 struct config_param *config;
3990 atomic_inc(&sp->isr_cnt);
3991 mac_control = &sp->mac_control;
3992 config = &sp->config;
3993 DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);
3995 /* If Intr is because of Rx Traffic */
3996 for (i = 0; i < config->rx_ring_num; i++)
3997 rx_intr_handler(&mac_control->rings[i]);
3999 /* If Intr is because of Tx Traffic */
4000 for (i = 0; i < config->tx_fifo_num; i++)
4001 tx_intr_handler(&mac_control->fifos[i]);
4004 * If the Rx buffer count is below the panic threshold then
4005 * reallocate the buffers from the interrupt handler itself,
4006 * else schedule a tasklet to reallocate the buffers.
4008 for (i = 0; i < config->rx_ring_num; i++)
4009 s2io_chk_rx_buffers(sp, i);
4011 atomic_dec(&sp->isr_cnt);
/*
 * s2io_msix_ring_handle - MSI-X handler for a single Rx ring.
 * @dev_id carries the ring_info_t registered for this vector; only
 * that ring is serviced and replenished.
 */
4015 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4017 ring_info_t *ring = (ring_info_t *)dev_id;
4018 nic_t *sp = ring->nic;
4020 atomic_inc(&sp->isr_cnt);
4022 rx_intr_handler(ring);
4023 s2io_chk_rx_buffers(sp, ring->ring_no);
4025 atomic_dec(&sp->isr_cnt);
/*
 * s2io_msix_fifo_handle - MSI-X handler for a single Tx FIFO.
 * @dev_id carries the fifo_info_t registered for this vector.
 */
4029 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4031 fifo_info_t *fifo = (fifo_info_t *)dev_id;
4032 nic_t *sp = fifo->nic;
4034 atomic_inc(&sp->isr_cnt);
4035 tx_intr_handler(fifo);
4036 atomic_dec(&sp->isr_cnt);
/*
 * s2io_txpic_intr_handle - service TxPIC interrupts (GPIO link events).
 * Decodes the GPIO interrupt register to track link up/down transitions,
 * flips the adapter enable/LED bits accordingly, and masks/unmasks the
 * opposite-direction link interrupt to get exactly one event per change.
 */
4039 static void s2io_txpic_intr_handle(nic_t *sp)
4041 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4044 val64 = readq(&bar0->pic_int_status);
4045 if (val64 & PIC_INT_GPIO) {
4046 val64 = readq(&bar0->gpio_int_reg);
4047 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4048 (val64 & GPIO_INT_REG_LINK_UP)) {
4050 * This is unstable state so clear both up/down
4051 * interrupt and adapter to re-evaluate the link state.
4053 val64 |= GPIO_INT_REG_LINK_DOWN;
4054 val64 |= GPIO_INT_REG_LINK_UP;
4055 writeq(val64, &bar0->gpio_int_reg);
4056 val64 = readq(&bar0->gpio_int_mask);
4057 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4058 GPIO_INT_MASK_LINK_DOWN);
4059 writeq(val64, &bar0->gpio_int_mask);
4061 else if (val64 & GPIO_INT_REG_LINK_UP) {
4062 val64 = readq(&bar0->adapter_status);
4063 /* Enable Adapter */
4064 val64 = readq(&bar0->adapter_control);
4065 val64 |= ADAPTER_CNTL_EN;
4066 writeq(val64, &bar0->adapter_control);
4067 val64 |= ADAPTER_LED_ON;
4068 writeq(val64, &bar0->adapter_control);
4069 if (!sp->device_enabled_once)
4070 sp->device_enabled_once = 1;
4072 s2io_link(sp, LINK_UP);
4074 * unmask link down interrupt and mask link-up
4077 val64 = readq(&bar0->gpio_int_mask);
4078 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4079 val64 |= GPIO_INT_MASK_LINK_UP;
4080 writeq(val64, &bar0->gpio_int_mask);
4082 }else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4083 val64 = readq(&bar0->adapter_status);
4084 s2io_link(sp, LINK_DOWN);
4085 /* Link is down so unmask link up interrupt */
4086 val64 = readq(&bar0->gpio_int_mask);
4087 val64 &= ~GPIO_INT_MASK_LINK_UP;
4088 val64 |= GPIO_INT_MASK_LINK_DOWN;
4089 writeq(val64, &bar0->gpio_int_mask);
4092 val64 = readq(&bar0->gpio_int_mask);
4096 * s2io_isr - ISR handler of the device .
4097 * @irq: the irq of the device.
4098 * @dev_id: a void pointer to the dev structure of the NIC.
4099 * Description: This function is the ISR handler of the device. It
4100 * identifies the reason for the interrupt and calls the relevant
4101 service routines. As a contingency measure, this ISR allocates the
4102 * recv buffers, if their numbers are below the panic value which is
4103 * presently set to 25% of the original number of rcv buffers allocated.
4105 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
4106 * IRQ_NONE: will be returned if interrupt is not from our device
4108 static irqreturn_t s2io_isr(int irq, void *dev_id)
4110 struct net_device *dev = (struct net_device *) dev_id;
4111 nic_t *sp = dev->priv;
4112 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4115 mac_info_t *mac_control;
4116 struct config_param *config;
4118 atomic_inc(&sp->isr_cnt);
4119 mac_control = &sp->mac_control;
4120 config = &sp->config;
4123 * Identify the cause for interrupt and call the appropriate
4124 * interrupt handler. Causes for the interrupt could be;
4128 * 4. Error in any functional blocks of the NIC.
4130 reason = readq(&bar0->general_int_status);
4133 /* The interrupt was not raised by us. */
4134 atomic_dec(&sp->isr_cnt);
/* All-ones readback usually means the device fell off the bus
 * (e.g. surprise removal): disable and bail out. */
4137 else if (unlikely(reason == S2IO_MINUS_ONE) ) {
4138 /* Disable device and get out */
4139 atomic_dec(&sp->isr_cnt);
/* NAPI path: schedule the poll routine and mask further Rx
 * interrupts; the poll handler re-enables them when done. */
4144 if (reason & GEN_INTR_RXTRAFFIC) {
4145 if ( likely ( netif_rx_schedule_prep(dev)) ) {
4146 __netif_rx_schedule(dev);
4147 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4150 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4154 * Rx handler is called by default, without checking for the
4155 * cause of interrupt.
4156 * rx_traffic_int reg is an R1 register, writing all 1's
4157 * will ensure that the actual interrupt causing bit get's
4158 * cleared and hence a read can be avoided.
4160 if (reason & GEN_INTR_RXTRAFFIC)
4161 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4163 for (i = 0; i < config->rx_ring_num; i++) {
4164 rx_intr_handler(&mac_control->rings[i]);
4169 * tx_traffic_int reg is an R1 register, writing all 1's
4170 * will ensure that the actual interrupt causing bit get's
4171 * cleared and hence a read can be avoided.
4173 if (reason & GEN_INTR_TXTRAFFIC)
4174 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4176 for (i = 0; i < config->tx_fifo_num; i++)
4177 tx_intr_handler(&mac_control->fifos[i]);
4179 if (reason & GEN_INTR_TXPIC)
4180 s2io_txpic_intr_handle(sp);
4182 * If the Rx buffer count is below the panic threshold then
4183 * reallocate the buffers from the interrupt handler itself,
4184 * else schedule a tasklet to reallocate the buffers.
4187 for (i = 0; i < config->rx_ring_num; i++)
4188 s2io_chk_rx_buffers(sp, i);
/* Re-enable all interrupt sources; the readl flushes the posted
 * write so the unmask takes effect before we return. */
4191 writeq(0, &bar0->general_int_mask);
4192 readl(&bar0->general_int_status);
4194 atomic_dec(&sp->isr_cnt);
/*
 * s2io_updt_stats - trigger a one-shot hardware statistics DMA.
 * Only issued while the card is up; polls stat_cfg until the strobe
 * bit clears (with a bounded retry, elided), giving up silently on
 * timeout.  On a down card the stats block is zeroed instead.
 */
4201 static void s2io_updt_stats(nic_t *sp)
4203 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4207 if (atomic_read(&sp->card_state) == CARD_UP) {
4208 /* Apprx 30us on a 133 MHz bus */
4209 val64 = SET_UPDT_CLICKS(10) |
4210 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4211 writeq(val64, &bar0->stat_cfg);
4214 val64 = readq(&bar0->stat_cfg);
4215 if (!(val64 & BIT(0)))
4219 break; /* Updt failed */
4222 memset(sp->mac_control.stats_info, 0, sizeof(StatInfo_t));
4227 * s2io_get_stats - Updates the device statistics structure.
4228 * @dev : pointer to the device structure.
4230 * This function updates the device statistics structure in the s2io_nic
4231 * structure and returns a pointer to the same.
4233 * pointer to the updated net_device_stats structure.
4236 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4238 nic_t *sp = dev->priv;
4239 mac_info_t *mac_control;
4240 struct config_param *config;
4243 mac_control = &sp->mac_control;
4244 config = &sp->config;
4246 /* Configure Stats for immediate updt */
4247 s2io_updt_stats(sp);
/* Copy the DMA'd hardware counters (little-endian on the wire)
 * into the generic net_device_stats structure. */
4249 sp->stats.tx_packets =
4250 le32_to_cpu(mac_control->stats_info->tmac_frms);
4251 sp->stats.tx_errors =
4252 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4253 sp->stats.rx_errors =
4254 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4255 sp->stats.multicast =
4256 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4257 sp->stats.rx_length_errors =
4258 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4260 return (&sp->stats);
4264 * s2io_set_multicast - entry point for multicast address enable/disable.
4265 * @dev : pointer to the device structure
4267 * This function is a driver entry point which gets called by the kernel
4268 * whenever multicast addresses must be enabled/disabled. This also gets
4269 called to set/reset promiscuous mode. Depending on the device flag, we
4270 * determine, if multicast address must be enabled or if promiscuous mode
4271 * is to be disabled etc.
4276 static void s2io_set_multicast(struct net_device *dev)
4279 struct dev_mc_list *mclist;
4280 nic_t *sp = dev->priv;
4281 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4282 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4284 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
/* --- ALLMULTI handling: program/clear the catch-all MC filter --- */
4287 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4288 /* Enable all Multicast addresses */
4289 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4290 &bar0->rmac_addr_data0_mem);
4291 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4292 &bar0->rmac_addr_data1_mem);
4293 val64 = RMAC_ADDR_CMD_MEM_WE |
4294 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4295 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
4296 writeq(val64, &bar0->rmac_addr_cmd_mem);
4297 /* Wait till command completes */
4298 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4299 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
4302 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
4303 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4304 /* Disable all Multicast addresses */
4305 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4306 &bar0->rmac_addr_data0_mem);
4307 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4308 &bar0->rmac_addr_data1_mem);
4309 val64 = RMAC_ADDR_CMD_MEM_WE |
4310 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4311 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4312 writeq(val64, &bar0->rmac_addr_cmd_mem);
4313 /* Wait till command completes */
4314 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4315 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
4318 sp->all_multi_pos = 0;
/* --- Promiscuous mode: toggle via the keyed mac_cfg register.
 * Each 32-bit half write must be preceded by writing the unlock
 * key (0x4C0D) to rmac_cfg_key. --- */
4321 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4322 /* Put the NIC into promiscuous mode */
4323 add = &bar0->mac_cfg;
4324 val64 = readq(&bar0->mac_cfg);
4325 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
4327 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4328 writel((u32) val64, add);
4329 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4330 writel((u32) (val64 >> 32), (add + 4));
4332 val64 = readq(&bar0->mac_cfg);
4333 sp->promisc_flg = 1;
4334 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4336 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4337 /* Remove the NIC from promiscuous mode */
4338 add = &bar0->mac_cfg;
4339 val64 = readq(&bar0->mac_cfg);
4340 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4342 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4343 writel((u32) val64, add);
4344 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4345 writel((u32) (val64 >> 32), (add + 4));
4347 val64 = readq(&bar0->mac_cfg);
4348 sp->promisc_flg = 0;
4349 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
4353 /* Update individual M_CAST address list */
4354 if ((!sp->m_cast_flg) && dev->mc_count) {
4356 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
4357 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4359 DBG_PRINT(ERR_DBG, "can be added, please enable ");
4360 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4364 prev_cnt = sp->mc_addr_count;
4365 sp->mc_addr_count = dev->mc_count;
4367 /* Clear out the previous list of Mc in the H/W. */
4368 for (i = 0; i < prev_cnt; i++) {
4369 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4370 &bar0->rmac_addr_data0_mem);
4371 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4372 &bar0->rmac_addr_data1_mem);
4373 val64 = RMAC_ADDR_CMD_MEM_WE |
4374 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4375 RMAC_ADDR_CMD_MEM_OFFSET
4376 (MAC_MC_ADDR_START_OFFSET + i);
4377 writeq(val64, &bar0->rmac_addr_cmd_mem);
4379 /* Wait till command completes */
4380 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4381 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
4382 DBG_PRINT(ERR_DBG, "%s: Adding ",
4384 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4389 /* Create the new Rx filter list and update the same in H/W. */
4390 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4391 i++, mclist = mclist->next) {
4392 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
/* Pack the 6 address bytes into a u64 for the data0 register. */
4395 for (j = 0; j < ETH_ALEN; j++) {
4396 mac_addr |= mclist->dmi_addr[j];
4400 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4401 &bar0->rmac_addr_data0_mem);
4402 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4403 &bar0->rmac_addr_data1_mem);
4404 val64 = RMAC_ADDR_CMD_MEM_WE |
4405 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4406 RMAC_ADDR_CMD_MEM_OFFSET
4407 (i + MAC_MC_ADDR_START_OFFSET);
4408 writeq(val64, &bar0->rmac_addr_cmd_mem);
4410 /* Wait till command completes */
4411 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4412 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
4413 DBG_PRINT(ERR_DBG, "%s: Adding ",
4415 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4423 * s2io_set_mac_addr - Programs the Xframe mac address
4424 * @dev : pointer to the device structure.
4425 * @addr: a uchar pointer to the new mac address which is to be set.
4426 * Description : This procedure will program the Xframe to receive
4427 * frames with new Mac Address
4428 * Return value: SUCCESS on success and an appropriate (-)ve integer
4429 * as defined in errno.h file on failure.
4432 static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4434 nic_t *sp = dev->priv;
4435 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4436 register u64 val64, mac_addr = 0;
4440 * Set the new MAC address as the new unicast filter and reflect this
4441 * change on the device address registered with the OS. It will be
/* Pack the 6 address bytes into a u64 for the data0 register. */
4444 for (i = 0; i < ETH_ALEN; i++) {
4446 mac_addr |= addr[i];
4449 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4450 &bar0->rmac_addr_data0_mem);
/* Offset 0 of the RMAC address CAM is the unicast station address. */
4453 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4454 RMAC_ADDR_CMD_MEM_OFFSET(0);
4455 writeq(val64, &bar0->rmac_addr_cmd_mem);
4456 /* Wait till command completes */
4457 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4458 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
4459 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4467 * s2io_ethtool_sset - Sets different link parameters.
4468 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
4469 * @info: pointer to the structure with parameters given by ethtool to set
4472 * The function sets different link parameters provided by the user onto
/*
 * s2io_ethtool_sset - ethtool set_settings hook.
 * The Xframe link is fixed at 10 Gb/s full duplex with no autoneg,
 * so anything else is rejected; a valid request bounces the interface
 * (close/open, partially elided) to apply it.
 */
4478 static int s2io_ethtool_sset(struct net_device *dev,
4479 struct ethtool_cmd *info)
4481 nic_t *sp = dev->priv;
4482 if ((info->autoneg == AUTONEG_ENABLE) ||
4483 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4486 s2io_close(sp->dev);
4494 * s2io_ethtool_gset - Return link specific information.
4495 * @sp : private member of the device structure, pointer to the
4496 * s2io_nic structure.
4497 * @info : pointer to the structure with parameters given by ethtool
4498 * to return link information.
4500 * Returns link specific information like speed, duplex etc.. to ethtool.
4502 * return 0 on success.
4505 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4507 nic_t *sp = dev->priv;
4508 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
/* NOTE(review): advertising is filled with SUPPORTED_* flags rather
 * than the ADVERTISED_* equivalents; the bit values coincide, but the
 * conventional constants would be clearer — confirm before changing. */
4509 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4510 info->port = PORT_FIBRE;
4511 /* info->transceiver?? TODO */
/* Speed/duplex are only meaningful while the carrier is up. */
4513 if (netif_carrier_ok(sp->dev)) {
4514 info->speed = 10000;
4515 info->duplex = DUPLEX_FULL;
4521 info->autoneg = AUTONEG_DISABLE;
4526 * s2io_ethtool_gdrvinfo - Returns driver specific information.
4527 * @sp : private member of the device structure, which is a pointer to the
4528 * s2io_nic structure.
4529 * @info : pointer to the structure with parameters given by ethtool to
4530 * return driver information.
4532 * Returns driver specific information like name, version etc.. to ethtool.
4537 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4538 struct ethtool_drvinfo *info)
4540 nic_t *sp = dev->priv;
/* NOTE(review): strncpy does not guarantee NUL termination when the
 * source fills the buffer; the ethtool strings here are short in
 * practice, but strlcpy/snprintf would be strictly safer. */
4542 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
4543 strncpy(info->version, s2io_driver_version, sizeof(info->version));
4544 strncpy(info->fw_version, "", sizeof(info->fw_version));
4545 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
4546 info->regdump_len = XENA_REG_SPACE;
4547 info->eedump_len = XENA_EEPROM_SPACE;
4548 info->testinfo_len = S2IO_TEST_LEN;
4549 info->n_stats = S2IO_STAT_LEN;
4553 * s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
4554 * @sp: private member of the device structure, which is a pointer to the
4555 * s2io_nic structure.
4556 * @regs : pointer to the structure with parameters given by ethtool for
4557 * dumping the registers.
4558 * @reg_space: The input argument into which all the registers are dumped.
4560 * Dumps the entire register space of xFrame NIC into the user given
4566 static void s2io_ethtool_gregs(struct net_device *dev,
4567 struct ethtool_regs *regs, void *space)
4571 u8 *reg_space = (u8 *) space;
4572 nic_t *sp = dev->priv;
4574 regs->len = XENA_REG_SPACE;
4575 regs->version = sp->pdev->subsystem_device;
4577 for (i = 0; i < regs->len; i += 8) {
4578 reg = readq(sp->bar0 + i);
4579 memcpy((reg_space + i), ®, 8);
4584 * s2io_phy_id - timer function that alternates adapter LED.
4585 * @data : address of the private member of the device structure, which
4586 * is a pointer to the s2io_nic structure, provided as an u32.
4587 * Description: This is actually the timer function that alternates the
4588 * adapter LED bit of the adapter control bit to set/reset every time on
4589 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
4590 * once every second.
4592 static void s2io_phy_id(unsigned long data)
4594 nic_t *sp = (nic_t *) data;
4595 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4599 subid = sp->pdev->subsystem_device;
/* Xframe II (and later Xframe I subsystem revisions) blink via the
 * GPIO control register; older boards toggle the adapter LED bit. */
4600 if ((sp->device_type == XFRAME_II_DEVICE) ||
4601 ((subid & 0xFF) >= 0x07)) {
4602 val64 = readq(&bar0->gpio_control);
4603 val64 ^= GPIO_CTRL_GPIO_0;
4604 writeq(val64, &bar0->gpio_control);
4606 val64 = readq(&bar0->adapter_control);
4607 val64 ^= ADAPTER_LED_ON;
4608 writeq(val64, &bar0->adapter_control);
/* Re-arm to toggle again in half a second (1 Hz blink). */
4611 mod_timer(&sp->id_timer, jiffies + HZ / 2);
4615 * s2io_ethtool_idnic - To physically identify the nic on the system.
4616 * @sp : private member of the device structure, which is a pointer to the
4617 * s2io_nic structure.
4618 * @id : pointer to the structure with identification parameters given by
4620 * Description: Used to physically identify the NIC on the system.
4621 * The Link LED will blink for a time specified by the user for
4623 * NOTE: The Link has to be Up to be able to blink the LED. Hence
4624 * identification is possible only if its link is up.
4626 * int , returns 0 on success
4629 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
4631 u64 val64 = 0, last_gpio_ctrl_val;
4632 nic_t *sp = dev->priv;
4633 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4636 subid = sp->pdev->subsystem_device;
/* Remember the current GPIO state so it can be restored afterwards. */
4637 last_gpio_ctrl_val = readq(&bar0->gpio_control);
/* Old Xframe I boards drive the LED off the adapter-enable bit, so
 * blinking needs the adapter (i.e. the link) to be up. */
4638 if ((sp->device_type == XFRAME_I_DEVICE) &&
4639 ((subid & 0xFF) < 0x07)) {
4640 val64 = readq(&bar0->adapter_control);
4641 if (!(val64 & ADAPTER_CNTL_EN)) {
4643 "Adapter Link down, cannot blink LED\n");
/* Lazily initialise the blink timer on first use. */
4647 if (sp->id_timer.function == NULL) {
4648 init_timer(&sp->id_timer);
4649 sp->id_timer.function = s2io_phy_id;
4650 sp->id_timer.data = (unsigned long) sp;
4652 mod_timer(&sp->id_timer, jiffies);
/* Blink for the user-requested seconds, or a default duration. */
4654 msleep_interruptible(data * HZ);
4656 msleep_interruptible(MAX_FLICKER_TIME);
4657 del_timer_sync(&sp->id_timer);
4659 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
4660 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
4661 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4668 * s2io_ethtool_getpause_data - Pause frame generation and reception.
4669 * @sp : private member of the device structure, which is a pointer to the
4670 * s2io_nic structure.
4671 * @ep : pointer to the structure with pause parameters given by ethtool.
4673 * Returns the Pause frame generation and reception capability of the NIC.
/*
 * s2io_ethtool_getpause_data - report pause-frame capability.
 * Reads rmac_pause_cfg and maps the generation/reception enable bits
 * onto the ethtool pauseparam fields; autoneg of pause is unsupported.
 */
4677 static void s2io_ethtool_getpause_data(struct net_device *dev,
4678 struct ethtool_pauseparam *ep)
4681 nic_t *sp = dev->priv;
4682 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4684 val64 = readq(&bar0->rmac_pause_cfg);
4685 if (val64 & RMAC_PAUSE_GEN_ENABLE)
4686 ep->tx_pause = TRUE;
4687 if (val64 & RMAC_PAUSE_RX_ENABLE)
4688 ep->rx_pause = TRUE;
4689 ep->autoneg = FALSE;
4693 * s2io_ethtool_setpause_data - set/reset pause frame generation.
4694 * @sp : private member of the device structure, which is a pointer to the
4695 * s2io_nic structure.
4696 * @ep : pointer to the structure with pause parameters given by ethtool.
4698 * It can be used to set or reset Pause frame generation or reception
4699 * support of the NIC.
4701 * int, returns 0 on Success
/*
 * s2io_ethtool_setpause_data - enable/disable pause frame support.
 * Read-modify-writes the Tx-pause-generation and Rx-pause-honour bits
 * in rmac_pause_cfg according to the ethtool request.
 */
4704 static int s2io_ethtool_setpause_data(struct net_device *dev,
4705 struct ethtool_pauseparam *ep)
4708 nic_t *sp = dev->priv;
4709 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4711 val64 = readq(&bar0->rmac_pause_cfg);
4713 val64 |= RMAC_PAUSE_GEN_ENABLE;
4715 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
4717 val64 |= RMAC_PAUSE_RX_ENABLE;
4719 val64 &= ~RMAC_PAUSE_RX_ENABLE;
4720 writeq(val64, &bar0->rmac_pause_cfg);
4725 * read_eeprom - reads 4 bytes of data from user given offset.
4726 * @sp : private member of the device structure, which is a pointer to the
4727 * s2io_nic structure.
4728 * @off : offset at which the data must be written
4729 * @data : Its an output parameter where the data read at the given
4732 * Will read 4 bytes of data from the user given offset and return the
4734 * NOTE: Will allow to read only part of the EEPROM visible through the
4737 * -1 on failure and 0 on success.
/* I2C device id of the on-board EEPROM (Xframe I access path). */
4740 #define S2IO_DEV_ID 5
/*
 * read_eeprom - read 4 bytes from the EEPROM at the given offset.
 * Xframe I goes through the I2C controller, Xframe II through the SPI
 * controller; both poll for completion with a bounded retry count
 * (the msleep/exit_cnt++ lines are elided in this extract).
 */
4741 static int read_eeprom(nic_t * sp, int off, u64 * data)
4746 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4748 if (sp->device_type == XFRAME_I_DEVICE) {
4749 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4750 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
4751 I2C_CONTROL_CNTL_START;
4752 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4754 while (exit_cnt < 5) {
4755 val64 = readq(&bar0->i2c_control);
4756 if (I2C_CONTROL_CNTL_END(val64)) {
4757 *data = I2C_CONTROL_GET_DATA(val64);
4766 if (sp->device_type == XFRAME_II_DEVICE) {
4767 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
4768 SPI_CONTROL_BYTECNT(0x3) |
4769 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
4770 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
/* The request bit is raised in a second write, after the command
 * word has been latched. */
4771 val64 |= SPI_CONTROL_REQ;
4772 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4773 while (exit_cnt < 5) {
4774 val64 = readq(&bar0->spi_control);
4775 if (val64 & SPI_CONTROL_NACK) {
4778 } else if (val64 & SPI_CONTROL_DONE) {
4779 *data = readq(&bar0->spi_data);
4792 * write_eeprom - actually writes the relevant part of the data value.
4793 * @sp : private member of the device structure, which is a pointer to the
4794 * s2io_nic structure.
4795 * @off : offset at which the data must be written
4796 * @data : The data that is to be written
4797 * @cnt : Number of bytes of the data that are actually to be written into
4798 * the Eeprom. (max of 3)
4800 * Actually writes the relevant part of the data value into the Eeprom
4801 * through the I2C bus.
4803 * 0 on success, -1 on failure.
4806 static int write_eeprom(nic_t * sp, int off, u64 data, int cnt)
4808 int exit_cnt = 0, ret = -1;
4810 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4812 if (sp->device_type == XFRAME_I_DEVICE) {
4813 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4814 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
4815 I2C_CONTROL_CNTL_START;
4816 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4818 while (exit_cnt < 5) {
4819 val64 = readq(&bar0->i2c_control);
4820 if (I2C_CONTROL_CNTL_END(val64)) {
/* Success only when the device did not NACK the write. */
4821 if (!(val64 & I2C_CONTROL_NACK))
4830 if (sp->device_type == XFRAME_II_DEVICE) {
/* SPI byte-count field encodes 8 bytes as 0. */
4831 int write_cnt = (cnt == 8) ? 0 : cnt;
4832 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
4834 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
4835 SPI_CONTROL_BYTECNT(write_cnt) |
4836 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
4837 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
/* Raise the request bit in a second write, after the command
 * word has been latched. */
4838 val64 |= SPI_CONTROL_REQ;
4839 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4840 while (exit_cnt < 5) {
4841 val64 = readq(&bar0->spi_control);
4842 if (val64 & SPI_CONTROL_NACK) {
4845 } else if (val64 & SPI_CONTROL_DONE) {
/*
 * s2io_vpd_read - reads the adapter's VPD area via PCI config space and
 * extracts the product name and serial number into the nic_t structure.
 * Falls back to default strings when VPD data is unavailable.
 * NOTE(review): extract elides lines (kmalloc failure path, loop delays,
 * kfree) — confirm against the complete source.
 */
4855 static void s2io_vpd_read(nic_t *nic)
4859 int i=0, cnt, fail = 0;
4860 int vpd_addr = 0x80;
4862 if (nic->device_type == XFRAME_II_DEVICE) {
4863 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
4867 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
/* Default until a real serial number is parsed out of the VPD data. */
4870 strcpy(nic->serial_num, "NOT AVAILABLE");
4872 vpd_data = kmalloc(256, GFP_KERNEL);
/* Read the 256-byte VPD area, 4 bytes per VPD-capability transaction. */
4876 for (i = 0; i < 256; i +=4 ) {
4877 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
4878 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
4879 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
/* Poll (up to 5 tries) for the VPD read-complete flag. */
4880 for (cnt = 0; cnt <5; cnt++) {
4882 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
4887 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
4891 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
4892 (u32 *)&vpd_data[i]);
4896 /* read serial number of adapter */
/* Scan for the "SN" keyword; the byte after it is the string length. */
4897 for (cnt = 0; cnt < 256; cnt++) {
4898 if ((vpd_data[cnt] == 'S') &&
4899 (vpd_data[cnt+1] == 'N') &&
4900 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
4901 memset(nic->serial_num, 0, VPD_STRING_LEN);
4902 memcpy(nic->serial_num, &vpd_data[cnt + 3],
/* On a clean read, overwrite the default product name from VPD. */
4909 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
4910 memset(nic->product_name, 0, vpd_data[1]);
4911 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
4917 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
4918 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
4919 * @eeprom : pointer to the user level structure provided by ethtool,
4920 * containing all relevant information.
4921 * @data_buf : user defined value to be written into Eeprom.
4922 * Description: Reads the values stored in the Eeprom at given offset
4923 * for a given length. Stores these values int the input argument data
4924 * buffer 'data_buf' and returns these to the caller (ethtool.)
4929 static int s2io_ethtool_geeprom(struct net_device *dev,
4930 struct ethtool_eeprom *eeprom, u8 * data_buf)
4934 nic_t *sp = dev->priv;
/* Magic identifies the device: vendor in low 16 bits, device in high. */
4936 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
/* Clamp the requested range to the visible EEPROM space. */
4938 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
4939 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
/* Read 4 bytes at a time; read_eeprom returns non-zero on failure. */
4941 for (i = 0; i < eeprom->len; i += 4) {
4942 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
4943 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
4947 memcpy((data_buf + i), &valid, 4);
4953 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
4954 * @sp : private member of the device structure, which is a pointer to the
4955 * s2io_nic structure.
4956 * @eeprom : pointer to the user level structure provided by ethtool,
4957 * containing all relevant information.
4958 * @data_buf ; user defined value to be written into Eeprom.
4960 * Tries to write the user provided value in the Eeprom, at the offset
4961 * given by the user.
4963 * 0 on success, -EFAULT on failure.
4966 static int s2io_ethtool_seeprom(struct net_device *dev,
4967 struct ethtool_eeprom *eeprom,
4970 int len = eeprom->len, cnt = 0;
4971 u64 valid = 0, data;
4972 nic_t *sp = dev->priv;
/* Reject writes unless the caller echoes back the device's magic. */
4974 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
4976 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
4977 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
/* Write one byte per iteration, positioned in the high byte of a u32. */
4983 data = (u32) data_buf[cnt] & 0x000000FF;
4985 valid = (u32) (data << 24);
4989 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
4991 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
4993 "write into the specified offset\n");
5004 * s2io_register_test - reads and writes into all clock domains.
5005 * @sp : private member of the device structure, which is a pointer to the
5006 * s2io_nic structure.
5007 * @data : variable that returns the result of each of the test conducted b
5010 * Read and write into all clock domains. The NIC has 3 clock domains,
5011 * see that registers in all the three regions are accessible.
5016 static int s2io_register_test(nic_t * sp, uint64_t * data)
5018 XENA_dev_config_t __iomem *bar0 = sp->bar0;
5019 u64 val64 = 0, exp_val;
/* Read tests: each register below has a known power-on/fixed value. */
5022 val64 = readq(&bar0->pif_rd_swapper_fb);
5023 if (val64 != 0x123456789abcdefULL) {
5025 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5028 val64 = readq(&bar0->rmac_pause_cfg);
5029 if (val64 != 0xc000ffff00000000ULL) {
5031 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5034 val64 = readq(&bar0->rx_queue_cfg);
/* rx_queue_cfg default differs between Xframe I and II. */
5035 if (sp->device_type == XFRAME_II_DEVICE)
5036 exp_val = 0x0404040404040404ULL;
5038 exp_val = 0x0808080808080808ULL;
5039 if (val64 != exp_val) {
5041 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5044 val64 = readq(&bar0->xgxs_efifo_cfg);
5045 if (val64 != 0x000000001923141EULL) {
5047 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
/* Write tests: write complementary patterns to xmsi_data, read back. */
5050 val64 = 0x5A5A5A5A5A5A5A5AULL;
5051 writeq(val64, &bar0->xmsi_data);
5052 val64 = readq(&bar0->xmsi_data);
5053 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5055 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5058 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5059 writeq(val64, &bar0->xmsi_data);
5060 val64 = readq(&bar0->xmsi_data);
5061 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5063 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5071 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5072 * @sp : private member of the device structure, which is a pointer to the
5073 * s2io_nic structure.
5074 * @data:variable that returns the result of each of the test conducted by
5077 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5083 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
5086 u64 ret_data, org_4F0, org_7F0;
5087 u8 saved_4F0 = 0, saved_7F0 = 0;
5088 struct net_device *dev = sp->dev;
5090 /* Test Write Error at offset 0 */
5091 /* Note that SPI interface allows write access to all areas
5092 * of EEPROM. Hence doing all negative testing only for Xframe I.
/* Offset 0 is write-protected on Xframe I; a successful write is a
 * test failure. */
5094 if (sp->device_type == XFRAME_I_DEVICE)
5095 if (!write_eeprom(sp, 0, 0, 3))
5098 /* Save current values at offsets 0x4F0 and 0x7F0 */
5099 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5101 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5104 /* Test Write at offset 4f0 */
5105 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5107 if (read_eeprom(sp, 0x4F0, &ret_data))
/* Read-back must match the pattern just written. */
5110 if (ret_data != 0x012345) {
5111 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5112 "Data written %llx Data read %llx\n",
5113 dev->name, (unsigned long long)0x12345,
5114 (unsigned long long)ret_data);
5118 /* Reset the EEPROM data go FFFF */
5119 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5121 /* Test Write Request Error at offset 0x7c */
5122 if (sp->device_type == XFRAME_I_DEVICE)
5123 if (!write_eeprom(sp, 0x07C, 0, 3))
5126 /* Test Write Request at offset 0x7f0 */
5127 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5129 if (read_eeprom(sp, 0x7F0, &ret_data))
5132 if (ret_data != 0x012345) {
5133 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5134 "Data written %llx Data read %llx\n",
5135 dev->name, (unsigned long long)0x12345,
5136 (unsigned long long)ret_data);
5140 /* Reset the EEPROM data go FFFF */
5141 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
/* More negative tests on protected Xframe I offsets: writes to these
 * must fail for the test to pass. */
5143 if (sp->device_type == XFRAME_I_DEVICE) {
5144 /* Test Write Error at offset 0x80 */
5145 if (!write_eeprom(sp, 0x080, 0, 3))
5148 /* Test Write Error at offset 0xfc */
5149 if (!write_eeprom(sp, 0x0FC, 0, 3))
5152 /* Test Write Error at offset 0x100 */
5153 if (!write_eeprom(sp, 0x100, 0, 3))
5156 /* Test Write Error at offset 4ec */
5157 if (!write_eeprom(sp, 0x4EC, 0, 3))
5161 /* Restore values at offsets 0x4F0 and 0x7F0 */
5163 write_eeprom(sp, 0x4F0, org_4F0, 3);
5165 write_eeprom(sp, 0x7F0, org_7F0, 3);
5172 * s2io_bist_test - invokes the MemBist test of the card .
5173 * @sp : private member of the device structure, which is a pointer to the
5174 * s2io_nic structure.
5175 * @data:variable that returns the result of each of the test conducted by
5178 * This invokes the MemBist test of the card. We give around
5179 * 2 secs time for the Test to complete. If it's still not complete
5180 * within this peiod, we consider that the test failed.
5182 * 0 on success and -1 on failure.
5185 static int s2io_bist_test(nic_t * sp, uint64_t * data)
5188 int cnt = 0, ret = -1;
/* Kick off BIST by setting the start bit in the PCI BIST register.
 * NOTE(review): bist is read as a byte but written back with
 * pci_write_config_word — confirm intent against the full source. */
5190 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5191 bist |= PCI_BIST_START;
5192 pci_write_config_word(sp->pdev, PCI_BIST, bist);
/* Poll until hardware clears the start bit; completion code is the
 * test result (0 == pass per PCI spec). */
5195 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5196 if (!(bist & PCI_BIST_START)) {
5197 *data = (bist & PCI_BIST_CODE_MASK);
5209 * s2io-link_test - verifies the link state of the nic
5210 * @sp ; private member of the device structure, which is a pointer to the
5211 * s2io_nic structure.
5212 * @data: variable that returns the result of each of the test conducted by
5215 * The function verifies the link state of the NIC and updates the input
5216 * argument 'data' appropriately.
5221 static int s2io_link_test(nic_t * sp, uint64_t * data)
5223 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Read the adapter status register and report link-down via *data. */
5226 val64 = readq(&bar0->adapter_status);
5227 if(!(LINK_IS_UP(val64)))
5236 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5237 * @sp - private member of the device structure, which is a pointer to the
5238 * s2io_nic structure.
5239 * @data - variable that returns the result of each of the test
5240 * conducted by the driver.
5242 * This is one of the offline test that tests the read and write
5243 * access to the RldRam chip on the NIC.
5248 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
5250 XENA_dev_config_t __iomem *bar0 = sp->bar0;
5252 int cnt, iteration = 0, test_fail = 0;
/* Disable ECC so test patterns are not corrected behind our back. */
5254 val64 = readq(&bar0->adapter_control);
5255 val64 &= ~ADAPTER_ECC_EN;
5256 writeq(val64, &bar0->adapter_control);
5258 val64 = readq(&bar0->mc_rldram_test_ctrl);
5259 val64 |= MC_RLDRAM_TEST_MODE;
5260 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5262 val64 = readq(&bar0->mc_rldram_mrs);
5263 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
5264 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5266 val64 |= MC_RLDRAM_MRS_ENABLE;
5267 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
/* Two iterations: original patterns, then bit-inverted (upper 48 bits)
 * versions of the same patterns. */
5269 while (iteration < 2) {
5270 val64 = 0x55555555aaaa0000ULL;
5271 if (iteration == 1) {
5272 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5274 writeq(val64, &bar0->mc_rldram_test_d0);
5276 val64 = 0xaaaa5a5555550000ULL;
5277 if (iteration == 1) {
5278 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5280 writeq(val64, &bar0->mc_rldram_test_d1);
5282 val64 = 0x55aaaaaaaa5a0000ULL;
5283 if (iteration == 1) {
5284 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5286 writeq(val64, &bar0->mc_rldram_test_d2);
5288 val64 = (u64) (0x0000003ffffe0100ULL);
5289 writeq(val64, &bar0->mc_rldram_test_add);
/* Trigger the write phase and poll (5 tries) for DONE. */
5291 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
5293 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5295 for (cnt = 0; cnt < 5; cnt++) {
5296 val64 = readq(&bar0->mc_rldram_test_ctrl);
5297 if (val64 & MC_RLDRAM_TEST_DONE)
/* Trigger the read/compare phase and poll for DONE again. */
5305 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
5306 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5308 for (cnt = 0; cnt < 5; cnt++) {
5309 val64 = readq(&bar0->mc_rldram_test_ctrl);
5310 if (val64 & MC_RLDRAM_TEST_DONE)
/* PASS bit clear means the comparison failed. */
5318 val64 = readq(&bar0->mc_rldram_test_ctrl);
5319 if (!(val64 & MC_RLDRAM_TEST_PASS))
5327 /* Bring the adapter out of test mode */
5328 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
5334 * s2io_ethtool_test - conducts 6 tsets to determine the health of card.
5335 * @sp : private member of the device structure, which is a pointer to the
5336 * s2io_nic structure.
5337 * @ethtest : pointer to a ethtool command specific structure that will be
5338 * returned to the user.
5339 * @data : variable that returns the result of each of the test
5340 * conducted by the driver.
5342 * This function conducts 6 tests ( 4 offline and 2 online) to determine
5343 * the health of the card.
5348 static void s2io_ethtool_test(struct net_device *dev,
5349 struct ethtool_test *ethtest,
5352 nic_t *sp = dev->priv;
/* Remember whether the interface was running so it can be restored
 * after the offline tests (restore path elided in this extract). */
5353 int orig_state = netif_running(sp->dev);
5355 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5356 /* Offline Tests. */
/* Offline tests need the device quiesced; close it first. */
5358 s2io_close(sp->dev);
5360 if (s2io_register_test(sp, &data[0]))
5361 ethtest->flags |= ETH_TEST_FL_FAILED;
5365 if (s2io_rldram_test(sp, &data[3]))
5366 ethtest->flags |= ETH_TEST_FL_FAILED;
5370 if (s2io_eeprom_test(sp, &data[1]))
5371 ethtest->flags |= ETH_TEST_FL_FAILED;
5373 if (s2io_bist_test(sp, &data[4]))
5374 ethtest->flags |= ETH_TEST_FL_FAILED;
/* Online path: device must be up for the link test. */
5384 "%s: is not up, cannot run test\n",
5393 if (s2io_link_test(sp, &data[2]))
5394 ethtest->flags |= ETH_TEST_FL_FAILED;
/*
 * s2io_get_ethtool_stats - ethtool .get_ethtool_stats handler.
 * Refreshes the hardware stats block and copies each counter into
 * tmp_stats[] in the fixed order expected by ethtool_stats_keys.
 * 32-bit hardware counters are widened to 64 bits by combining the
 * "_oflow" (upper) and base (lower) words; all hardware counters are
 * byte-swapped from little-endian. The packing order here must stay in
 * lockstep with the strings table — do not reorder.
 */
5403 static void s2io_get_ethtool_stats(struct net_device *dev,
5404 struct ethtool_stats *estats,
5408 nic_t *sp = dev->priv;
5409 StatInfo_t *stat_info = sp->mac_control.stats_info;
/* Pull the latest counters from hardware into stat_info. */
5411 s2io_updt_stats(sp);
/* --- TMAC (transmit MAC) counters --- */
5413 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
5414 le32_to_cpu(stat_info->tmac_frms);
5416 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
5417 le32_to_cpu(stat_info->tmac_data_octets);
5418 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
5420 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
5421 le32_to_cpu(stat_info->tmac_mcst_frms);
5423 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
5424 le32_to_cpu(stat_info->tmac_bcst_frms);
5425 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
5427 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
5428 le32_to_cpu(stat_info->tmac_ttl_octets);
5430 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
5431 le32_to_cpu(stat_info->tmac_ucst_frms);
5433 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
5434 le32_to_cpu(stat_info->tmac_nucst_frms);
5436 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
5437 le32_to_cpu(stat_info->tmac_any_err_frms);
5438 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
5439 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
5441 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
5442 le32_to_cpu(stat_info->tmac_vld_ip);
5444 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
5445 le32_to_cpu(stat_info->tmac_drop_ip);
5447 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
5448 le32_to_cpu(stat_info->tmac_icmp);
5450 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
5451 le32_to_cpu(stat_info->tmac_rst_tcp);
5452 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
5453 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
5454 le32_to_cpu(stat_info->tmac_udp);
/* --- RMAC (receive MAC) counters --- */
5456 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
5457 le32_to_cpu(stat_info->rmac_vld_frms);
5459 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
5460 le32_to_cpu(stat_info->rmac_data_octets);
5461 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
5462 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
5464 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
5465 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
5467 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
5468 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
5469 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
5470 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
5471 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
5472 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
5473 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
5475 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
5476 le32_to_cpu(stat_info->rmac_ttl_octets);
5478 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
5479 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
5481 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
5482 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
5484 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
5485 le32_to_cpu(stat_info->rmac_discarded_frms);
5487 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
5488 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
5489 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
5490 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
5492 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
5493 le32_to_cpu(stat_info->rmac_usized_frms);
5495 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
5496 le32_to_cpu(stat_info->rmac_osized_frms);
5498 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
5499 le32_to_cpu(stat_info->rmac_frag_frms);
5501 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
5502 le32_to_cpu(stat_info->rmac_jabber_frms);
/* Frame-size histogram counters. */
5503 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
5504 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
5505 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
5506 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
5507 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
5508 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
5510 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
5511 le32_to_cpu(stat_info->rmac_ip);
5512 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
5513 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
5515 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
5516 le32_to_cpu(stat_info->rmac_drop_ip);
5518 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
5519 le32_to_cpu(stat_info->rmac_icmp);
5520 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
5522 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
5523 le32_to_cpu(stat_info->rmac_udp);
5525 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
5526 le32_to_cpu(stat_info->rmac_err_drp_udp);
5527 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
/* Per-receive-queue frame and queue-full counters. */
5528 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
5529 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
5530 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
5531 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
5532 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
5533 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
5534 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
5535 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
5536 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
5537 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
5538 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
5539 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
5540 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
5541 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
5542 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
5543 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
5545 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
5546 le32_to_cpu(stat_info->rmac_pause_cnt);
5547 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
5548 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
5550 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
5551 le32_to_cpu(stat_info->rmac_accepted_ip);
5552 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
/* --- PCI read/write transaction counters --- */
5553 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
5554 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
5555 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
5556 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
5557 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
5558 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
5559 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
5560 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
5561 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
5562 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
5563 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
5564 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
5565 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
5566 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
5567 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
5568 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
5569 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
5570 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
5571 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
5572 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
5573 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
5574 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
5575 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
5576 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
5577 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
5578 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
5579 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
5580 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
5581 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
5582 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
5583 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
5584 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
5585 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
5586 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
/* --- Driver software counters (host-endian, no byte swap). --- */
5588 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
5589 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
5590 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
5591 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
5592 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
5593 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
5594 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt;
/* XPAK transceiver alarm/warning counters. */
5595 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
5596 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
5597 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
5598 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
5599 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
5600 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
5601 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
5602 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
5603 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
5604 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
5605 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
5606 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
5607 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
5608 tmp_stats[i++] = stat_info->sw_stat.sending_both;
5609 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
5610 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
/* Average packets per LRO aggregation, computed without a 64-bit
 * divide (see comment below). */
5611 if (stat_info->sw_stat.num_aggregations) {
5612 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
5615 * Since 64-bit divide does not work on all platforms,
5616 * do repeated subtraction.
5618 while (tmp >= stat_info->sw_stat.num_aggregations) {
5619 tmp -= stat_info->sw_stat.num_aggregations;
5622 tmp_stats[i++] = count;
/* ethtool .get_regs_len: size in bytes of the register dump. */
5628 static int s2io_ethtool_get_regs_len(struct net_device *dev)
5630 return (XENA_REG_SPACE);
/* ethtool .get_rx_csum: report whether RX checksum offload is enabled. */
5634 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
5636 nic_t *sp = dev->priv;
5638 return (sp->rx_csum);
/* ethtool .set_rx_csum: enable/disable RX checksum offload
 * (body elided in this extract — presumably stores into sp->rx_csum). */
5641 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5643 nic_t *sp = dev->priv;
/* ethtool .get_eeprom_len: size in bytes of the visible EEPROM space. */
5653 static int s2io_get_eeprom_len(struct net_device *dev)
5655 return (XENA_EEPROM_SPACE);
/* ethtool .self_test_count: number of self-test result slots. */
5658 static int s2io_ethtool_self_test_count(struct net_device *dev)
5660 return (S2IO_TEST_LEN);
/* ethtool .get_strings: copy out the name tables for either the
 * self-test labels or the statistics keys, depending on stringset. */
5663 static void s2io_ethtool_get_strings(struct net_device *dev,
5664 u32 stringset, u8 * data)
5666 switch (stringset) {
5668 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5671 memcpy(data, &ethtool_stats_keys,
5672 sizeof(ethtool_stats_keys));
/* ethtool .get_stats_count: number of entries filled by
 * s2io_get_ethtool_stats(); must match ethtool_stats_keys. */
5675 static int s2io_ethtool_get_stats_count(struct net_device *dev)
5677 return (S2IO_STAT_LEN);
/* ethtool .set_tx_csum: toggle the NETIF_F_IP_CSUM feature flag. */
5680 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
5683 dev->features |= NETIF_F_IP_CSUM;
5685 dev->features &= ~NETIF_F_IP_CSUM;
/* ethtool .get_tso: report whether TSO is currently enabled. */
5690 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
5692 return (dev->features & NETIF_F_TSO) != 0;
/* ethtool .set_tso: toggle both IPv4 and IPv6 TSO feature flags. */
5694 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
5697 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
5699 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
/* ethtool operations table wired into the net_device at probe time. */
5704 static const struct ethtool_ops netdev_ethtool_ops = {
5705 .get_settings = s2io_ethtool_gset,
5706 .set_settings = s2io_ethtool_sset,
5707 .get_drvinfo = s2io_ethtool_gdrvinfo,
5708 .get_regs_len = s2io_ethtool_get_regs_len,
5709 .get_regs = s2io_ethtool_gregs,
5710 .get_link = ethtool_op_get_link,
5711 .get_eeprom_len = s2io_get_eeprom_len,
5712 .get_eeprom = s2io_ethtool_geeprom,
5713 .set_eeprom = s2io_ethtool_seeprom,
5714 .get_pauseparam = s2io_ethtool_getpause_data,
5715 .set_pauseparam = s2io_ethtool_setpause_data,
5716 .get_rx_csum = s2io_ethtool_get_rx_csum,
5717 .set_rx_csum = s2io_ethtool_set_rx_csum,
5718 .get_tx_csum = ethtool_op_get_tx_csum,
5719 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
5720 .get_sg = ethtool_op_get_sg,
5721 .set_sg = ethtool_op_set_sg,
5722 .get_tso = s2io_ethtool_op_get_tso,
5723 .set_tso = s2io_ethtool_op_set_tso,
5724 .get_ufo = ethtool_op_get_ufo,
5725 .set_ufo = ethtool_op_set_ufo,
5726 .self_test_count = s2io_ethtool_self_test_count,
5727 .self_test = s2io_ethtool_test,
5728 .get_strings = s2io_ethtool_get_strings,
5729 .phys_id = s2io_ethtool_idnic,
5730 .get_stats_count = s2io_ethtool_get_stats_count,
5731 .get_ethtool_stats = s2io_get_ethtool_stats
5735 * s2io_ioctl - Entry point for the Ioctl
5736 * @dev : Device pointer.
5737 * @ifr : An IOCTL specefic structure, that can contain a pointer to
5738 * a proprietary structure used to pass information to the driver.
5739 * @cmd : This is used to distinguish between the different commands that
5740 * can be passed to the IOCTL functions.
5742 * Currently there are no special functionality supported in IOCTL, hence
5743 * function always return EOPNOTSUPPORTED
5746 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5752 * s2io_change_mtu - entry point to change MTU size for the device.
5753 * @dev : device pointer.
5754 * @new_mtu : the new MTU size for the device.
5755 * Description: A driver entry point to change MTU size for the device.
5756 * Before changing the MTU the device must be stopped.
5758 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5762 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
5764 nic_t *sp = dev->priv;
/* Reject MTUs outside the supported [MIN_MTU, S2IO_JUMBO_SIZE] range. */
5766 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
5767 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
/* Device up: restart the card so the new MTU takes effect
 * (card-down step elided in this extract). */
5773 if (netif_running(dev)) {
5775 netif_stop_queue(dev);
5776 if (s2io_card_up(sp)) {
5777 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
5780 if (netif_queue_stopped(dev))
5781 netif_wake_queue(dev);
5782 } else { /* Device is down */
/* Device down: just program the max payload length register. */
5783 XENA_dev_config_t __iomem *bar0 = sp->bar0;
5784 u64 val64 = new_mtu;
5786 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
5793 * s2io_tasklet - Bottom half of the ISR.
5794 * @dev_adr : address of the device structure in dma_addr_t format.
5796 * This is the tasklet or the bottom half of the ISR. This is
5797 * an extension of the ISR which is scheduled by the scheduler to be run
5798 * when the load on the CPU is low. All low priority tasks of the ISR can
5799 * be pushed into the tasklet. For now the tasklet is used only to
5800 * replenish the Rx buffers in the Rx buffer descriptors.
5805 static void s2io_tasklet(unsigned long dev_addr)
5807 struct net_device *dev = (struct net_device *) dev_addr;
5808 nic_t *sp = dev->priv;
5810 mac_info_t *mac_control;
5811 struct config_param *config;
5813 mac_control = &sp->mac_control;
5814 config = &sp->config;
/* Only one instance may refill at a time; TASKLET_IN_USE guards
 * (presumably via test_and_set on sp->tasklet_status — confirm). */
5816 if (!TASKLET_IN_USE) {
5817 for (i = 0; i < config->rx_ring_num; i++) {
5818 ret = fill_rx_buffers(sp, i);
5819 if (ret == -ENOMEM) {
5820 DBG_PRINT(ERR_DBG, "%s: Out of ",
5822 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
5824 } else if (ret == -EFILL) {
5826 "%s: Rx Ring %d is full\n",
/* Release the in-use flag taken above. */
5831 clear_bit(0, (&sp->tasklet_status));
5836 * s2io_set_link - Set the LInk status
5837 * @data: long pointer to device private structue
5838 * Description: Sets the link status for the adapter
5841 static void s2io_set_link(struct work_struct *work)
5843 nic_t *nic = container_of(work, nic_t, set_link_task);
5844 struct net_device *dev = nic->dev;
5845 XENA_dev_config_t __iomem *bar0 = nic->bar0;
/* link_state bit 0 doubles as a "reset in progress" lock; bail out
 * if it is already held. */
5849 if (test_and_set_bit(0, &(nic->link_state))) {
5850 /* The card is being reset, no point doing anything */
5854 subid = nic->pdev->subsystem_device;
5855 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
5857 * Allow a small delay for the NICs self initiated
5858 * cleanup to complete.
5863 val64 = readq(&bar0->adapter_status);
5864 if (LINK_IS_UP(val64)) {
/* Link came up: enable the adapter if it is quiescent, and drive
 * the LED/GPIO on boards with faulty link indicators. */
5865 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
5866 if (verify_xena_quiescence(nic)) {
5867 val64 = readq(&bar0->adapter_control);
5868 val64 |= ADAPTER_CNTL_EN;
5869 writeq(val64, &bar0->adapter_control);
5870 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
5871 nic->device_type, subid)) {
5872 val64 = readq(&bar0->gpio_control);
5873 val64 |= GPIO_CTRL_GPIO_0;
5874 writeq(val64, &bar0->gpio_control);
5875 val64 = readq(&bar0->gpio_control);
5877 val64 |= ADAPTER_LED_ON;
5878 writeq(val64, &bar0->adapter_control);
5880 nic->device_enabled_once = TRUE;
5882 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
5883 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
5884 netif_stop_queue(dev);
/* Re-check: the link may have dropped while enabling. */
5887 val64 = readq(&bar0->adapter_status);
5888 if (!LINK_IS_UP(val64)) {
5889 DBG_PRINT(ERR_DBG, "%s:", dev->name);
5890 DBG_PRINT(ERR_DBG, " Link down after enabling ");
5891 DBG_PRINT(ERR_DBG, "device \n");
5893 s2io_link(nic, LINK_UP);
/* Link down: clear the GPIO indicator and report LINK_DOWN. */
5895 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
5897 val64 = readq(&bar0->gpio_control);
5898 val64 &= ~GPIO_CTRL_GPIO_0;
5899 writeq(val64, &bar0->gpio_control);
5900 val64 = readq(&bar0->gpio_control);
5902 s2io_link(nic, LINK_DOWN);
/* Drop the lock taken at entry. */
5904 clear_bit(0, &(nic->link_state));
/*
 * set_rxd_buffer_pointer - (re)attach skbs and DMA buffers to an Rx
 * descriptor for the three supported Rx descriptor modes. A descriptor
 * with non-NULL Host_Control keeps its existing skb; *temp0..*temp2
 * cache the last mapped addresses so they can be reused for the next
 * descriptor whose Host_Control is NULL.
 * NOTE(review): extract elides lines (returns, some mapping lengths) —
 * confirm error paths against the complete source.
 */
5907 static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba,
5908 struct sk_buff **skb, u64 *temp0, u64 *temp1,
5909 u64 *temp2, int size)
5911 struct net_device *dev = sp->dev;
5912 struct sk_buff *frag_list;
/* Mode 1: single buffer holds the whole frame. */
5914 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
5917 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
5919 * As Rx frame are not going to be processed,
5920 * using same mapped address for the Rxd
5923 ((RxD1_t*)rxdp)->Buffer0_ptr = *temp0;
5925 *skb = dev_alloc_skb(size);
5927 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
5928 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
5931 /* storing the mapped addr in a temp variable
5932 * such it will be used for next rxd whose
5933 * Host Control is NULL
5935 ((RxD1_t*)rxdp)->Buffer0_ptr = *temp0 =
5936 pci_map_single( sp->pdev, (*skb)->data,
5937 size - NET_IP_ALIGN,
5938 PCI_DMA_FROMDEVICE);
5939 rxdp->Host_Control = (unsigned long) (*skb);
5941 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
5942 /* Two buffer Mode */
/* Reuse previously cached mappings when an skb already exists. */
5944 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2;
5945 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0;
5946 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1;
5948 *skb = dev_alloc_skb(size);
5950 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
/* Buffer2: frame payload in the skb; Buffer0: ba->ba_0 header area. */
5954 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 =
5955 pci_map_single(sp->pdev, (*skb)->data,
5957 PCI_DMA_FROMDEVICE);
5958 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 =
5959 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
5960 PCI_DMA_FROMDEVICE);
5961 rxdp->Host_Control = (unsigned long) (*skb);
5963 /* Buffer-1 will be dummy buffer not used */
5964 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1 =
5965 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
5966 PCI_DMA_FROMDEVICE);
5968 } else if ((rxdp->Host_Control == 0)) {
5969 /* Three buffer mode */
5971 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0;
5972 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1;
5973 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2;
5975 *skb = dev_alloc_skb(size);
5977 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
5981 ((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 =
5982 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
5983 PCI_DMA_FROMDEVICE);
5984 /* Buffer-1 receives L3/L4 headers */
5985 ((RxD3_t*)rxdp)->Buffer1_ptr = *temp1 =
5986 pci_map_single( sp->pdev, (*skb)->data,
5988 PCI_DMA_FROMDEVICE);
5990 * skb_shinfo(skb)->frag_list will have L4
5993 skb_shinfo(*skb)->frag_list = dev_alloc_skb(dev->mtu +
5995 if (skb_shinfo(*skb)->frag_list == NULL) {
5996 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb \
5997 failed\n ", dev->name);
6000 frag_list = skb_shinfo(*skb)->frag_list;
6001 frag_list->next = NULL;
6003 * Buffer-2 receives L4 data payload
6005 ((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 =
6006 pci_map_single( sp->pdev, frag_list->data,
6007 dev->mtu, PCI_DMA_FROMDEVICE);
/*
 * set_rxd_buffer_size - program the per-buffer sizes into an RxD's Control_2
 * word according to the current ring mode (1-, 2- or 3-buffer).  Companion of
 * set_rxd_buffer_pointer() on the card-down replenish path.
 */
6012 static void set_rxd_buffer_size(nic_t *sp, RxD_t *rxdp, int size)
6014 struct net_device *dev = sp->dev;
6015 if (sp->rxd_mode == RXD_MODE_1) {
6016 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6017 } else if (sp->rxd_mode == RXD_MODE_3B) {
/* 2-buffer mode: BUF0_LEN header buffer, 1-byte dummy buffer-1, and an
 * mtu+4 payload buffer (the +4 presumably covers the VLAN tag — TODO
 * confirm) */
6018 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6019 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6020 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
/* 3-buffer mode: L3/L4 headers land in buffer-1, payload in buffer-2 */
6022 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6023 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
6024 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
/*
 * rxd_owner_bit_reset - walk every RxD of every configured Rx ring, re-arm
 * it with buffers (set_rxd_buffer_pointer) and the correct buffer sizes
 * (set_rxd_buffer_size), then hand ownership back to the hardware by setting
 * RXD_OWN_XENA in Control_1.  Called on the card-down path so the NIC can
 * keep replenishing descriptors and avoid a ring bump; the frames themselves
 * are never processed.
 */
6028 static int rxd_owner_bit_reset(nic_t *sp)
6030 int i, j, k, blk_cnt = 0, size;
6031 mac_info_t * mac_control = &sp->mac_control;
6032 struct config_param *config = &sp->config;
6033 struct net_device *dev = sp->dev;
6035 struct sk_buff *skb = NULL;
6036 buffAdd_t *ba = NULL;
/* cached DMA addresses, threaded through set_rxd_buffer_pointer() */
6037 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6039 /* Calculate the size based on ring mode */
6040 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6041 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6042 if (sp->rxd_mode == RXD_MODE_1)
6043 size += NET_IP_ALIGN;
6044 else if (sp->rxd_mode == RXD_MODE_3B)
6045 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6047 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
6049 for (i = 0; i < config->rx_ring_num; i++) {
6050 blk_cnt = config->rx_cfg[i].num_rxd /
6051 (rxd_count[sp->rxd_mode] +1);
6053 for (j = 0; j < blk_cnt; j++) {
6054 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6055 rxdp = mac_control->rings[i].
6056 rx_blocks[j].rxds[k].virt_addr;
/* per-descriptor buffer-address bookkeeping only exists in multi-buffer modes */
6057 if(sp->rxd_mode >= RXD_MODE_3A)
6058 ba = &mac_control->rings[i].ba[j][k];
6059 set_rxd_buffer_pointer(sp, rxdp, ba,
6060 &skb,(u64 *)&temp0_64,
6062 (u64 *)&temp2_64, size);
6064 set_rxd_buffer_size(sp, rxdp, size);
6066 /* flip the Ownership bit to Hardware */
6067 rxdp->Control_1 |= RXD_OWN_XENA;
/*
 * s2io_add_isr - enable the configured interrupt mechanism (MSI, MSI-X or
 * legacy INTA) and register the matching interrupt handler(s).
 * If enabling MSI/MSI-X fails the driver falls back to INTA.  In MSI-X mode
 * one vector per in-use entry is registered — s2io_msix_fifo_handle for Tx
 * FIFOs and s2io_msix_ring_handle for Rx rings — and each successful
 * registration is recorded in s2io_entries[i].in_use.
 * NOTE(review): the return-value plumbing and error unwinding are only
 * partially visible in this excerpt.
 */
6075 static int s2io_add_isr(nic_t * sp)
6078 struct net_device *dev = sp->dev;
6081 if (sp->intr_type == MSI)
6082 ret = s2io_enable_msi(sp);
6083 else if (sp->intr_type == MSI_X)
6084 ret = s2io_enable_msi_x(sp);
/* fall back to legacy interrupts if MSI/MSI-X could not be enabled */
6086 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6087 sp->intr_type = INTA;
6090 /* Store the values of the MSIX table in the nic_t structure */
6091 store_xmsi_data(sp);
6093 /* After proper initialization of H/W, register ISR */
6094 if (sp->intr_type == MSI) {
6095 err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
6096 IRQF_SHARED, sp->name, dev);
6098 pci_disable_msi(sp->pdev);
6099 DBG_PRINT(ERR_DBG, "%s: MSI registration failed\n",
6104 if (sp->intr_type == MSI_X) {
/* entry 0 is the alarm vector; data vectors start at index 1 */
6107 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
6108 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
6109 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
6111 err = request_irq(sp->entries[i].vector,
6112 s2io_msix_fifo_handle, 0, sp->desc[i],
6113 sp->s2io_entries[i].arg);
6114 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc[i],
6115 (unsigned long long)sp->msix_info[i].addr);
6117 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
6119 err = request_irq(sp->entries[i].vector,
6120 s2io_msix_ring_handle, 0, sp->desc[i],
6121 sp->s2io_entries[i].arg);
6122 DBG_PRINT(ERR_DBG, "%s @ 0x%llx\n", sp->desc[i],
6123 (unsigned long long)sp->msix_info[i].addr);
6126 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
6127 "failed\n", dev->name, i);
6128 DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
6131 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
6134 if (sp->intr_type == INTA) {
6135 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
6138 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
/*
 * s2io_rem_isr - undo s2io_add_isr(): free every registered IRQ, disable
 * MSI/MSI-X (via direct PCI config-space writes at offsets 0x42 / 0x4c —
 * presumably the device's MSI-X / MSI control registers; confirm against the
 * hardware spec), then spin until no interrupt handler is still running
 * (sp->isr_cnt reaches zero).
 */
6145 static void s2io_rem_isr(nic_t * sp)
6148 struct net_device *dev = sp->dev;
6150 if (sp->intr_type == MSI_X) {
/* free every vector that was successfully registered, starting at 1 */
6154 for (i=1; (sp->s2io_entries[i].in_use ==
6155 MSIX_REGISTERED_SUCCESS); i++) {
6156 int vector = sp->entries[i].vector;
6157 void *arg = sp->s2io_entries[i].arg;
6159 free_irq(vector, arg);
6161 pci_read_config_word(sp->pdev, 0x42, &msi_control);
6162 msi_control &= 0xFFFE; /* Disable MSI */
6163 pci_write_config_word(sp->pdev, 0x42, msi_control);
6165 pci_disable_msix(sp->pdev);
6167 free_irq(sp->pdev->irq, dev);
6168 if (sp->intr_type == MSI) {
6171 pci_disable_msi(sp->pdev);
6172 pci_read_config_word(sp->pdev, 0x4c, &val);
6174 pci_write_config_word(sp->pdev, 0x4c, val);
6177 /* Waiting till all Interrupt handlers are complete */
6181 if (!atomic_read(&sp->isr_cnt))
/*
 * s2io_card_down - bring the adapter down: stop the alarm timer, wait for a
 * concurrent s2io_set_link task (link_state bit 0 acts as the mutex), mark
 * the card DOWN, kill the tasklet, re-arm the Rx rings for hardware
 * (rxd_owner_bit_reset), verify the device has gone quiescent, and finally
 * free all Tx and Rx buffers under their respective locks.
 */
6187 static void s2io_card_down(nic_t * sp)
6190 XENA_dev_config_t __iomem *bar0 = sp->bar0;
6191 unsigned long flags;
6192 register u64 val64 = 0;
6194 del_timer_sync(&sp->alarm_timer);
6195 /* If s2io_set_link task is executing, wait till it completes. */
6196 while (test_and_set_bit(0, &(sp->link_state))) {
6199 atomic_set(&sp->card_state, CARD_DOWN);
6201 /* disable Tx and Rx traffic on the NIC */
6207 tasklet_kill(&sp->task);
6209 /* Check if the device is Quiescent and then Reset the NIC */
6211 /* As per the HW requirement we need to replenish the
6212 * receive buffer to avoid the ring bump. Since there is
6213 * no intention of processing the Rx frame at this point we are
6214 * just setting the ownership bit of rxd in each Rx
6215 * ring to HW and set the appropriate buffer size
6216 * based on the ring mode
6218 rxd_owner_bit_reset(sp);
6220 val64 = readq(&bar0->adapter_status);
6221 if (verify_xena_quiescence(sp)) {
6222 if(verify_pcc_quiescent(sp, sp->device_enabled_once))
6230 "s2io_close:Device not Quiescent ");
6231 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
6232 (unsigned long long) val64);
6238 spin_lock_irqsave(&sp->tx_lock, flags);
6239 /* Free all Tx buffers */
6240 free_tx_buffers(sp);
6241 spin_unlock_irqrestore(&sp->tx_lock, flags);
6243 /* Free all Rx buffers */
6244 spin_lock_irqsave(&sp->rx_lock, flags);
6245 free_rx_buffers(sp);
6246 spin_unlock_irqrestore(&sp->rx_lock, flags);
/* release the "set_link in progress" mutex taken above */
6248 clear_bit(0, &(sp->link_state));
/*
 * s2io_card_up - bring the adapter up: initialize the hardware registers,
 * fill every Rx ring with buffers, restore the receive-mode state that
 * existed before the open, size the LRO aggregation limit from the MTU,
 * start the NIC, register the ISR(s), arm the alarm timer and tasklet, and
 * enable the selected interrupt sources before marking the card UP.
 * Error paths free the already-allocated Rx buffers before returning.
 */
6251 static int s2io_card_up(nic_t * sp)
6254 mac_info_t *mac_control;
6255 struct config_param *config;
6256 struct net_device *dev = (struct net_device *) sp->dev;
6259 /* Initialize the H/W I/O registers */
6260 if (init_nic(sp) != 0) {
6261 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
6268 * Initializing the Rx buffers. For now we are considering only 1
6269 * Rx ring and initializing buffers into 30 Rx blocks
6271 mac_control = &sp->mac_control;
6272 config = &sp->config;
6274 for (i = 0; i < config->rx_ring_num; i++) {
6275 if ((ret = fill_rx_buffers(sp, i))) {
6276 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
6279 free_rx_buffers(sp);
6282 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
6283 atomic_read(&sp->rx_bufs_left[i]));
6285 /* Maintain the state prior to the open */
6286 if (sp->promisc_flg)
6287 sp->promisc_flg = 0;
6288 if (sp->m_cast_flg) {
6290 sp->all_multi_pos= 0;
6293 /* Setting its receive mode */
6294 s2io_set_multicast(dev);
/* cap a single LRO session at 64K-1 bytes worth of MTU-sized packets */
6297 /* Initialize max aggregatable pkts per session based on MTU */
6298 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
6299 /* Check if we can use(if specified) user provided value */
6300 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
6301 sp->lro_max_aggr_per_sess = lro_max_pkts;
6304 /* Enable Rx Traffic and interrupts on the NIC */
6305 if (start_nic(sp)) {
6306 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
6308 free_rx_buffers(sp);
6312 /* Add interrupt service routine */
6313 if (s2io_add_isr(sp) != 0) {
6314 if (sp->intr_type == MSI_X)
6317 free_rx_buffers(sp);
/* alarm handler fires every half second */
6321 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
6323 /* Enable tasklet for the device */
6324 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
6326 /* Enable select interrupts */
6327 if (sp->intr_type != INTA)
6328 en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
6330 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
6331 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
6332 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
6333 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
6337 atomic_set(&sp->card_state, CARD_UP);
6342 * s2io_restart_nic - Resets the NIC.
6343 * @data : long pointer to the device private structure
6345 * This function is scheduled to be run by the s2io_tx_watchdog
6346 * function after 0.5 secs to reset the NIC. The idea is to reduce
6347 * the run time of the watch dog routine which is run holding a
/* Work-queue callback scheduled by s2io_tx_watchdog(); reinitializes the
 * card via s2io_card_up() and, on success, wakes the Tx queue. */
6351 static void s2io_restart_nic(struct work_struct *work)
6353 nic_t *sp = container_of(work, nic_t, rst_timer_task);
6354 struct net_device *dev = sp->dev;
6357 if (s2io_card_up(sp)) {
6358 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6361 netif_wake_queue(dev);
6362 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
6368 * s2io_tx_watchdog - Watchdog for transmit side.
6369 * @dev : Pointer to net device structure
6371 * This function is triggered if the Tx Queue is stopped
6372 * for a pre-defined amount of time when the Interface is still up.
6373 * If the Interface is jammed in such a situation, the hardware is
6374 * reset (by s2io_close) and restarted again (by s2io_open) to
6375 * overcome any problem that might have been caused in the hardware.
/* Tx-timeout hook (dev->tx_timeout): if the link is still up, schedule
 * rst_timer_task (s2io_restart_nic) to reset the adapter and bump the
 * soft-reset counter. */
6380 static void s2io_tx_watchdog(struct net_device *dev)
6382 nic_t *sp = dev->priv;
6384 if (netif_carrier_ok(dev)) {
6385 schedule_work(&sp->rst_timer_task);
6386 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
6391 * rx_osm_handler - To perform some OS related operations on SKB.
6392 * @sp: private member of the device structure,pointer to s2io_nic structure.
6393 * @skb : the socket buffer pointer.
6394 * @len : length of the packet
6395 * @cksum : FCS checksum of the frame.
6396 * @ring_no : the ring from which this RxD was extracted.
6398 * This function is called by the Rx interrupt service routine to perform
6399 * some OS related operations on the SKB before passing it to the upper
6400 * layers. It mainly checks if the checksum is OK, if so adds it to the
6401 * SKBs cksum variable, increments the Rx packet count and passes the SKB
6402 * to the upper layer. If the checksum is wrong, it increments the Rx
6403 * packet error count, frees the SKB and returns error.
6405 * SUCCESS on success and -1 on failure.
/*
 * rx_osm_handler - per-frame Rx completion processing: validate the RxD
 * transfer code, assemble the skb according to the ring's buffer mode,
 * apply hardware checksum results, optionally run LRO aggregation, and
 * finally hand the frame (VLAN-aware) to the stack.
 */
6407 static int rx_osm_handler(ring_info_t *ring_data, RxD_t * rxdp)
6409 nic_t *sp = ring_data->nic;
6410 struct net_device *dev = (struct net_device *) sp->dev;
6411 struct sk_buff *skb = (struct sk_buff *)
6412 ((unsigned long) rxdp->Host_Control);
6413 int ring_no = ring_data->ring_no;
6414 u16 l3_csum, l4_csum;
/* bits 48+ of Control_1 hold the transfer (error) code */
6415 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
6421 /* Check for parity error */
6423 sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
6427 * Drop the packet if bad transfer code. Exception being
6428 * 0x5, which could be due to unsupported IPv6 extension header.
6429 * In this case, we let stack handle the packet.
6430 * Note that in this case, since checksum will be incorrect,
6431 * stack will validate the same.
6433 if (err && ((err >> 48) != 0x5)) {
6434 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
6436 sp->stats.rx_crc_errors++;
6438 atomic_dec(&sp->rx_bufs_left[ring_no]);
6439 rxdp->Host_Control = 0;
6444 /* Updating statistics */
6445 rxdp->Host_Control = 0;
6447 sp->stats.rx_packets++;
6448 if (sp->rxd_mode == RXD_MODE_1) {
/* 1-buffer mode: whole frame is in the single data buffer */
6449 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
6451 sp->stats.rx_bytes += len;
6454 } else if (sp->rxd_mode >= RXD_MODE_3A) {
6455 int get_block = ring_data->rx_curr_get_info.block_index;
6456 int get_off = ring_data->rx_curr_get_info.offset;
6457 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
6458 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
/* prepend the Ethernet header that the NIC DMA'd into buffer-0 */
6459 unsigned char *buff = skb_push(skb, buf0_len);
6461 buffAdd_t *ba = &ring_data->ba[get_block][get_off];
6462 sp->stats.rx_bytes += buf0_len + buf2_len;
6463 memcpy(buff, ba->ba_0, buf0_len);
6465 if (sp->rxd_mode == RXD_MODE_3A) {
/* 3-buffer mode: L3/L4 headers in the skb, payload in frag_list */
6466 int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2);
6468 skb_put(skb, buf1_len);
6469 skb->len += buf2_len;
6470 skb->data_len += buf2_len;
6471 skb_put(skb_shinfo(skb)->frag_list, buf2_len);
6472 sp->stats.rx_bytes += buf1_len;
6475 skb_put(skb, buf2_len);
/* run checksum/LRO logic only for non-fragmented TCP/UDP frames when
 * Rx checksumming is on */
6478 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
6479 (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
6481 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
6482 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
6483 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
6485 * NIC verifies if the Checksum of the received
6486 * frame is Ok or not and accordingly returns
6487 * a flag in the RxD.
6489 skb->ip_summed = CHECKSUM_UNNECESSARY;
6495 ret = s2io_club_tcp_session(skb->data, &tcp,
6496 &tcp_len, &lro, rxdp, sp);
/* dispatch on the LRO clubbing verdict */
6498 case 3: /* Begin anew */
6501 case 1: /* Aggregate */
6503 lro_append_pkt(sp, lro,
6507 case 4: /* Flush session */
6509 lro_append_pkt(sp, lro,
6511 queue_rx_frame(lro->parent);
6512 clear_lro_session(lro);
6513 sp->mac_control.stats_info->
6514 sw_stat.flush_max_pkts++;
6517 case 2: /* Flush both */
6518 lro->parent->data_len =
6520 sp->mac_control.stats_info->
6521 sw_stat.sending_both++;
6522 queue_rx_frame(lro->parent);
6523 clear_lro_session(lro);
6525 case 0: /* sessions exceeded */
6526 case -1: /* non-TCP or not
6530 * First pkt in session not
6531 * L3/L4 aggregatable
6536 "%s: Samadhana!!\n",
6543 * Packet with erroneous checksum, let the
6544 * upper layers deal with it.
6546 skb->ip_summed = CHECKSUM_NONE;
6549 skb->ip_summed = CHECKSUM_NONE;
6553 skb->protocol = eth_type_trans(skb, dev);
6554 if (sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2)) {
6555 /* Queueing the vlan frame to the upper layer */
6557 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
6558 RXD_GET_VLAN_TAG(rxdp->Control_2));
6560 vlan_hwaccel_rx(skb, sp->vlgrp,
6561 RXD_GET_VLAN_TAG(rxdp->Control_2));
6564 netif_receive_skb(skb);
6570 queue_rx_frame(skb);
6572 dev->last_rx = jiffies;
/* the descriptor's buffer has been consumed */
6574 atomic_dec(&sp->rx_bufs_left[ring_no]);
6579 * s2io_link - stops/starts the Tx queue.
6580 * @sp : private member of the device structure, which is a pointer to the
6581 * s2io_nic structure.
6582 * @link : indicates whether link is UP/DOWN.
6584 * This function stops/starts the Tx queue depending on whether the link
6585 * status of the NIC is down or up. This is called by the Alarm
6586 * interrupt handler whenever a link change interrupt comes up.
/* Propagate a link state change to the net core (carrier on/off); only
 * acts, and logs, when the state actually changed since the last call. */
6591 static void s2io_link(nic_t * sp, int link)
6593 struct net_device *dev = (struct net_device *) sp->dev;
6595 if (link != sp->last_link_state) {
6596 if (link == LINK_DOWN) {
6597 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
6598 netif_carrier_off(dev);
6600 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
6601 netif_carrier_on(dev);
/* remember the state so repeated notifications are ignored */
6604 sp->last_link_state = link;
6608 * get_xena_rev_id - to identify revision ID of xena.
6609 * @pdev : PCI Dev structure
6611 * Function to identify the Revision ID of xena.
6613 * returns the revision ID of the device.
/* Read the PCI revision ID of the adapter from config space. */
6616 static int get_xena_rev_id(struct pci_dev *pdev)
6620 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
6625 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
6626 * @sp : private member of the device structure, which is a pointer to the
6627 * s2io_nic structure.
6629 * This function initializes a few of the PCI and PCI-X configuration registers
6630 * with recommended values.
/*
 * s2io_init_pci - program recommended values into a few PCI / PCI-X config
 * registers: enable data-parity-error recovery in the PCI-X command register
 * and set the PErr response bit in the PCI command register.  The trailing
 * reads after each write presumably flush/verify the update.
 */
6635 static void s2io_init_pci(nic_t * sp)
6637 u16 pci_cmd = 0, pcix_cmd = 0;
6639 /* Enable Data Parity Error Recovery in PCI-X command register. */
6640 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
6642 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
6644 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
6647 /* Set the PErr Response bit in PCI command register. */
6648 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
6649 pci_write_config_word(sp->pdev, PCI_COMMAND,
6650 (pci_cmd | PCI_COMMAND_PARITY));
6651 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
/*
 * s2io_verify_parm - sanity-check the module load parameters before device
 * init: clamp FIFO/ring counts to 8, force unsupported interrupt choices
 * back to INTA (no MSI support in kernel, out-of-range value, MSI-X on a
 * non-Herc device, or multiple Rx rings), and fall back to 3-buffer mode for
 * an invalid rx_ring_mode.
 */
6654 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
6656 if ( tx_fifo_num > 8) {
6657 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
6659 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
6662 if ( rx_ring_num > 8) {
6663 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
6665 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
6668 if (*dev_intr_type != INTA)
6671 #ifndef CONFIG_PCI_MSI
6672 if (*dev_intr_type != INTA) {
6673 DBG_PRINT(ERR_DBG, "s2io: This kernel does not support"
6674 "MSI/MSI-X. Defaulting to INTA\n");
6675 *dev_intr_type = INTA;
6678 if (*dev_intr_type > MSI_X) {
6679 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
6680 "Defaulting to INTA\n");
6681 *dev_intr_type = INTA;
/* MSI-X is only available on Xframe II (Herc) devices */
6684 if ((*dev_intr_type == MSI_X) &&
6685 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
6686 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
6687 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
6688 "Defaulting to INTA\n");
6689 *dev_intr_type = INTA;
6691 if ( (rx_ring_num > 1) && (*dev_intr_type != INTA) )
6693 if (rx_ring_mode > 3) {
6694 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
6695 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
6702 * s2io_init_nic - Initialization of the adapter .
6703 * @pdev : structure containing the PCI related information of the device.
6704 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
6706 * The function initializes an adapter identified by the pci_dev structure.
6707 * All OS related initialization including memory and device structure and
6708 * initialization of the device private variable is done. Also the swapper
6709 * control register is initialized to enable read and write into the I/O
6710 * registers of the device.
6712 * returns 0 on success and negative on failure.
/*
 * s2io_init_nic - PCI probe routine.  Enables the device, sets up DMA
 * masks, claims the BAR regions, allocates and wires up the net_device,
 * applies module-parameter configuration (Tx FIFOs, Rx rings, pause
 * parameters), allocates shared memory, maps BAR0/BAR1, installs netdev
 * entry points and features, reads the factory MAC address from the RMAC,
 * configures the LEDs (SXE-002 workaround), registers the netdev and prints
 * a configuration summary.  Error paths unwind via the goto labels at the
 * bottom.  Returns 0 on success, negative on failure.
 */
6715 static int __devinit
6716 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6719 struct net_device *dev;
6721 int dma_flag = FALSE;
6722 u32 mac_up, mac_down;
6723 u64 val64 = 0, tmp64 = 0;
6724 XENA_dev_config_t __iomem *bar0 = NULL;
6726 mac_info_t *mac_control;
6727 struct config_param *config;
6729 u8 dev_intr_type = intr_type;
6731 if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
6734 if ((ret = pci_enable_device(pdev))) {
6736 "s2io_init_nic: pci_enable_device failed\n");
/* prefer 64-bit DMA, fall back to 32-bit, bail otherwise */
6740 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
6741 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
6743 if (pci_set_consistent_dma_mask
6744 (pdev, DMA_64BIT_MASK)) {
6746 "Unable to obtain 64bit DMA for \
6747 consistent allocations\n");
6748 pci_disable_device(pdev);
6751 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
6752 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
6754 pci_disable_device(pdev);
/* non-MSI-X claims all regions at once; MSI-X claims BAR0/BAR2 separately */
6757 if (dev_intr_type != MSI_X) {
6758 if (pci_request_regions(pdev, s2io_driver_name)) {
6759 DBG_PRINT(ERR_DBG, "Request Regions failed\n");
6760 pci_disable_device(pdev);
6765 if (!(request_mem_region(pci_resource_start(pdev, 0),
6766 pci_resource_len(pdev, 0), s2io_driver_name))) {
6767 DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
6768 pci_disable_device(pdev);
6771 if (!(request_mem_region(pci_resource_start(pdev, 2),
6772 pci_resource_len(pdev, 2), s2io_driver_name))) {
6773 DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
6774 release_mem_region(pci_resource_start(pdev, 0),
6775 pci_resource_len(pdev, 0));
6776 pci_disable_device(pdev);
6781 dev = alloc_etherdev(sizeof(nic_t));
6783 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
6784 pci_disable_device(pdev);
6785 pci_release_regions(pdev);
6789 pci_set_master(pdev);
6790 pci_set_drvdata(pdev, dev);
6791 SET_MODULE_OWNER(dev);
6792 SET_NETDEV_DEV(dev, &pdev->dev);
6794 /* Private member variable initialized to s2io NIC structure */
6796 memset(sp, 0, sizeof(nic_t));
6799 sp->high_dma_flag = dma_flag;
6800 sp->device_enabled_once = FALSE;
/* map the module-parameter ring mode onto the RxD mode constants */
6801 if (rx_ring_mode == 1)
6802 sp->rxd_mode = RXD_MODE_1;
6803 if (rx_ring_mode == 2)
6804 sp->rxd_mode = RXD_MODE_3B;
6805 if (rx_ring_mode == 3)
6806 sp->rxd_mode = RXD_MODE_3A;
6808 sp->intr_type = dev_intr_type;
6810 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
6811 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
6812 sp->device_type = XFRAME_II_DEVICE;
6814 sp->device_type = XFRAME_I_DEVICE;
6818 /* Initialize some PCI/PCI-X fields of the NIC. */
6822 * Setting the device configuration parameters.
6823 * Most of these parameters can be specified by the user during
6824 * module insertion as they are module loadable parameters. If
6825 * these parameters are not specified during load time, they
6826 * are initialized with default values.
6828 mac_control = &sp->mac_control;
6829 config = &sp->config;
6831 /* Tx side parameters. */
6832 config->tx_fifo_num = tx_fifo_num;
6833 for (i = 0; i < MAX_TX_FIFOS; i++) {
6834 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
6835 config->tx_cfg[i].fifo_priority = i;
6838 /* mapping the QoS priority to the configured fifos */
6839 for (i = 0; i < MAX_TX_FIFOS; i++)
6840 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
6842 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
6843 for (i = 0; i < config->tx_fifo_num; i++) {
6844 config->tx_cfg[i].f_no_snoop =
6845 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
/* short FIFOs get per-list Tx interrupts instead of utilization-based */
6846 if (config->tx_cfg[i].fifo_len < 65) {
6847 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
6851 /* + 2 because one Txd for skb->data and one Txd for UFO */
6852 config->max_txds = MAX_SKB_FRAGS + 2;
6854 /* Rx side parameters. */
6855 config->rx_ring_num = rx_ring_num;
6856 for (i = 0; i < MAX_RX_RINGS; i++) {
6857 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
6858 (rxd_count[sp->rxd_mode] + 1);
6859 config->rx_cfg[i].ring_priority = i;
6862 for (i = 0; i < rx_ring_num; i++) {
6863 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
6864 config->rx_cfg[i].f_no_snoop =
6865 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
6868 /* Setting Mac Control parameters */
6869 mac_control->rmac_pause_time = rmac_pause_time;
6870 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
6871 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
6874 /* Initialize Ring buffer parameters. */
6875 for (i = 0; i < config->rx_ring_num; i++)
6876 atomic_set(&sp->rx_bufs_left[i], 0);
6878 /* Initialize the number of ISRs currently running */
6879 atomic_set(&sp->isr_cnt, 0);
6881 /* initialize the shared memory used by the NIC and the host */
6882 if (init_shared_mem(sp)) {
6883 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
6886 goto mem_alloc_failed;
6889 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
6890 pci_resource_len(pdev, 0));
6892 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
6895 goto bar0_remap_failed;
6898 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
6899 pci_resource_len(pdev, 2));
6901 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
6904 goto bar1_remap_failed;
6907 dev->irq = pdev->irq;
6908 dev->base_addr = (unsigned long) sp->bar0;
6910 /* Initializing the BAR1 address as the start of the FIFO pointer. */
6911 for (j = 0; j < MAX_TX_FIFOS; j++) {
6912 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
6913 (sp->bar1 + (j * 0x00020000));
6916 /* Driver entry points */
6917 dev->open = &s2io_open;
6918 dev->stop = &s2io_close;
6919 dev->hard_start_xmit = &s2io_xmit;
6920 dev->get_stats = &s2io_get_stats;
6921 dev->set_multicast_list = &s2io_set_multicast;
6922 dev->do_ioctl = &s2io_ioctl;
6923 dev->change_mtu = &s2io_change_mtu;
6924 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
6925 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6926 dev->vlan_rx_register = s2io_vlan_rx_register;
6927 dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
6930 * will use eth_mac_addr() for dev->set_mac_address
6931 * mac address will be set every time dev->open() is called
6933 dev->poll = s2io_poll;
6936 #ifdef CONFIG_NET_POLL_CONTROLLER
6937 dev->poll_controller = s2io_netpoll;
6940 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
6941 if (sp->high_dma_flag == TRUE)
6942 dev->features |= NETIF_F_HIGHDMA;
6943 dev->features |= NETIF_F_TSO;
6944 dev->features |= NETIF_F_TSO6;
/* UFO is an Xframe II feature, gated by the "ufo" module parameter */
6945 if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
6946 dev->features |= NETIF_F_UFO;
6947 dev->features |= NETIF_F_HW_CSUM;
6950 dev->tx_timeout = &s2io_tx_watchdog;
6951 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
6952 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
6953 INIT_WORK(&sp->set_link_task, s2io_set_link);
6955 pci_save_state(sp->pdev);
6957 /* Setting swapper control on the NIC, for proper reset operation */
6958 if (s2io_set_swapper(sp)) {
6959 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
6962 goto set_swap_failed;
6965 /* Verify if the Herc works on the slot its placed into */
6966 if (sp->device_type & XFRAME_II_DEVICE) {
6967 mode = s2io_verify_pci_mode(sp);
6969 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
6970 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
6972 goto set_swap_failed;
6976 /* Not needed for Herc */
6977 if (sp->device_type & XFRAME_I_DEVICE) {
6979 * Fix for all "FFs" MAC address problems observed on
6982 fix_mac_address(sp);
6987 * MAC address initialization.
6988 * For now only one mac address will be read and used.
6991 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
6992 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
6993 writeq(val64, &bar0->rmac_addr_cmd_mem);
6994 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
6995 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
6996 tmp64 = readq(&bar0->rmac_addr_data0_mem);
6997 mac_down = (u32) tmp64;
6998 mac_up = (u32) (tmp64 >> 32);
/* NOTE(review): sizeof(ETH_ALEN) is sizeof(int) (typically 4), not the
 * 6 bytes of a MAC address — this should almost certainly be plain
 * ETH_ALEN.  Harmless in practice only because the bytes are all
 * overwritten below, but worth fixing upstream. */
7000 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
7002 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7003 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7004 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7005 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7006 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7007 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7009 /* Set the factory defined MAC address initially */
7010 dev->addr_len = ETH_ALEN;
7011 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7013 /* reset Nic and bring it to known state */
7017 * Initialize the tasklet status and link state flags
7018 * and the card state parameter
7020 atomic_set(&(sp->card_state), 0);
7021 sp->tasklet_status = 0;
7024 /* Initialize spinlocks */
7025 spin_lock_init(&sp->tx_lock);
7028 spin_lock_init(&sp->put_lock);
7029 spin_lock_init(&sp->rx_lock);
7032 * SXE-002: Configure link and activity LED to init state
7035 subid = sp->pdev->subsystem_device;
7036 if ((subid & 0xFF) >= 0x07) {
7037 val64 = readq(&bar0->gpio_control);
7038 val64 |= 0x0000800000000000ULL;
7039 writeq(val64, &bar0->gpio_control);
7040 val64 = 0x0411040400000000ULL;
7041 writeq(val64, (void __iomem *) bar0 + 0x2700);
7042 val64 = readq(&bar0->gpio_control);
7045 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
7047 if (register_netdev(dev)) {
7048 DBG_PRINT(ERR_DBG, "Device registration failed\n");
7050 goto register_failed;
/* boot-time banner: device, driver version, MAC, serial and mode summary */
7053 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n");
7054 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7055 sp->product_name, get_xena_rev_id(sp->pdev));
7056 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7057 s2io_driver_version);
7058 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
7059 "%02x:%02x:%02x:%02x:%02x:%02x", dev->name,
7060 sp->def_mac_addr[0].mac_addr[0],
7061 sp->def_mac_addr[0].mac_addr[1],
7062 sp->def_mac_addr[0].mac_addr[2],
7063 sp->def_mac_addr[0].mac_addr[3],
7064 sp->def_mac_addr[0].mac_addr[4],
7065 sp->def_mac_addr[0].mac_addr[5]);
7066 DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
7067 if (sp->device_type & XFRAME_II_DEVICE) {
7068 mode = s2io_print_pci_mode(sp);
7070 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7072 unregister_netdev(dev);
7073 goto set_swap_failed;
7076 switch(sp->rxd_mode) {
7078 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
7082 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
7086 DBG_PRINT(ERR_DBG, "%s: 3-Buffer receive mode enabled\n",
7092 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
7093 switch(sp->intr_type) {
7095 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
7098 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI\n", dev->name);
7101 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
7105 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
7108 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
7109 " enabled\n", dev->name);
7110 /* Initialize device name */
7111 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7113 /* Initialize bimodal Interrupts */
7114 sp->config.bimodal = bimodal;
7115 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
7116 sp->config.bimodal = 0;
7117 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
7122 * Make Link state as off at this point, when the Link change
7123 * interrupt comes the state will be automatically changed to
7126 netif_carrier_off(dev);
/* --- error unwind labels below: release in reverse order of setup --- */
7137 free_shared_mem(sp);
7138 pci_disable_device(pdev);
7139 if (dev_intr_type != MSI_X)
7140 pci_release_regions(pdev);
7142 release_mem_region(pci_resource_start(pdev, 0),
7143 pci_resource_len(pdev, 0));
7144 release_mem_region(pci_resource_start(pdev, 2),
7145 pci_resource_len(pdev, 2));
7147 pci_set_drvdata(pdev, NULL);
7154 * s2io_rem_nic - Free the PCI device
7155 * @pdev: structure containing the PCI related information of the device.
7156 * Description: This function is called by the Pci subsystem to release a
7157 * PCI device and free up all resource held up by the device. This could
7158 * be in response to a Hot plug event or when the driver is to be removed
/* PCI remove routine: unregister the netdev, free the shared memory, and
 * release BAR regions / config-space resources in reverse probe order. */
7162 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7164 struct net_device *dev =
7165 (struct net_device *) pci_get_drvdata(pdev);
7169 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
7174 unregister_netdev(dev);
7176 free_shared_mem(sp);
/* MSI-X probes claimed BAR0/BAR2 individually rather than via
 * pci_request_regions(), so release them the same way */
7179 if (sp->intr_type != MSI_X)
7180 pci_release_regions(pdev);
7182 release_mem_region(pci_resource_start(pdev, 0),
7183 pci_resource_len(pdev, 0));
7184 release_mem_region(pci_resource_start(pdev, 2),
7185 pci_resource_len(pdev, 2));
7187 pci_set_drvdata(pdev, NULL);
7189 pci_disable_device(pdev);
7193 * s2io_starter - Entry point for the driver
7194 * Description: This function is the entry point for the driver. It verifies
7195 * the module loadable parameters and initializes PCI configuration space.
7198 int __init s2io_starter(void)
7200 return pci_register_driver(&s2io_driver);
7204 * s2io_closer - Cleanup routine for the driver
 * Description: This function is the cleanup routine for the driver. It
 * unregisters the driver.
7208 static __exit void s2io_closer(void)
7210 pci_unregister_driver(&s2io_driver);
7211 DBG_PRINT(INIT_DBG, "cleanup done\n");
/* Module entry/exit registration: load runs s2io_starter(),
 * unload runs s2io_closer(). */
module_init(s2io_starter);
module_exit(s2io_closer);
/*
 * check_L2_lro_capable - L2-level screening of a received frame for LRO.
 * Verifies the frame is TCP (per the RxD control bits) and that the L2
 * encapsulation is plain DIX (optionally VLAN tagged), then points *ip
 * and *tcp at the L3/L4 headers inside @buffer.
 * NOTE(review): braces, the `switch (l2_type)` head, the `return`
 * statements and an `ihl`-words-to-bytes conversion (e.g. `ip_len <<= 2`)
 * appear to have been dropped from this listing.
 */
static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
			 struct tcphdr **tcp, RxD_t *rxdp)
	/* Bits 37..39 of Control_1 carry the hardware-detected L2 frame type */
	u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
	/* The NIC flags TCP frames; anything else is never merged */
	if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
		DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
	/*
	 * By default the VLAN field in the MAC is stripped by the card; if
	 * this feature is turned off in the rx_pa_cfg register, then the
	 * ip_off field has to be shifted by a further 2 bytes.
	 */
	case 0: /* DIX type */
	case 4: /* DIX type with VLAN */
		ip_off = HEADER_ETHERNET_II_802_3_SIZE;
	/* LLC, SNAP etc. are considered non-mergeable */
	/* The IP header starts right after the Ethernet header */
	*ip = (struct iphdr *)((u8 *)buffer + ip_off);
	/* ihl counts 32-bit words — TODO confirm the dropped conversion */
	ip_len = (u8)((*ip)->ihl);
	*tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
7252 static int check_for_socket_match(lro_t *lro, struct iphdr *ip,
7255 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7256 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7257 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7262 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7264 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
/*
 * initiate_new_session - populate a free LRO slot from the first
 * segment of a new flow: expected next TCP sequence number, the
 * peer's ack, the datagram's total length, and — when the segment
 * carries exactly the TCP timestamp option (doff == 8) — the current
 * tsval/tsecr words.
 * NOTE(review): lines caching l2h/iph/tcph, initializing sg_num and
 * frags_len, and setting the in_use flag appear to have been dropped
 * from this listing.
 */
static void initiate_new_session(lro_t *lro, u8 *l2h,
		struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
	/* Next segment we can merge must start at this seq + payload len */
	lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
	lro->tcp_ack = ntohl(tcp->ack_seq);
	lro->total_len = ntohs(ip->tot_len);
	/*
	 * Check if we saw a TCP timestamp.  Other consistency checks have
	 * already been done (option layout is validated in
	 * verify_l3_l4_lro_capable()).
	 */
	if (tcp->doff == 8) {
		/* Options begin immediately after the fixed TCP header */
		ptr = (u32 *)(tcp+1);
		/* Words 1 and 2 hold tsval/tsecr after the NOP/kind/len word */
		lro->cur_tsval = *(ptr+1);
		lro->cur_tsecr = *(ptr+2);
/*
 * update_L3L4_header - patch the aggregated super-packet's headers
 * just before the LRO session is flushed up the stack: new IP total
 * length plus recomputed header checksum, the latest TCP ack/window
 * seen, a refreshed tsecr, and aggregation statistics.
 * NOTE(review): the `nchk` declaration, the `ip->check` clear/store
 * around ip_fast_csum(), and the timestamp `if` guard appear to have
 * been dropped from this listing.
 */
static void update_L3L4_header(nic_t *sp, lro_t *lro)
	struct iphdr *ip = lro->iph;
	struct tcphdr *tcp = lro->tcph;
	StatInfo_t *statinfo = sp->mac_control.stats_info;
	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
	/* Update L3 header: total length now spans every merged payload */
	ip->tot_len = htons(lro->total_len);
	/* Recompute the IP header checksum over ihl 32-bit words */
	nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
	/* Update L4 header with the most recently observed ack and window */
	tcp->ack_seq = lro->tcp_ack;
	tcp->window = lro->window;
	/* Update the tsecr field if this session has timestamps enabled */
	u32 *ptr = (u32 *)(tcp + 1);
	*(ptr+2) = lro->cur_tsecr;
	/*
	 * Update the counters needed to compute the average number of
	 * packets aggregated per session.
	 */
	statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
	statinfo->sw_stat.num_aggregations++;
/*
 * aggregate_new_rx - fold one more in-order TCP segment into an
 * existing LRO session: grow the length counters, advance the
 * expected sequence number, and adopt the newest ack/window (and
 * timestamps) from this segment.
 * NOTE(review): the sg_num increment and the timestamp guard with its
 * `ptr` declaration appear to have been dropped from this listing.
 */
static void aggregate_new_rx(lro_t *lro, struct iphdr *ip,
		struct tcphdr *tcp, u32 l4_pyld)
	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
	lro->total_len += l4_pyld;	/* IP total length of the super-packet */
	lro->frags_len += l4_pyld;	/* payload bytes held in the frag list */
	lro->tcp_next_seq += l4_pyld;	/* next mergeable sequence number */
	/* Update ack seq no. and window advertisement (from this pkt) in LRO object */
	lro->tcp_ack = tcp->ack_seq;
	lro->window = tcp->window;
	/* Update tsecr and tsval from this packet */
	ptr = (u32 *) (tcp + 1);
	lro->cur_tsval = *(ptr + 1);
	lro->cur_tsecr = *(ptr + 2);
/*
 * verify_l3_l4_lro_capable - L3/L4 screening for aggregation.
 * Rejects pure acks/runt frames, IP options, ECN CE-marked packets,
 * any TCP control flag other than ACK, and any TCP option layout
 * other than a single timestamp option; additionally requires a
 * monotonically increasing tsval and a non-zero tsecr.  @l_lro may be
 * NULL when probing whether a new session is worth creating.
 * NOTE(review): the `ptr` declaration, braces, `ptr++` in the NOP
 * loop and the explicit return statements appear to have been dropped
 * from this listing.
 */
static int verify_l3_l4_lro_capable(lro_t *l_lro, struct iphdr *ip,
			struct tcphdr *tcp, u32 tcp_pyld_len)
	DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
	if (!tcp_pyld_len) {
		/* Runt frame or a pure ack */
	if (ip->ihl != 5) /* IP has options */
	/* If we see the CE codepoint in the IP header, the packet is not mergeable */
	if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
	/* If we see ECE or CWR flags in the TCP header, the packet is not mergeable */
	if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
	    tcp->ece || tcp->cwr || !tcp->ack) {
		/*
		 * Currently only the ack control word is recognized; any
		 * other control field being set results in flushing the
		 * LRO session.
		 */
	/*
	 * Allow only one TCP timestamp option. Don't aggregate if
	 * any other options are detected.
	 */
	if (tcp->doff != 5 && tcp->doff != 8)
	if (tcp->doff == 8) {
		ptr = (u8 *)(tcp + 1);
		/* Step over leading NOP padding to reach the option kind */
		while (*ptr == TCPOPT_NOP)
		if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
		/* Ensure the timestamp value increases monotonically */
		if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
		/* The timestamp echo reply should be non-zero */
		if (*((u32 *)(ptr+6)) == 0)
/*
 * s2io_club_tcp_session - central LRO dispatch for one received frame.
 * Locates (or creates) the LRO session matching the frame's TCP flow
 * and decides its fate.  Visible return-code assignments:
 *   1 - aggregate into the matched session
 *   2 - out-of-order or unmergeable: flush the session (and packet)
 *   3 - a new session was begun for this flow
 *   4 - aggregated and the session hit lro_max_aggr_per_sess: flush
 *   0 - all sessions in use; packet goes up un-aggregated
 * NOTE(review): this listing dropped the return-type line, braces,
 * `*lro = l_lro;` assignments, `break`/`return` statements and the
 * `switch (ret)` head; only the visible statements are annotated.
 */
s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, lro_t **lro,
		RxD_t *rxdp, nic_t *sp)
	struct tcphdr *tcph;
	/* First gate: L2 screening also locates the IP and TCP headers */
	if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
		DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
			ip->saddr, ip->daddr);
	tcph = (struct tcphdr *)*tcp;
	*tcp_len = get_l4_pyld_length(ip, tcph);
	/* Look for an in-use session matching this flow's 4-tuple */
	for (i=0; i<MAX_LRO_SESSIONS; i++) {
		lro_t *l_lro = &sp->lro0_n[i];
		if (l_lro->in_use) {
			if (check_for_socket_match(l_lro, ip, tcph))
			/* Sock pair matched */
			/* Out-of-order segment: never merged, session flushed */
			if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
				DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
					"0x%x, actual 0x%x\n", __FUNCTION__,
					(*lro)->tcp_next_seq,
				sp->mac_control.stats_info->
					sw_stat.outof_sequence_pkts++;
			if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
				ret = 1; /* Aggregate */
				ret = 2; /* Flush both */
	/*
	 * Before searching for an available LRO object, check that the
	 * packet is L3/L4 aggregatable.  If not, don't create a new LRO
	 * session — just send this packet up.
	 */
	if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
	/* Claim the first free session slot for a new flow */
	for (i=0; i<MAX_LRO_SESSIONS; i++) {
		lro_t *l_lro = &sp->lro0_n[i];
		if (!(l_lro->in_use)) {
			ret = 3; /* Begin anew */
	if (ret == 0) { /* sessions exceeded */
		DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
	/* Act on the verdict (presumably a switch on ret — head dropped) */
	initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
	update_L3L4_header(sp, *lro);
	aggregate_new_rx(*lro, ip, tcph, *tcp_len);
	/* Session reached the per-session aggregation cap: force a flush */
	if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
		update_L3L4_header(sp, *lro);
		ret = 4; /* Flush the LRO */
	DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
7499 static void clear_lro_session(lro_t *lro)
7501 static u16 lro_struct_size = sizeof(lro_t);
7503 memset(lro, 0, lro_struct_size);
/*
 * queue_rx_frame - hand a completed (possibly LRO-aggregated) skb to
 * the network stack.
 * NOTE(review): braces were dropped from this listing; the original
 * may also have carried a non-NAPI netif_rx() path next to
 * netif_receive_skb() — confirm against the full source.
 */
static void queue_rx_frame(struct sk_buff *skb)
	/* The Rx path set skb->dev before queuing the frame here */
	struct net_device *dev = skb->dev;
	/* Derive skb->protocol (eth_type_trans also steps past the L2 header) */
	skb->protocol = eth_type_trans(skb, dev);
	netif_receive_skb(skb);
7517 static void lro_append_pkt(nic_t *sp, lro_t *lro, struct sk_buff *skb,
7520 struct sk_buff *first = lro->parent;
7522 first->len += tcp_len;
7523 first->data_len = lro->frags_len;
7524 skb_pull(skb, (skb->len - tcp_len));
7525 if (skb_shinfo(first)->frag_list)
7526 lro->last_frag->next = skb;
7528 skb_shinfo(first)->frag_list = skb;
7529 first->truesize += skb->truesize;
7530 lro->last_frag = skb;
7531 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;