1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for Neterion 10GbE Server NIC
3 * Copyright(c) 2002-2005 Neterion Inc.
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
30 * rx_ring_num : This can be used to program the number of receive rings used
32 * rx_ring_sz: This defines the number of receive blocks each ring can have.
33 * This is also an array of size 8.
34 * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
35 * values are 1, 2 and 3.
36 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
37 * tx_fifo_len: This too is an array of 8. Each element defines the number of
38 * Tx descriptors that can be associated with each corresponding FIFO.
39 * intr_type: This defines the type of interrupt. The values can be 0(INTA),
40 * 1(MSI), 2(MSI_X). Default value is '0(INTA)'
41 * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
42 * Possible values '1' for enable '0' for disable. Default is '0'
43 * lro_max_pkts: This parameter defines maximum number of packets can be
44 * aggregated as a single large packet
45 * napi: This parameter used to enable/disable NAPI (polling Rx)
46 * Possible values '1' for enable and '0' for disable. Default is '1'
47 * ufo: This parameter used to enable/disable UDP Fragmentation Offload(UFO)
48 * Possible values '1' for enable and '0' for disable. Default is '0'
49 * vlan_tag_strip: This can be used to enable or disable vlan stripping.
50 * Possible values '1' for enable , '0' for disable.
51 * Default is '2' - which means disable in promisc mode
52 * and enable in non-promiscuous mode.
53 ************************************************************************/
55 #include <linux/module.h>
56 #include <linux/types.h>
57 #include <linux/errno.h>
58 #include <linux/ioport.h>
59 #include <linux/pci.h>
60 #include <linux/dma-mapping.h>
61 #include <linux/kernel.h>
62 #include <linux/netdevice.h>
63 #include <linux/etherdevice.h>
64 #include <linux/skbuff.h>
65 #include <linux/init.h>
66 #include <linux/delay.h>
67 #include <linux/stddef.h>
68 #include <linux/ioctl.h>
69 #include <linux/timex.h>
70 #include <linux/ethtool.h>
71 #include <linux/workqueue.h>
72 #include <linux/if_vlan.h>
74 #include <linux/tcp.h>
77 #include <asm/system.h>
78 #include <asm/uaccess.h>
80 #include <asm/div64.h>
85 #include "s2io-regs.h"
87 #define DRV_VERSION "2.0.16.1"
89 /* S2io Driver name & version. */
90 static char s2io_driver_name[] = "Neterion";
91 static char s2io_driver_version[] = DRV_VERSION;
/* Per-rxd-mode lookup tables, indexed by nic->rxd_mode: descriptor size in
 * bytes, and how many RxDs fit in one receive block.  Both are used by
 * init_shared_mem()/free_shared_mem() when carving up Rx block memory.
 */
93 static int rxd_size[4] = {32,48,48,64};
94 static int rxd_count[4] = {127,85,85,63};
/* True when the descriptor has been handed back to the host: the adapter's
 * ownership bit (RXD_OWN_XENA) is clear and the marker field in Control_2 no
 * longer holds THE_RXD_MARK (presumably overwritten by the NIC on completion
 * -- confirm against the hardware docs).
 */
96 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
100 	ret = ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
101 		(GET_RXD_MARKER(rxdp->Control_2) != THE_RXD_MARK));
107  * Cards with following subsystem_id have a link state indication
108  * problem, 600B, 600C, 600D, 640B, 640C and 640D.
109  * macro below identifies these cards given the subsystem_id.
111 #define CARDS_WITH_FAULTY_LINK_INDICATORS(dev_type, subid) \
112 (dev_type == XFRAME_I_DEVICE) ? \
113 ((((subid >= 0x600B) && (subid <= 0x600D)) || \
114 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0) : 0
/* Link is considered up when neither the remote-fault nor the local-fault
 * bit is set in the adapter-status value passed in.
 */
116 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
117 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* Atomically claims the tasklet; evaluates non-zero if it was already in use.
 * NOTE(review): relies on a variable named 'sp' being in scope at every
 * expansion site.
 */
118 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/* Classifies how depleted Rx ring 'ring' is, given 'rxb_size' remaining
 * buffers: critically low when at or below one block's worth of RxDs, low
 * when more than 16 short of the ring's pkt_cnt.  NOTE(review): the level
 * constants returned are not visible in this excerpt -- confirm.
 */
121 static inline int rx_buffer_level(struct s2io_nic * sp, int rxb_size, int ring)
123 	struct mac_info *mac_control;
125 	mac_control = &sp->mac_control;
126 	if (rxb_size <= rxd_count[sp->rxd_mode])
128 	else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
133 /* Ethtool related variables and Macros. */
/* Names of the ethtool self-tests; the "(offline)"/"(online)" tags indicate
 * when each test may be run.
 */
134 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
135 	"Register test\t(offline)",
136 	"Eeprom test\t(offline)",
137 	"Link test\t(online)",
138 	"RLDRAM test\t(offline)",
139 	"BIST Test\t(offline)"
/* Key strings reported via ethtool -S.  Hardware MAC/PCI counters come
 * first; driver-maintained statistics start at the "\n DRIVER STATISTICS"
 * separator entry below.
 */
142 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
144 	{"tmac_data_octets"},
148 	{"tmac_pause_ctrl_frms"},
152 	{"tmac_any_err_frms"},
153 	{"tmac_ttl_less_fb_octets"},
154 	{"tmac_vld_ip_octets"},
162 	{"rmac_data_octets"},
163 	{"rmac_fcs_err_frms"},
165 	{"rmac_vld_mcst_frms"},
166 	{"rmac_vld_bcst_frms"},
167 	{"rmac_in_rng_len_err_frms"},
168 	{"rmac_out_rng_len_err_frms"},
170 	{"rmac_pause_ctrl_frms"},
171 	{"rmac_unsup_ctrl_frms"},
173 	{"rmac_accepted_ucst_frms"},
174 	{"rmac_accepted_nucst_frms"},
175 	{"rmac_discarded_frms"},
176 	{"rmac_drop_events"},
177 	{"rmac_ttl_less_fb_octets"},
179 	{"rmac_usized_frms"},
180 	{"rmac_osized_frms"},
182 	{"rmac_jabber_frms"},
183 	{"rmac_ttl_64_frms"},
184 	{"rmac_ttl_65_127_frms"},
185 	{"rmac_ttl_128_255_frms"},
186 	{"rmac_ttl_256_511_frms"},
187 	{"rmac_ttl_512_1023_frms"},
188 	{"rmac_ttl_1024_1518_frms"},
196 	{"rmac_err_drp_udp"},
197 	{"rmac_xgmii_err_sym"},
215 	{"rmac_xgmii_data_err_cnt"},
216 	{"rmac_xgmii_ctrl_err_cnt"},
217 	{"rmac_accepted_ip"},
221 	{"new_rd_req_rtry_cnt"},
223 	{"wr_rtry_rd_ack_cnt"},
226 	{"new_wr_req_rtry_cnt"},
229 	{"rd_rtry_wr_ack_cnt"},
237 	{"rmac_ttl_1519_4095_frms"},
238 	{"rmac_ttl_4096_8191_frms"},
239 	{"rmac_ttl_8192_max_frms"},
240 	{"rmac_ttl_gt_max_frms"},
241 	{"rmac_osized_alt_frms"},
242 	{"rmac_jabber_alt_frms"},
243 	{"rmac_gt_max_alt_frms"},
245 	{"rmac_len_discard"},
246 	{"rmac_fcs_discard"},
249 	{"rmac_red_discard"},
250 	{"rmac_rts_discard"},
251 	{"rmac_ingm_full_discard"},
253 	{"\n DRIVER STATISTICS"},
254 	{"single_bit_ecc_errs"},
255 	{"double_bit_ecc_errs"},
261 	("alarm_transceiver_temp_high"),
262 	("alarm_transceiver_temp_low"),
263 	("alarm_laser_bias_current_high"),
264 	("alarm_laser_bias_current_low"),
265 	("alarm_laser_output_power_high"),
266 	("alarm_laser_output_power_low"),
267 	("warn_transceiver_temp_high"),
268 	("warn_transceiver_temp_low"),
269 	("warn_laser_bias_current_high"),
270 	("warn_laser_bias_current_low"),
271 	("warn_laser_output_power_high"),
272 	("warn_laser_output_power_low"),
273 	("lro_aggregated_pkts"),
274 	("lro_flush_both_count"),
275 	("lro_out_of_sequence_pkts"),
276 	("lro_flush_due_to_max_pkts"),
277 	("lro_avg_aggr_pkts"),
/* Counts derived from the string tables above.  NOTE(review): these
 * expansions are unparenthesized, so avoid using them inside larger
 * mixed-precedence arithmetic expressions.
 */
280 #define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
281 #define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN
283 #define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
284 #define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
/* Initializes 'timer' with handler 'handle'/argument 'arg' and arms it to
 * fire 'exp' jiffies from now.  NOTE(review): multi-statement macro is NOT
 * wrapped in do { ... } while (0) -- it is unsafe after an unbraced
 * if/else; every expansion site must use braces.
 */
286 #define S2IO_TIMER_CONF(timer, handle, arg, exp) \
287 init_timer(&timer); \
288 timer.function = handle; \
289 timer.data = (unsigned long) arg; \
290 mod_timer(&timer, (jiffies + exp)) \
/* vlan_rx_register hook: records the new VLAN group for the device under
 * tx_lock.  NOTE(review): the actual 'nic->vlgrp = grp' assignment falls in
 * lines not visible in this excerpt -- confirm.
 */
293 static void s2io_vlan_rx_register(struct net_device *dev,
294 					struct vlan_group *grp)
296 	struct s2io_nic *nic = dev->priv;
299 	spin_lock_irqsave(&nic->tx_lock, flags);
301 	spin_unlock_irqrestore(&nic->tx_lock, flags);
304 /* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */
307 /* Unregister the vlan */
/* vlan_rx_kill_vid hook: clears the per-vid group entry under tx_lock.
 * NOTE(review): the indentation of the clearing statement suggests it sits
 * under a guard (likely 'if (nic->vlgrp)') on a line not visible here.
 */
308 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid)
310 	struct s2io_nic *nic = dev->priv;
313 	spin_lock_irqsave(&nic->tx_lock, flags);
315 		nic->vlgrp->vlan_devices[vid] = NULL;
316 	spin_unlock_irqrestore(&nic->tx_lock, flags);
320  * Constants to be programmed into the Xena's registers, to configure
/* Register-value tables written to bar0->dtx_control by init_nic(); each
 * table is scanned until an END_SIGN terminator (the terminator entries fall
 * outside this excerpt).
 */
325 static const u64 herc_act_dtx_cfg[] = {
327 	0x8000051536750000ULL, 0x80000515367500E0ULL,
329 	0x8000051536750004ULL, 0x80000515367500E4ULL,
331 	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
333 	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
335 	0x801205150D440000ULL, 0x801205150D4400E0ULL,
337 	0x801205150D440004ULL, 0x801205150D4400E4ULL,
339 	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
341 	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
/* Equivalent DTX configuration table for Xena (XFRAME I) devices. */
346 static const u64 xena_dtx_cfg[] = {
348 	0x8000051500000000ULL, 0x80000515000000E0ULL,
350 	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
352 	0x8001051500000000ULL, 0x80010515000000E0ULL,
354 	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
356 	0x8002051500000000ULL, 0x80020515000000E0ULL,
358 	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
363  * Constants for Fixing the MacAddress problem seen mostly on
/* Workaround register-value sequence for the MAC-address problem described
 * above.  NOTE(review): the routine that replays this table is not visible
 * in this excerpt.
 */
366 static const u64 fix_mac[] = {
367 	0x0060000000000000ULL, 0x0060600000000000ULL,
368 	0x0040600000000000ULL, 0x0000600000000000ULL,
369 	0x0020600000000000ULL, 0x0060600000000000ULL,
370 	0x0020600000000000ULL, 0x0060600000000000ULL,
371 	0x0020600000000000ULL, 0x0060600000000000ULL,
372 	0x0020600000000000ULL, 0x0060600000000000ULL,
373 	0x0020600000000000ULL, 0x0060600000000000ULL,
374 	0x0020600000000000ULL, 0x0060600000000000ULL,
375 	0x0020600000000000ULL, 0x0060600000000000ULL,
376 	0x0020600000000000ULL, 0x0060600000000000ULL,
377 	0x0020600000000000ULL, 0x0060600000000000ULL,
378 	0x0020600000000000ULL, 0x0060600000000000ULL,
379 	0x0020600000000000ULL, 0x0000600000000000ULL,
380 	0x0040600000000000ULL, 0x0060600000000000ULL,
384 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
385 MODULE_LICENSE("GPL");
386 MODULE_VERSION(DRV_VERSION);
389 /* Module Loadable parameters. */
390 S2IO_PARM_INT(tx_fifo_num, 1);
391 S2IO_PARM_INT(rx_ring_num, 1);
394 S2IO_PARM_INT(rx_ring_mode, 1);
395 S2IO_PARM_INT(use_continuous_tx_intrs, 1);
396 S2IO_PARM_INT(rmac_pause_time, 0x100);
397 S2IO_PARM_INT(mc_pause_threshold_q0q3, 187);
398 S2IO_PARM_INT(mc_pause_threshold_q4q7, 187);
399 S2IO_PARM_INT(shared_splits, 0);
400 S2IO_PARM_INT(tmac_util_period, 5);
401 S2IO_PARM_INT(rmac_util_period, 5);
402 S2IO_PARM_INT(bimodal, 0);
403 S2IO_PARM_INT(l3l4hdr_size, 128);
404 /* Frequency of Rx desc syncs expressed as power of 2 */
405 S2IO_PARM_INT(rxsync_frequency, 3);
406 /* Interrupt type. Values can be 0(INTA), 1(MSI), 2(MSI_X) */
407 S2IO_PARM_INT(intr_type, 0);
408 /* Large receive offload feature */
409 S2IO_PARM_INT(lro, 0);
410 /* Max pkts to be aggregated by LRO at one time. If not specified,
411 * aggregation happens until we hit max IP pkt size(64K)
413 S2IO_PARM_INT(lro_max_pkts, 0xFFFF);
414 S2IO_PARM_INT(indicate_max_pkts, 0);
416 S2IO_PARM_INT(napi, 1);
417 S2IO_PARM_INT(ufo, 0);
418 S2IO_PARM_INT(vlan_tag_strip, NO_STRIP_IN_PROMISC);
/* Per-FIFO/per-ring array parameters (GNU range designated initializers).
 * Exported below with permission 0, i.e. settable at load time but not
 * visible in sysfs.
 */
420 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
421     {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
422 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
423     {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
424 static unsigned int rts_frm_len[MAX_RX_RINGS] =
425     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
427 module_param_array(tx_fifo_len, uint, NULL, 0);
428 module_param_array(rx_ring_sz, uint, NULL, 0);
429 module_param_array(rts_frm_len, uint, NULL, 0);
433  * This table lists all the devices that this driver supports.
435 static struct pci_device_id s2io_tbl[] __devinitdata = {
436 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
437 	 PCI_ANY_ID, PCI_ANY_ID},
438 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
439 	 PCI_ANY_ID, PCI_ANY_ID},
440 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_WIN,
441 	 PCI_ANY_ID, PCI_ANY_ID},
442 	{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_HERC_UNI,
443 	 PCI_ANY_ID, PCI_ANY_ID},
447 MODULE_DEVICE_TABLE(pci, s2io_tbl);
/* PCI driver glue; s2io_init_nic/s2io_rem_nic are defined elsewhere in the
 * driver (not visible in this excerpt).
 */
449 static struct pci_driver s2io_driver = {
451       .id_table = s2io_tbl,
452       .probe = s2io_init_nic,
453       .remove = __devexit_p(s2io_rem_nic),
/* A simplifier macro used both by init and free shared_mem Fns().
 * Number of PAGE_SIZE-sized pages needed to hold 'len' items at 'per_each'
 * items per page, i.e. ceil(len / per_each).  Arguments are fully
 * parenthesized so that expression arguments (e.g. 'a + b') expand
 * correctly; the original unparenthesized form mis-evaluated them.
 */
#define TXD_MEM_PAGE_CNT(len, per_each) (((len) + (per_each) - 1) / (per_each))
460  * init_shared_mem - Allocation and Initialization of Memory
461  * @nic: Device private variable.
462  * Description: The function allocates all the memory areas shared
463  * between the NIC and the driver. This includes Tx descriptors,
464  * Rx descriptors and the statistics block.
467 static int init_shared_mem(struct s2io_nic *nic)
470 	void *tmp_v_addr, *tmp_v_addr_next;
471 	dma_addr_t tmp_p_addr, tmp_p_addr_next;
472 	struct RxD_block *pre_rxd_blk = NULL;
474 	int lst_size, lst_per_page;
475 	struct net_device *dev = nic->dev;
479 	struct mac_info *mac_control;
480 	struct config_param *config;
482 	mac_control = &nic->mac_control;
483 	config = &nic->config;
486 	/* Allocation and initialization of TXDLs in FIFOs */
488 	for (i = 0; i < config->tx_fifo_num; i++) {
489 		size += config->tx_cfg[i].fifo_len;
491 	if (size > MAX_AVAILABLE_TXDS) {
492 		DBG_PRINT(ERR_DBG, "s2io: Requested TxDs too high, ");
493 		DBG_PRINT(ERR_DBG, "Requested: %d, max supported: 8192\n", size);
497 	lst_size = (sizeof(struct TxD) * config->max_txds);
498 	lst_per_page = PAGE_SIZE / lst_size;
500 	for (i = 0; i < config->tx_fifo_num; i++) {
501 		int fifo_len = config->tx_cfg[i].fifo_len;
502 		int list_holder_size = fifo_len * sizeof(struct list_info_hold);
503 		mac_control->fifos[i].list_info = kmalloc(list_holder_size,
505 		if (!mac_control->fifos[i].list_info) {
507 			  "Malloc failed for list_info\n");
510 		memset(mac_control->fifos[i].list_info, 0, list_holder_size);
/* Per-FIFO bookkeeping and page-by-page DMA allocation of the TxD lists. */
512 	for (i = 0; i < config->tx_fifo_num; i++) {
513 		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
515 		mac_control->fifos[i].tx_curr_put_info.offset = 0;
516 		mac_control->fifos[i].tx_curr_put_info.fifo_len =
517 		    config->tx_cfg[i].fifo_len - 1;
518 		mac_control->fifos[i].tx_curr_get_info.offset = 0;
519 		mac_control->fifos[i].tx_curr_get_info.fifo_len =
520 		    config->tx_cfg[i].fifo_len - 1;
521 		mac_control->fifos[i].fifo_no = i;
522 		mac_control->fifos[i].nic = nic;
523 		mac_control->fifos[i].max_txds = MAX_SKB_FRAGS + 2;
525 		for (j = 0; j < page_num; j++) {
529 			tmp_v = pci_alloc_consistent(nic->pdev,
533 					  "pci_alloc_consistent ");
534 				DBG_PRINT(ERR_DBG, "failed for TxDL\n");
537 			/* If we got a zero DMA address(can happen on
538 			 * certain platforms like PPC), reallocate.
539 			 * Store virtual address of page we don't want,
543 				mac_control->zerodma_virt_addr = tmp_v;
545 				"%s: Zero DMA address for TxDL. ", dev->name);
547 				"Virtual address %p\n", tmp_v);
548 				tmp_v = pci_alloc_consistent(nic->pdev,
552 						  "pci_alloc_consistent ");
553 					DBG_PRINT(ERR_DBG, "failed for TxDL\n");
557 			while (k < lst_per_page) {
558 				int l = (j * lst_per_page) + k;
559 				if (l == config->tx_cfg[i].fifo_len)
561 				mac_control->fifos[i].list_info[l].list_virt_addr =
562 				    tmp_v + (k * lst_size);
563 				mac_control->fifos[i].list_info[l].list_phy_addr =
564 				    tmp_p + (k * lst_size);
/* 'size' still holds the total TxD count summed above: one u64 of UFO
 * in-band data per TxD.
 */
570 	nic->ufo_in_band_v = kcalloc(size, sizeof(u64), GFP_KERNEL);
571 	if (!nic->ufo_in_band_v)
574 	/* Allocation and initialization of RXDs in Rings */
576 	for (i = 0; i < config->rx_ring_num; i++) {
577 		if (config->rx_cfg[i].num_rxd %
578 		    (rxd_count[nic->rxd_mode] + 1)) {
579 			DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
580 			DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
582 			DBG_PRINT(ERR_DBG, "RxDs per Block");
585 		size += config->rx_cfg[i].num_rxd;
586 		mac_control->rings[i].block_count =
587 			config->rx_cfg[i].num_rxd /
588 			(rxd_count[nic->rxd_mode] + 1 );
589 		mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd -
590 			mac_control->rings[i].block_count;
/* Convert the accumulated RxD count to a byte size for the chosen
 * descriptor format.
 */
592 	if (nic->rxd_mode == RXD_MODE_1)
593 		size = (size * (sizeof(struct RxD1)));
595 		size = (size * (sizeof(struct RxD3)));
597 	for (i = 0; i < config->rx_ring_num; i++) {
598 		mac_control->rings[i].rx_curr_get_info.block_index = 0;
599 		mac_control->rings[i].rx_curr_get_info.offset = 0;
600 		mac_control->rings[i].rx_curr_get_info.ring_len =
601 		    config->rx_cfg[i].num_rxd - 1;
602 		mac_control->rings[i].rx_curr_put_info.block_index = 0;
603 		mac_control->rings[i].rx_curr_put_info.offset = 0;
604 		mac_control->rings[i].rx_curr_put_info.ring_len =
605 		    config->rx_cfg[i].num_rxd - 1;
606 		mac_control->rings[i].nic = nic;
607 		mac_control->rings[i].ring_no = i;
609 		blk_cnt = config->rx_cfg[i].num_rxd /
610 			(rxd_count[nic->rxd_mode] + 1);
611 		/* Allocating all the Rx blocks */
612 		for (j = 0; j < blk_cnt; j++) {
613 			struct rx_block_info *rx_blocks;
616 			rx_blocks = &mac_control->rings[i].rx_blocks[j];
617 			size = SIZE_OF_BLOCK; //size is always page size
618 			tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
620 			if (tmp_v_addr == NULL) {
622 				 * In case of failure, free_shared_mem()
623 				 * is called, which should free any
624 				 * memory that was alloced till the
627 				rx_blocks->block_virt_addr = tmp_v_addr;
630 			memset(tmp_v_addr, 0, size);
631 			rx_blocks->block_virt_addr = tmp_v_addr;
632 			rx_blocks->block_dma_addr = tmp_p_addr;
633 			rx_blocks->rxds = kmalloc(sizeof(struct rxd_info)*
634 						  rxd_count[nic->rxd_mode],
636 			if (!rx_blocks->rxds)
638 			for (l=0; l<rxd_count[nic->rxd_mode];l++) {
639 				rx_blocks->rxds[l].virt_addr =
640 					rx_blocks->block_virt_addr +
641 					(rxd_size[nic->rxd_mode] * l);
642 				rx_blocks->rxds[l].dma_addr =
643 					rx_blocks->block_dma_addr +
644 					(rxd_size[nic->rxd_mode] * l);
647 		/* Interlinking all Rx Blocks */
648 		for (j = 0; j < blk_cnt; j++) {
650 				mac_control->rings[i].rx_blocks[j].block_virt_addr;
652 				mac_control->rings[i].rx_blocks[(j + 1) %
653 					      blk_cnt].block_virt_addr;
655 				mac_control->rings[i].rx_blocks[j].block_dma_addr;
657 				mac_control->rings[i].rx_blocks[(j + 1) %
658 					      blk_cnt].block_dma_addr;
/* Chain each block to the next (circularly, via the modulo above) so the
 * NIC can walk the ring of Rx blocks.
 */
660 			pre_rxd_blk = (struct RxD_block *) tmp_v_addr;
661 			pre_rxd_blk->reserved_2_pNext_RxD_block =
662 			    (unsigned long) tmp_v_addr_next;
663 			pre_rxd_blk->pNext_RxD_Blk_physical =
664 			    (u64) tmp_p_addr_next;
667 	if (nic->rxd_mode >= RXD_MODE_3A) {
669 		 * Allocation of Storages for buffer addresses in 2BUFF mode
670 		 * and the buffers as well.
672 		for (i = 0; i < config->rx_ring_num; i++) {
673 			blk_cnt = config->rx_cfg[i].num_rxd /
674 			   (rxd_count[nic->rxd_mode]+ 1);
675 			mac_control->rings[i].ba =
676 				kmalloc((sizeof(struct buffAdd *) * blk_cnt),
678 			if (!mac_control->rings[i].ba)
680 			for (j = 0; j < blk_cnt; j++) {
682 				mac_control->rings[i].ba[j] =
683 					kmalloc((sizeof(struct buffAdd) *
684 						(rxd_count[nic->rxd_mode] + 1)),
686 				if (!mac_control->rings[i].ba[j])
688 				while (k != rxd_count[nic->rxd_mode]) {
689 					ba = &mac_control->rings[i].ba[j][k];
/* Over-allocate by ALIGN_SIZE, then round the usable pointer down to an
 * ALIGN_SIZE boundary; the original pointer is kept for kfree().
 */
691 					ba->ba_0_org = (void *) kmalloc
692 					    (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
695 					tmp = (unsigned long)ba->ba_0_org;
697 					tmp &= ~((unsigned long) ALIGN_SIZE);
698 					ba->ba_0 = (void *) tmp;
700 					ba->ba_1_org = (void *) kmalloc
701 					    (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
704 					tmp = (unsigned long) ba->ba_1_org;
706 					tmp &= ~((unsigned long) ALIGN_SIZE);
707 					ba->ba_1 = (void *) tmp;
714 	/* Allocation and initialization of Statistics block */
715 	size = sizeof(struct stat_block);
716 	mac_control->stats_mem = pci_alloc_consistent
717 	    (nic->pdev, size, &mac_control->stats_mem_phy);
719 	if (!mac_control->stats_mem) {
721 		 * In case of failure, free_shared_mem() is called, which
722 		 * should free any memory that was alloced till the
727 	mac_control->stats_mem_sz = size;
729 	tmp_v_addr = mac_control->stats_mem;
730 	mac_control->stats_info = (struct stat_block *) tmp_v_addr;
731 	memset(tmp_v_addr, 0, size);
732 	DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
733 		  (unsigned long long) tmp_p_addr);
739  * free_shared_mem - Free the allocated Memory
740  * @nic: Device private variable.
741  * Description: This function is to free all memory locations allocated by
742  * the init_shared_mem() function and return it to the kernel.
745 static void free_shared_mem(struct s2io_nic *nic)
747 	int i, j, blk_cnt, size;
749 	dma_addr_t tmp_p_addr;
750 	struct mac_info *mac_control;
751 	struct config_param *config;
752 	int lst_size, lst_per_page;
753 	struct net_device *dev = nic->dev;
758 	mac_control = &nic->mac_control;
759 	config = &nic->config;
761 	lst_size = (sizeof(struct TxD) * config->max_txds);
762 	lst_per_page = PAGE_SIZE / lst_size;
/* Release each FIFO's TxDL DMA pages; tolerates partially-initialized
 * state so it is safe to call on the init_shared_mem() failure paths.
 */
764 	for (i = 0; i < config->tx_fifo_num; i++) {
765 		int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
767 		for (j = 0; j < page_num; j++) {
768 			int mem_blks = (j * lst_per_page);
769 			if (!mac_control->fifos[i].list_info)
771 			if (!mac_control->fifos[i].list_info[mem_blks].
774 			pci_free_consistent(nic->pdev, PAGE_SIZE,
775 					    mac_control->fifos[i].
778 					    mac_control->fifos[i].
782 		/* If we got a zero DMA address during allocation,
785 		if (mac_control->zerodma_virt_addr) {
786 			pci_free_consistent(nic->pdev, PAGE_SIZE,
787 					    mac_control->zerodma_virt_addr,
790 				"%s: Freeing TxDL with zero DMA addr. ",
792 			DBG_PRINT(INIT_DBG, "Virtual address %p\n",
793 				mac_control->zerodma_virt_addr);
795 		kfree(mac_control->fifos[i].list_info);
798 	size = SIZE_OF_BLOCK;
/* Free the per-ring Rx blocks and their rxd bookkeeping arrays. */
799 	for (i = 0; i < config->rx_ring_num; i++) {
800 		blk_cnt = mac_control->rings[i].block_count;
801 		for (j = 0; j < blk_cnt; j++) {
802 			tmp_v_addr = mac_control->rings[i].rx_blocks[j].
804 			tmp_p_addr = mac_control->rings[i].rx_blocks[j].
806 			if (tmp_v_addr == NULL)
808 			pci_free_consistent(nic->pdev, size,
809 					    tmp_v_addr, tmp_p_addr);
810 			kfree(mac_control->rings[i].rx_blocks[j].rxds);
814 	if (nic->rxd_mode >= RXD_MODE_3A) {
815 		/* Freeing buffer storage addresses in 2BUFF mode. */
816 		for (i = 0; i < config->rx_ring_num; i++) {
817 			blk_cnt = config->rx_cfg[i].num_rxd /
818 			    (rxd_count[nic->rxd_mode] + 1);
819 			for (j = 0; j < blk_cnt; j++) {
821 				if (!mac_control->rings[i].ba[j])
823 				while (k != rxd_count[nic->rxd_mode]) {
825 						&mac_control->rings[i].ba[j][k];
830 				kfree(mac_control->rings[i].ba[j]);
832 			kfree(mac_control->rings[i].ba);
836 	if (mac_control->stats_mem) {
837 		pci_free_consistent(nic->pdev,
838 				    mac_control->stats_mem_sz,
839 				    mac_control->stats_mem,
840 				    mac_control->stats_mem_phy);
/* NOTE(review): kfree(NULL) is a no-op, so this guard is redundant. */
842 	if (nic->ufo_in_band_v)
843 		kfree(nic->ufo_in_band_v);
847  * s2io_verify_pci_mode - Checks that the adapter reports a recognizable PCI mode.
850 static int s2io_verify_pci_mode(struct s2io_nic *nic)
852 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
853 	register u64 val64 = 0;
/* Read the adapter's PCI mode register; -1 signals an unknown mode.
 * NOTE(review): the success return path is outside this excerpt --
 * presumably it returns the decoded mode.
 */
856 	val64 = readq(&bar0->pci_mode);
857 	mode = (u8)GET_PCI_MODE(val64);
859 	if ( val64 & PCI_MODE_UNKNOWN_MODE)
860 		return -1;	/* Unknown PCI mode */
864 #define NEC_VENID 0x1033
865 #define NEC_DEVID 0x0125
/* Returns whether the adapter sits directly behind a NEC bridge
 * (vendor 0x1033, device 0x0125): scans all PCI devices and compares the
 * candidate bridge's bus against our parent bus.
 */
866 static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
868 	struct pci_dev *tdev = NULL;
869 	while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
870 		if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) {
871 			if (tdev->bus == s2io_pdev->bus->parent)
/* Bus speed in MHz, indexed by the GET_PCI_MODE() value (see
 * s2io_print_pci_mode()).
 */
879 static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
881  * s2io_print_pci_mode - Logs the PCI bus width and speed the adapter is on.
883 static int s2io_print_pci_mode(struct s2io_nic *nic)
885 	struct XENA_dev_config __iomem *bar0 = nic->bar0;
886 	register u64 val64 = 0;
888 	struct config_param *config = &nic->config;
890 	val64 = readq(&bar0->pci_mode);
891 	mode = (u8)GET_PCI_MODE(val64);
893 	if ( val64 & PCI_MODE_UNKNOWN_MODE)
894 		return -1;	/* Unknown PCI mode */
896 	config->bus_speed = bus_speed[mode];
/* Behind a NEC bridge the card is effectively on a PCI-E bus; report that
 * instead of the raw mode.
 */
898 	if (s2io_on_nec_bridge(nic->pdev)) {
899 		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
904 	if (val64 & PCI_MODE_32_BITS) {
905 		DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
907 		DBG_PRINT(ERR_DBG, "%s: Device is on 64 bit ", nic->dev->name);
911 	case PCI_MODE_PCI_33:
912 		DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
914 	case PCI_MODE_PCI_66:
915 		DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
917 	case PCI_MODE_PCIX_M1_66:
918 		DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
920 	case PCI_MODE_PCIX_M1_100:
921 		DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
923 	case PCI_MODE_PCIX_M1_133:
924 		DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
/* The PCI-X Mode 2 cases print the doubled effective transfer rate
 * (66->133, 100->200, 133->266), matching the bus_speed[] table above.
 */
926 	case PCI_MODE_PCIX_M2_66:
927 		DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
929 	case PCI_MODE_PCIX_M2_100:
930 		DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
932 	case PCI_MODE_PCIX_M2_133:
933 		DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
936 	return -1;	/* Unsupported bus speed */
943 * init_nic - Initialization of hardware
944 * @nic: device private variable
945 * Description: The function sequentially configures every block
946 * of the H/W from their reset values.
947 * Return Value: SUCCESS on success and
948 * '-1' on failure (endian settings incorrect).
951 static int init_nic(struct s2io_nic *nic)
953 struct XENA_dev_config __iomem *bar0 = nic->bar0;
954 struct net_device *dev = nic->dev;
955 register u64 val64 = 0;
959 struct mac_info *mac_control;
960 struct config_param *config;
962 unsigned long long mem_share;
965 mac_control = &nic->mac_control;
966 config = &nic->config;
968 /* Set the swapper control on the card */
969 if(s2io_set_swapper(nic)) {
970 DBG_PRINT(ERR_DBG,"ERROR: Setting Swapper failed\n");
975 * Herc requires EOI to be removed from reset before XGXS, so..
977 if (nic->device_type & XFRAME_II_DEVICE) {
978 val64 = 0xA500000000ULL;
979 writeq(val64, &bar0->sw_reset);
981 val64 = readq(&bar0->sw_reset);
984 /* Remove XGXS from reset state */
986 writeq(val64, &bar0->sw_reset);
988 val64 = readq(&bar0->sw_reset);
990 /* Enable Receiving broadcasts */
991 add = &bar0->mac_cfg;
992 val64 = readq(&bar0->mac_cfg);
993 val64 |= MAC_RMAC_BCAST_ENABLE;
994 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
995 writel((u32) val64, add);
996 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
997 writel((u32) (val64 >> 32), (add + 4));
999 /* Read registers in all blocks */
1000 val64 = readq(&bar0->mac_int_mask);
1001 val64 = readq(&bar0->mc_int_mask);
1002 val64 = readq(&bar0->xgxs_int_mask);
1006 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
1008 if (nic->device_type & XFRAME_II_DEVICE) {
1009 while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
1010 SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
1011 &bar0->dtx_control, UF);
1013 msleep(1); /* Necessary!! */
1017 while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
1018 SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
1019 &bar0->dtx_control, UF);
1020 val64 = readq(&bar0->dtx_control);
1025 /* Tx DMA Initialization */
1027 writeq(val64, &bar0->tx_fifo_partition_0);
1028 writeq(val64, &bar0->tx_fifo_partition_1);
1029 writeq(val64, &bar0->tx_fifo_partition_2);
1030 writeq(val64, &bar0->tx_fifo_partition_3);
1033 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
1035 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
1036 13) | vBIT(config->tx_cfg[i].fifo_priority,
1039 if (i == (config->tx_fifo_num - 1)) {
1046 writeq(val64, &bar0->tx_fifo_partition_0);
1050 writeq(val64, &bar0->tx_fifo_partition_1);
1054 writeq(val64, &bar0->tx_fifo_partition_2);
1058 writeq(val64, &bar0->tx_fifo_partition_3);
1064 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
1065 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
1067 if ((nic->device_type == XFRAME_I_DEVICE) &&
1068 (get_xena_rev_id(nic->pdev) < 4))
1069 writeq(PCC_ENABLE_FOUR, &bar0->pcc_enable);
1071 val64 = readq(&bar0->tx_fifo_partition_0);
1072 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
1073 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
1076 * Initialization of Tx_PA_CONFIG register to ignore packet
1077 * integrity checking.
1079 val64 = readq(&bar0->tx_pa_cfg);
1080 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
1081 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
1082 writeq(val64, &bar0->tx_pa_cfg);
1084 /* Rx DMA initialization. */
1086 for (i = 0; i < config->rx_ring_num; i++) {
1088 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
1091 writeq(val64, &bar0->rx_queue_priority);
1094 * Allocating equal share of memory to all the
1098 if (nic->device_type & XFRAME_II_DEVICE)
1103 for (i = 0; i < config->rx_ring_num; i++) {
1106 mem_share = (mem_size / config->rx_ring_num +
1107 mem_size % config->rx_ring_num);
1108 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
1111 mem_share = (mem_size / config->rx_ring_num);
1112 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
1115 mem_share = (mem_size / config->rx_ring_num);
1116 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
1119 mem_share = (mem_size / config->rx_ring_num);
1120 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
1123 mem_share = (mem_size / config->rx_ring_num);
1124 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
1127 mem_share = (mem_size / config->rx_ring_num);
1128 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
1131 mem_share = (mem_size / config->rx_ring_num);
1132 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
1135 mem_share = (mem_size / config->rx_ring_num);
1136 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
1140 writeq(val64, &bar0->rx_queue_cfg);
1143 * Filling Tx round robin registers
1144 * as per the number of FIFOs
1146 switch (config->tx_fifo_num) {
1148 val64 = 0x0000000000000000ULL;
1149 writeq(val64, &bar0->tx_w_round_robin_0);
1150 writeq(val64, &bar0->tx_w_round_robin_1);
1151 writeq(val64, &bar0->tx_w_round_robin_2);
1152 writeq(val64, &bar0->tx_w_round_robin_3);
1153 writeq(val64, &bar0->tx_w_round_robin_4);
1156 val64 = 0x0000010000010000ULL;
1157 writeq(val64, &bar0->tx_w_round_robin_0);
1158 val64 = 0x0100000100000100ULL;
1159 writeq(val64, &bar0->tx_w_round_robin_1);
1160 val64 = 0x0001000001000001ULL;
1161 writeq(val64, &bar0->tx_w_round_robin_2);
1162 val64 = 0x0000010000010000ULL;
1163 writeq(val64, &bar0->tx_w_round_robin_3);
1164 val64 = 0x0100000000000000ULL;
1165 writeq(val64, &bar0->tx_w_round_robin_4);
1168 val64 = 0x0001000102000001ULL;
1169 writeq(val64, &bar0->tx_w_round_robin_0);
1170 val64 = 0x0001020000010001ULL;
1171 writeq(val64, &bar0->tx_w_round_robin_1);
1172 val64 = 0x0200000100010200ULL;
1173 writeq(val64, &bar0->tx_w_round_robin_2);
1174 val64 = 0x0001000102000001ULL;
1175 writeq(val64, &bar0->tx_w_round_robin_3);
1176 val64 = 0x0001020000000000ULL;
1177 writeq(val64, &bar0->tx_w_round_robin_4);
1180 val64 = 0x0001020300010200ULL;
1181 writeq(val64, &bar0->tx_w_round_robin_0);
1182 val64 = 0x0100000102030001ULL;
1183 writeq(val64, &bar0->tx_w_round_robin_1);
1184 val64 = 0x0200010000010203ULL;
1185 writeq(val64, &bar0->tx_w_round_robin_2);
1186 val64 = 0x0001020001000001ULL;
1187 writeq(val64, &bar0->tx_w_round_robin_3);
1188 val64 = 0x0203000100000000ULL;
1189 writeq(val64, &bar0->tx_w_round_robin_4);
1192 val64 = 0x0001000203000102ULL;
1193 writeq(val64, &bar0->tx_w_round_robin_0);
1194 val64 = 0x0001020001030004ULL;
1195 writeq(val64, &bar0->tx_w_round_robin_1);
1196 val64 = 0x0001000203000102ULL;
1197 writeq(val64, &bar0->tx_w_round_robin_2);
1198 val64 = 0x0001020001030004ULL;
1199 writeq(val64, &bar0->tx_w_round_robin_3);
1200 val64 = 0x0001000000000000ULL;
1201 writeq(val64, &bar0->tx_w_round_robin_4);
1204 val64 = 0x0001020304000102ULL;
1205 writeq(val64, &bar0->tx_w_round_robin_0);
1206 val64 = 0x0304050001020001ULL;
1207 writeq(val64, &bar0->tx_w_round_robin_1);
1208 val64 = 0x0203000100000102ULL;
1209 writeq(val64, &bar0->tx_w_round_robin_2);
1210 val64 = 0x0304000102030405ULL;
1211 writeq(val64, &bar0->tx_w_round_robin_3);
1212 val64 = 0x0001000200000000ULL;
1213 writeq(val64, &bar0->tx_w_round_robin_4);
1216 val64 = 0x0001020001020300ULL;
1217 writeq(val64, &bar0->tx_w_round_robin_0);
1218 val64 = 0x0102030400010203ULL;
1219 writeq(val64, &bar0->tx_w_round_robin_1);
1220 val64 = 0x0405060001020001ULL;
1221 writeq(val64, &bar0->tx_w_round_robin_2);
1222 val64 = 0x0304050000010200ULL;
1223 writeq(val64, &bar0->tx_w_round_robin_3);
1224 val64 = 0x0102030000000000ULL;
1225 writeq(val64, &bar0->tx_w_round_robin_4);
1228 val64 = 0x0001020300040105ULL;
1229 writeq(val64, &bar0->tx_w_round_robin_0);
1230 val64 = 0x0200030106000204ULL;
1231 writeq(val64, &bar0->tx_w_round_robin_1);
1232 val64 = 0x0103000502010007ULL;
1233 writeq(val64, &bar0->tx_w_round_robin_2);
1234 val64 = 0x0304010002060500ULL;
1235 writeq(val64, &bar0->tx_w_round_robin_3);
1236 val64 = 0x0103020400000000ULL;
1237 writeq(val64, &bar0->tx_w_round_robin_4);
1241 /* Enable all configured Tx FIFO partitions */
1242 val64 = readq(&bar0->tx_fifo_partition_0);
1243 val64 |= (TX_FIFO_PARTITION_EN);
1244 writeq(val64, &bar0->tx_fifo_partition_0);
1246 /* Filling the Rx round robin registers as per the
1247 * number of Rings and steering based on QoS.
1249 switch (config->rx_ring_num) {
1251 val64 = 0x8080808080808080ULL;
1252 writeq(val64, &bar0->rts_qos_steering);
1255 val64 = 0x0000010000010000ULL;
1256 writeq(val64, &bar0->rx_w_round_robin_0);
1257 val64 = 0x0100000100000100ULL;
1258 writeq(val64, &bar0->rx_w_round_robin_1);
1259 val64 = 0x0001000001000001ULL;
1260 writeq(val64, &bar0->rx_w_round_robin_2);
1261 val64 = 0x0000010000010000ULL;
1262 writeq(val64, &bar0->rx_w_round_robin_3);
1263 val64 = 0x0100000000000000ULL;
1264 writeq(val64, &bar0->rx_w_round_robin_4);
1266 val64 = 0x8080808040404040ULL;
1267 writeq(val64, &bar0->rts_qos_steering);
1270 val64 = 0x0001000102000001ULL;
1271 writeq(val64, &bar0->rx_w_round_robin_0);
1272 val64 = 0x0001020000010001ULL;
1273 writeq(val64, &bar0->rx_w_round_robin_1);
1274 val64 = 0x0200000100010200ULL;
1275 writeq(val64, &bar0->rx_w_round_robin_2);
1276 val64 = 0x0001000102000001ULL;
1277 writeq(val64, &bar0->rx_w_round_robin_3);
1278 val64 = 0x0001020000000000ULL;
1279 writeq(val64, &bar0->rx_w_round_robin_4);
1281 val64 = 0x8080804040402020ULL;
1282 writeq(val64, &bar0->rts_qos_steering);
1285 val64 = 0x0001020300010200ULL;
1286 writeq(val64, &bar0->rx_w_round_robin_0);
1287 val64 = 0x0100000102030001ULL;
1288 writeq(val64, &bar0->rx_w_round_robin_1);
1289 val64 = 0x0200010000010203ULL;
1290 writeq(val64, &bar0->rx_w_round_robin_2);
1291 val64 = 0x0001020001000001ULL;
1292 writeq(val64, &bar0->rx_w_round_robin_3);
1293 val64 = 0x0203000100000000ULL;
1294 writeq(val64, &bar0->rx_w_round_robin_4);
1296 val64 = 0x8080404020201010ULL;
1297 writeq(val64, &bar0->rts_qos_steering);
1300 val64 = 0x0001000203000102ULL;
1301 writeq(val64, &bar0->rx_w_round_robin_0);
1302 val64 = 0x0001020001030004ULL;
1303 writeq(val64, &bar0->rx_w_round_robin_1);
1304 val64 = 0x0001000203000102ULL;
1305 writeq(val64, &bar0->rx_w_round_robin_2);
1306 val64 = 0x0001020001030004ULL;
1307 writeq(val64, &bar0->rx_w_round_robin_3);
1308 val64 = 0x0001000000000000ULL;
1309 writeq(val64, &bar0->rx_w_round_robin_4);
1311 val64 = 0x8080404020201008ULL;
1312 writeq(val64, &bar0->rts_qos_steering);
1315 val64 = 0x0001020304000102ULL;
1316 writeq(val64, &bar0->rx_w_round_robin_0);
1317 val64 = 0x0304050001020001ULL;
1318 writeq(val64, &bar0->rx_w_round_robin_1);
1319 val64 = 0x0203000100000102ULL;
1320 writeq(val64, &bar0->rx_w_round_robin_2);
1321 val64 = 0x0304000102030405ULL;
1322 writeq(val64, &bar0->rx_w_round_robin_3);
1323 val64 = 0x0001000200000000ULL;
1324 writeq(val64, &bar0->rx_w_round_robin_4);
1326 val64 = 0x8080404020100804ULL;
1327 writeq(val64, &bar0->rts_qos_steering);
1330 val64 = 0x0001020001020300ULL;
1331 writeq(val64, &bar0->rx_w_round_robin_0);
1332 val64 = 0x0102030400010203ULL;
1333 writeq(val64, &bar0->rx_w_round_robin_1);
1334 val64 = 0x0405060001020001ULL;
1335 writeq(val64, &bar0->rx_w_round_robin_2);
1336 val64 = 0x0304050000010200ULL;
1337 writeq(val64, &bar0->rx_w_round_robin_3);
1338 val64 = 0x0102030000000000ULL;
1339 writeq(val64, &bar0->rx_w_round_robin_4);
1341 val64 = 0x8080402010080402ULL;
1342 writeq(val64, &bar0->rts_qos_steering);
1345 val64 = 0x0001020300040105ULL;
1346 writeq(val64, &bar0->rx_w_round_robin_0);
1347 val64 = 0x0200030106000204ULL;
1348 writeq(val64, &bar0->rx_w_round_robin_1);
1349 val64 = 0x0103000502010007ULL;
1350 writeq(val64, &bar0->rx_w_round_robin_2);
1351 val64 = 0x0304010002060500ULL;
1352 writeq(val64, &bar0->rx_w_round_robin_3);
1353 val64 = 0x0103020400000000ULL;
1354 writeq(val64, &bar0->rx_w_round_robin_4);
1356 val64 = 0x8040201008040201ULL;
1357 writeq(val64, &bar0->rts_qos_steering);
1363 for (i = 0; i < 8; i++)
1364 writeq(val64, &bar0->rts_frm_len_n[i]);
1366 /* Set the default rts frame length for the rings configured */
1367 val64 = MAC_RTS_FRM_LEN_SET(dev->mtu+22);
1368 for (i = 0 ; i < config->rx_ring_num ; i++)
1369 writeq(val64, &bar0->rts_frm_len_n[i]);
1371 /* Set the frame length for the configured rings
1372 * desired by the user
1374 for (i = 0; i < config->rx_ring_num; i++) {
1375 /* If rts_frm_len[i] == 0 then it is assumed that user not
1376 * specified frame length steering.
1377 * If the user provides the frame length then program
1378 * the rts_frm_len register for those values or else
1379 * leave it as it is.
1381 if (rts_frm_len[i] != 0) {
1382 writeq(MAC_RTS_FRM_LEN_SET(rts_frm_len[i]),
1383 &bar0->rts_frm_len_n[i]);
1387 /* Disable differentiated services steering logic */
1388 for (i = 0; i < 64; i++) {
1389 if (rts_ds_steer(nic, i, 0) == FAILURE) {
1390 DBG_PRINT(ERR_DBG, "%s: failed rts ds steering",
1392 DBG_PRINT(ERR_DBG, "set on codepoint %d\n", i);
1397 /* Program statistics memory */
1398 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
1400 if (nic->device_type == XFRAME_II_DEVICE) {
1401 val64 = STAT_BC(0x320);
1402 writeq(val64, &bar0->stat_byte_cnt);
1406 * Initializing the sampling rate for the device to calculate the
1407 * bandwidth utilization.
1409 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
1410 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
1411 writeq(val64, &bar0->mac_link_util);
1415 * Initializing the Transmit and Receive Traffic Interrupt
1419 * TTI Initialization. Default Tx timer gets us about
1420 * 250 interrupts per sec. Continuous interrupts are enabled
1423 if (nic->device_type == XFRAME_II_DEVICE) {
1424 int count = (nic->config.bus_speed * 125)/2;
1425 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(count);
1428 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0x2078);
1430 val64 |= TTI_DATA1_MEM_TX_URNG_A(0xA) |
1431 TTI_DATA1_MEM_TX_URNG_B(0x10) |
1432 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
1433 if (use_continuous_tx_intrs)
1434 val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN;
1435 writeq(val64, &bar0->tti_data1_mem);
1437 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
1438 TTI_DATA2_MEM_TX_UFC_B(0x20) |
1439 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
1440 writeq(val64, &bar0->tti_data2_mem);
1442 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1443 writeq(val64, &bar0->tti_command_mem);
1446 * Once the operation completes, the Strobe bit of the command
1447 * register will be reset. We poll for this particular condition
1448 * We wait for a maximum of 500ms for the operation to complete,
1449 * if it's not complete by then we return error.
1453 val64 = readq(&bar0->tti_command_mem);
1454 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1458 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
1466 if (nic->config.bimodal) {
1468 for (k = 0; k < config->rx_ring_num; k++) {
1469 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
1470 val64 |= TTI_CMD_MEM_OFFSET(0x38+k);
1471 writeq(val64, &bar0->tti_command_mem);
1474 * Once the operation completes, the Strobe bit of the command
1475 * register will be reset. We poll for this particular condition
1476 * We wait for a maximum of 500ms for the operation to complete,
1477 * if it's not complete by then we return error.
1481 val64 = readq(&bar0->tti_command_mem);
1482 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
1487 "%s: TTI init Failed\n",
1497 /* RTI Initialization */
1498 if (nic->device_type == XFRAME_II_DEVICE) {
1500 * Programmed to generate Apprx 500 Intrs per
1503 int count = (nic->config.bus_speed * 125)/4;
1504 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(count);
1506 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF);
1508 val64 |= RTI_DATA1_MEM_RX_URNG_A(0xA) |
1509 RTI_DATA1_MEM_RX_URNG_B(0x10) |
1510 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
1512 writeq(val64, &bar0->rti_data1_mem);
1514 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
1515 RTI_DATA2_MEM_RX_UFC_B(0x2) ;
1516 if (nic->intr_type == MSI_X)
1517 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x20) | \
1518 RTI_DATA2_MEM_RX_UFC_D(0x40));
1520 val64 |= (RTI_DATA2_MEM_RX_UFC_C(0x40) | \
1521 RTI_DATA2_MEM_RX_UFC_D(0x80));
1522 writeq(val64, &bar0->rti_data2_mem);
1524 for (i = 0; i < config->rx_ring_num; i++) {
1525 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD
1526 | RTI_CMD_MEM_OFFSET(i);
1527 writeq(val64, &bar0->rti_command_mem);
1530 * Once the operation completes, the Strobe bit of the
1531 * command register will be reset. We poll for this
1532 * particular condition. We wait for a maximum of 500ms
1533 * for the operation to complete, if it's not complete
1534 * by then we return error.
1538 val64 = readq(&bar0->rti_command_mem);
1539 if (!(val64 & RTI_CMD_MEM_STROBE_NEW_CMD)) {
1543 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
1554 * Initializing proper values as Pause threshold into all
1555 * the 8 Queues on Rx side.
1557 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1558 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1560 /* Disable RMAC PAD STRIPPING */
1561 add = &bar0->mac_cfg;
1562 val64 = readq(&bar0->mac_cfg);
1563 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1564 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1565 writel((u32) (val64), add);
1566 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1567 writel((u32) (val64 >> 32), (add + 4));
1568 val64 = readq(&bar0->mac_cfg);
1570 /* Enable FCS stripping by adapter */
1571 add = &bar0->mac_cfg;
1572 val64 = readq(&bar0->mac_cfg);
1573 val64 |= MAC_CFG_RMAC_STRIP_FCS;
1574 if (nic->device_type == XFRAME_II_DEVICE)
1575 writeq(val64, &bar0->mac_cfg);
1577 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1578 writel((u32) (val64), add);
1579 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1580 writel((u32) (val64 >> 32), (add + 4));
1584 * Set the time value to be inserted in the pause frame
1585 * generated by xena.
1587 val64 = readq(&bar0->rmac_pause_cfg);
1588 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1589 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1590 writeq(val64, &bar0->rmac_pause_cfg);
1593 * Set the Threshold Limit for Generating the pause frame
1594 * If the amount of data in any Queue exceeds ratio of
1595 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1596 * pause frame is generated
1599 for (i = 0; i < 4; i++) {
1601 (((u64) 0xFF00 | nic->mac_control.
1602 mc_pause_threshold_q0q3)
1605 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1608 for (i = 0; i < 4; i++) {
1610 (((u64) 0xFF00 | nic->mac_control.
1611 mc_pause_threshold_q4q7)
1614 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1617 * TxDMA will stop Read request if the number of read split has
1618 * exceeded the limit pointed by shared_splits
1620 val64 = readq(&bar0->pic_control);
1621 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1622 writeq(val64, &bar0->pic_control);
1624 if (nic->config.bus_speed == 266) {
1625 writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
1626 writeq(0x0, &bar0->read_retry_delay);
1627 writeq(0x0, &bar0->write_retry_delay);
1631 * Programming the Herc to split every write transaction
1632 * that does not start on an ADB to reduce disconnects.
1634 if (nic->device_type == XFRAME_II_DEVICE) {
1635 val64 = FAULT_BEHAVIOUR | EXT_REQ_EN |
1636 MISC_LINK_STABILITY_PRD(3);
1637 writeq(val64, &bar0->misc_control);
1638 val64 = readq(&bar0->pic_control2);
1639 val64 &= ~(BIT(13)|BIT(14)|BIT(15));
1640 writeq(val64, &bar0->pic_control2);
1642 if (strstr(nic->product_name, "CX4")) {
1643 val64 = TMAC_AVG_IPG(0x17);
1644 writeq(val64, &bar0->tmac_avg_ipg);
1649 #define LINK_UP_DOWN_INTERRUPT 1
1650 #define MAC_RMAC_ERR_TIMER 2
1652 static int s2io_link_fault_indication(struct s2io_nic *nic)
1654 if (nic->intr_type != INTA)
1655 return MAC_RMAC_ERR_TIMER;
1656 if (nic->device_type == XFRAME_II_DEVICE)
1657 return LINK_UP_DOWN_INTERRUPT;
1659 return MAC_RMAC_ERR_TIMER;
/**
 * en_dis_able_nic_intrs - Enable or Disable the interrupts
 * @nic: device private variable,
 * @mask: bitmask selecting which interrupt blocks to modify
 *        (TX/RX_PIC_INTR, TX/RX_MAC_INTR, TX/RX_TRAFFIC_INTR),
 * @flag: ENABLE_INTRS or DISABLE_INTRS.
 * Description: This function will either disable or enable the interrupts
 * depending on the flag argument. The mask argument can be used to
 * enable/disable any Intr block.  In the general_int_mask register a
 * *cleared* bit enables the corresponding top-level interrupt source.
 * Return Value: NONE.
 *
 * NOTE(review): this chunk appears elided; in the disable paths a
 * "val64 |= temp64;" between the readq and the final writeq is expected
 * so the previously-set mask bits are preserved — confirm against the
 * full source before relying on the text below.
 */

static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	register u64 val64 = 0, temp64 = 0;

	/* Top level interrupt classification */
	/* PIC Interrupts */
	if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
		/* Enable PIC Intrs in the general intr mask register */
		val64 = TXPIC_INT_M;
		if (flag == ENABLE_INTRS) {
			/* Clearing the bit in general_int_mask unmasks PIC. */
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * If Hercules adapter enable GPIO otherwise
			 * disable all PCIX, Flash, MDIO, IIC and GPIO
			 * interrupts for now.
			 */
			if (s2io_link_fault_indication(nic) ==
					LINK_UP_DOWN_INTERRUPT ) {
				/* Unmask only the GPIO link-up interrupt. */
				temp64 = readq(&bar0->pic_int_mask);
				temp64 &= ~((u64) PIC_INT_GPIO);
				writeq(temp64, &bar0->pic_int_mask);
				temp64 = readq(&bar0->gpio_int_mask);
				temp64 &= ~((u64) GPIO_INT_MASK_LINK_UP);
				writeq(temp64, &bar0->gpio_int_mask);
			/* (non-Herc path) mask every PIC sub-interrupt */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			/*
			 * No MSI Support is available presently, so TTI and
			 * RTI interrupts are also disabled.
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable PIC Intrs in the general
			 * intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
			temp64 = readq(&bar0->general_int_mask);
			writeq(val64, &bar0->general_int_mask);

	/* MAC Interrupts */
	/* Enabling/Disabling MAC interrupts */
	if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
		val64 = TXMAC_INT_M | RXMAC_INT_M;
		if (flag == ENABLE_INTRS) {
			/* Unmask the MAC top-level interrupt bits. */
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * All MAC block error interrupts are disabled for now
			 */
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable MAC Intrs in the general intr mask register
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
			writeq(DISABLE_ALL_INTRS,
			       &bar0->mac_rmac_err_mask);
			temp64 = readq(&bar0->general_int_mask);
			writeq(val64, &bar0->general_int_mask);

	/* Tx traffic interrupts */
	if (mask & TX_TRAFFIC_INTR) {
		val64 = TXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/*
			 * Enable all the Tx side interrupts
			 * writing 0 Enables all 64 TX interrupt levels
			 */
			writeq(0x0, &bar0->tx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Tx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			writeq(val64, &bar0->general_int_mask);

	/* Rx traffic interrupts */
	if (mask & RX_TRAFFIC_INTR) {
		val64 = RXTRAFFIC_INT_M;
		if (flag == ENABLE_INTRS) {
			temp64 = readq(&bar0->general_int_mask);
			temp64 &= ~((u64) val64);
			writeq(temp64, &bar0->general_int_mask);
			/* writing 0 Enables all 8 RX interrupt levels */
			writeq(0x0, &bar0->rx_traffic_mask);
		} else if (flag == DISABLE_INTRS) {
			/*
			 * Disable Rx Traffic Intrs in the general intr mask
			 * register.
			 */
			writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
			temp64 = readq(&bar0->general_int_mask);
			writeq(val64, &bar0->general_int_mask);
/*
 * verify_pcc_quiescent - Checks for PCC quiescent state
 * @sp: device private variable
 * @flag: TRUE if the adapter-enable bit was written, FALSE otherwise;
 *        determines the polarity of the idle-bit comparison.
 * Return: 1 If PCC is quiescence
 *         0 If PCC is not quiescence
 */
static int verify_pcc_quiescent(struct s2io_nic *sp, int flag)
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = readq(&bar0->adapter_status);

	/* Herc (Xframe-II) and Xena rev >= 4 use the full PCC_IDLE field;
	 * older Xena revisions expose only four PCC idle bits. */
	herc = (sp->device_type == XFRAME_II_DEVICE);

	if (flag == FALSE) {
		/* Adapter disabled: the PCC idle bits must NOT be set. */
		if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE))
			/* pre-rev-4 Xena: check the four-PCC idle field */
			if (!(val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
		/* Adapter enabled: every PCC idle bit must be set. */
		if ((!herc && (get_xena_rev_id(sp->pdev) >= 4)) || herc) {
			if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
			     ADAPTER_STATUS_RMAC_PCC_IDLE))
			if (((val64 & ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE) ==
			     ADAPTER_STATUS_RMAC_PCC_FOUR_IDLE))
/*
 * verify_xena_quiescence - Checks whether the H/W is ready
 * Description: Returns whether the H/W is ready to go or not. Depending
 * on whether adapter enable bit was written or not the comparison
 * differs and the calling function passes the input argument flag to
 * indicate this.
 * Return: 1 If xena is quiescence
 *         0 If Xena is not quiescence
 */

static int verify_xena_quiescence(struct s2io_nic *sp)
	struct XENA_dev_config __iomem *bar0 = sp->bar0;
	u64 val64 = readq(&bar0->adapter_status);
	mode = s2io_verify_pci_mode(sp);

	/* Each hardware engine must report ready/idle in adapter_status;
	 * fail fast and log which engine is blocking. */
	if (!(val64 & ADAPTER_STATUS_TDMA_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "TDMA is not ready!");
	if (!(val64 & ADAPTER_STATUS_RDMA_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "RDMA is not ready!");
	if (!(val64 & ADAPTER_STATUS_PFC_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "PFC is not ready!");
	if (!(val64 & ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
		DBG_PRINT(ERR_DBG, "%s", "TMAC BUF is not empty!");
	if (!(val64 & ADAPTER_STATUS_PIC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "%s", "PIC is not QUIESCENT!");
	if (!(val64 & ADAPTER_STATUS_MC_DRAM_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "MC_DRAM is not ready!");
	if (!(val64 & ADAPTER_STATUS_MC_QUEUES_READY)) {
		DBG_PRINT(ERR_DBG, "%s", "MC_QUEUES is not ready!");
	if (!(val64 & ADAPTER_STATUS_M_PLL_LOCK)) {
		DBG_PRINT(ERR_DBG, "%s", "M_PLL is not locked!");

	/*
	 * In PCI 33 mode, the P_PLL is not used, and therefore,
	 * the P_PLL_LOCK bit in the adapter_status register will
	 * not be asserted; skip the check in that mode.
	 */
	if (!(val64 & ADAPTER_STATUS_P_PLL_LOCK) &&
	    sp->device_type == XFRAME_II_DEVICE && mode !=
		DBG_PRINT(ERR_DBG, "%s", "P_PLL is not locked!");
	if (!((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
	      ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
		DBG_PRINT(ERR_DBG, "%s", "RC_PRC is not QUIESCENT!");
/*
 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
 * @sp: Pointer to device specifc structure
 * Description:
 * New procedure to clear mac address reading problems on Alpha platforms
 */

static void fix_mac_address(struct s2io_nic * sp)
	struct XENA_dev_config __iomem *bar0 = sp->bar0;

	/* Replay the END_SIGN-terminated fix_mac[] magic sequence into
	 * gpio_control, reading the register back after each write —
	 * presumably to flush the posted MMIO write; confirm against
	 * the hardware errata note. */
	while (fix_mac[i] != END_SIGN) {
		writeq(fix_mac[i++], &bar0->gpio_control);
		val64 = readq(&bar0->gpio_control);
/**
 * start_nic - Turns the device on
 * @nic : device private variable.
 * Description:
 * This function actually turns the device on. Before this function is
 * called,all Registers are configured from their reset states
 * and shared memory is allocated but the NIC is still quiescent. On
 * calling this function, the device interrupts are cleared and the NIC is
 * literally switched on by writing into the adapter control register.
 * Return Value:
 * SUCCESS on success and -1 on failure.
 */

static int start_nic(struct s2io_nic *nic)
	struct XENA_dev_config __iomem *bar0 = nic->bar0;
	struct net_device *dev = nic->dev;
	register u64 val64 = 0;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* PRC Initialization and configuration */
	for (i = 0; i < config->rx_ring_num; i++) {
		/* Point each ring's PRC at its first Rx descriptor block. */
		writeq((u64) mac_control->rings[i].rx_blocks[0].block_dma_addr,
		       &bar0->prc_rxd0_n[i]);

		val64 = readq(&bar0->prc_ctrl_n[i]);
		if (nic->config.bimodal)
			val64 |= PRC_CTRL_BIMODAL_INTERRUPT;
		if (nic->rxd_mode == RXD_MODE_1)
			val64 |= PRC_CTRL_RC_ENABLED;
			/* (non-1-buffer modes) enable RC with ring mode 3 */
			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
		if (nic->device_type == XFRAME_II_DEVICE)
			val64 |= PRC_CTRL_GROUP_READS;
		/* Replace the backoff interval field with 0x1000. */
		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
		writeq(val64, &bar0->prc_ctrl_n[i]);

	if (nic->rxd_mode == RXD_MODE_3B) {
		/* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 |= RX_PA_CFG_IGNORE_L2_ERR;
		writeq(val64, &bar0->rx_pa_cfg);

	if (vlan_tag_strip == 0) {
		/* Module parameter asked us not to strip VLAN tags. */
		val64 = readq(&bar0->rx_pa_cfg);
		val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
		writeq(val64, &bar0->rx_pa_cfg);
		vlan_strip_flag = 0;

	/*
	 * Enabling MC-RLDRAM. After enabling the device, we timeout
	 * for around 100ms, which is approximately the time required
	 * for the device to be ready for operation.
	 */
	val64 = readq(&bar0->mc_rldram_mrs);
	val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
	SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
	val64 = readq(&bar0->mc_rldram_mrs);

	msleep(100);	/* Delay by around 100 ms. */

	/* Enabling ECC Protection. */
	/* NOTE(review): the code below *clears* ADAPTER_ECC_EN, which
	 * reads as disabling ECC — verify the bit polarity against the
	 * register spec; the comment and the operation may disagree. */
	val64 = readq(&bar0->adapter_control);
	val64 &= ~ADAPTER_ECC_EN;
	writeq(val64, &bar0->adapter_control);

	/*
	 * Clearing any possible Link state change interrupts that
	 * could have popped up just before Enabling the card.
	 */
	/* Presumably write-1-to-clear: writing back the set bits clears
	 * them — confirm against the register definition. */
	val64 = readq(&bar0->mac_rmac_err_reg);
	writeq(val64, &bar0->mac_rmac_err_reg);

	/*
	 * Verify if the device is ready to be enabled, if so enable
	 * it.
	 */
	val64 = readq(&bar0->adapter_status);
	if (!verify_xena_quiescence(nic)) {
		DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
		DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
			  (unsigned long long) val64);

	/*
	 * With some switches, link might be already up at this point.
	 * Because of this weird behavior, when we enable laser,
	 * we may not get link. We need to handle this. We cannot
	 * figure out which switch is misbehaving. So we are forced to
	 * make a global change.
	 */

	/* Enabling Laser. */
	val64 = readq(&bar0->adapter_control);
	val64 |= ADAPTER_EOI_TX_ON;
	writeq(val64, &bar0->adapter_control);

	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
		/*
		 * Dont see link state interrupts initally on some switches,
		 * so directly scheduling the link state task here.
		 */
		schedule_work(&nic->set_link_task);

	/* SXE-002: Initialize link and activity LED */
	subid = nic->pdev->subsystem_device;
	if (((subid & 0xFF) >= 0x07) &&
	    (nic->device_type == XFRAME_I_DEVICE)) {
		val64 = readq(&bar0->gpio_control);
		val64 |= 0x0000800000000000ULL;
		writeq(val64, &bar0->gpio_control);
		/* Magic LED programming value at undocumented offset
		 * 0x2700 — per the SXE-002 errata referenced above. */
		val64 = 0x0411040400000000ULL;
		writeq(val64, (void __iomem *)bar0 + 0x2700);
/*
 * s2io_txdl_getskb - Get the skb from txdl, unmap and return skb
 * @fifo_data: FIFO the descriptor list belongs to (supplies nic and
 *             max_txds).
 * @txdlp: first TxD of the descriptor list to clean.
 * @get_off: descriptor offset (passed by caller; usage elided here).
 * Returns the skb that was attached to the list, after unmapping all
 * of its DMA buffers and zeroing the descriptors.
 */
static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data, struct \
		TxD *txdlp, int get_off)
	struct s2io_nic *nic = fifo_data->nic;
	struct sk_buff *skb;

	/* UFO in-band descriptor: only a u64 marker was mapped, not a
	 * real frame buffer — unmap just that. */
	if (txds->Host_Control == (u64)(long)nic->ufo_in_band_v) {
		pci_unmap_single(nic->pdev, (dma_addr_t)
			txds->Buffer_Pointer, sizeof(u64),

	/* Host_Control stashes the skb pointer at xmit time. */
	skb = (struct sk_buff *) ((unsigned long)
			txds->Host_Control);
		memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds));

	/* Unmap the linear part of the skb (headlen only). */
	pci_unmap_single(nic->pdev, (dma_addr_t)
		 txds->Buffer_Pointer,
		 skb->len - skb->data_len,

	/* Then unmap each paged fragment that follows in the list. */
	frg_cnt = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < frg_cnt; j++, txds++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			if (!txds->Buffer_Pointer)
			pci_unmap_page(nic->pdev, (dma_addr_t)
				txds->Buffer_Pointer,
				frag->size, PCI_DMA_TODEVICE);
	/* Scrub the whole descriptor list for reuse. */
	memset(txdlp,0, (sizeof(struct TxD) * fifo_data->max_txds));
/**
 * free_tx_buffers - Free all queued Tx buffers
 * @nic : device private variable.
 * Description:
 * Free all queued Tx buffers.
 * Return Value: void
 */

static void free_tx_buffers(struct s2io_nic *nic)
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	struct mac_info *mac_control;
	struct config_param *config;

	mac_control = &nic->mac_control;
	config = &nic->config;

	/* Walk every descriptor of every Tx FIFO, reclaiming any skb
	 * still attached (via s2io_txdl_getskb, which also unmaps DMA). */
	for (i = 0; i < config->tx_fifo_num; i++) {
		for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
			txdp = (struct TxD *) mac_control->fifos[i].list_info[j].
			skb = s2io_txdl_getskb(&mac_control->fifos[i], txdp, j);
			"%s:forcibly freeing %d skbs on FIFO%d\n",
		/* Reset the FIFO's get/put cursors to empty. */
		mac_control->fifos[i].tx_curr_get_info.offset = 0;
		mac_control->fifos[i].tx_curr_put_info.offset = 0;
2130 * stop_nic - To stop the nic
2131 * @nic ; device private variable.
2133 * This function does exactly the opposite of what the start_nic()
2134 * function does. This function is called to stop the device.
2139 static void stop_nic(struct s2io_nic *nic)
2141 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2142 register u64 val64 = 0;
2144 struct mac_info *mac_control;
2145 struct config_param *config;
2147 mac_control = &nic->mac_control;
2148 config = &nic->config;
2150 /* Disable all interrupts */
2151 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
2152 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
2153 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
2154 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
2156 /* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
2157 val64 = readq(&bar0->adapter_control);
2158 val64 &= ~(ADAPTER_CNTL_EN);
2159 writeq(val64, &bar0->adapter_control);
/*
 * fill_rxd_3buf - populate a 3-buffer-mode RxD with header and payload
 * buffers.  Buffer-1 maps the skb head for L3/L4 headers; Buffer-2 maps
 * a freshly allocated frag_list skb for the L4 payload.
 * Returns -ENOMEM if the payload skb cannot be allocated (error path
 * elided in this view).
 */
static int fill_rxd_3buf(struct s2io_nic *nic, struct RxD_t *rxdp, struct \
	struct net_device *dev = nic->dev;
	struct sk_buff *frag_list;

	/* Buffer-1 receives L3/L4 headers */
	((struct RxD3*)rxdp)->Buffer1_ptr = pci_map_single
			(nic->pdev, skb->data, l3l4hdr_size + 4,
			PCI_DMA_FROMDEVICE);

	/* skb_shinfo(skb)->frag_list will have L4 data payload */
	skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE);
	if (skb_shinfo(skb)->frag_list == NULL) {
		DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n ", dev->name);
	frag_list = skb_shinfo(skb)->frag_list;
	skb->truesize += frag_list->truesize;
	frag_list->next = NULL;
	/* Align the payload buffer start.
	 * NOTE(review): ALIGN(..., ALIGN_SIZE + 1) aligns to ALIGN_SIZE+1
	 * bytes, not ALIGN_SIZE — confirm the intended boundary. */
	tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1);
	frag_list->data = tmp;
	frag_list->tail = tmp;

	/* Buffer-2 receives L4 data payload */
	((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev,
			frag_list->data, dev->mtu,
			PCI_DMA_FROMDEVICE);
	/* Record both buffer sizes in the descriptor control word. */
	rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
	rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
/**
 * fill_rx_buffers - Allocates the Rx side skbs
 * @nic: device private variable
 * @ring_no: ring number
 * Description:
 * The function allocates Rx side skbs and puts the physical
 * address of these buffers into the RxD buffer pointers, so that the NIC
 * can DMA the received frame into these locations.
 * The NIC supports 3 receive modes, viz
 * 1. single buffer,
 * 2. three buffer and
 * 3. Five buffer modes.
 * Each mode defines how many fragments the received frame will be split
 * up into by the NIC. The frame is split into L3 header, L4 Header,
 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
 * is split into 3 fragments. As of now only single buffer mode is
 * supported.
 * Return Value:
 * SUCCESS on success or an appropriate -ve value on failure.
 */

static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
	struct net_device *dev = nic->dev;
	struct sk_buff *skb;
	int off, off1, size, block_no, block_no1;
	struct mac_info *mac_control;
	struct config_param *config;
	unsigned long flags;
	struct RxD_t *first_rxdp = NULL;

	mac_control = &nic->mac_control;
	config = &nic->config;
	/* How many descriptors need (re)filling: capacity minus the
	 * buffers currently outstanding on this ring. */
	alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
	    atomic_read(&nic->rx_bufs_left[ring_no]);

	block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
	off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
	while (alloc_tab < alloc_cnt) {
		block_no = mac_control->rings[ring_no].rx_curr_put_info.
		off = mac_control->rings[ring_no].rx_curr_put_info.offset;

		rxdp = mac_control->rings[ring_no].
		    rx_blocks[block_no].rxds[off].virt_addr;

		/* Put pointer caught up with get pointer and the RxD still
		 * owns an skb — the ring is full, stop refilling. */
		if ((block_no == block_no1) && (off == off1) &&
		    (rxdp->Host_Control)) {
			DBG_PRINT(INTR_DBG, "%s: Get and Put",
			DBG_PRINT(INTR_DBG, " info equated\n");
		/* End of a descriptor block: advance (and wrap) the put
		 * cursor to the next block. */
		if (off && (off == rxd_count[nic->rxd_mode])) {
			mac_control->rings[ring_no].rx_curr_put_info.
			if (mac_control->rings[ring_no].rx_curr_put_info.
			    block_index == mac_control->rings[ring_no].
				mac_control->rings[ring_no].rx_curr_put_info.
			block_no = mac_control->rings[ring_no].
				   rx_curr_put_info.block_index;
			if (off == rxd_count[nic->rxd_mode])
			mac_control->rings[ring_no].rx_curr_put_info.
			rxdp = mac_control->rings[ring_no].
				rx_blocks[block_no].block_virt_addr;
			DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
		/* Publish put_pos; under a lock when the receive path may
		 * read it concurrently (path selection elided here). */
			spin_lock_irqsave(&nic->put_lock, flags);
			mac_control->rings[ring_no].put_pos =
			    (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
			spin_unlock_irqrestore(&nic->put_lock, flags);
			mac_control->rings[ring_no].put_pos =
			    (block_no * (rxd_count[nic->rxd_mode] + 1)) + off;
		/* Descriptor still owned by the adapter (and, in 3A+ modes,
		 * marked in-use via BIT(0)) — skip it. */
		if ((rxdp->Control_1 & RXD_OWN_XENA) &&
		    ((nic->rxd_mode >= RXD_MODE_3A) &&
		     (rxdp->Control_2 & BIT(0)))) {
			mac_control->rings[ring_no].rx_curr_put_info.
		/* calculate size of skb based on ring mode */
		size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
		    HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
		if (nic->rxd_mode == RXD_MODE_1)
			size += NET_IP_ALIGN;
		else if (nic->rxd_mode == RXD_MODE_3B)
			size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
			size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;

		skb = dev_alloc_skb(size);
			DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
			DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
			/* Hand already-filled descriptors to the NIC before
			 * bailing out on allocation failure. */
				first_rxdp->Control_1 |= RXD_OWN_XENA;
		if (nic->rxd_mode == RXD_MODE_1) {
			/* 1 buffer mode - normal operation mode */
			memset(rxdp, 0, sizeof(struct RxD1));
			skb_reserve(skb, NET_IP_ALIGN);
			((struct RxD1*)rxdp)->Buffer0_ptr = pci_map_single
			    (nic->pdev, skb->data, size - NET_IP_ALIGN,
			     PCI_DMA_FROMDEVICE);
			rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
		} else if (nic->rxd_mode >= RXD_MODE_3A) {
			/*
			 * 2 or 3 buffer mode -
			 * Both 2 buffer mode and 3 buffer mode provides 128
			 * byte aligned receive buffers.
			 *
			 * 3 buffer mode provides header separation where in
			 * skb->data will have L3/L4 headers where as
			 * skb_shinfo(skb)->frag_list will have the L4 data
			 * payload.
			 */
			memset(rxdp, 0, sizeof(struct RxD3));
			ba = &mac_control->rings[ring_no].ba[block_no][off];
			skb_reserve(skb, BUF0_LEN);
			/* Align skb->data (alignment arithmetic elided). */
			tmp = (u64)(unsigned long) skb->data;
			skb->data = (void *) (unsigned long)tmp;
			skb->tail = (void *) (unsigned long)tmp;

			/* Map Buffer-0 (header strip area) once; afterwards
			 * only resync it for device access. */
			if (!(((struct RxD3*)rxdp)->Buffer0_ptr))
				((struct RxD3*)rxdp)->Buffer0_ptr =
				    pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
						   PCI_DMA_FROMDEVICE);
				pci_dma_sync_single_for_device(nic->pdev,
				    (dma_addr_t) ((struct RxD3*)rxdp)->Buffer0_ptr,
				    BUF0_LEN, PCI_DMA_FROMDEVICE);
			rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
			if (nic->rxd_mode == RXD_MODE_3B) {
				/* Two buffer mode */

				/*
				 * Buffer2 will have L3/L4 header plus
				 * L4 payload.
				 */
				((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single
				    (nic->pdev, skb->data, dev->mtu + 4,
				     PCI_DMA_FROMDEVICE);

				/* Buffer-1 will be dummy buffer. Not used */
				if (!(((struct RxD3*)rxdp)->Buffer1_ptr)) {
					((struct RxD3*)rxdp)->Buffer1_ptr =
					    pci_map_single(nic->pdev,
						PCI_DMA_FROMDEVICE);
				rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
				rxdp->Control_2 |= SET_BUFFER2_SIZE_3
				/* 3-buffer mode: delegate header/payload
				 * buffer setup; undo on failure. */
				if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) {
					dev_kfree_skb_irq(skb);
						first_rxdp->Control_1 |=
			/* Mark descriptor in-use for 3A+ modes. */
			rxdp->Control_2 |= BIT(0);
		/* Stash the skb pointer for retrieval on completion. */
		rxdp->Host_Control = (unsigned long) (skb);
		/* Hand ownership to the NIC, except on rxsync_frequency
		 * boundaries where a memory barrier is issued first (the
		 * barrier path is elided in this view). */
		if (alloc_tab & ((1 << rxsync_frequency) - 1))
			rxdp->Control_1 |= RXD_OWN_XENA;
		if (off == (rxd_count[nic->rxd_mode] + 1))
		mac_control->rings[ring_no].rx_curr_put_info.offset = off;

		rxdp->Control_2 |= SET_RXD_MARKER;
		if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) {
				first_rxdp->Control_1 |= RXD_OWN_XENA;
		atomic_inc(&nic->rx_bufs_left[ring_no]);

	/* Transfer ownership of first descriptor to adapter just before
	 * exiting. Before that, use memory barrier so that ownership
	 * and other fields are seen by adapter correctly.
	 */
		first_rxdp->Control_1 |= RXD_OWN_XENA;
/*
 * free_rxd_blk - Frees the skbs attached to the RxDs of one receive block
 * and unmaps their DMA buffers according to the configured rxd mode.
 * NOTE(review): this chunk looks like a corrupted paste -- interior lines
 * (skb NULL check, buffer-length macro arguments, closing braces) appear
 * to be missing, so the code below is kept byte-identical.
 */
2418 static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk)
2420 struct net_device *dev = sp->dev;
2422 struct sk_buff *skb;
2424 struct mac_info *mac_control;
2427 mac_control = &sp->mac_control;
/* Walk every RxD of the block; reclaim the skb referenced through
 * Host_Control and unmap Buffer0/1/2 as required by the mode. */
2428 for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) {
2429 rxdp = mac_control->rings[ring_no].
2430 rx_blocks[blk].rxds[j].virt_addr;
2431 skb = (struct sk_buff *)
2432 ((unsigned long) rxdp->Host_Control);
/* 1-buffer mode: one mapping covers the entire frame. */
2436 if (sp->rxd_mode == RXD_MODE_1) {
2437 pci_unmap_single(sp->pdev, (dma_addr_t)
2438 ((struct RxD1*)rxdp)->Buffer0_ptr,
2440 HEADER_ETHERNET_II_802_3_SIZE
2441 + HEADER_802_2_SIZE +
2443 PCI_DMA_FROMDEVICE);
2444 memset(rxdp, 0, sizeof(struct RxD1));
/* 2-buffer mode (RXD_MODE_3B): unmap all three buffer pointers. */
2445 } else if(sp->rxd_mode == RXD_MODE_3B) {
2446 ba = &mac_control->rings[ring_no].
2448 pci_unmap_single(sp->pdev, (dma_addr_t)
2449 ((struct RxD3*)rxdp)->Buffer0_ptr,
2451 PCI_DMA_FROMDEVICE);
2452 pci_unmap_single(sp->pdev, (dma_addr_t)
2453 ((struct RxD3*)rxdp)->Buffer1_ptr,
2455 PCI_DMA_FROMDEVICE);
2456 pci_unmap_single(sp->pdev, (dma_addr_t)
2457 ((struct RxD3*)rxdp)->Buffer2_ptr,
2459 PCI_DMA_FROMDEVICE);
2460 memset(rxdp, 0, sizeof(struct RxD3));
/* Fallback branch (presumably 3-buffer mode -- TODO confirm against
 * full source): Buffer2 holds dev->mtu bytes of payload. */
2462 pci_unmap_single(sp->pdev, (dma_addr_t)
2463 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
2464 PCI_DMA_FROMDEVICE);
2465 pci_unmap_single(sp->pdev, (dma_addr_t)
2466 ((struct RxD3*)rxdp)->Buffer1_ptr,
2468 PCI_DMA_FROMDEVICE);
2469 pci_unmap_single(sp->pdev, (dma_addr_t)
2470 ((struct RxD3*)rxdp)->Buffer2_ptr, dev->mtu,
2471 PCI_DMA_FROMDEVICE);
2472 memset(rxdp, 0, sizeof(struct RxD3));
/* One fewer Rx buffer outstanding on this ring. */
2475 atomic_dec(&sp->rx_bufs_left[ring_no]);
2480 * free_rx_buffers - Frees all Rx buffers
2481 * @sp: device private variable.
2483 * This function will free all Rx buffers allocated by host.
/*
 * free_rx_buffers - Frees all Rx buffers on every ring via free_rxd_blk()
 * and resets the per-ring get/put bookkeeping to zero.
 * NOTE(review): in the visible text buf_cnt is printed but never
 * incremented -- an updating line is presumably missing from this paste.
 */
2488 static void free_rx_buffers(struct s2io_nic *sp)
2490 struct net_device *dev = sp->dev;
2491 int i, blk = 0, buf_cnt = 0;
2492 struct mac_info *mac_control;
2493 struct config_param *config;
2495 mac_control = &sp->mac_control;
2496 config = &sp->config;
2498 for (i = 0; i < config->rx_ring_num; i++) {
2499 for (blk = 0; blk < rx_ring_sz[i]; blk++)
2500 free_rxd_blk(sp,i,blk);
/* Reset ring cursors so the ring starts clean on the next fill. */
2502 mac_control->rings[i].rx_curr_put_info.block_index = 0;
2503 mac_control->rings[i].rx_curr_get_info.block_index = 0;
2504 mac_control->rings[i].rx_curr_put_info.offset = 0;
2505 mac_control->rings[i].rx_curr_get_info.offset = 0;
2506 atomic_set(&sp->rx_bufs_left[i], 0);
2507 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
2508 dev->name, buf_cnt, i);
2513 * s2io_poll - Rx interrupt handler for NAPI support
2514 * @dev : pointer to the device structure.
2515 * @budget : The number of packets that were budgeted to be processed
2516 * during one pass through the 'Poll' function.
2518 * Comes into picture only if NAPI support has been incorporated. It does
2519 * the same thing that rx_intr_handler does, but not in an interrupt context.
2520 * Also, it will process only a given number of packets.
2522 * 0 on success and 1 if there are No Rx packets to be processed.
/*
 * s2io_poll - NAPI poll handler. Drains the Rx rings up to the budget,
 * refills Rx buffers, and re-enables Rx interrupts once all work is done.
 * NOTE(review): chunk appears truncated -- the loop counter declaration
 * and the break/return statements are missing from the visible text.
 */
2525 static int s2io_poll(struct net_device *dev, int *budget)
2527 struct s2io_nic *nic = dev->priv;
2528 int pkt_cnt = 0, org_pkts_to_process;
2529 struct mac_info *mac_control;
2530 struct config_param *config;
2531 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2534 atomic_inc(&nic->isr_cnt);
2535 mac_control = &nic->mac_control;
2536 config = &nic->config;
/* Never process more than the device quota in one pass. */
2538 nic->pkts_to_process = *budget;
2539 if (nic->pkts_to_process > dev->quota)
2540 nic->pkts_to_process = dev->quota;
2541 org_pkts_to_process = nic->pkts_to_process;
/* Acknowledge pending Rx traffic interrupts; the readl flushes the
 * posted write to the device. */
2543 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
2544 readl(&bar0->rx_traffic_int);
2546 for (i = 0; i < config->rx_ring_num; i++) {
2547 rx_intr_handler(&mac_control->rings[i]);
2548 pkt_cnt = org_pkts_to_process - nic->pkts_to_process;
2549 if (!nic->pkts_to_process) {
2550 /* Quota for the current iteration has been met */
/* All rings drained within budget: leave polling, refill buffers,
 * then unmask Rx interrupts. */
2557 dev->quota -= pkt_cnt;
2559 netif_rx_complete(dev);
2561 for (i = 0; i < config->rx_ring_num; i++) {
2562 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2563 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2564 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2568 /* Re enable the Rx interrupts. */
2569 writeq(0x0, &bar0->rx_traffic_mask);
2570 readl(&bar0->rx_traffic_mask);
2571 atomic_dec(&nic->isr_cnt);
/* Budget-exhausted path: stay scheduled (interrupts remain masked),
 * only refill the rings. */
2575 dev->quota -= pkt_cnt;
2578 for (i = 0; i < config->rx_ring_num; i++) {
2579 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2580 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2581 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2585 atomic_dec(&nic->isr_cnt);
2589 #ifdef CONFIG_NET_POLL_CONTROLLER
2591 * s2io_netpoll - netpoll event handler entry point
2592 * @dev : pointer to the device structure.
2594 * This function will be called by upper layer to check for events on the
2595 * interface in situations where interrupts are disabled. It is used for
2596 * specific in-kernel networking tasks, such as remote consoles and kernel
2597 * debugging over the network (example netdump in RedHat).
/*
 * s2io_netpoll - polling 'interrupt' entry used while real IRQs are
 * disabled (netconsole/netdump). Acks Tx+Rx events, reaps completed Tx
 * descriptors, indicates received frames and refills the Rx rings.
 */
2599 static void s2io_netpoll(struct net_device *dev)
2601 struct s2io_nic *nic = dev->priv;
2602 struct mac_info *mac_control;
2603 struct config_param *config;
2604 struct XENA_dev_config __iomem *bar0 = nic->bar0;
2605 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2608 disable_irq(dev->irq);
2610 atomic_inc(&nic->isr_cnt);
2611 mac_control = &nic->mac_control;
2612 config = &nic->config;
/* Acknowledge every pending Rx and Tx traffic interrupt bit. */
2614 writeq(val64, &bar0->rx_traffic_int);
2615 writeq(val64, &bar0->tx_traffic_int);
2617 /* we need to free up the transmitted skbufs or else netpoll will
2618 * run out of skbs and will fail and eventually netpoll application such
2619 * as netdump will fail.
2621 for (i = 0; i < config->tx_fifo_num; i++)
2622 tx_intr_handler(&mac_control->fifos[i]);
2624 /* check for received packet and indicate up to network */
2625 for (i = 0; i < config->rx_ring_num; i++)
2626 rx_intr_handler(&mac_control->rings[i]);
/* Replenish the Rx descriptors consumed above. */
2628 for (i = 0; i < config->rx_ring_num; i++) {
2629 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2630 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2631 DBG_PRINT(ERR_DBG, " in Rx Netpoll!!\n");
2635 atomic_dec(&nic->isr_cnt);
2636 enable_irq(dev->irq);
2642 * rx_intr_handler - Rx interrupt handler
2643 * @nic: device private variable.
2645 * If the interrupt is because of a received frame or if the
2646 * receive ring contains fresh as yet un-processed frames,this function is
2647 * called. It picks out the RxD at which place the last Rx processing had
2648 * stopped and sends the skb to the OSM's Rx handler and then increments
/*
 * rx_intr_handler - walks a ring from the last processed RxD, unmaps each
 * frame's DMA buffers per rxd mode and hands the skb to rx_osm_handler(),
 * advancing the get cursor until it meets the put cursor (or the NAPI /
 * indicate_max_pkts limits are hit). Clears any open LRO sessions on exit.
 * NOTE(review): corrupted paste -- several declarations, increments and
 * closing braces are missing; code kept byte-identical.
 */
2653 static void rx_intr_handler(struct ring_info *ring_data)
2655 struct s2io_nic *nic = ring_data->nic;
2656 struct net_device *dev = (struct net_device *) nic->dev;
2657 int get_block, put_block, put_offset;
2658 struct rx_curr_get_info get_info, put_info;
2660 struct sk_buff *skb;
2664 spin_lock(&nic->rx_lock);
/* Bail out early if the adapter is being reset. */
2665 if (atomic_read(&nic->card_state) == CARD_DOWN) {
2666 DBG_PRINT(INTR_DBG, "%s: %s going down for reset\n",
2667 __FUNCTION__, dev->name);
2668 spin_unlock(&nic->rx_lock);
/* Snapshot the get/put cursors for this ring. */
2672 get_info = ring_data->rx_curr_get_info;
2673 get_block = get_info.block_index;
2674 memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
2675 put_block = put_info.block_index;
2676 rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr;
/* put_pos is read under put_lock in one configuration and without it
 * in the other -- presumably #ifdef'd on locking mode; TODO confirm. */
2678 spin_lock(&nic->put_lock);
2679 put_offset = ring_data->put_pos;
2680 spin_unlock(&nic->put_lock);
2682 put_offset = ring_data->put_pos;
2684 while (RXD_IS_UP2DT(rxdp)) {
2686 * If your are next to put index then it's
2687 * FIFO full condition
2689 if ((get_block == put_block) &&
2690 (get_info.offset + 1) == put_info.offset) {
2691 DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name);
2694 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control);
2696 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2698 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2699 spin_unlock(&nic->rx_lock);
/* Unmap the frame's buffers; layout depends on the rxd mode. */
2702 if (nic->rxd_mode == RXD_MODE_1) {
2703 pci_unmap_single(nic->pdev, (dma_addr_t)
2704 ((struct RxD1*)rxdp)->Buffer0_ptr,
2706 HEADER_ETHERNET_II_802_3_SIZE +
2709 PCI_DMA_FROMDEVICE);
2710 } else if (nic->rxd_mode == RXD_MODE_3B) {
2711 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2712 ((struct RxD3*)rxdp)->Buffer0_ptr,
2713 BUF0_LEN, PCI_DMA_FROMDEVICE);
2714 pci_unmap_single(nic->pdev, (dma_addr_t)
2715 ((struct RxD3*)rxdp)->Buffer2_ptr,
2717 PCI_DMA_FROMDEVICE);
2719 pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t)
2720 ((struct RxD3*)rxdp)->Buffer0_ptr, BUF0_LEN,
2721 PCI_DMA_FROMDEVICE);
2722 pci_unmap_single(nic->pdev, (dma_addr_t)
2723 ((struct RxD3*)rxdp)->Buffer1_ptr,
2725 PCI_DMA_FROMDEVICE);
2726 pci_unmap_single(nic->pdev, (dma_addr_t)
2727 ((struct RxD3*)rxdp)->Buffer2_ptr,
2728 dev->mtu, PCI_DMA_FROMDEVICE);
2730 prefetch(skb->data);
2731 rx_osm_handler(ring_data, rxdp);
/* Advance the get cursor, wrapping offset and block as needed. */
2733 ring_data->rx_curr_get_info.offset = get_info.offset;
2734 rxdp = ring_data->rx_blocks[get_block].
2735 rxds[get_info.offset].virt_addr;
2736 if (get_info.offset == rxd_count[nic->rxd_mode]) {
2737 get_info.offset = 0;
2738 ring_data->rx_curr_get_info.offset = get_info.offset;
2740 if (get_block == ring_data->block_count)
2742 ring_data->rx_curr_get_info.block_index = get_block;
2743 rxdp = ring_data->rx_blocks[get_block].block_virt_addr;
/* Honor the NAPI budget and the indicate_max_pkts module limit. */
2746 nic->pkts_to_process -= 1;
2747 if ((napi) && (!nic->pkts_to_process))
2750 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2754 /* Clear all LRO sessions before exiting */
2755 for (i=0; i<MAX_LRO_SESSIONS; i++) {
2756 struct lro *lro = &nic->lro0_n[i];
2758 update_L3L4_header(nic, lro);
2759 queue_rx_frame(lro->parent);
2760 clear_lro_session(lro);
2765 spin_unlock(&nic->rx_lock);
2769 * tx_intr_handler - Transmit interrupt handler
2770 * @nic : device private variable
2772 * If an interrupt was raised to indicate DMA complete of the
2773 * Tx packet, this function is called. It identifies the last TxD
2774 * whose buffer was freed and frees all skbs whose data have already
2775 * DMA'ed into the NICs internal memory.
/*
 * tx_intr_handler - reaps Tx descriptors the NIC has finished DMA'ing:
 * walks from the get cursor until it reaches the put cursor or a TxD the
 * adapter still owns, frees the associated skbs, updates stats, and wakes
 * the queue if it was stopped.
 */
2780 static void tx_intr_handler(struct fifo_info *fifo_data)
2782 struct s2io_nic *nic = fifo_data->nic;
2783 struct net_device *dev = (struct net_device *) nic->dev;
2784 struct tx_curr_get_info get_info, put_info;
2785 struct sk_buff *skb;
2788 get_info = fifo_data->tx_curr_get_info;
2789 memcpy(&put_info, &fifo_data->tx_curr_put_info, sizeof(put_info));
2790 txdlp = (struct TxD *) fifo_data->list_info[get_info.offset].
/* Stop at the first descriptor the adapter still owns. */
2792 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2793 (get_info.offset != put_info.offset) &&
2794 (txdlp->Host_Control)) {
2795 /* Check for TxD errors */
2796 if (txdlp->Control_1 & TXD_T_CODE) {
2797 unsigned long long err;
2798 err = txdlp->Control_1 & TXD_T_CODE;
2800 nic->mac_control.stats_info->sw_stat.
/* T_CODE 0xA indicates the frame was returned due to link loss. */
2803 if ((err >> 48) == 0xA) {
/* NOTE(review): the backslash line-continuation below embeds the
 * source indentation into the printed string -- should be split
 * into two adjacent literals; left untouched here (runtime text). */
2804 DBG_PRINT(TX_DBG, "TxD returned due \
2805 to loss of link\n");
2808 DBG_PRINT(ERR_DBG, "***TxD error %llx\n", err);
2812 skb = s2io_txdl_getskb(fifo_data, txdlp, get_info.offset);
2814 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2816 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2820 /* Updating the statistics block */
2821 nic->stats.tx_bytes += skb->len;
2822 dev_kfree_skb_irq(skb);
/* Advance and wrap the get cursor. */
2825 if (get_info.offset == get_info.fifo_len + 1)
2826 get_info.offset = 0;
2827 txdlp = (struct TxD *) fifo_data->list_info
2828 [get_info.offset].list_virt_addr;
2829 fifo_data->tx_curr_get_info.offset =
/* Descriptors were freed, so the queue can accept frames again. */
2833 spin_lock(&nic->tx_lock);
2834 if (netif_queue_stopped(dev))
2835 netif_wake_queue(dev);
2836 spin_unlock(&nic->tx_lock);
2840 * s2io_mdio_write - Function to write in to MDIO registers
2841 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2842 * @addr : address value
2843 * @value : data value
2844 * @dev : pointer to net_device structure
2846 * This function is used to write values to the MDIO registers
/*
 * s2io_mdio_write - writes 'value' to MDIO register 'addr' of MMD
 * 'mmd_type' (port 0) via the bar0->mdio_control register: an address
 * transaction, then a write transaction, then what appears to be a
 * read-back transaction. Each transaction is latched by setting
 * MDIO_CTRL_START_TRANS(0xE).
 */
2849 static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
2852 struct s2io_nic *sp = dev->priv;
2853 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2855 /* address transaction */
2856 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2857 | MDIO_MMD_DEV_ADDR(mmd_type)
2858 | MDIO_MMS_PRT_ADDR(0x0);
2859 writeq(val64, &bar0->mdio_control);
2860 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2861 writeq(val64, &bar0->mdio_control);
/* Data (write) transaction carrying the 16-bit value. */
2866 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2867 | MDIO_MMD_DEV_ADDR(mmd_type)
2868 | MDIO_MMS_PRT_ADDR(0x0)
2869 | MDIO_MDIO_DATA(value)
2870 | MDIO_OP(MDIO_OP_WRITE_TRANS);
2871 writeq(val64, &bar0->mdio_control);
2872 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2873 writeq(val64, &bar0->mdio_control);
/* Trailing read transaction -- presumably to flush/verify the write;
 * the result is not consumed here. TODO confirm against full source. */
2877 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2878 | MDIO_MMD_DEV_ADDR(mmd_type)
2879 | MDIO_MMS_PRT_ADDR(0x0)
2880 | MDIO_OP(MDIO_OP_READ_TRANS);
2881 writeq(val64, &bar0->mdio_control);
2882 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2883 writeq(val64, &bar0->mdio_control);
2889 * s2io_mdio_read - Function to read from MDIO registers
2890 * @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
2891 * @addr : address value
2892 * @dev : pointer to net_device structure
2894 * This function is used to read values from the MDIO registers
/*
 * s2io_mdio_read - reads MDIO register 'addr' of MMD 'mmd_type' (port 0):
 * issues an address transaction followed by a read transaction, then
 * extracts the 16-bit data field from bits 31:16 of mdio_control.
 */
2897 static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
2901 struct s2io_nic *sp = dev->priv;
2902 struct XENA_dev_config __iomem *bar0 = sp->bar0;
2904 /* address transaction */
2905 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2906 | MDIO_MMD_DEV_ADDR(mmd_type)
2907 | MDIO_MMS_PRT_ADDR(0x0);
2908 writeq(val64, &bar0->mdio_control);
2909 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2910 writeq(val64, &bar0->mdio_control);
2913 /* Data transaction */
2915 val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
2916 | MDIO_MMD_DEV_ADDR(mmd_type)
2917 | MDIO_MMS_PRT_ADDR(0x0)
2918 | MDIO_OP(MDIO_OP_READ_TRANS);
2919 writeq(val64, &bar0->mdio_control);
2920 val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
2921 writeq(val64, &bar0->mdio_control);
2924 /* Read the value from regs */
2925 rval64 = readq(&bar0->mdio_control);
/* Keep bits 31:16 (the MDIO data field) and shift them down. */
2926 rval64 = rval64 & 0xFFFF0000;
2927 rval64 = rval64 >> 16;
2931 * s2io_chk_xpak_counter - Function to check the status of the xpak counters
2932 * @counter : counter value to be updated
2933 * @flag : flag to indicate the status
2934 * @type : counter type
2936 * This function is to check the status of the xpak counters value
/*
 * s2io_chk_xpak_counter - bumps an XPAK alarm counter and maintains a
 * 2-bit-per-index state field in *regs_stat; when the counter crosses a
 * threshold it prints a per-type "take NIC out of service" warning.
 * NOTE(review): heavily truncated -- the mask construction, thresholds
 * and switch scaffolding are missing from this paste; code kept as-is.
 */
2940 static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
/* Presumably builds a 2-bit mask at position 'index' -- TODO confirm. */
2945 for(i = 0; i <index; i++)
2950 *counter = *counter + 1;
2951 val64 = *regs_stat & mask;
2952 val64 = val64 >> (index * 0x2);
/* Per-type operator warnings, selected by 'type'. */
2959 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2960 "service. Excessive temperatures may "
2961 "result in premature transceiver "
2965 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2966 "service Excessive bias currents may "
2967 "indicate imminent laser diode "
2971 DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
2972 "service Excessive laser output "
2973 "power may saturate far-end "
2977 DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
/* Write the updated 2-bit state back into *regs_stat. */
2982 val64 = val64 << (index * 0x2);
2983 *regs_stat = (*regs_stat & (~mask)) | (val64);
/* flag not set: clear this index's state bits. */
2986 *regs_stat = *regs_stat & (~mask);
2991 * s2io_updt_xpak_counter - Function to update the xpak counters
2992 * @dev : pointer to net_device struct
2994 * This function is to update the status of the xpak counters value
/*
 * s2io_updt_xpak_counter - polls the XPAK transceiver over MDIO, sanity
 * checks the link to the MDIO slave, then reads the alarm and warning
 * flag registers and updates the corresponding xpak_stat counters
 * (alarm counters go through s2io_chk_xpak_counter for thresholding).
 */
2997 static void s2io_updt_xpak_counter(struct net_device *dev)
3005 struct s2io_nic *sp = dev->priv;
3006 struct stat_block *stat_info = sp->mac_control.stats_info;
3008 /* Check the communication with the MDIO slave */
3011 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
/* All-ones or all-zeros means the slave did not respond. */
3012 if((val64 == 0xFFFF) || (val64 == 0x0000))
3014 DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
3015 "Returned %llx\n", (unsigned long long)val64);
3019 /* Check for the expected value of 2040 at PMA address 0x0000 */
3022 DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
3023 DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
3024 (unsigned long long)val64);
3028 /* Loading the DOM register to MDIO register */
3030 s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
3031 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
3033 /* Reading the Alarm flags */
3036 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
/* Bits 7/6: transceiver temperature high/low alarms. */
3038 flag = CHECKBIT(val64, 0x7);
3040 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
3041 &stat_info->xpak_stat.xpak_regs_stat,
3044 if(CHECKBIT(val64, 0x6))
3045 stat_info->xpak_stat.alarm_transceiver_temp_low++;
/* Bits 3/2: laser bias current high/low alarms. */
3047 flag = CHECKBIT(val64, 0x3);
3049 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
3050 &stat_info->xpak_stat.xpak_regs_stat,
3053 if(CHECKBIT(val64, 0x2))
3054 stat_info->xpak_stat.alarm_laser_bias_current_low++;
/* Bits 1/0: laser output power high/low alarms. */
3056 flag = CHECKBIT(val64, 0x1);
3058 s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
3059 &stat_info->xpak_stat.xpak_regs_stat,
3062 if(CHECKBIT(val64, 0x0))
3063 stat_info->xpak_stat.alarm_laser_output_power_low++;
3065 /* Reading the Warning flags */
3068 val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
/* Warning bits use the same layout as the alarm bits above. */
3070 if(CHECKBIT(val64, 0x7))
3071 stat_info->xpak_stat.warn_transceiver_temp_high++;
3073 if(CHECKBIT(val64, 0x6))
3074 stat_info->xpak_stat.warn_transceiver_temp_low++;
3076 if(CHECKBIT(val64, 0x3))
3077 stat_info->xpak_stat.warn_laser_bias_current_high++;
3079 if(CHECKBIT(val64, 0x2))
3080 stat_info->xpak_stat.warn_laser_bias_current_low++;
3082 if(CHECKBIT(val64, 0x1))
3083 stat_info->xpak_stat.warn_laser_output_power_high++;
3085 if(CHECKBIT(val64, 0x0))
3086 stat_info->xpak_stat.warn_laser_output_power_low++;
3090 * alarm_intr_handler - Alarm Interrupt handler
3091 * @nic: device private variable
3092 * Description: If the interrupt was neither because of Rx packet or Tx
3093 * complete, this function is called. If the interrupt was to indicate
3094 * a loss of link, the OSM link status handler is invoked for any other
3095 * alarm interrupt the block that raised the interrupt is displayed
3096 * and a H/W reset is issued.
/*
 * alarm_intr_handler - services non-traffic ("alarm") interrupts: XPAK
 * counter housekeeping, link-state change, ECC errors, serious errors
 * (device reset), PCC_FB_ECC recycling, data parity errors, and the
 * XframeII ring-bump counters.
 */
3101 static void alarm_intr_handler(struct s2io_nic *nic)
3103 struct net_device *dev = (struct net_device *) nic->dev;
3104 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3105 register u64 val64 = 0, err_reg = 0;
/* Nothing to do while the adapter is down for reset. */
3108 if (atomic_read(&nic->card_state) == CARD_DOWN)
3110 nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
3111 /* Handling the XPAK counters update */
3112 if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
3113 /* waiting for an hour */
3114 nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
3116 s2io_updt_xpak_counter(dev);
3117 /* reset the count to zero */
3118 nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
3121 /* Handling link status change error Intr */
3122 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
3123 err_reg = readq(&bar0->mac_rmac_err_reg);
/* Write-1-to-clear acknowledge of the RMAC error bits. */
3124 writeq(err_reg, &bar0->mac_rmac_err_reg);
3125 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
3126 schedule_work(&nic->set_link_task);
3130 /* Handling Ecc errors */
3131 val64 = readq(&bar0->mc_err_reg);
3132 writeq(val64, &bar0->mc_err_reg);
3133 if (val64 & (MC_ERR_REG_ECC_ALL_SNG | MC_ERR_REG_ECC_ALL_DBL)) {
3134 if (val64 & MC_ERR_REG_ECC_ALL_DBL) {
3135 nic->mac_control.stats_info->sw_stat.
3137 DBG_PRINT(INIT_DBG, "%s: Device indicates ",
3139 DBG_PRINT(INIT_DBG, "double ECC error!!\n");
3140 if (nic->device_type != XFRAME_II_DEVICE) {
3141 /* Reset XframeI only if critical error */
3142 if (val64 & (MC_ERR_REG_MIRI_ECC_DB_ERR_0 |
3143 MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
3144 netif_stop_queue(dev);
3145 schedule_work(&nic->rst_timer_task);
3146 nic->mac_control.stats_info->sw_stat.
3151 nic->mac_control.stats_info->sw_stat.
3156 /* In case of a serious error, the device will be Reset. */
3157 val64 = readq(&bar0->serr_source);
3158 if (val64 & SERR_SOURCE_ANY) {
3159 nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
3160 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
3161 DBG_PRINT(ERR_DBG, "serious error %llx!!\n",
3162 (unsigned long long)val64);
3163 netif_stop_queue(dev);
3164 schedule_work(&nic->rst_timer_task);
3165 nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3169 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
3170 * Error occurs, the adapter will be recycled by disabling the
3171 * adapter enable bit and enabling it again after the device
3172 * becomes Quiescent.
3174 val64 = readq(&bar0->pcc_err_reg);
3175 writeq(val64, &bar0->pcc_err_reg);
3176 if (val64 & PCC_FB_ECC_DB_ERR) {
3177 u64 ac = readq(&bar0->adapter_control);
3178 ac &= ~(ADAPTER_CNTL_EN);
3179 writeq(ac, &bar0->adapter_control);
/* Read-back flushes the disable before set_link_task re-enables. */
3180 ac = readq(&bar0->adapter_control);
3181 schedule_work(&nic->set_link_task);
3183 /* Check for data parity error */
3184 val64 = readq(&bar0->pic_int_status);
3185 if (val64 & PIC_INT_GPIO) {
3186 val64 = readq(&bar0->gpio_int_reg);
3187 if (val64 & GPIO_INT_REG_DP_ERR_INT) {
3188 nic->mac_control.stats_info->sw_stat.parity_err_cnt++;
3189 schedule_work(&nic->rst_timer_task);
3190 nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
3194 /* Check for ring full counter */
3195 if (nic->device_type & XFRAME_II_DEVICE) {
/* Each 64-bit bump counter packs four 16-bit per-ring counts. */
3196 val64 = readq(&bar0->ring_bump_counter1);
3197 for (i=0; i<4; i++) {
3198 cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3199 cnt >>= 64 - ((i+1)*16);
3200 nic->mac_control.stats_info->sw_stat.ring_full_cnt
3204 val64 = readq(&bar0->ring_bump_counter2);
3205 for (i=0; i<4; i++) {
3206 cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
3207 cnt >>= 64 - ((i+1)*16);
3208 nic->mac_control.stats_info->sw_stat.ring_full_cnt
3213 /* Other type of interrupts are not being handled now, TODO */
3217 * wait_for_cmd_complete - waits for a command to complete.
3218 * @sp : private member of the device structure, which is a pointer to the
3219 * s2io_nic structure.
3220 * Description: Function that waits for a command to Write into RMAC
3221 * ADDR DATA registers to be completed and returns either success or
3222 * error depending on whether the command was complete or not.
3224 * SUCCESS on success and FAILURE on failure.
/*
 * wait_for_cmd_complete - polls *addr until 'busy_bit' reaches the
 * requested bit_state (S2IO_BIT_SET / S2IO_BIT_RESET), returning SUCCESS
 * or FAILURE. NOTE(review): the polling loop, delay handling and return
 * are missing from this truncated paste; code kept byte-identical.
 */
3227 static int wait_for_cmd_complete(void __iomem *addr, u64 busy_bit,
3230 int ret = FAILURE, cnt = 0, delay = 1;
/* Reject any bit_state other than the two defined values. */
3233 if ((bit_state != S2IO_BIT_RESET) && (bit_state != S2IO_BIT_SET))
3237 val64 = readq(addr);
3238 if (bit_state == S2IO_BIT_RESET) {
3239 if (!(val64 & busy_bit)) {
3244 if (!(val64 & busy_bit)) {
3261 * check_pci_device_id - Checks if the device id is supported
3263 * Description: Function to check if the pci device id is supported by driver.
3264 * Return value: Actual device id if supported else PCI_ANY_ID
/*
 * check_pci_device_id - maps a PCI device id to the driver's device
 * class: Herc ids -> XFRAME_II_DEVICE, S2IO ids -> XFRAME_I_DEVICE.
 * Per the kernel-doc above, unsupported ids return PCI_ANY_ID; the
 * switch scaffolding and default case are missing from this paste.
 */
3266 static u16 check_pci_device_id(u16 id)
3269 case PCI_DEVICE_ID_HERC_WIN:
3270 case PCI_DEVICE_ID_HERC_UNI:
3271 return XFRAME_II_DEVICE;
3272 case PCI_DEVICE_ID_S2IO_UNI:
3273 case PCI_DEVICE_ID_S2IO_WIN:
3274 return XFRAME_I_DEVICE;
3281 * s2io_reset - Resets the card.
3282 * @sp : private member of the device structure.
3283 * Description: Function to Reset the card. This function then also
3284 * restores the previously saved PCI configuration space registers as
3285 * the card reset also resets the configuration space.
/*
 * s2io_reset - resets the adapter (PME-based D3->D0 transition for
 * XframeII, SW_RESET_ALL write otherwise), restores PCI config space,
 * re-programs the swapper and MSI-X tables, clears post-reset error
 * state, and zeroes the OS-side statistics.
 */
3290 static void s2io_reset(struct s2io_nic * sp)
3292 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3297 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n",
3298 __FUNCTION__, sp->dev->name);
3300 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */
3301 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd));
/* XframeII: reset via power-state cycle (3 = D3hot, 0 = D0). */
3303 if (sp->device_type == XFRAME_II_DEVICE) {
3305 ret = pci_set_power_state(sp->pdev, 3);
3307 ret = pci_set_power_state(sp->pdev, 0);
3309 DBG_PRINT(ERR_DBG,"%s PME based SW_Reset failed!\n",
/* Non-XframeII: software reset through the sw_reset register. */
3317 val64 = SW_RESET_ALL;
3318 writeq(val64, &bar0->sw_reset);
3320 if (strstr(sp->product_name, "CX4")) {
/* Retry restoring config space until the device id reads back sane. */
3324 for (i = 0; i < S2IO_MAX_PCI_CONFIG_SPACE_REINIT; i++) {
3326 /* Restore the PCI state saved during initialization. */
3327 pci_restore_state(sp->pdev);
3328 pci_read_config_word(sp->pdev, 0x2, &val16);
3329 if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
3334 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) {
3335 DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__);
3338 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd);
3342 /* Set swapper to enable I/O register access */
3343 s2io_set_swapper(sp);
3345 /* Restore the MSIX table entries from local variables */
3346 restore_xmsi_data(sp);
3348 /* Clear certain PCI/PCI-X fields after reset */
3349 if (sp->device_type == XFRAME_II_DEVICE) {
3350 /* Clear "detected parity error" bit */
3351 pci_write_config_word(sp->pdev, PCI_STATUS, 0x8000);
3353 /* Clearing PCIX Ecc status register */
3354 pci_write_config_dword(sp->pdev, 0x68, 0x7C);
3356 /* Clearing PCI_STATUS error reflected here */
3357 writeq(BIT(62), &bar0->txpic_int_reg);
3360 /* Reset device statistics maintained by OS */
3361 memset(&sp->stats, 0, sizeof (struct net_device_stats));
3363 /* SXE-002: Configure link and activity LED to turn it off */
3364 subid = sp->pdev->subsystem_device;
3365 if (((subid & 0xFF) >= 0x07) &&
3366 (sp->device_type == XFRAME_I_DEVICE)) {
3367 val64 = readq(&bar0->gpio_control);
3368 val64 |= 0x0000800000000000ULL;
3369 writeq(val64, &bar0->gpio_control);
3370 val64 = 0x0411040400000000ULL;
3371 writeq(val64, (void __iomem *)bar0 + 0x2700);
3375 * Clear spurious ECC interrupts that would have occured on
3376 * XFRAME II cards after reset.
3378 if (sp->device_type == XFRAME_II_DEVICE) {
3379 val64 = readq(&bar0->pcc_err_reg);
3380 writeq(val64, &bar0->pcc_err_reg);
3383 sp->device_enabled_once = FALSE;
3387 * s2io_set_swapper - to set the swapper control on the card
3388 * @sp : private member of the device structure,
3389 * pointer to the s2io_nic structure.
3390 * Description: Function to set the swapper control on the card
3391 * correctly depending on the 'endianness' of the system.
3393 * SUCCESS on success and FAILURE on failure.
/*
 * s2io_set_swapper - programs the swapper control register for the host
 * endianness: finds a swapper_ctrl value that makes the PIF feedback
 * register read the expected 0x0123456789ABCDEF pattern, verifies via a
 * write/read of xmsi_address, then enables the per-path FE/SE swap bits
 * and re-checks the feedback register.
 */
3396 static int s2io_set_swapper(struct s2io_nic * sp)
3398 struct net_device *dev = sp->dev;
3399 struct XENA_dev_config __iomem *bar0 = sp->bar0;
3400 u64 val64, valt, valr;
3403 * Set proper endian settings and verify the same by reading
3404 * the PIF Feed-back register.
3407 val64 = readq(&bar0->pif_rd_swapper_fb);
3408 if (val64 != 0x0123456789ABCDEFULL) {
/* Try each candidate swapper setting until feedback matches. */
3410 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3411 0x8100008181000081ULL, /* FE=1, SE=0 */
3412 0x4200004242000042ULL, /* FE=0, SE=1 */
3413 0}; /* FE=0, SE=0 */
3416 writeq(value[i], &bar0->swapper_ctrl);
3417 val64 = readq(&bar0->pif_rd_swapper_fb);
3418 if (val64 == 0x0123456789ABCDEFULL)
3423 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3425 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3426 (unsigned long long) val64);
3431 valr = readq(&bar0->swapper_ctrl);
/* Verify writes survive a round-trip through xmsi_address. */
3434 valt = 0x0123456789ABCDEFULL;
3435 writeq(valt, &bar0->xmsi_address);
3436 val64 = readq(&bar0->xmsi_address);
3440 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3441 0x0081810000818100ULL, /* FE=1, SE=0 */
3442 0x0042420000424200ULL, /* FE=0, SE=1 */
3443 0}; /* FE=0, SE=0 */
3446 writeq((value[i] | valr), &bar0->swapper_ctrl);
3447 writeq(valt, &bar0->xmsi_address);
3448 val64 = readq(&bar0->xmsi_address);
3454 unsigned long long x = val64;
3455 DBG_PRINT(ERR_DBG, "Write failed, Xmsi_addr ");
3456 DBG_PRINT(ERR_DBG, "reads:0x%llx\n", x);
/* Keep only the already-validated upper control bits. */
3460 val64 = readq(&bar0->swapper_ctrl);
3461 val64 &= 0xFFFF000000000000ULL;
3465 * The device by default set to a big endian format, so a
3466 * big endian driver need not set anything.
3468 val64 |= (SWAPPER_CTRL_TXP_FE |
3469 SWAPPER_CTRL_TXP_SE |
3470 SWAPPER_CTRL_TXD_R_FE |
3471 SWAPPER_CTRL_TXD_W_FE |
3472 SWAPPER_CTRL_TXF_R_FE |
3473 SWAPPER_CTRL_RXD_R_FE |
3474 SWAPPER_CTRL_RXD_W_FE |
3475 SWAPPER_CTRL_RXF_W_FE |
3476 SWAPPER_CTRL_XMSI_FE |
3477 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3478 if (sp->intr_type == INTA)
3479 val64 |= SWAPPER_CTRL_XMSI_SE;
3480 writeq(val64, &bar0->swapper_ctrl);
3483 * Initially we enable all bits to make it accessible by the
3484 * driver, then we selectively enable only those bits that
3487 val64 |= (SWAPPER_CTRL_TXP_FE |
3488 SWAPPER_CTRL_TXP_SE |
3489 SWAPPER_CTRL_TXD_R_FE |
3490 SWAPPER_CTRL_TXD_R_SE |
3491 SWAPPER_CTRL_TXD_W_FE |
3492 SWAPPER_CTRL_TXD_W_SE |
3493 SWAPPER_CTRL_TXF_R_FE |
3494 SWAPPER_CTRL_RXD_R_FE |
3495 SWAPPER_CTRL_RXD_R_SE |
3496 SWAPPER_CTRL_RXD_W_FE |
3497 SWAPPER_CTRL_RXD_W_SE |
3498 SWAPPER_CTRL_RXF_W_FE |
3499 SWAPPER_CTRL_XMSI_FE |
3500 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
3501 if (sp->intr_type == INTA)
3502 val64 |= SWAPPER_CTRL_XMSI_SE;
3503 writeq(val64, &bar0->swapper_ctrl);
3505 val64 = readq(&bar0->swapper_ctrl);
3508 * Verifying if endian settings are accurate by reading a
3509 * feedback register.
3511 val64 = readq(&bar0->pif_rd_swapper_fb);
3512 if (val64 != 0x0123456789ABCDEFULL) {
3513 /* Endian settings are incorrect, calls for another dekko. */
3514 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
3516 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
3517 (unsigned long long) val64);
/*
 * wait_for_msix_trans - polls xmsi_access until the busy bit (BIT 15)
 * clears, i.e. the XMSI table transaction for entry 'i' has completed.
 * Returns 0 on completion; the failure return path is truncated here.
 */
3524 static int wait_for_msix_trans(struct s2io_nic *nic, int i)
3526 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3528 int ret = 0, cnt = 0;
3531 val64 = readq(&bar0->xmsi_access);
3532 if (!(val64 & BIT(15)))
3538 DBG_PRINT(ERR_DBG, "XMSI # %d Access failed\n", i);
/*
 * restore_xmsi_data - writes the saved MSI-X address/data pairs back
 * into the device's XMSI table (entry index encoded in bits 26..31,
 * BIT(7)=write, BIT(15)=start), waiting for each transaction.
 */
3545 static void restore_xmsi_data(struct s2io_nic *nic)
3547 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3551 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3552 writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
3553 writeq(nic->msix_info[i].data, &bar0->xmsi_data);
3554 val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
3555 writeq(val64, &bar0->xmsi_access);
3556 if (wait_for_msix_trans(nic, i)) {
3557 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
/*
 * store_xmsi_data - reads every XMSI table entry from the device
 * (BIT(15)=start a read transaction for entry i) and caches the
 * address/data pairs in nic->msix_info for later restore after reset.
 */
3563 static void store_xmsi_data(struct s2io_nic *nic)
3565 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3566 u64 val64, addr, data;
3569 /* Store and display */
3570 for (i=0; i < MAX_REQUESTED_MSI_X; i++) {
3571 val64 = (BIT(15) | vBIT(i, 26, 6));
3572 writeq(val64, &bar0->xmsi_access);
3573 if (wait_for_msix_trans(nic, i)) {
3574 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__);
3577 addr = readq(&bar0->xmsi_address);
3578 data = readq(&bar0->xmsi_data);
3580 nic->msix_info[i].addr = addr;
3581 nic->msix_info[i].data = data;
/*
 * s2io_enable_msi - enables single-vector MSI on the adapter: calls
 * pci_enable_msi(), tweaks the MSI message/control config words, and
 * steers all Tx FIFOs and Rx rings to MSI vector 1 via tx_mat/rx_mat.
 */
3586 int s2io_enable_msi(struct s2io_nic *nic)
3588 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3589 u16 msi_ctrl, msg_val;
3590 struct config_param *config = &nic->config;
3591 struct net_device *dev = nic->dev;
3592 u64 val64, tx_mat, rx_mat;
3595 val64 = readq(&bar0->pic_control);
3597 writeq(val64, &bar0->pic_control);
3599 err = pci_enable_msi(nic->pdev);
3601 DBG_PRINT(ERR_DBG, "%s: enabling MSI failed\n",
3607 * Enable MSI and use MSI-1 instead of the standard MSI-0
3608 * for interrupt handling.
/* Config-space offsets 0x4c / 0x42 hold the MSI message value and
 * MSI control word respectively -- TODO confirm against the
 * device's capability layout. */
3610 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3612 pci_write_config_word(nic->pdev, 0x4c, msg_val);
3613 pci_read_config_word(nic->pdev, 0x4c, &msg_val);
3615 pci_read_config_word(nic->pdev, 0x42, &msi_ctrl);
3617 pci_write_config_word(nic->pdev, 0x42, msi_ctrl);
3619 /* program MSI-1 into all usable Tx_Mat and Rx_Mat fields */
3620 tx_mat = readq(&bar0->tx_mat0_n[0]);
3621 for (i=0; i<config->tx_fifo_num; i++) {
3622 tx_mat |= TX_MAT_SET(i, 1);
3624 writeq(tx_mat, &bar0->tx_mat0_n[0]);
3626 rx_mat = readq(&bar0->rx_mat);
3627 for (i=0; i<config->rx_ring_num; i++) {
3628 rx_mat |= RX_MAT_SET(i, 1);
3630 writeq(rx_mat, &bar0->rx_mat);
3632 dev->irq = nic->pdev->irq;
/*
 * s2io_enable_msi_x - allocate and program MSI-X vectors.
 *
 * Builds the msix_entry / s2io_msix_entry tables (vector 0 is left for
 * alarms; data vectors start at msix_indx = 1), steers each Tx FIFO and
 * Rx ring to its own vector through Tx_Mat/Rx_Mat, then calls
 * pci_enable_msix(). On partial grants it retries with the granted count;
 * on failure it frees both tables and reports the error.
 */
3636 static int s2io_enable_msi_x(struct s2io_nic *nic)
3638 struct XENA_dev_config __iomem *bar0 = nic->bar0;
3640 u16 msi_control; /* Temp variable */
3641 int ret, i, j, msix_indx = 1;
3643 nic->entries = kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct msix_entry),
3645 if (nic->entries == NULL) {
3646 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
3649 memset(nic->entries, 0, MAX_REQUESTED_MSI_X * sizeof(struct msix_entry));
3652 kmalloc(MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry),
3654 if (nic->s2io_entries == NULL) {
3655 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", __FUNCTION__);
/* Second allocation failed: release the first table before bailing out. */
3656 kfree(nic->entries);
3659 memset(nic->s2io_entries, 0,
3660 MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry));
3662 for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
3663 nic->entries[i].entry = i;
3664 nic->s2io_entries[i].entry = i;
3665 nic->s2io_entries[i].arg = NULL;
3666 nic->s2io_entries[i].in_use = 0;
/* One vector per Tx FIFO, starting at vector 1. */
3669 tx_mat = readq(&bar0->tx_mat0_n[0]);
3670 for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) {
3671 tx_mat |= TX_MAT_SET(i, msix_indx);
3672 nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i];
3673 nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE;
3674 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3676 writeq(tx_mat, &bar0->tx_mat0_n[0]);
/* Non-bimodal: steer each Rx ring through Rx_Mat. */
3678 if (!nic->config.bimodal) {
3679 rx_mat = readq(&bar0->rx_mat);
3680 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
3681 rx_mat |= RX_MAT_SET(j, msix_indx);
3682 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3683 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3684 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3686 writeq(rx_mat, &bar0->rx_mat);
/* Bimodal: ring interrupts are routed through tx_mat0_n[7] instead. */
3688 tx_mat = readq(&bar0->tx_mat0_n[7]);
3689 for (j=0; j<nic->config.rx_ring_num; j++, msix_indx++) {
/*
 * NOTE(review): TX_MAT_SET(i, msix_indx) uses 'i', which is stale from
 * the Tx FIFO loop above, while this loop iterates with 'j' — looks like
 * a copy/paste bug (compare RX_MAT_SET(j, ...) in the non-bimodal
 * branch). Confirm the intended field index before changing.
 */
3690 tx_mat |= TX_MAT_SET(i, msix_indx);
3691 nic->s2io_entries[msix_indx].arg = &nic->mac_control.rings[j];
3692 nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE;
3693 nic->s2io_entries[msix_indx].in_use = MSIX_FLG;
3695 writeq(tx_mat, &bar0->tx_mat0_n[7]);
3698 nic->avail_msix_vectors = 0;
3699 ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
3700 /* We fail init if error or we get less vectors than min required */
3701 if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
/* Partial grant that still covers our minimum: retry with that count. */
3702 nic->avail_msix_vectors = ret;
3703 ret = pci_enable_msix(nic->pdev, nic->entries, ret);
3706 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
3707 kfree(nic->entries);
3708 kfree(nic->s2io_entries);
3709 nic->entries = NULL;
3710 nic->s2io_entries = NULL;
3711 nic->avail_msix_vectors = 0;
3714 if (!nic->avail_msix_vectors)
3715 nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
3718 * To enable MSI-X, MSI also needs to be enabled, due to a bug
3719 * in the herc NIC. (Temp change, needs to be removed later)
3721 pci_read_config_word(nic->pdev, 0x42, &msi_control);
3722 msi_control |= 0x1; /* Enable MSI */
3723 pci_write_config_word(nic->pdev, 0x42, msi_control);
3728 /* ********************************************************* *
3729 * Functions defined below concern the OS part of the driver *
3730 * ********************************************************* */
3733 * s2io_open - open entry point of the driver
3734 * @dev : pointer to the device structure.
3736 * This function is the open entry point of the driver. It mainly calls a
3737 * function to allocate Rx buffers and inserts them into the buffer
3738 * descriptors and then enables the Rx part of the NIC.
3740 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/*
 * s2io_open - net_device open entry point.
 *
 * Forces carrier off, brings the hardware up via s2io_card_up(), programs
 * the station MAC address, and starts the Tx queue. On failure it jumps
 * to hw_init_failed, where MSI-X bookkeeping is released.
 */
3744 static int s2io_open(struct net_device *dev)
3746 struct s2io_nic *sp = dev->priv;
3750 * Make sure you have link off by default every time
3751 * Nic is initialized
3753 netif_carrier_off(dev);
3754 sp->last_link_state = 0;
3756 /* Initialize H/W and enable interrupts */
3757 err = s2io_card_up(sp);
3759 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
3761 goto hw_init_failed;
3764 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
3765 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
3768 goto hw_init_failed;
3771 netif_start_queue(dev);
/* Error path: for MSI-X, release the per-vector bookkeeping tables. */
3775 if (sp->intr_type == MSI_X) {
/* NOTE(review): kfree(NULL) is a no-op, so this guard is redundant. */
3778 if (sp->s2io_entries)
3779 kfree(sp->s2io_entries);
3785 * s2io_close -close entry point of the driver
3786 * @dev : device pointer.
3788 * This is the stop entry point of the driver. It needs to undo exactly
3789 * whatever was done by the open entry point,thus it's usually referred to
3790 * as the close function. Among other things this function mainly stops the
3791 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
3793 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/*
 * s2io_close - net_device stop entry point.
 *
 * Stops the Tx queue, tears the card down (reset, tasklet kill, buffer
 * release) and marks the device as closed.
 */
3797 static int s2io_close(struct net_device *dev)
3799 struct s2io_nic *sp = dev->priv;
3801 netif_stop_queue(dev);
3802 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3805 sp->device_close_flag = TRUE; /* Device is shut down. */
3810 * s2io_xmit - Tx entry point of the driver
3811 * @skb : the socket buffer containing the Tx data.
3812 * @dev : device pointer.
3814 * This function is the Tx entry point of the driver. S2IO NIC supports
3815 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
3816 * NOTE: when the device can't queue the pkt, just the trans_start variable will
3819 * 0 on success & 1 on failure.
/*
 * s2io_xmit - hard_start_xmit entry point.
 *
 * Selects a Tx FIFO (by VLAN priority when tagging is active), fills a
 * TxD chain for the linear part and all fragments, handles LSO/UFO and
 * checksum-offload flags, DMA-maps the buffers, and kicks the FIFO by
 * writing the TxDL pointer and list control. The whole path runs under
 * sp->tx_lock. Stops the queue when no free TxDs remain.
 */
3822 static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
3824 struct s2io_nic *sp = dev->priv;
3825 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
3828 struct TxFIFO_element __iomem *tx_fifo;
3829 unsigned long flags;
3831 int vlan_priority = 0;
3832 struct mac_info *mac_control;
3833 struct config_param *config;
3836 mac_control = &sp->mac_control;
3837 config = &sp->config;
3839 DBG_PRINT(TX_DBG, "%s: In Neterion Tx routine\n", dev->name);
3840 spin_lock_irqsave(&sp->tx_lock, flags);
/* Card is resetting: drop the attempt rather than touching hardware. */
3841 if (atomic_read(&sp->card_state) == CARD_DOWN) {
3842 DBG_PRINT(TX_DBG, "%s: Card going down for reset\n",
3844 spin_unlock_irqrestore(&sp->tx_lock, flags);
3851 /* Get Fifo number to Transmit based on vlan priority */
3852 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3853 vlan_tag = vlan_tx_tag_get(skb);
/* 802.1p priority lives in the top 3 bits of the VLAN tag. */
3854 vlan_priority = vlan_tag >> 13;
3855 queue = config->fifo_mapping[vlan_priority];
3858 put_off = (u16) mac_control->fifos[queue].tx_curr_put_info.offset;
3859 get_off = (u16) mac_control->fifos[queue].tx_curr_get_info.offset;
3860 txdp = (struct TxD *) mac_control->fifos[queue].list_info[put_off].
3863 queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
3864 /* Avoid "put" pointer going beyond "get" pointer */
3865 if (txdp->Host_Control ||
3866 ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
3867 DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
3868 netif_stop_queue(dev);
3870 spin_unlock_irqrestore(&sp->tx_lock, flags);
3874 /* A buffer with no data will be dropped */
3876 DBG_PRINT(TX_DBG, "%s:Buffer has no data..\n", dev->name);
3878 spin_unlock_irqrestore(&sp->tx_lock, flags);
/* Offload setup: LSO for TCP segmentation, CKO for partial checksums. */
3882 offload_type = s2io_offload_type(skb);
3883 if (offload_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
3884 txdp->Control_1 |= TXD_TCP_LSO_EN;
3885 txdp->Control_1 |= TXD_TCP_LSO_MSS(s2io_tcp_mss(skb));
3887 if (skb->ip_summed == CHECKSUM_PARTIAL) {
3889 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
3892 txdp->Control_1 |= TXD_GATHER_CODE_FIRST;
3893 txdp->Control_1 |= TXD_LIST_OWN_XENA;
3894 txdp->Control_2 |= config->tx_intr_type;
3896 if (sp->vlgrp && vlan_tx_tag_present(skb)) {
3897 txdp->Control_2 |= TXD_VLAN_ENABLE;
3898 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
/* Linear (non-paged) length of the skb. */
3901 frg_len = skb->len - skb->data_len;
/*
 * UFO path: TxD0 carries an 8-byte in-band header holding the IPv6
 * fragment id; byte order of the id differs by endianness.
 */
3902 if (offload_type == SKB_GSO_UDP) {
3905 ufo_size = s2io_udp_mss(skb);
3907 txdp->Control_1 |= TXD_UFO_EN;
3908 txdp->Control_1 |= TXD_UFO_MSS(ufo_size);
3909 txdp->Control_1 |= TXD_BUFFER0_SIZE(8);
3911 sp->ufo_in_band_v[put_off] =
3912 (u64)skb_shinfo(skb)->ip6_frag_id;
3914 sp->ufo_in_band_v[put_off] =
3915 (u64)skb_shinfo(skb)->ip6_frag_id << 32;
3917 txdp->Host_Control = (unsigned long)sp->ufo_in_band_v;
3918 txdp->Buffer_Pointer = pci_map_single(sp->pdev,
3920 sizeof(u64), PCI_DMA_TODEVICE);
/* First data TxD: map the linear part and stash the skb for completion. */
3924 txdp->Buffer_Pointer = pci_map_single
3925 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
3926 txdp->Host_Control = (unsigned long) skb;
3927 txdp->Control_1 |= TXD_BUFFER0_SIZE(frg_len);
3928 if (offload_type == SKB_GSO_UDP)
3929 txdp->Control_1 |= TXD_UFO_EN;
3931 frg_cnt = skb_shinfo(skb)->nr_frags;
3932 /* For fragmented SKB. */
3933 for (i = 0; i < frg_cnt; i++) {
3934 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3935 /* A '0' length fragment will be ignored */
3939 txdp->Buffer_Pointer = (u64) pci_map_page
3940 (sp->pdev, frag->page, frag->page_offset,
3941 frag->size, PCI_DMA_TODEVICE);
3942 txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size);
3943 if (offload_type == SKB_GSO_UDP)
3944 txdp->Control_1 |= TXD_UFO_EN;
3946 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
3948 if (offload_type == SKB_GSO_UDP)
3949 frg_cnt++; /* as Txd0 was used for inband header */
/* Kick the hardware: post the TxDL physical address, then list control. */
3951 tx_fifo = mac_control->tx_FIFO_start[queue];
3952 val64 = mac_control->fifos[queue].list_info[put_off].list_phy_addr;
3953 writeq(val64, &tx_fifo->TxDL_Pointer);
3955 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
3958 val64 |= TX_FIFO_SPECIAL_FUNC;
3960 writeq(val64, &tx_fifo->List_Control);
/* Advance and wrap the put pointer. */
3965 if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
3967 mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
3969 /* Avoid "put" pointer going beyond "get" pointer */
3970 if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
3971 sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
3973 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
3975 netif_stop_queue(dev);
3978 dev->trans_start = jiffies;
3979 spin_unlock_irqrestore(&sp->tx_lock, flags);
/*
 * s2io_alarm_handle - alarm timer callback; runs the alarm interrupt
 * handler and re-arms itself every half second.
 * @data: the struct s2io_nic pointer, smuggled through the timer's
 *        unsigned long payload.
 */
3985 s2io_alarm_handle(unsigned long data)
3987 struct s2io_nic *sp = (struct s2io_nic *)data;
3989 alarm_intr_handler(sp);
3990 mod_timer(&sp->alarm_timer, jiffies + HZ / 2);
/*
 * s2io_chk_rx_buffers - replenish Rx buffers for one ring.
 *
 * At PANIC level (and when the refill tasklet is idle) buffers are
 * refilled inline; at LOW level the tasklet is scheduled instead;
 * otherwise a plain refill is attempted. -ENOMEM from fill_rx_buffers()
 * is logged but not propagated as a crash.
 */
3993 static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n)
3995 int rxb_size, level;
3998 rxb_size = atomic_read(&sp->rx_bufs_left[rng_n]);
3999 level = rx_buffer_level(sp, rxb_size, rng_n);
4001 if ((level == PANIC) && (!TASKLET_IN_USE)) {
4003 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", __FUNCTION__);
4004 DBG_PRINT(INTR_DBG, "PANIC levels\n");
4005 if ((ret = fill_rx_buffers(sp, rng_n)) == -ENOMEM) {
4006 DBG_PRINT(ERR_DBG, "Out of memory in %s",
/* Release the tasklet-in-use bit claimed by TASKLET_IN_USE above. */
4008 clear_bit(0, (&sp->tasklet_status));
4011 clear_bit(0, (&sp->tasklet_status));
4012 } else if (level == LOW)
4013 tasklet_schedule(&sp->task);
4015 } else if (fill_rx_buffers(sp, rng_n) == -ENOMEM) {
4016 DBG_PRINT(ERR_DBG, "%s:Out of memory", sp->dev->name);
4017 DBG_PRINT(ERR_DBG, " in Rx Intr!!\n");
/*
 * s2io_msi_handle - interrupt handler used in plain-MSI mode.
 *
 * A single vector covers everything, so every Rx ring and Tx FIFO handler
 * is invoked unconditionally, followed by an Rx-buffer replenish check
 * per ring. isr_cnt brackets the handler for teardown synchronization.
 */
4022 static irqreturn_t s2io_msi_handle(int irq, void *dev_id)
4024 struct net_device *dev = (struct net_device *) dev_id;
4025 struct s2io_nic *sp = dev->priv;
4027 struct mac_info *mac_control;
4028 struct config_param *config;
4030 atomic_inc(&sp->isr_cnt);
4031 mac_control = &sp->mac_control;
4032 config = &sp->config;
4033 DBG_PRINT(INTR_DBG, "%s: MSI handler\n", __FUNCTION__);
4035 /* If Intr is because of Rx Traffic */
4036 for (i = 0; i < config->rx_ring_num; i++)
4037 rx_intr_handler(&mac_control->rings[i]);
4039 /* If Intr is because of Tx Traffic */
4040 for (i = 0; i < config->tx_fifo_num; i++)
4041 tx_intr_handler(&mac_control->fifos[i]);
4044 * If the Rx buffer count is below the panic threshold then
4045 * reallocate the buffers from the interrupt handler itself,
4046 * else schedule a tasklet to reallocate the buffers.
4048 for (i = 0; i < config->rx_ring_num; i++)
4049 s2io_chk_rx_buffers(sp, i);
4051 atomic_dec(&sp->isr_cnt);
/*
 * s2io_msix_ring_handle - per-Rx-ring MSI-X vector handler; services one
 * ring and tops up its buffers. dev_id is the ring_info registered for
 * this vector in s2io_enable_msi_x().
 */
4055 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
4057 struct ring_info *ring = (struct ring_info *)dev_id;
4058 struct s2io_nic *sp = ring->nic;
4060 atomic_inc(&sp->isr_cnt);
4062 rx_intr_handler(ring);
4063 s2io_chk_rx_buffers(sp, ring->ring_no);
4065 atomic_dec(&sp->isr_cnt);
/*
 * s2io_msix_fifo_handle - per-Tx-FIFO MSI-X vector handler; completes
 * transmits for the one FIFO registered as dev_id.
 */
4069 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id)
4071 struct fifo_info *fifo = (struct fifo_info *)dev_id;
4072 struct s2io_nic *sp = fifo->nic;
4074 atomic_inc(&sp->isr_cnt);
4075 tx_intr_handler(fifo);
4076 atomic_dec(&sp->isr_cnt);
/*
 * s2io_txpic_intr_handle - service TxPIC/GPIO interrupts, chiefly link
 * state changes.
 *
 * On link-up it enables the adapter and LED and masks further link-up
 * interrupts (leaving link-down unmasked); on link-down it does the
 * reverse. Simultaneous up+down indications are treated as unstable and
 * both are cleared so the state can be re-evaluated.
 */
4079 static void s2io_txpic_intr_handle(struct s2io_nic *sp)
4081 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4084 val64 = readq(&bar0->pic_int_status);
4085 if (val64 & PIC_INT_GPIO) {
4086 val64 = readq(&bar0->gpio_int_reg);
4087 if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
4088 (val64 & GPIO_INT_REG_LINK_UP)) {
4090 * This is unstable state so clear both up/down
4091 * interrupt and adapter to re-evaluate the link state.
4093 val64 |= GPIO_INT_REG_LINK_DOWN;
4094 val64 |= GPIO_INT_REG_LINK_UP;
4095 writeq(val64, &bar0->gpio_int_reg);
4096 val64 = readq(&bar0->gpio_int_mask);
4097 val64 &= ~(GPIO_INT_MASK_LINK_UP |
4098 GPIO_INT_MASK_LINK_DOWN);
4099 writeq(val64, &bar0->gpio_int_mask);
4101 else if (val64 & GPIO_INT_REG_LINK_UP) {
4102 val64 = readq(&bar0->adapter_status);
4103 /* Enable Adapter */
4104 val64 = readq(&bar0->adapter_control);
4105 val64 |= ADAPTER_CNTL_EN;
4106 writeq(val64, &bar0->adapter_control);
4107 val64 |= ADAPTER_LED_ON;
4108 writeq(val64, &bar0->adapter_control);
4109 if (!sp->device_enabled_once)
4110 sp->device_enabled_once = 1;
4112 s2io_link(sp, LINK_UP);
4114 * unmask link down interrupt and mask link-up
4117 val64 = readq(&bar0->gpio_int_mask);
4118 val64 &= ~GPIO_INT_MASK_LINK_DOWN;
4119 val64 |= GPIO_INT_MASK_LINK_UP;
4120 writeq(val64, &bar0->gpio_int_mask);
4122 }else if (val64 & GPIO_INT_REG_LINK_DOWN) {
4123 val64 = readq(&bar0->adapter_status);
4124 s2io_link(sp, LINK_DOWN);
4125 /* Link is down so unmask link up interrupt */
4126 val64 = readq(&bar0->gpio_int_mask);
4127 val64 &= ~GPIO_INT_MASK_LINK_UP;
4128 val64 |= GPIO_INT_MASK_LINK_DOWN;
4129 writeq(val64, &bar0->gpio_int_mask);
/* Link down: also turn the adapter LED off. */
4132 val64 = readq(&bar0->adapter_control);
4133 val64 = val64 &(~ADAPTER_LED_ON);
4134 writeq(val64, &bar0->adapter_control);
4137 val64 = readq(&bar0->gpio_int_mask);
4141 * s2io_isr - ISR handler of the device .
4142 * @irq: the irq of the device.
4143 * @dev_id: a void pointer to the dev structure of the NIC.
4144 * Description: This function is the ISR handler of the device. It
4145 * identifies the reason for the interrupt and calls the relevant
4146 * service routines. As a contingency measure, this ISR allocates the
4147 * recv buffers, if their numbers are below the panic value which is
4148 * presently set to 25% of the original number of rcv buffers allocated.
4150 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
4151 * IRQ_NONE: will be returned if interrupt is not from our device
/*
 * s2io_isr - legacy (INTA) interrupt handler.
 *
 * Reads general_int_status to classify the interrupt: 0 means not ours
 * (IRQ_NONE path); all-ones (S2IO_MINUS_ONE) indicates a dead/absent
 * device. Otherwise it dispatches Rx (NAPI schedule or direct ring
 * handlers), Tx completion, and TxPIC handlers, replenishes Rx buffers,
 * and finally unmasks interrupts. isr_cnt brackets the handler for
 * teardown synchronization.
 */
4153 static irqreturn_t s2io_isr(int irq, void *dev_id)
4155 struct net_device *dev = (struct net_device *) dev_id;
4156 struct s2io_nic *sp = dev->priv;
4157 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4160 struct mac_info *mac_control;
4161 struct config_param *config;
4163 atomic_inc(&sp->isr_cnt);
4164 mac_control = &sp->mac_control;
4165 config = &sp->config;
4168 * Identify the cause for interrupt and call the appropriate
4169 * interrupt handler. Causes for the interrupt could be;
4173 * 4. Error in any functional blocks of the NIC.
4175 reason = readq(&bar0->general_int_status);
4178 /* The interrupt was not raised by us. */
4179 atomic_dec(&sp->isr_cnt);
4182 else if (unlikely(reason == S2IO_MINUS_ONE) ) {
4183 /* Disable device and get out */
4184 atomic_dec(&sp->isr_cnt);
/* NAPI path: mask Rx interrupts and hand the ring work to poll(). */
4189 if (reason & GEN_INTR_RXTRAFFIC) {
4190 if ( likely ( netif_rx_schedule_prep(dev)) ) {
4191 __netif_rx_schedule(dev);
4192 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask);
4195 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4199 * Rx handler is called by default, without checking for the
4200 * cause of interrupt.
4201 * rx_traffic_int reg is an R1 register, writing all 1's
4202 * will ensure that the actual interrupt causing bit get's
4203 * cleared and hence a read can be avoided.
4205 if (reason & GEN_INTR_RXTRAFFIC)
4206 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int);
4208 for (i = 0; i < config->rx_ring_num; i++) {
4209 rx_intr_handler(&mac_control->rings[i]);
4214 * tx_traffic_int reg is an R1 register, writing all 1's
4215 * will ensure that the actual interrupt causing bit get's
4216 * cleared and hence a read can be avoided.
4218 if (reason & GEN_INTR_TXTRAFFIC)
4219 writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int);
4221 for (i = 0; i < config->tx_fifo_num; i++)
4222 tx_intr_handler(&mac_control->fifos[i]);
4224 if (reason & GEN_INTR_TXPIC)
4225 s2io_txpic_intr_handle(sp);
4227 * If the Rx buffer count is below the panic threshold then
4228 * reallocate the buffers from the interrupt handler itself,
4229 * else schedule a tasklet to reallocate the buffers.
4232 for (i = 0; i < config->rx_ring_num; i++)
4233 s2io_chk_rx_buffers(sp, i);
/* Re-enable all interrupts; the readl flushes the posted write. */
4236 writeq(0, &bar0->general_int_mask);
4237 readl(&bar0->general_int_status);
4239 atomic_dec(&sp->isr_cnt);
/*
 * s2io_updt_stats - trigger a one-shot DMA of the hardware statistics
 * block and poll (bounded) for completion; if the card is down, the
 * software copy of the stats block is zeroed instead.
 */
4246 static void s2io_updt_stats(struct s2io_nic *sp)
4248 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4252 if (atomic_read(&sp->card_state) == CARD_UP) {
4253 /* Apprx 30us on a 133 MHz bus */
4254 val64 = SET_UPDT_CLICKS(10) |
4255 STAT_CFG_ONE_SHOT_EN | STAT_CFG_STAT_EN;
4256 writeq(val64, &bar0->stat_cfg);
/* Poll stat_cfg BIT(0) until the one-shot update completes or times out. */
4259 val64 = readq(&bar0->stat_cfg);
4260 if (!(val64 & BIT(0)))
4264 break; /* Updt failed */
4267 memset(sp->mac_control.stats_info, 0, sizeof(struct stat_block));
4272 * s2io_get_stats - Updates the device statistics structure.
4273 * @dev : pointer to the device structure.
4275 * This function updates the device statistics structure in the s2io_nic
4276 * structure and returns a pointer to the same.
4278 * pointer to the updated net_device_stats structure.
/*
 * s2io_get_stats - net_device get_stats entry point.
 *
 * Forces an immediate hardware stats DMA via s2io_updt_stats(), copies a
 * subset of the MAC counters into sp->stats (converting from the
 * little-endian stats block), and returns a pointer to it.
 */
4281 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4283 struct s2io_nic *sp = dev->priv;
4284 struct mac_info *mac_control;
4285 struct config_param *config;
4288 mac_control = &sp->mac_control;
4289 config = &sp->config;
4291 /* Configure Stats for immediate updt */
4292 s2io_updt_stats(sp);
4294 sp->stats.tx_packets =
4295 le32_to_cpu(mac_control->stats_info->tmac_frms);
4296 sp->stats.tx_errors =
4297 le32_to_cpu(mac_control->stats_info->tmac_any_err_frms);
4298 sp->stats.rx_errors =
4299 le64_to_cpu(mac_control->stats_info->rmac_drop_frms);
4300 sp->stats.multicast =
4301 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms);
4302 sp->stats.rx_length_errors =
4303 le64_to_cpu(mac_control->stats_info->rmac_long_frms);
4305 return (&sp->stats);
4309 * s2io_set_multicast - entry point for multicast address enable/disable.
4310 * @dev : pointer to the device structure
4312 * This function is a driver entry point which gets called by the kernel
4313 * whenever multicast addresses must be enabled/disabled. This also gets
4314 * called to set/reset promiscuous mode. Depending on the device flag, we
4315 * determine, if multicast address must be enabled or if promiscuous mode
4316 * is to be disabled etc.
/*
 * s2io_set_multicast - net_device set_multicast_list entry point.
 *
 * Programs the RMAC address-filter CAM according to dev->flags:
 * IFF_ALLMULTI toggles the catch-all multicast entry, IFF_PROMISC
 * toggles RMAC promiscuous mode (and VLAN-strip, per the vlan_tag_strip
 * module parameter), and otherwise the individual multicast list in
 * dev->mc_list is mirrored into the CAM slots starting at
 * MAC_MC_ADDR_START_OFFSET. Each CAM write goes through
 * rmac_addr_{data0,data1,cmd}_mem and waits for command completion.
 */
4321 static void s2io_set_multicast(struct net_device *dev)
4324 struct dev_mc_list *mclist;
4325 struct s2io_nic *sp = dev->priv;
4326 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4327 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
4329 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
4332 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
4333 /* Enable all Multicast addresses */
4334 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
4335 &bar0->rmac_addr_data0_mem);
4336 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
4337 &bar0->rmac_addr_data1_mem);
4338 val64 = RMAC_ADDR_CMD_MEM_WE |
4339 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4340 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
4341 writeq(val64, &bar0->rmac_addr_cmd_mem);
4342 /* Wait till command completes */
4343 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4344 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4348 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
4349 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
4350 /* Disable all Multicast addresses */
4351 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4352 &bar0->rmac_addr_data0_mem);
4353 writeq(RMAC_ADDR_DATA1_MEM_MASK(0x0),
4354 &bar0->rmac_addr_data1_mem);
4355 val64 = RMAC_ADDR_CMD_MEM_WE |
4356 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4357 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
4358 writeq(val64, &bar0->rmac_addr_cmd_mem);
4359 /* Wait till command completes */
4360 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4361 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4365 sp->all_multi_pos = 0;
4368 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
4369 /* Put the NIC into promiscuous mode */
4370 add = &bar0->mac_cfg;
4371 val64 = readq(&bar0->mac_cfg);
4372 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
/* mac_cfg is key-protected: each 32-bit half needs the cfg key first. */
4374 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4375 writel((u32) val64, add);
4376 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4377 writel((u32) (val64 >> 32), (add + 4));
4379 if (vlan_tag_strip != 1) {
4380 val64 = readq(&bar0->rx_pa_cfg);
4381 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG;
4382 writeq(val64, &bar0->rx_pa_cfg);
4383 vlan_strip_flag = 0;
4386 val64 = readq(&bar0->mac_cfg);
4387 sp->promisc_flg = 1;
4388 DBG_PRINT(INFO_DBG, "%s: entered promiscuous mode\n",
4390 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
4391 /* Remove the NIC from promiscuous mode */
4392 add = &bar0->mac_cfg;
4393 val64 = readq(&bar0->mac_cfg);
4394 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
4396 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4397 writel((u32) val64, add);
4398 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
4399 writel((u32) (val64 >> 32), (add + 4));
4401 if (vlan_tag_strip != 0) {
4402 val64 = readq(&bar0->rx_pa_cfg);
4403 val64 |= RX_PA_CFG_STRIP_VLAN_TAG;
4404 writeq(val64, &bar0->rx_pa_cfg);
4405 vlan_strip_flag = 1;
4408 val64 = readq(&bar0->mac_cfg);
4409 sp->promisc_flg = 0;
4410 DBG_PRINT(INFO_DBG, "%s: left promiscuous mode\n",
4414 /* Update individual M_CAST address list */
4415 if ((!sp->m_cast_flg) && dev->mc_count) {
/* More addresses than CAM slots: advise ALLMULTI instead. */
4417 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
4418 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
4420 DBG_PRINT(ERR_DBG, "can be added, please enable ");
4421 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
4425 prev_cnt = sp->mc_addr_count;
4426 sp->mc_addr_count = dev->mc_count;
4428 /* Clear out the previous list of Mc in the H/W. */
4429 for (i = 0; i < prev_cnt; i++) {
4430 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
4431 &bar0->rmac_addr_data0_mem);
4432 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4433 &bar0->rmac_addr_data1_mem);
4434 val64 = RMAC_ADDR_CMD_MEM_WE |
4435 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4436 RMAC_ADDR_CMD_MEM_OFFSET
4437 (MAC_MC_ADDR_START_OFFSET + i);
4438 writeq(val64, &bar0->rmac_addr_cmd_mem);
4440 /* Wait for command completes */
4441 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4442 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4444 DBG_PRINT(ERR_DBG, "%s: Adding ",
4446 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4451 /* Create the new Rx filter list and update the same in H/W. */
4452 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
4453 i++, mclist = mclist->next) {
4454 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
/* Pack the 6 address bytes into a u64 for the data0 register. */
4457 for (j = 0; j < ETH_ALEN; j++) {
4458 mac_addr |= mclist->dmi_addr[j];
4462 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4463 &bar0->rmac_addr_data0_mem);
4464 writeq(RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4465 &bar0->rmac_addr_data1_mem);
4466 val64 = RMAC_ADDR_CMD_MEM_WE |
4467 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4468 RMAC_ADDR_CMD_MEM_OFFSET
4469 (i + MAC_MC_ADDR_START_OFFSET);
4470 writeq(val64, &bar0->rmac_addr_cmd_mem);
4472 /* Wait for command completes */
4473 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4474 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4476 DBG_PRINT(ERR_DBG, "%s: Adding ",
4478 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
4486 * s2io_set_mac_addr - Programs the Xframe mac address
4487 * @dev : pointer to the device structure.
4488 * @addr: a uchar pointer to the new mac address which is to be set.
4489 * Description : This procedure will program the Xframe to receive
4490 * frames with new Mac Address
4491 * Return value: SUCCESS on success and an appropriate (-)ve integer
4492 * as defined in errno.h file on failure.
/*
 * s2io_set_mac_addr - program a new unicast station address.
 * @dev:  target net_device.
 * @addr: 6-byte MAC address to install.
 *
 * Packs the address into a u64, writes it to CAM offset 0 via the
 * rmac_addr_{data0,cmd}_mem registers, and waits for the command to
 * complete. Returns failure (logged) if the CAM write times out.
 */
4495 static int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
4497 struct s2io_nic *sp = dev->priv;
4498 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4499 register u64 val64, mac_addr = 0;
4503 * Set the new MAC address as the new unicast filter and reflect this
4504 * change on the device address registered with the OS. It will be
4507 for (i = 0; i < ETH_ALEN; i++) {
4509 mac_addr |= addr[i];
4512 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
4513 &bar0->rmac_addr_data0_mem);
4516 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4517 RMAC_ADDR_CMD_MEM_OFFSET(0);
4518 writeq(val64, &bar0->rmac_addr_cmd_mem);
4519 /* Wait till command completes */
4520 if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
4521 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET)) {
4522 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
4530 * s2io_ethtool_sset - Sets different link parameters.
4531 * @sp : private member of the device structure, which is a pointer to the s2io_nic structure.
4532 * @info: pointer to the structure with parameters given by ethtool to set
4535 * The function sets different link parameters provided by the user onto
/*
 * s2io_ethtool_sset - ethtool set_settings handler.
 *
 * The Xframe link is fixed at 10G full duplex with no autonegotiation,
 * so anything else is rejected; an accepted request bounces the
 * interface (close/reopen) to apply it.
 */
4541 static int s2io_ethtool_sset(struct net_device *dev,
4542 struct ethtool_cmd *info)
4544 struct s2io_nic *sp = dev->priv;
4545 if ((info->autoneg == AUTONEG_ENABLE) ||
4546 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
4549 s2io_close(sp->dev);
4557 * s2io_ethtool_gset - Return link specific information.
4558 * @sp : private member of the device structure, pointer to the
4559 * s2io_nic structure.
4560 * @info : pointer to the structure with parameters given by ethtool
4561 * to return link information.
4563 * Returns link specific information like speed, duplex etc.. to ethtool.
4565 * return 0 on success.
/*
 * s2io_ethtool_gset - ethtool get_settings handler.
 *
 * Reports a fixed 10G full-duplex fibre link (no autoneg); speed/duplex
 * are only filled in when the carrier is up.
 */
4568 static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
4570 struct s2io_nic *sp = dev->priv;
4571 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
/*
 * NOTE(review): 'advertising' is filled with SUPPORTED_* constants; the
 * ethtool API defines matching ADVERTISED_* constants for this field —
 * verify the bit values coincide or switch to ADVERTISED_*.
 */
4572 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
4573 info->port = PORT_FIBRE;
4574 /* info->transceiver?? TODO */
4576 if (netif_carrier_ok(sp->dev)) {
4577 info->speed = 10000;
4578 info->duplex = DUPLEX_FULL;
4584 info->autoneg = AUTONEG_DISABLE;
4589 * s2io_ethtool_gdrvinfo - Returns driver specific information.
4590 * @sp : private member of the device structure, which is a pointer to the
4591 * s2io_nic structure.
4592 * @info : pointer to the structure with parameters given by ethtool to
4593 * return driver information.
4595 * Returns driver specific information like name, version etc.. to ethtool.
/*
 * s2io_ethtool_gdrvinfo - ethtool get_drvinfo handler: fills in driver
 * name/version, bus info, and the sizes of the register, EEPROM, test,
 * and statistics dumps.
 */
4600 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
4601 struct ethtool_drvinfo *info)
4603 struct s2io_nic *sp = dev->priv;
/*
 * NOTE(review): strncpy does not guarantee NUL-termination when the
 * source fills the buffer exactly — harmless here only if the driver
 * strings are known shorter than the ethtool fields; confirm.
 */
4605 strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
4606 strncpy(info->version, s2io_driver_version, sizeof(info->version));
4607 strncpy(info->fw_version, "", sizeof(info->fw_version));
4608 strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
4609 info->regdump_len = XENA_REG_SPACE;
4610 info->eedump_len = XENA_EEPROM_SPACE;
4611 info->testinfo_len = S2IO_TEST_LEN;
4612 info->n_stats = S2IO_STAT_LEN;
4616 * s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
4617 * @sp: private member of the device structure, which is a pointer to the
4618 * s2io_nic structure.
4619 * @regs : pointer to the structure with parameters given by ethtool for
4620 * dumping the registers.
4621 * @reg_space: The input argument into which all the registers are dumped.
4623 * Dumps the entire register space of xFrame NIC into the user given
/*
 * s2io_ethtool_gregs - ethtool get_regs handler.
 *
 * Copies the entire BAR0 register space of the NIC, 8 bytes at a time,
 * into the caller-supplied buffer. regs->version carries the PCI
 * subsystem device id so userspace can tell card variants apart.
 *
 * Fix: the memcpy source had been corrupted to the mojibake character
 * "®" (an HTML-entity mangling of "&reg,"); restored to take the
 * address of the local register value.
 */
4629 static void s2io_ethtool_gregs(struct net_device *dev,
4630 struct ethtool_regs *regs, void *space)
4634 u8 *reg_space = (u8 *) space;
4635 struct s2io_nic *sp = dev->priv;
4637 regs->len = XENA_REG_SPACE;
4638 regs->version = sp->pdev->subsystem_device;
4640 for (i = 0; i < regs->len; i += 8) {
4641 reg = readq(sp->bar0 + i);
4642 memcpy((reg_space + i), &reg, 8);
4647 * s2io_phy_id - timer function that alternates adapter LED.
4648 * @data : address of the private member of the device structure, which
4649 * is a pointer to the s2io_nic structure, provided as an u32.
4650 * Description: This is actually the timer function that alternates the
4651 * adapter LED bit of the adapter control bit to set/reset every time on
4652 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
4653 * once every second.
/*
 * s2io_phy_id - LED-blink timer callback for ethtool identify.
 * @data: the struct s2io_nic pointer passed via the timer payload.
 *
 * Toggles either the GPIO LED bit (Xframe-II, or Xframe-I cards with
 * subsystem id >= 0x07) or the adapter-control LED bit, then re-arms
 * itself every half second — so the LED blinks once per second.
 */
4655 static void s2io_phy_id(unsigned long data)
4657 struct s2io_nic *sp = (struct s2io_nic *) data;
4658 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4662 subid = sp->pdev->subsystem_device;
4663 if ((sp->device_type == XFRAME_II_DEVICE) ||
4664 ((subid & 0xFF) >= 0x07)) {
4665 val64 = readq(&bar0->gpio_control);
4666 val64 ^= GPIO_CTRL_GPIO_0;
4667 writeq(val64, &bar0->gpio_control);
/* Older cards: the LED lives in adapter_control instead. */
4669 val64 = readq(&bar0->adapter_control);
4670 val64 ^= ADAPTER_LED_ON;
4671 writeq(val64, &bar0->adapter_control);
4674 mod_timer(&sp->id_timer, jiffies + HZ / 2);
4678 * s2io_ethtool_idnic - To physically identify the nic on the system.
4679 * @sp : private member of the device structure, which is a pointer to the
4680 * s2io_nic structure.
4681 * @id : pointer to the structure with identification parameters given by
4683 * Description: Used to physically identify the NIC on the system.
4684 * The Link LED will blink for a time specified by the user for
4686 * NOTE: The Link has to be Up to be able to blink the LED. Hence
4687 * identification is possible only if it's link is up.
4689 * int , returns 0 on success
/*
 * s2io_ethtool_idnic - ethtool phys_id handler: blink the adapter LED
 * for @data seconds (or MAX_FLICKER_TIME when @data is 0) using the
 * s2io_phy_id timer. Old Xframe-I cards require the adapter to be
 * enabled (link up) first; cards with faulty link indicators get their
 * original gpio_control value restored afterwards.
 */
4692 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
4694 u64 val64 = 0, last_gpio_ctrl_val;
4695 struct s2io_nic *sp = dev->priv;
4696 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4699 subid = sp->pdev->subsystem_device;
/* Save gpio_control so it can be restored for faulty-indicator cards. */
4700 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4701 if ((sp->device_type == XFRAME_I_DEVICE) &&
4702 ((subid & 0xFF) < 0x07)) {
4703 val64 = readq(&bar0->adapter_control);
4704 if (!(val64 & ADAPTER_CNTL_EN)) {
4706 "Adapter Link down, cannot blink LED\n");
/* Lazily initialize the blink timer on first use. */
4710 if (sp->id_timer.function == NULL) {
4711 init_timer(&sp->id_timer);
4712 sp->id_timer.function = s2io_phy_id;
4713 sp->id_timer.data = (unsigned long) sp;
4715 mod_timer(&sp->id_timer, jiffies);
/* Sleep for the requested duration while the timer toggles the LED. */
4717 msleep_interruptible(data * HZ);
4719 msleep_interruptible(MAX_FLICKER_TIME);
4720 del_timer_sync(&sp->id_timer);
4722 if (CARDS_WITH_FAULTY_LINK_INDICATORS(sp->device_type, subid)) {
4723 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
4724 last_gpio_ctrl_val = readq(&bar0->gpio_control);
4731 * s2io_ethtool_getpause_data - Pause frame generation and reception.
4732 * @sp : private member of the device structure, which is a pointer to the
4733 * s2io_nic structure.
4734 * @ep : pointer to the structure with pause parameters given by ethtool.
4736 * Returns the Pause frame generation and reception capability of the NIC.
/*
 * s2io_ethtool_getpause_data - ethtool get_pauseparam handler: reads
 * rmac_pause_cfg and reports Tx/Rx pause capability; pause autoneg is
 * never supported.
 */
4740 static void s2io_ethtool_getpause_data(struct net_device *dev,
4741 struct ethtool_pauseparam *ep)
4744 struct s2io_nic *sp = dev->priv;
4745 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4747 val64 = readq(&bar0->rmac_pause_cfg);
4748 if (val64 & RMAC_PAUSE_GEN_ENABLE)
4749 ep->tx_pause = TRUE;
4750 if (val64 & RMAC_PAUSE_RX_ENABLE)
4751 ep->rx_pause = TRUE;
4752 ep->autoneg = FALSE;
4756 * s2io_ethtool_setpause_data - set/reset pause frame generation.
4757 * @sp : private member of the device structure, which is a pointer to the
4758 * s2io_nic structure.
4759 * @ep : pointer to the structure with pause parameters given by ethtool.
4761 * It can be used to set or reset Pause frame generation or reception
4762 * support of the NIC.
4764 * int, returns 0 on Success
4767 static int s2io_ethtool_setpause_data(struct net_device *dev,
4768 struct ethtool_pauseparam *ep)
4771 struct s2io_nic *sp = dev->priv;
4772 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4774 val64 = readq(&bar0->rmac_pause_cfg);
4776 val64 |= RMAC_PAUSE_GEN_ENABLE;
4778 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
4780 val64 |= RMAC_PAUSE_RX_ENABLE;
4782 val64 &= ~RMAC_PAUSE_RX_ENABLE;
4783 writeq(val64, &bar0->rmac_pause_cfg);
4788 * read_eeprom - reads 4 bytes of data from user given offset.
4789 * @sp : private member of the device structure, which is a pointer to the
4790 * s2io_nic structure.
4791 * @off : offset from which the data must be read
4792 * @data : It's an output parameter where the data read at the given
4795 * Will read 4 bytes of data from the user given offset and return the
4797 * NOTE: Will allow to read only part of the EEPROM visible through the
4800 * -1 on failure and 0 on success.
4803 #define S2IO_DEV_ID 5
4804 static int read_eeprom(struct s2io_nic * sp, int off, u64 * data)
4809 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4811 if (sp->device_type == XFRAME_I_DEVICE) {
4812 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4813 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
4814 I2C_CONTROL_CNTL_START;
4815 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4817 while (exit_cnt < 5) {
4818 val64 = readq(&bar0->i2c_control);
4819 if (I2C_CONTROL_CNTL_END(val64)) {
4820 *data = I2C_CONTROL_GET_DATA(val64);
4829 if (sp->device_type == XFRAME_II_DEVICE) {
4830 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
4831 SPI_CONTROL_BYTECNT(0x3) |
4832 SPI_CONTROL_CMD(0x3) | SPI_CONTROL_ADDR(off);
4833 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4834 val64 |= SPI_CONTROL_REQ;
4835 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4836 while (exit_cnt < 5) {
4837 val64 = readq(&bar0->spi_control);
4838 if (val64 & SPI_CONTROL_NACK) {
4841 } else if (val64 & SPI_CONTROL_DONE) {
4842 *data = readq(&bar0->spi_data);
4855 * write_eeprom - actually writes the relevant part of the data value.
4856 * @sp : private member of the device structure, which is a pointer to the
4857 * s2io_nic structure.
4858 * @off : offset at which the data must be written
4859 * @data : The data that is to be written
4860 * @cnt : Number of bytes of the data that are actually to be written into
4861 * the Eeprom. (max of 3)
4863 * Actually writes the relevant part of the data value into the Eeprom
4864 * through the I2C bus.
4866 * 0 on success, -1 on failure.
4869 static int write_eeprom(struct s2io_nic * sp, int off, u64 data, int cnt)
4871 int exit_cnt = 0, ret = -1;
4873 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4875 if (sp->device_type == XFRAME_I_DEVICE) {
4876 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
4877 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA((u32)data) |
4878 I2C_CONTROL_CNTL_START;
4879 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
4881 while (exit_cnt < 5) {
4882 val64 = readq(&bar0->i2c_control);
4883 if (I2C_CONTROL_CNTL_END(val64)) {
4884 if (!(val64 & I2C_CONTROL_NACK))
4893 if (sp->device_type == XFRAME_II_DEVICE) {
4894 int write_cnt = (cnt == 8) ? 0 : cnt;
4895 writeq(SPI_DATA_WRITE(data,(cnt<<3)), &bar0->spi_data);
4897 val64 = SPI_CONTROL_KEY(0x9) | SPI_CONTROL_SEL1 |
4898 SPI_CONTROL_BYTECNT(write_cnt) |
4899 SPI_CONTROL_CMD(0x2) | SPI_CONTROL_ADDR(off);
4900 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4901 val64 |= SPI_CONTROL_REQ;
4902 SPECIAL_REG_WRITE(val64, &bar0->spi_control, LF);
4903 while (exit_cnt < 5) {
4904 val64 = readq(&bar0->spi_control);
4905 if (val64 & SPI_CONTROL_NACK) {
4908 } else if (val64 & SPI_CONTROL_DONE) {
4918 static void s2io_vpd_read(struct s2io_nic *nic)
4922 int i=0, cnt, fail = 0;
4923 int vpd_addr = 0x80;
4925 if (nic->device_type == XFRAME_II_DEVICE) {
4926 strcpy(nic->product_name, "Xframe II 10GbE network adapter");
4930 strcpy(nic->product_name, "Xframe I 10GbE network adapter");
4933 strcpy(nic->serial_num, "NOT AVAILABLE");
4935 vpd_data = kmalloc(256, GFP_KERNEL);
4939 for (i = 0; i < 256; i +=4 ) {
4940 pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
4941 pci_read_config_byte(nic->pdev, (vpd_addr + 2), &data);
4942 pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
4943 for (cnt = 0; cnt <5; cnt++) {
4945 pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
4950 DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
4954 pci_read_config_dword(nic->pdev, (vpd_addr + 4),
4955 (u32 *)&vpd_data[i]);
4959 /* read serial number of adapter */
4960 for (cnt = 0; cnt < 256; cnt++) {
4961 if ((vpd_data[cnt] == 'S') &&
4962 (vpd_data[cnt+1] == 'N') &&
4963 (vpd_data[cnt+2] < VPD_STRING_LEN)) {
4964 memset(nic->serial_num, 0, VPD_STRING_LEN);
4965 memcpy(nic->serial_num, &vpd_data[cnt + 3],
4972 if ((!fail) && (vpd_data[1] < VPD_STRING_LEN)) {
4973 memset(nic->product_name, 0, vpd_data[1]);
4974 memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
4980 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
4981 * @sp : private member of the device structure, which is a pointer to the
 * s2io_nic structure.
4982 * @eeprom : pointer to the user level structure provided by ethtool,
4983 * containing all relevant information.
4984 * @data_buf : user defined value to be written into Eeprom.
4985 * Description: Reads the values stored in the Eeprom at given offset
4986 * for a given length. Stores these values into the input argument data
4987 * buffer 'data_buf' and returns these to the caller (ethtool.)
4992 static int s2io_ethtool_geeprom(struct net_device *dev,
4993 struct ethtool_eeprom *eeprom, u8 * data_buf)
4997 struct s2io_nic *sp = dev->priv;
4999 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
5001 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
5002 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
5004 for (i = 0; i < eeprom->len; i += 4) {
5005 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
5006 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
5010 memcpy((data_buf + i), &valid, 4);
5016 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
5017 * @sp : private member of the device structure, which is a pointer to the
5018 * s2io_nic structure.
5019 * @eeprom : pointer to the user level structure provided by ethtool,
5020 * containing all relevant information.
5021 * @data_buf : user defined value to be written into Eeprom.
5023 * Tries to write the user provided value in the Eeprom, at the offset
5024 * given by the user.
5026 * 0 on success, -EFAULT on failure.
5029 static int s2io_ethtool_seeprom(struct net_device *dev,
5030 struct ethtool_eeprom *eeprom,
5033 int len = eeprom->len, cnt = 0;
5034 u64 valid = 0, data;
5035 struct s2io_nic *sp = dev->priv;
5037 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
5039 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
5040 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
5046 data = (u32) data_buf[cnt] & 0x000000FF;
5048 valid = (u32) (data << 24);
5052 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
5054 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
5056 "write into the specified offset\n");
5067 * s2io_register_test - reads and writes into all clock domains.
5068 * @sp : private member of the device structure, which is a pointer to the
5069 * s2io_nic structure.
5070 * @data : variable that returns the result of each of the test conducted b
5073 * Read and write into all clock domains. The NIC has 3 clock domains,
5074 * see that registers in all the three regions are accessible.
5079 static int s2io_register_test(struct s2io_nic * sp, uint64_t * data)
5081 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5082 u64 val64 = 0, exp_val;
5085 val64 = readq(&bar0->pif_rd_swapper_fb);
5086 if (val64 != 0x123456789abcdefULL) {
5088 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
5091 val64 = readq(&bar0->rmac_pause_cfg);
5092 if (val64 != 0xc000ffff00000000ULL) {
5094 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
5097 val64 = readq(&bar0->rx_queue_cfg);
5098 if (sp->device_type == XFRAME_II_DEVICE)
5099 exp_val = 0x0404040404040404ULL;
5101 exp_val = 0x0808080808080808ULL;
5102 if (val64 != exp_val) {
5104 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
5107 val64 = readq(&bar0->xgxs_efifo_cfg);
5108 if (val64 != 0x000000001923141EULL) {
5110 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
5113 val64 = 0x5A5A5A5A5A5A5A5AULL;
5114 writeq(val64, &bar0->xmsi_data);
5115 val64 = readq(&bar0->xmsi_data);
5116 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
5118 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
5121 val64 = 0xA5A5A5A5A5A5A5A5ULL;
5122 writeq(val64, &bar0->xmsi_data);
5123 val64 = readq(&bar0->xmsi_data);
5124 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
5126 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
5134 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
5135 * @sp : private member of the device structure, which is a pointer to the
5136 * s2io_nic structure.
5137 * @data:variable that returns the result of each of the test conducted by
5140 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
5146 static int s2io_eeprom_test(struct s2io_nic * sp, uint64_t * data)
5149 u64 ret_data, org_4F0, org_7F0;
5150 u8 saved_4F0 = 0, saved_7F0 = 0;
5151 struct net_device *dev = sp->dev;
5153 /* Test Write Error at offset 0 */
5154 /* Note that SPI interface allows write access to all areas
5155 * of EEPROM. Hence doing all negative testing only for Xframe I.
5157 if (sp->device_type == XFRAME_I_DEVICE)
5158 if (!write_eeprom(sp, 0, 0, 3))
5161 /* Save current values at offsets 0x4F0 and 0x7F0 */
5162 if (!read_eeprom(sp, 0x4F0, &org_4F0))
5164 if (!read_eeprom(sp, 0x7F0, &org_7F0))
5167 /* Test Write at offset 4f0 */
5168 if (write_eeprom(sp, 0x4F0, 0x012345, 3))
5170 if (read_eeprom(sp, 0x4F0, &ret_data))
5173 if (ret_data != 0x012345) {
5174 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x4F0. "
5175 "Data written %llx Data read %llx\n",
5176 dev->name, (unsigned long long)0x12345,
5177 (unsigned long long)ret_data);
5181 /* Reset the EEPROM data go FFFF */
5182 write_eeprom(sp, 0x4F0, 0xFFFFFF, 3);
5184 /* Test Write Request Error at offset 0x7c */
5185 if (sp->device_type == XFRAME_I_DEVICE)
5186 if (!write_eeprom(sp, 0x07C, 0, 3))
5189 /* Test Write Request at offset 0x7f0 */
5190 if (write_eeprom(sp, 0x7F0, 0x012345, 3))
5192 if (read_eeprom(sp, 0x7F0, &ret_data))
5195 if (ret_data != 0x012345) {
5196 DBG_PRINT(ERR_DBG, "%s: eeprom test error at offset 0x7F0. "
5197 "Data written %llx Data read %llx\n",
5198 dev->name, (unsigned long long)0x12345,
5199 (unsigned long long)ret_data);
5203 /* Reset the EEPROM data go FFFF */
5204 write_eeprom(sp, 0x7F0, 0xFFFFFF, 3);
5206 if (sp->device_type == XFRAME_I_DEVICE) {
5207 /* Test Write Error at offset 0x80 */
5208 if (!write_eeprom(sp, 0x080, 0, 3))
5211 /* Test Write Error at offset 0xfc */
5212 if (!write_eeprom(sp, 0x0FC, 0, 3))
5215 /* Test Write Error at offset 0x100 */
5216 if (!write_eeprom(sp, 0x100, 0, 3))
5219 /* Test Write Error at offset 4ec */
5220 if (!write_eeprom(sp, 0x4EC, 0, 3))
5224 /* Restore values at offsets 0x4F0 and 0x7F0 */
5226 write_eeprom(sp, 0x4F0, org_4F0, 3);
5228 write_eeprom(sp, 0x7F0, org_7F0, 3);
5235 * s2io_bist_test - invokes the MemBist test of the card .
5236 * @sp : private member of the device structure, which is a pointer to the
5237 * s2io_nic structure.
5238 * @data:variable that returns the result of each of the test conducted by
5241 * This invokes the MemBist test of the card. We give around
5242 * 2 secs time for the Test to complete. If it's still not complete
5243 * within this period, we consider that the test failed.
5245 * 0 on success and -1 on failure.
5248 static int s2io_bist_test(struct s2io_nic * sp, uint64_t * data)
5251 int cnt = 0, ret = -1;
5253 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5254 bist |= PCI_BIST_START;
5255 pci_write_config_word(sp->pdev, PCI_BIST, bist);
5258 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
5259 if (!(bist & PCI_BIST_START)) {
5260 *data = (bist & PCI_BIST_CODE_MASK);
5272 * s2io_link_test - verifies the link state of the nic
5273 * @sp : private member of the device structure, which is a pointer to the
5274 * s2io_nic structure.
5275 * @data: variable that returns the result of each of the test conducted by
5278 * The function verifies the link state of the NIC and updates the input
5279 * argument 'data' appropriately.
5284 static int s2io_link_test(struct s2io_nic * sp, uint64_t * data)
5286 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5289 val64 = readq(&bar0->adapter_status);
5290 if(!(LINK_IS_UP(val64)))
5299 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
5300 * @sp - private member of the device structure, which is a pointer to the
5301 * s2io_nic structure.
5302 * @data - variable that returns the result of each of the test
5303 * conducted by the driver.
5305 * This is one of the offline test that tests the read and write
5306 * access to the RldRam chip on the NIC.
5311 static int s2io_rldram_test(struct s2io_nic * sp, uint64_t * data)
5313 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5315 int cnt, iteration = 0, test_fail = 0;
5317 val64 = readq(&bar0->adapter_control);
5318 val64 &= ~ADAPTER_ECC_EN;
5319 writeq(val64, &bar0->adapter_control);
5321 val64 = readq(&bar0->mc_rldram_test_ctrl);
5322 val64 |= MC_RLDRAM_TEST_MODE;
5323 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5325 val64 = readq(&bar0->mc_rldram_mrs);
5326 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
5327 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5329 val64 |= MC_RLDRAM_MRS_ENABLE;
5330 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
5332 while (iteration < 2) {
5333 val64 = 0x55555555aaaa0000ULL;
5334 if (iteration == 1) {
5335 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5337 writeq(val64, &bar0->mc_rldram_test_d0);
5339 val64 = 0xaaaa5a5555550000ULL;
5340 if (iteration == 1) {
5341 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5343 writeq(val64, &bar0->mc_rldram_test_d1);
5345 val64 = 0x55aaaaaaaa5a0000ULL;
5346 if (iteration == 1) {
5347 val64 ^= 0xFFFFFFFFFFFF0000ULL;
5349 writeq(val64, &bar0->mc_rldram_test_d2);
5351 val64 = (u64) (0x0000003ffffe0100ULL);
5352 writeq(val64, &bar0->mc_rldram_test_add);
5354 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
5356 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5358 for (cnt = 0; cnt < 5; cnt++) {
5359 val64 = readq(&bar0->mc_rldram_test_ctrl);
5360 if (val64 & MC_RLDRAM_TEST_DONE)
5368 val64 = MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
5369 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_test_ctrl, LF);
5371 for (cnt = 0; cnt < 5; cnt++) {
5372 val64 = readq(&bar0->mc_rldram_test_ctrl);
5373 if (val64 & MC_RLDRAM_TEST_DONE)
5381 val64 = readq(&bar0->mc_rldram_test_ctrl);
5382 if (!(val64 & MC_RLDRAM_TEST_PASS))
5390 /* Bring the adapter out of test mode */
5391 SPECIAL_REG_WRITE(0, &bar0->mc_rldram_test_ctrl, LF);
5397 * s2io_ethtool_test - conducts 6 tests to determine the health of card.
5398 * @sp : private member of the device structure, which is a pointer to the
5399 * s2io_nic structure.
5400 * @ethtest : pointer to a ethtool command specific structure that will be
5401 * returned to the user.
5402 * @data : variable that returns the result of each of the test
5403 * conducted by the driver.
5405 * This function conducts 6 tests ( 4 offline and 2 online) to determine
5406 * the health of the card.
5411 static void s2io_ethtool_test(struct net_device *dev,
5412 struct ethtool_test *ethtest,
5415 struct s2io_nic *sp = dev->priv;
5416 int orig_state = netif_running(sp->dev);
5418 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
5419 /* Offline Tests. */
5421 s2io_close(sp->dev);
5423 if (s2io_register_test(sp, &data[0]))
5424 ethtest->flags |= ETH_TEST_FL_FAILED;
5428 if (s2io_rldram_test(sp, &data[3]))
5429 ethtest->flags |= ETH_TEST_FL_FAILED;
5433 if (s2io_eeprom_test(sp, &data[1]))
5434 ethtest->flags |= ETH_TEST_FL_FAILED;
5436 if (s2io_bist_test(sp, &data[4]))
5437 ethtest->flags |= ETH_TEST_FL_FAILED;
5447 "%s: is not up, cannot run test\n",
5456 if (s2io_link_test(sp, &data[2]))
5457 ethtest->flags |= ETH_TEST_FL_FAILED;
5466 static void s2io_get_ethtool_stats(struct net_device *dev,
5467 struct ethtool_stats *estats,
5471 struct s2io_nic *sp = dev->priv;
5472 struct stat_block *stat_info = sp->mac_control.stats_info;
5474 s2io_updt_stats(sp);
5476 (u64)le32_to_cpu(stat_info->tmac_frms_oflow) << 32 |
5477 le32_to_cpu(stat_info->tmac_frms);
5479 (u64)le32_to_cpu(stat_info->tmac_data_octets_oflow) << 32 |
5480 le32_to_cpu(stat_info->tmac_data_octets);
5481 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_drop_frms);
5483 (u64)le32_to_cpu(stat_info->tmac_mcst_frms_oflow) << 32 |
5484 le32_to_cpu(stat_info->tmac_mcst_frms);
5486 (u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
5487 le32_to_cpu(stat_info->tmac_bcst_frms);
5488 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
5490 (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
5491 le32_to_cpu(stat_info->tmac_ttl_octets);
5493 (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
5494 le32_to_cpu(stat_info->tmac_ucst_frms);
5496 (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
5497 le32_to_cpu(stat_info->tmac_nucst_frms);
5499 (u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
5500 le32_to_cpu(stat_info->tmac_any_err_frms);
5501 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
5502 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
5504 (u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
5505 le32_to_cpu(stat_info->tmac_vld_ip);
5507 (u64)le32_to_cpu(stat_info->tmac_drop_ip_oflow) << 32 |
5508 le32_to_cpu(stat_info->tmac_drop_ip);
5510 (u64)le32_to_cpu(stat_info->tmac_icmp_oflow) << 32 |
5511 le32_to_cpu(stat_info->tmac_icmp);
5513 (u64)le32_to_cpu(stat_info->tmac_rst_tcp_oflow) << 32 |
5514 le32_to_cpu(stat_info->tmac_rst_tcp);
5515 tmp_stats[i++] = le64_to_cpu(stat_info->tmac_tcp);
5516 tmp_stats[i++] = (u64)le32_to_cpu(stat_info->tmac_udp_oflow) << 32 |
5517 le32_to_cpu(stat_info->tmac_udp);
5519 (u64)le32_to_cpu(stat_info->rmac_vld_frms_oflow) << 32 |
5520 le32_to_cpu(stat_info->rmac_vld_frms);
5522 (u64)le32_to_cpu(stat_info->rmac_data_octets_oflow) << 32 |
5523 le32_to_cpu(stat_info->rmac_data_octets);
5524 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_fcs_err_frms);
5525 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_drop_frms);
5527 (u64)le32_to_cpu(stat_info->rmac_vld_mcst_frms_oflow) << 32 |
5528 le32_to_cpu(stat_info->rmac_vld_mcst_frms);
5530 (u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
5531 le32_to_cpu(stat_info->rmac_vld_bcst_frms);
5532 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
5533 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
5534 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
5535 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
5536 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
5538 (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
5539 le32_to_cpu(stat_info->rmac_ttl_octets);
5541 (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
5542 << 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
5544 (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
5545 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
5547 (u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
5548 le32_to_cpu(stat_info->rmac_discarded_frms);
5550 (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
5551 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
5552 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
5553 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
5555 (u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
5556 le32_to_cpu(stat_info->rmac_usized_frms);
5558 (u64)le32_to_cpu(stat_info->rmac_osized_frms_oflow) << 32 |
5559 le32_to_cpu(stat_info->rmac_osized_frms);
5561 (u64)le32_to_cpu(stat_info->rmac_frag_frms_oflow) << 32 |
5562 le32_to_cpu(stat_info->rmac_frag_frms);
5564 (u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
5565 le32_to_cpu(stat_info->rmac_jabber_frms);
5566 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
5567 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
5568 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
5569 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
5570 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
5571 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
5573 (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
5574 le32_to_cpu(stat_info->rmac_ip);
5575 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
5576 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
5578 (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
5579 le32_to_cpu(stat_info->rmac_drop_ip);
5581 (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
5582 le32_to_cpu(stat_info->rmac_icmp);
5583 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
5585 (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
5586 le32_to_cpu(stat_info->rmac_udp);
5588 (u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
5589 le32_to_cpu(stat_info->rmac_err_drp_udp);
5590 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
5591 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
5592 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
5593 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
5594 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
5595 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
5596 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
5597 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
5598 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
5599 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
5600 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
5601 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
5602 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
5603 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
5604 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
5605 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
5606 tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
5608 (u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
5609 le32_to_cpu(stat_info->rmac_pause_cnt);
5610 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
5611 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
5613 (u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
5614 le32_to_cpu(stat_info->rmac_accepted_ip);
5615 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
5616 tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
5617 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
5618 tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
5619 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
5620 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
5621 tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
5622 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
5623 tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
5624 tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
5625 tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
5626 tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
5627 tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
5628 tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
5629 tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
5630 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
5631 tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
5632 tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
5633 tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
5634 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
5635 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
5636 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
5637 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
5638 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
5639 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
5640 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
5641 tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
5642 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
5643 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
5644 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
5645 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
5646 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
5647 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
5648 tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
5649 tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
5651 tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
5652 tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
5653 tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
5654 tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
5655 tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
5656 tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
5657 tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt;
5658 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
5659 tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
5660 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
5661 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
5662 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
5663 tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
5664 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
5665 tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
5666 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
5667 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
5668 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
5669 tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
5670 tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
5671 tmp_stats[i++] = stat_info->sw_stat.sending_both;
5672 tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
5673 tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
5674 if (stat_info->sw_stat.num_aggregations) {
5675 u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
5678 * Since 64-bit divide does not work on all platforms,
5679 * do repeated subtraction.
5681 while (tmp >= stat_info->sw_stat.num_aggregations) {
5682 tmp -= stat_info->sw_stat.num_aggregations;
5685 tmp_stats[i++] = count;
5691 static int s2io_ethtool_get_regs_len(struct net_device *dev)
5693 return (XENA_REG_SPACE);
5697 static u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
5699 struct s2io_nic *sp = dev->priv;
5701 return (sp->rx_csum);
5704 static int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
5706 struct s2io_nic *sp = dev->priv;
5716 static int s2io_get_eeprom_len(struct net_device *dev)
5718 return (XENA_EEPROM_SPACE);
5721 static int s2io_ethtool_self_test_count(struct net_device *dev)
5723 return (S2IO_TEST_LEN);
5726 static void s2io_ethtool_get_strings(struct net_device *dev,
5727 u32 stringset, u8 * data)
5729 switch (stringset) {
5731 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
5734 memcpy(data, ðtool_stats_keys,
5735 sizeof(ethtool_stats_keys));
5738 static int s2io_ethtool_get_stats_count(struct net_device *dev)
5740 return (S2IO_STAT_LEN);
5743 static int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
5746 dev->features |= NETIF_F_IP_CSUM;
5748 dev->features &= ~NETIF_F_IP_CSUM;
5753 static u32 s2io_ethtool_op_get_tso(struct net_device *dev)
5755 return (dev->features & NETIF_F_TSO) != 0;
5757 static int s2io_ethtool_op_set_tso(struct net_device *dev, u32 data)
5760 dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
5762 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
5767 static const struct ethtool_ops netdev_ethtool_ops = {
5768 .get_settings = s2io_ethtool_gset,
5769 .set_settings = s2io_ethtool_sset,
5770 .get_drvinfo = s2io_ethtool_gdrvinfo,
5771 .get_regs_len = s2io_ethtool_get_regs_len,
5772 .get_regs = s2io_ethtool_gregs,
5773 .get_link = ethtool_op_get_link,
5774 .get_eeprom_len = s2io_get_eeprom_len,
5775 .get_eeprom = s2io_ethtool_geeprom,
5776 .set_eeprom = s2io_ethtool_seeprom,
5777 .get_pauseparam = s2io_ethtool_getpause_data,
5778 .set_pauseparam = s2io_ethtool_setpause_data,
5779 .get_rx_csum = s2io_ethtool_get_rx_csum,
5780 .set_rx_csum = s2io_ethtool_set_rx_csum,
5781 .get_tx_csum = ethtool_op_get_tx_csum,
5782 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
5783 .get_sg = ethtool_op_get_sg,
5784 .set_sg = ethtool_op_set_sg,
5785 .get_tso = s2io_ethtool_op_get_tso,
5786 .set_tso = s2io_ethtool_op_set_tso,
5787 .get_ufo = ethtool_op_get_ufo,
5788 .set_ufo = ethtool_op_set_ufo,
5789 .self_test_count = s2io_ethtool_self_test_count,
5790 .self_test = s2io_ethtool_test,
5791 .get_strings = s2io_ethtool_get_strings,
5792 .phys_id = s2io_ethtool_idnic,
5793 .get_stats_count = s2io_ethtool_get_stats_count,
5794 .get_ethtool_stats = s2io_get_ethtool_stats
5798 * s2io_ioctl - Entry point for the Ioctl
5799 * @dev : Device pointer.
5800 * @ifr : An IOCTL specific structure, that can contain a pointer to
5801 * a proprietary structure used to pass information to the driver.
5802 * @cmd : This is used to distinguish between the different commands that
5803 * can be passed to the IOCTL functions.
5805 * Currently there is no special functionality supported in IOCTL, hence
5806 * the function always returns EOPNOTSUPPORTED
5809 static int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
5815 * s2io_change_mtu - entry point to change MTU size for the device.
5816 * @dev : device pointer.
5817 * @new_mtu : the new MTU size for the device.
5818 * Description: A driver entry point to change MTU size for the device.
5819 * Before changing the MTU the device must be stopped.
5821 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5825 static int s2io_change_mtu(struct net_device *dev, int new_mtu)
5827 struct s2io_nic *sp = dev->priv;
5829 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
5830 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
5836 if (netif_running(dev)) {
5838 netif_stop_queue(dev);
5839 if (s2io_card_up(sp)) {
5840 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
5843 if (netif_queue_stopped(dev))
5844 netif_wake_queue(dev);
5845 } else { /* Device is down */
5846 struct XENA_dev_config __iomem *bar0 = sp->bar0;
5847 u64 val64 = new_mtu;
5849 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
5856 * s2io_tasklet - Bottom half of the ISR.
5857 * @dev_adr : address of the device structure in dma_addr_t format.
5859 * This is the tasklet or the bottom half of the ISR. This is
5860 * an extension of the ISR which is scheduled by the scheduler to be run
5861 * when the load on the CPU is low. All low priority tasks of the ISR can
5862 * be pushed into the tasklet. For now the tasklet is used only to
5863 * replenish the Rx buffers in the Rx buffer descriptors.
5868 static void s2io_tasklet(unsigned long dev_addr)
5870 struct net_device *dev = (struct net_device *) dev_addr;
5871 struct s2io_nic *sp = dev->priv;
5873 struct mac_info *mac_control;
5874 struct config_param *config;
5876 mac_control = &sp->mac_control;
5877 config = &sp->config;
5879 if (!TASKLET_IN_USE) {
5880 for (i = 0; i < config->rx_ring_num; i++) {
5881 ret = fill_rx_buffers(sp, i);
5882 if (ret == -ENOMEM) {
5883 DBG_PRINT(ERR_DBG, "%s: Out of ",
5885 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
5887 } else if (ret == -EFILL) {
5889 "%s: Rx Ring %d is full\n",
5894 clear_bit(0, (&sp->tasklet_status));
5899 * s2io_set_link - Set the Link status
5900 * @data: long pointer to device private structure
5901 * Description: Sets the link status for the adapter
5904 static void s2io_set_link(struct work_struct *work)
5906 struct s2io_nic *nic = container_of(work, struct s2io_nic, set_link_task);
5907 struct net_device *dev = nic->dev;
5908 struct XENA_dev_config __iomem *bar0 = nic->bar0;
5914 if (!netif_running(dev))
5917 if (test_and_set_bit(0, &(nic->link_state))) {
5918 /* The card is being reset, no point doing anything */
5922 subid = nic->pdev->subsystem_device;
5923 if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
5925 * Allow a small delay for the NICs self initiated
5926 * cleanup to complete.
5931 val64 = readq(&bar0->adapter_status);
5932 if (LINK_IS_UP(val64)) {
5933 if (!(readq(&bar0->adapter_control) & ADAPTER_CNTL_EN)) {
5934 if (verify_xena_quiescence(nic)) {
5935 val64 = readq(&bar0->adapter_control);
5936 val64 |= ADAPTER_CNTL_EN;
5937 writeq(val64, &bar0->adapter_control);
5938 if (CARDS_WITH_FAULTY_LINK_INDICATORS(
5939 nic->device_type, subid)) {
5940 val64 = readq(&bar0->gpio_control);
5941 val64 |= GPIO_CTRL_GPIO_0;
5942 writeq(val64, &bar0->gpio_control);
5943 val64 = readq(&bar0->gpio_control);
5945 val64 |= ADAPTER_LED_ON;
5946 writeq(val64, &bar0->adapter_control);
5948 nic->device_enabled_once = TRUE;
5950 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
5951 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
5952 netif_stop_queue(dev);
5955 val64 = readq(&bar0->adapter_status);
5956 if (!LINK_IS_UP(val64)) {
5957 DBG_PRINT(ERR_DBG, "%s:", dev->name);
5958 DBG_PRINT(ERR_DBG, " Link down after enabling ");
5959 DBG_PRINT(ERR_DBG, "device \n");
5961 s2io_link(nic, LINK_UP);
5963 if (CARDS_WITH_FAULTY_LINK_INDICATORS(nic->device_type,
5965 val64 = readq(&bar0->gpio_control);
5966 val64 &= ~GPIO_CTRL_GPIO_0;
5967 writeq(val64, &bar0->gpio_control);
5968 val64 = readq(&bar0->gpio_control);
5970 s2io_link(nic, LINK_DOWN);
5972 clear_bit(0, &(nic->link_state));
5978 static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
5980 struct sk_buff **skb, u64 *temp0, u64 *temp1,
5981 u64 *temp2, int size)
5983 struct net_device *dev = sp->dev;
5984 struct sk_buff *frag_list;
5986 if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
5989 DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
5991 * As Rx frame are not going to be processed,
5992 * using same mapped address for the Rxd
5995 ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0;
5997 *skb = dev_alloc_skb(size);
5999 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
6000 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
6003 /* storing the mapped addr in a temp variable
6004 * such it will be used for next rxd whose
6005 * Host Control is NULL
6007 ((struct RxD1*)rxdp)->Buffer0_ptr = *temp0 =
6008 pci_map_single( sp->pdev, (*skb)->data,
6009 size - NET_IP_ALIGN,
6010 PCI_DMA_FROMDEVICE);
6011 rxdp->Host_Control = (unsigned long) (*skb);
6013 } else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
6014 /* Two buffer Mode */
6016 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
6017 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
6018 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
6020 *skb = dev_alloc_skb(size);
6022 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
6026 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
6027 pci_map_single(sp->pdev, (*skb)->data,
6029 PCI_DMA_FROMDEVICE);
6030 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
6031 pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
6032 PCI_DMA_FROMDEVICE);
6033 rxdp->Host_Control = (unsigned long) (*skb);
6035 /* Buffer-1 will be dummy buffer not used */
6036 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
6037 pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
6038 PCI_DMA_FROMDEVICE);
6040 } else if ((rxdp->Host_Control == 0)) {
6041 /* Three buffer mode */
6043 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0;
6044 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1;
6045 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2;
6047 *skb = dev_alloc_skb(size);
6049 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n",
6053 ((struct RxD3*)rxdp)->Buffer0_ptr = *temp0 =
6054 pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
6055 PCI_DMA_FROMDEVICE);
6056 /* Buffer-1 receives L3/L4 headers */
6057 ((struct RxD3*)rxdp)->Buffer1_ptr = *temp1 =
6058 pci_map_single( sp->pdev, (*skb)->data,
6060 PCI_DMA_FROMDEVICE);
6062 * skb_shinfo(skb)->frag_list will have L4
6065 skb_shinfo(*skb)->frag_list = dev_alloc_skb(dev->mtu +
6067 if (skb_shinfo(*skb)->frag_list == NULL) {
6068 DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb \
6069 failed\n ", dev->name);
6072 frag_list = skb_shinfo(*skb)->frag_list;
6073 frag_list->next = NULL;
6075 * Buffer-2 receives L4 data payload
6077 ((struct RxD3*)rxdp)->Buffer2_ptr = *temp2 =
6078 pci_map_single( sp->pdev, frag_list->data,
6079 dev->mtu, PCI_DMA_FROMDEVICE);
6084 static void set_rxd_buffer_size(struct s2io_nic *sp, struct RxD_t *rxdp,
6087 struct net_device *dev = sp->dev;
6088 if (sp->rxd_mode == RXD_MODE_1) {
6089 rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
6090 } else if (sp->rxd_mode == RXD_MODE_3B) {
6091 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6092 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
6093 rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
6095 rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
6096 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
6097 rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
6101 static int rxd_owner_bit_reset(struct s2io_nic *sp)
6103 int i, j, k, blk_cnt = 0, size;
6104 struct mac_info * mac_control = &sp->mac_control;
6105 struct config_param *config = &sp->config;
6106 struct net_device *dev = sp->dev;
6107 struct RxD_t *rxdp = NULL;
6108 struct sk_buff *skb = NULL;
6109 struct buffAdd *ba = NULL;
6110 u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
6112 /* Calculate the size based on ring mode */
6113 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
6114 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
6115 if (sp->rxd_mode == RXD_MODE_1)
6116 size += NET_IP_ALIGN;
6117 else if (sp->rxd_mode == RXD_MODE_3B)
6118 size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
6120 size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
6122 for (i = 0; i < config->rx_ring_num; i++) {
6123 blk_cnt = config->rx_cfg[i].num_rxd /
6124 (rxd_count[sp->rxd_mode] +1);
6126 for (j = 0; j < blk_cnt; j++) {
6127 for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
6128 rxdp = mac_control->rings[i].
6129 rx_blocks[j].rxds[k].virt_addr;
6130 if(sp->rxd_mode >= RXD_MODE_3A)
6131 ba = &mac_control->rings[i].ba[j][k];
6132 if (set_rxd_buffer_pointer(sp, rxdp, ba,
6133 &skb,(u64 *)&temp0_64,
6140 set_rxd_buffer_size(sp, rxdp, size);
6142 /* flip the Ownership bit to Hardware */
6143 rxdp->Control_1 |= RXD_OWN_XENA;
6151 static int s2io_add_isr(struct s2io_nic * sp)
6154 struct net_device *dev = sp->dev;
6157 if (sp->intr_type == MSI)
6158 ret = s2io_enable_msi(sp);
6159 else if (sp->intr_type == MSI_X)
6160 ret = s2io_enable_msi_x(sp);
6162 DBG_PRINT(ERR_DBG, "%s: Defaulting to INTA\n", dev->name);
6163 sp->intr_type = INTA;
6166 /* Store the values of the MSIX table in the struct s2io_nic structure */
6167 store_xmsi_data(sp);
6169 /* After proper initialization of H/W, register ISR */
6170 if (sp->intr_type == MSI) {
6171 err = request_irq((int) sp->pdev->irq, s2io_msi_handle,
6172 IRQF_SHARED, sp->name, dev);
6174 pci_disable_msi(sp->pdev);
6175 DBG_PRINT(ERR_DBG, "%s: MSI registration failed\n",
6180 if (sp->intr_type == MSI_X) {
6181 int i, msix_tx_cnt=0,msix_rx_cnt=0;
6183 for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
6184 if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
6185 sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
6187 err = request_irq(sp->entries[i].vector,
6188 s2io_msix_fifo_handle, 0, sp->desc[i],
6189 sp->s2io_entries[i].arg);
6190 /* If either data or addr is zero print it */
6191 if(!(sp->msix_info[i].addr &&
6192 sp->msix_info[i].data)) {
6193 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
6194 "Data:0x%lx\n",sp->desc[i],
6195 (unsigned long long)
6196 sp->msix_info[i].addr,
6198 ntohl(sp->msix_info[i].data));
6203 sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
6205 err = request_irq(sp->entries[i].vector,
6206 s2io_msix_ring_handle, 0, sp->desc[i],
6207 sp->s2io_entries[i].arg);
6208 /* If either data or addr is zero print it */
6209 if(!(sp->msix_info[i].addr &&
6210 sp->msix_info[i].data)) {
6211 DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx"
6212 "Data:0x%lx\n",sp->desc[i],
6213 (unsigned long long)
6214 sp->msix_info[i].addr,
6216 ntohl(sp->msix_info[i].data));
6222 DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration "
6223 "failed\n", dev->name, i);
6224 DBG_PRINT(ERR_DBG, "Returned: %d\n", err);
6227 sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS;
6229 printk("MSI-X-TX %d entries enabled\n",msix_tx_cnt);
6230 printk("MSI-X-RX %d entries enabled\n",msix_rx_cnt);
6232 if (sp->intr_type == INTA) {
6233 err = request_irq((int) sp->pdev->irq, s2io_isr, IRQF_SHARED,
6236 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
6243 static void s2io_rem_isr(struct s2io_nic * sp)
6246 struct net_device *dev = sp->dev;
6248 if (sp->intr_type == MSI_X) {
6252 for (i=1; (sp->s2io_entries[i].in_use ==
6253 MSIX_REGISTERED_SUCCESS); i++) {
6254 int vector = sp->entries[i].vector;
6255 void *arg = sp->s2io_entries[i].arg;
6257 free_irq(vector, arg);
6259 pci_read_config_word(sp->pdev, 0x42, &msi_control);
6260 msi_control &= 0xFFFE; /* Disable MSI */
6261 pci_write_config_word(sp->pdev, 0x42, msi_control);
6263 pci_disable_msix(sp->pdev);
6265 free_irq(sp->pdev->irq, dev);
6266 if (sp->intr_type == MSI) {
6269 pci_disable_msi(sp->pdev);
6270 pci_read_config_word(sp->pdev, 0x4c, &val);
6272 pci_write_config_word(sp->pdev, 0x4c, val);
6275 /* Waiting till all Interrupt handlers are complete */
6279 if (!atomic_read(&sp->isr_cnt))
6285 static void s2io_card_down(struct s2io_nic * sp)
6288 struct XENA_dev_config __iomem *bar0 = sp->bar0;
6289 unsigned long flags;
6290 register u64 val64 = 0;
6292 del_timer_sync(&sp->alarm_timer);
6293 /* If s2io_set_link task is executing, wait till it completes. */
6294 while (test_and_set_bit(0, &(sp->link_state))) {
6297 atomic_set(&sp->card_state, CARD_DOWN);
6299 /* disable Tx and Rx traffic on the NIC */
6305 tasklet_kill(&sp->task);
6307 /* Check if the device is Quiescent and then Reset the NIC */
6309 /* As per the HW requirement we need to replenish the
6310 * receive buffer to avoid the ring bump. Since there is
6311 * no intention of processing the Rx frame at this pointwe are
6312 * just settting the ownership bit of rxd in Each Rx
6313 * ring to HW and set the appropriate buffer size
6314 * based on the ring mode
6316 rxd_owner_bit_reset(sp);
6318 val64 = readq(&bar0->adapter_status);
6319 if (verify_xena_quiescence(sp)) {
6320 if(verify_pcc_quiescent(sp, sp->device_enabled_once))
6328 "s2io_close:Device not Quiescent ");
6329 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
6330 (unsigned long long) val64);
6336 spin_lock_irqsave(&sp->tx_lock, flags);
6337 /* Free all Tx buffers */
6338 free_tx_buffers(sp);
6339 spin_unlock_irqrestore(&sp->tx_lock, flags);
6341 /* Free all Rx buffers */
6342 spin_lock_irqsave(&sp->rx_lock, flags);
6343 free_rx_buffers(sp);
6344 spin_unlock_irqrestore(&sp->rx_lock, flags);
6346 clear_bit(0, &(sp->link_state));
6349 static int s2io_card_up(struct s2io_nic * sp)
6352 struct mac_info *mac_control;
6353 struct config_param *config;
6354 struct net_device *dev = (struct net_device *) sp->dev;
6357 /* Initialize the H/W I/O registers */
6358 if (init_nic(sp) != 0) {
6359 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
6366 * Initializing the Rx buffers. For now we are considering only 1
6367 * Rx ring and initializing buffers into 30 Rx blocks
6369 mac_control = &sp->mac_control;
6370 config = &sp->config;
6372 for (i = 0; i < config->rx_ring_num; i++) {
6373 if ((ret = fill_rx_buffers(sp, i))) {
6374 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
6377 free_rx_buffers(sp);
6380 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
6381 atomic_read(&sp->rx_bufs_left[i]));
6383 /* Maintain the state prior to the open */
6384 if (sp->promisc_flg)
6385 sp->promisc_flg = 0;
6386 if (sp->m_cast_flg) {
6388 sp->all_multi_pos= 0;
6391 /* Setting its receive mode */
6392 s2io_set_multicast(dev);
6395 /* Initialize max aggregatable pkts per session based on MTU */
6396 sp->lro_max_aggr_per_sess = ((1<<16) - 1) / dev->mtu;
6397 /* Check if we can use(if specified) user provided value */
6398 if (lro_max_pkts < sp->lro_max_aggr_per_sess)
6399 sp->lro_max_aggr_per_sess = lro_max_pkts;
6402 /* Enable Rx Traffic and interrupts on the NIC */
6403 if (start_nic(sp)) {
6404 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
6406 free_rx_buffers(sp);
6410 /* Add interrupt service routine */
6411 if (s2io_add_isr(sp) != 0) {
6412 if (sp->intr_type == MSI_X)
6415 free_rx_buffers(sp);
6419 S2IO_TIMER_CONF(sp->alarm_timer, s2io_alarm_handle, sp, (HZ/2));
6421 /* Enable tasklet for the device */
6422 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
6424 /* Enable select interrupts */
6425 if (sp->intr_type != INTA)
6426 en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS);
6428 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR;
6429 interruptible |= TX_PIC_INTR | RX_PIC_INTR;
6430 interruptible |= TX_MAC_INTR | RX_MAC_INTR;
6431 en_dis_able_nic_intrs(sp, interruptible, ENABLE_INTRS);
6435 atomic_set(&sp->card_state, CARD_UP);
6440 * s2io_restart_nic - Resets the NIC.
6441 * @data : long pointer to the device private structure
6443 * This function is scheduled to be run by the s2io_tx_watchdog
6444 * function after 0.5 secs to reset the NIC. The idea is to reduce
6445 * the run time of the watch dog routine which is run holding a
6449 static void s2io_restart_nic(struct work_struct *work)
6451 struct s2io_nic *sp = container_of(work, struct s2io_nic, rst_timer_task);
6452 struct net_device *dev = sp->dev;
6456 if (!netif_running(dev))
6460 if (s2io_card_up(sp)) {
6461 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
6464 netif_wake_queue(dev);
6465 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
6472 * s2io_tx_watchdog - Watchdog for transmit side.
6473 * @dev : Pointer to net device structure
6475 * This function is triggered if the Tx Queue is stopped
6476 * for a pre-defined amount of time when the Interface is still up.
6477 * If the Interface is jammed in such a situation, the hardware is
6478 * reset (by s2io_close) and restarted again (by s2io_open) to
6479 * overcome any problem that might have been caused in the hardware.
6484 static void s2io_tx_watchdog(struct net_device *dev)
6486 struct s2io_nic *sp = dev->priv;
6488 if (netif_carrier_ok(dev)) {
6489 schedule_work(&sp->rst_timer_task);
6490 sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
6495 * rx_osm_handler - To perform some OS related operations on SKB.
6496 * @sp: private member of the device structure,pointer to s2io_nic structure.
6497 * @skb : the socket buffer pointer.
6498 * @len : length of the packet
6499 * @cksum : FCS checksum of the frame.
6500 * @ring_no : the ring from which this RxD was extracted.
6502 * This function is called by the Rx interrupt serivce routine to perform
6503 * some OS related operations on the SKB before passing it to the upper
6504 * layers. It mainly checks if the checksum is OK, if so adds it to the
6505 * SKBs cksum variable, increments the Rx packet count and passes the SKB
6506 * to the upper layer. If the checksum is wrong, it increments the Rx
6507 * packet error count, frees the SKB and returns error.
6509 * SUCCESS on success and -1 on failure.
6511 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
6513 struct s2io_nic *sp = ring_data->nic;
6514 struct net_device *dev = (struct net_device *) sp->dev;
6515 struct sk_buff *skb = (struct sk_buff *)
6516 ((unsigned long) rxdp->Host_Control);
6517 int ring_no = ring_data->ring_no;
6518 u16 l3_csum, l4_csum;
6519 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
6525 /* Check for parity error */
6527 sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
6531 * Drop the packet if bad transfer code. Exception being
6532 * 0x5, which could be due to unsupported IPv6 extension header.
6533 * In this case, we let stack handle the packet.
6534 * Note that in this case, since checksum will be incorrect,
6535 * stack will validate the same.
6537 if (err && ((err >> 48) != 0x5)) {
6538 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
6540 sp->stats.rx_crc_errors++;
6542 atomic_dec(&sp->rx_bufs_left[ring_no]);
6543 rxdp->Host_Control = 0;
6548 /* Updating statistics */
6549 rxdp->Host_Control = 0;
6551 sp->stats.rx_packets++;
6552 if (sp->rxd_mode == RXD_MODE_1) {
6553 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
6555 sp->stats.rx_bytes += len;
6558 } else if (sp->rxd_mode >= RXD_MODE_3A) {
6559 int get_block = ring_data->rx_curr_get_info.block_index;
6560 int get_off = ring_data->rx_curr_get_info.offset;
6561 int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2);
6562 int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2);
6563 unsigned char *buff = skb_push(skb, buf0_len);
6565 struct buffAdd *ba = &ring_data->ba[get_block][get_off];
6566 sp->stats.rx_bytes += buf0_len + buf2_len;
6567 memcpy(buff, ba->ba_0, buf0_len);
6569 if (sp->rxd_mode == RXD_MODE_3A) {
6570 int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2);
6572 skb_put(skb, buf1_len);
6573 skb->len += buf2_len;
6574 skb->data_len += buf2_len;
6575 skb_put(skb_shinfo(skb)->frag_list, buf2_len);
6576 sp->stats.rx_bytes += buf1_len;
6579 skb_put(skb, buf2_len);
6582 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) ||
6583 (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
6585 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
6586 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
6587 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
6589 * NIC verifies if the Checksum of the received
6590 * frame is Ok or not and accordingly returns
6591 * a flag in the RxD.
6593 skb->ip_summed = CHECKSUM_UNNECESSARY;
6599 ret = s2io_club_tcp_session(skb->data, &tcp,
6600 &tcp_len, &lro, rxdp, sp);
6602 case 3: /* Begin anew */
6605 case 1: /* Aggregate */
6607 lro_append_pkt(sp, lro,
6611 case 4: /* Flush session */
6613 lro_append_pkt(sp, lro,
6615 queue_rx_frame(lro->parent);
6616 clear_lro_session(lro);
6617 sp->mac_control.stats_info->
6618 sw_stat.flush_max_pkts++;
6621 case 2: /* Flush both */
6622 lro->parent->data_len =
6624 sp->mac_control.stats_info->
6625 sw_stat.sending_both++;
6626 queue_rx_frame(lro->parent);
6627 clear_lro_session(lro);
6629 case 0: /* sessions exceeded */
6630 case -1: /* non-TCP or not
6634 * First pkt in session not
6635 * L3/L4 aggregatable
6640 "%s: Samadhana!!\n",
6647 * Packet with erroneous checksum, let the
6648 * upper layers deal with it.
6650 skb->ip_summed = CHECKSUM_NONE;
6653 skb->ip_summed = CHECKSUM_NONE;
6657 skb->protocol = eth_type_trans(skb, dev);
6658 if ((sp->vlgrp && RXD_GET_VLAN_TAG(rxdp->Control_2) &&
6660 /* Queueing the vlan frame to the upper layer */
6662 vlan_hwaccel_receive_skb(skb, sp->vlgrp,
6663 RXD_GET_VLAN_TAG(rxdp->Control_2));
6665 vlan_hwaccel_rx(skb, sp->vlgrp,
6666 RXD_GET_VLAN_TAG(rxdp->Control_2));
6669 netif_receive_skb(skb);
6675 queue_rx_frame(skb);
6677 dev->last_rx = jiffies;
6679 atomic_dec(&sp->rx_bufs_left[ring_no]);
6684 * s2io_link - stops/starts the Tx queue.
6685 * @sp : private member of the device structure, which is a pointer to the
6686 * s2io_nic structure.
6687 * @link : inidicates whether link is UP/DOWN.
6689 * This function stops/starts the Tx queue depending on whether the link
6690 * status of the NIC is is down or up. This is called by the Alarm
6691 * interrupt handler whenever a link change interrupt comes up.
6696 static void s2io_link(struct s2io_nic * sp, int link)
6698 struct net_device *dev = (struct net_device *) sp->dev;
6700 if (link != sp->last_link_state) {
6701 if (link == LINK_DOWN) {
6702 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
6703 netif_carrier_off(dev);
6705 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
6706 netif_carrier_on(dev);
6709 sp->last_link_state = link;
6713 * get_xena_rev_id - to identify revision ID of xena.
6714 * @pdev : PCI Dev structure
6716 * Function to identify the Revision ID of xena.
6718 * returns the revision ID of the device.
6721 static int get_xena_rev_id(struct pci_dev *pdev)
6725 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
6730 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
6731 * @sp : private member of the device structure, which is a pointer to the
6732 * s2io_nic structure.
6734 * This function initializes a few of the PCI and PCI-X configuration registers
6735 * with recommended values.
6740 static void s2io_init_pci(struct s2io_nic * sp)
6742 u16 pci_cmd = 0, pcix_cmd = 0;
6744 /* Enable Data Parity Error Recovery in PCI-X command register. */
6745 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
6747 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
6749 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
6752 /* Set the PErr Response bit in PCI command register. */
6753 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
6754 pci_write_config_word(sp->pdev, PCI_COMMAND,
6755 (pci_cmd | PCI_COMMAND_PARITY));
6756 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
6759 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
6761 if ( tx_fifo_num > 8) {
6762 DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
6764 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
6767 if ( rx_ring_num > 8) {
6768 DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
6770 DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
6773 if (*dev_intr_type != INTA)
6776 #ifndef CONFIG_PCI_MSI
6777 if (*dev_intr_type != INTA) {
6778 DBG_PRINT(ERR_DBG, "s2io: This kernel does not support"
6779 "MSI/MSI-X. Defaulting to INTA\n");
6780 *dev_intr_type = INTA;
6783 if (*dev_intr_type > MSI_X) {
6784 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
6785 "Defaulting to INTA\n");
6786 *dev_intr_type = INTA;
6789 if ((*dev_intr_type == MSI_X) &&
6790 ((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
6791 (pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
6792 DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI_X. "
6793 "Defaulting to INTA\n");
6794 *dev_intr_type = INTA;
6797 if (rx_ring_mode > 3) {
6798 DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
6799 DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
6806 * rts_ds_steer - Receive traffic steering based on IPv4 or IPv6 TOS
6807 * or Traffic class respectively.
6808 * @nic: device peivate variable
6809 * Description: The function configures the receive steering to
6810 * desired receive ring.
6811 * Return Value: SUCCESS on success and
6812 * '-1' on failure (endian settings incorrect).
6814 static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring)
6816 struct XENA_dev_config __iomem *bar0 = nic->bar0;
6817 register u64 val64 = 0;
6819 if (ds_codepoint > 63)
6822 val64 = RTS_DS_MEM_DATA(ring);
6823 writeq(val64, &bar0->rts_ds_mem_data);
6825 val64 = RTS_DS_MEM_CTRL_WE |
6826 RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
6827 RTS_DS_MEM_CTRL_OFFSET(ds_codepoint);
6829 writeq(val64, &bar0->rts_ds_mem_ctrl);
6831 return wait_for_cmd_complete(&bar0->rts_ds_mem_ctrl,
6832 RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
6837 * s2io_init_nic - Initialization of the adapter .
6838 * @pdev : structure containing the PCI related information of the device.
6839 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
6841 * The function initializes an adapter identified by the pci_dev structure.
6842 * All OS related initialization including memory and device structure and
6843 * initialization of the device private variable is done. Also the swapper
6844 * control register is initialized to enable read and write into the I/O
6845 * registers of the device.
6847 * returns 0 on success and negative on failure.
6850 static int __devinit
6851 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
6853 struct s2io_nic *sp;
6854 struct net_device *dev;
6856 int dma_flag = FALSE;
6857 u32 mac_up, mac_down;
6858 u64 val64 = 0, tmp64 = 0;
6859 struct XENA_dev_config __iomem *bar0 = NULL;
6861 struct mac_info *mac_control;
6862 struct config_param *config;
6864 u8 dev_intr_type = intr_type;
6866 if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
6869 if ((ret = pci_enable_device(pdev))) {
6871 "s2io_init_nic: pci_enable_device failed\n");
6875 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
6876 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
6878 if (pci_set_consistent_dma_mask
6879 (pdev, DMA_64BIT_MASK)) {
6881 "Unable to obtain 64bit DMA for \
6882 consistent allocations\n");
6883 pci_disable_device(pdev);
6886 } else if (!pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
6887 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
6889 pci_disable_device(pdev);
6892 if (dev_intr_type != MSI_X) {
6893 if (pci_request_regions(pdev, s2io_driver_name)) {
6894 DBG_PRINT(ERR_DBG, "Request Regions failed\n");
6895 pci_disable_device(pdev);
6900 if (!(request_mem_region(pci_resource_start(pdev, 0),
6901 pci_resource_len(pdev, 0), s2io_driver_name))) {
6902 DBG_PRINT(ERR_DBG, "bar0 Request Regions failed\n");
6903 pci_disable_device(pdev);
6906 if (!(request_mem_region(pci_resource_start(pdev, 2),
6907 pci_resource_len(pdev, 2), s2io_driver_name))) {
6908 DBG_PRINT(ERR_DBG, "bar1 Request Regions failed\n");
6909 release_mem_region(pci_resource_start(pdev, 0),
6910 pci_resource_len(pdev, 0));
6911 pci_disable_device(pdev);
6916 dev = alloc_etherdev(sizeof(struct s2io_nic));
6918 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
6919 pci_disable_device(pdev);
6920 pci_release_regions(pdev);
6924 pci_set_master(pdev);
6925 pci_set_drvdata(pdev, dev);
6926 SET_MODULE_OWNER(dev);
6927 SET_NETDEV_DEV(dev, &pdev->dev);
6929 /* Private member variable initialized to s2io NIC structure */
6931 memset(sp, 0, sizeof(struct s2io_nic));
6934 sp->high_dma_flag = dma_flag;
6935 sp->device_enabled_once = FALSE;
6936 if (rx_ring_mode == 1)
6937 sp->rxd_mode = RXD_MODE_1;
6938 if (rx_ring_mode == 2)
6939 sp->rxd_mode = RXD_MODE_3B;
6940 if (rx_ring_mode == 3)
6941 sp->rxd_mode = RXD_MODE_3A;
6943 sp->intr_type = dev_intr_type;
6945 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) ||
6946 (pdev->device == PCI_DEVICE_ID_HERC_UNI))
6947 sp->device_type = XFRAME_II_DEVICE;
6949 sp->device_type = XFRAME_I_DEVICE;
6953 /* Initialize some PCI/PCI-X fields of the NIC. */
6957 * Setting the device configuration parameters.
6958 * Most of these parameters can be specified by the user during
6959 * module insertion as they are module loadable parameters. If
6960 * these parameters are not not specified during load time, they
6961 * are initialized with default values.
6963 mac_control = &sp->mac_control;
6964 config = &sp->config;
6966 /* Tx side parameters. */
6967 config->tx_fifo_num = tx_fifo_num;
6968 for (i = 0; i < MAX_TX_FIFOS; i++) {
6969 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
6970 config->tx_cfg[i].fifo_priority = i;
6973 /* mapping the QoS priority to the configured fifos */
6974 for (i = 0; i < MAX_TX_FIFOS; i++)
6975 config->fifo_mapping[i] = fifo_map[config->tx_fifo_num][i];
6977 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
6978 for (i = 0; i < config->tx_fifo_num; i++) {
6979 config->tx_cfg[i].f_no_snoop =
6980 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
6981 if (config->tx_cfg[i].fifo_len < 65) {
6982 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
6986 /* + 2 because one Txd for skb->data and one Txd for UFO */
6987 config->max_txds = MAX_SKB_FRAGS + 2;
6989 /* Rx side parameters. */
6990 config->rx_ring_num = rx_ring_num;
6991 for (i = 0; i < MAX_RX_RINGS; i++) {
6992 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
6993 (rxd_count[sp->rxd_mode] + 1);
6994 config->rx_cfg[i].ring_priority = i;
6997 for (i = 0; i < rx_ring_num; i++) {
6998 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
6999 config->rx_cfg[i].f_no_snoop =
7000 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
7003 /* Setting Mac Control parameters */
7004 mac_control->rmac_pause_time = rmac_pause_time;
7005 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
7006 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
7009 /* Initialize Ring buffer parameters. */
7010 for (i = 0; i < config->rx_ring_num; i++)
7011 atomic_set(&sp->rx_bufs_left[i], 0);
7013 /* Initialize the number of ISRs currently running */
7014 atomic_set(&sp->isr_cnt, 0);
7016 /* initialize the shared memory used by the NIC and the host */
7017 if (init_shared_mem(sp)) {
7018 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
7021 goto mem_alloc_failed;
7024 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
7025 pci_resource_len(pdev, 0));
7027 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem1\n",
7030 goto bar0_remap_failed;
7033 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
7034 pci_resource_len(pdev, 2));
7036 DBG_PRINT(ERR_DBG, "%s: Neterion: cannot remap io mem2\n",
7039 goto bar1_remap_failed;
7042 dev->irq = pdev->irq;
7043 dev->base_addr = (unsigned long) sp->bar0;
7045 /* Initializing the BAR1 address as the start of the FIFO pointer. */
7046 for (j = 0; j < MAX_TX_FIFOS; j++) {
7047 mac_control->tx_FIFO_start[j] = (struct TxFIFO_element __iomem *)
7048 (sp->bar1 + (j * 0x00020000));
7051 /* Driver entry points */
7052 dev->open = &s2io_open;
7053 dev->stop = &s2io_close;
7054 dev->hard_start_xmit = &s2io_xmit;
7055 dev->get_stats = &s2io_get_stats;
7056 dev->set_multicast_list = &s2io_set_multicast;
7057 dev->do_ioctl = &s2io_ioctl;
7058 dev->change_mtu = &s2io_change_mtu;
7059 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
7060 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7061 dev->vlan_rx_register = s2io_vlan_rx_register;
7062 dev->vlan_rx_kill_vid = (void *)s2io_vlan_rx_kill_vid;
7065 * will use eth_mac_addr() for dev->set_mac_address
7066 * mac address will be set every time dev->open() is called
7068 dev->poll = s2io_poll;
7071 #ifdef CONFIG_NET_POLL_CONTROLLER
7072 dev->poll_controller = s2io_netpoll;
7075 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7076 if (sp->high_dma_flag == TRUE)
7077 dev->features |= NETIF_F_HIGHDMA;
7078 dev->features |= NETIF_F_TSO;
7079 dev->features |= NETIF_F_TSO6;
7080 if ((sp->device_type & XFRAME_II_DEVICE) && (ufo)) {
7081 dev->features |= NETIF_F_UFO;
7082 dev->features |= NETIF_F_HW_CSUM;
7085 dev->tx_timeout = &s2io_tx_watchdog;
7086 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
7087 INIT_WORK(&sp->rst_timer_task, s2io_restart_nic);
7088 INIT_WORK(&sp->set_link_task, s2io_set_link);
7090 pci_save_state(sp->pdev);
7092 /* Setting swapper control on the NIC, for proper reset operation */
7093 if (s2io_set_swapper(sp)) {
7094 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
7097 goto set_swap_failed;
7100 /* Verify if the Herc works on the slot its placed into */
7101 if (sp->device_type & XFRAME_II_DEVICE) {
7102 mode = s2io_verify_pci_mode(sp);
7104 DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__);
7105 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7107 goto set_swap_failed;
7111 /* Not needed for Herc */
7112 if (sp->device_type & XFRAME_I_DEVICE) {
7114 * Fix for all "FFs" MAC address problems observed on
7117 fix_mac_address(sp);
7122 * MAC address initialization.
7123 * For now only one mac address will be read and used.
7126 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
7127 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
7128 writeq(val64, &bar0->rmac_addr_cmd_mem);
7129 wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
7130 RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING, S2IO_BIT_RESET);
7131 tmp64 = readq(&bar0->rmac_addr_data0_mem);
7132 mac_down = (u32) tmp64;
7133 mac_up = (u32) (tmp64 >> 32);
7135 memset(sp->def_mac_addr[0].mac_addr, 0, sizeof(ETH_ALEN));
7137 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
7138 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
7139 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
7140 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
7141 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
7142 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
7144 /* Set the factory defined MAC address initially */
7145 dev->addr_len = ETH_ALEN;
7146 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
7148 /* reset Nic and bring it to known state */
7152 * Initialize the tasklet status and link state flags
7153 * and the card state parameter
7155 atomic_set(&(sp->card_state), 0);
7156 sp->tasklet_status = 0;
7159 /* Initialize spinlocks */
7160 spin_lock_init(&sp->tx_lock);
7163 spin_lock_init(&sp->put_lock);
7164 spin_lock_init(&sp->rx_lock);
7167 * SXE-002: Configure link and activity LED to init state
7170 subid = sp->pdev->subsystem_device;
7171 if ((subid & 0xFF) >= 0x07) {
7172 val64 = readq(&bar0->gpio_control);
7173 val64 |= 0x0000800000000000ULL;
7174 writeq(val64, &bar0->gpio_control);
7175 val64 = 0x0411040400000000ULL;
7176 writeq(val64, (void __iomem *) bar0 + 0x2700);
7177 val64 = readq(&bar0->gpio_control);
7180 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
7182 if (register_netdev(dev)) {
7183 DBG_PRINT(ERR_DBG, "Device registration failed\n");
7185 goto register_failed;
7188 DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n");
7189 DBG_PRINT(ERR_DBG, "%s: Neterion %s (rev %d)\n",dev->name,
7190 sp->product_name, get_xena_rev_id(sp->pdev));
7191 DBG_PRINT(ERR_DBG, "%s: Driver version %s\n", dev->name,
7192 s2io_driver_version);
7193 DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
7194 "%02x:%02x:%02x:%02x:%02x:%02x", dev->name,
7195 sp->def_mac_addr[0].mac_addr[0],
7196 sp->def_mac_addr[0].mac_addr[1],
7197 sp->def_mac_addr[0].mac_addr[2],
7198 sp->def_mac_addr[0].mac_addr[3],
7199 sp->def_mac_addr[0].mac_addr[4],
7200 sp->def_mac_addr[0].mac_addr[5]);
7201 DBG_PRINT(ERR_DBG, "SERIAL NUMBER: %s\n", sp->serial_num);
7202 if (sp->device_type & XFRAME_II_DEVICE) {
7203 mode = s2io_print_pci_mode(sp);
7205 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
7207 unregister_netdev(dev);
7208 goto set_swap_failed;
7211 switch(sp->rxd_mode) {
7213 DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
7217 DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
7221 DBG_PRINT(ERR_DBG, "%s: 3-Buffer receive mode enabled\n",
7227 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
7228 switch(sp->intr_type) {
7230 DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
7233 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI\n", dev->name);
7236 DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
7240 DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
7243 DBG_PRINT(ERR_DBG, "%s: UDP Fragmentation Offload(UFO)"
7244 " enabled\n", dev->name);
7245 /* Initialize device name */
7246 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
7248 /* Initialize bimodal Interrupts */
7249 sp->config.bimodal = bimodal;
7250 if (!(sp->device_type & XFRAME_II_DEVICE) && bimodal) {
7251 sp->config.bimodal = 0;
7252 DBG_PRINT(ERR_DBG,"%s:Bimodal intr not supported by Xframe I\n",
7257 * Make Link state as off at this point, when the Link change
7258 * interrupt comes the state will be automatically changed to
7261 netif_carrier_off(dev);
7272 free_shared_mem(sp);
7273 pci_disable_device(pdev);
7274 if (dev_intr_type != MSI_X)
7275 pci_release_regions(pdev);
7277 release_mem_region(pci_resource_start(pdev, 0),
7278 pci_resource_len(pdev, 0));
7279 release_mem_region(pci_resource_start(pdev, 2),
7280 pci_resource_len(pdev, 2));
7282 pci_set_drvdata(pdev, NULL);
7289 * s2io_rem_nic - Free the PCI device
7290 * @pdev: structure containing the PCI related information of the device.
7291 * Description: This function is called by the PCI subsystem to release a
7292 * PCI device and free up all resources held by the device. This could
7293 * be in response to a hot-plug event or when the driver is to be removed.
/*
 * s2io_rem_nic - PCI remove hook: unwind what the probe routine set up.
 * @pdev: the PCI device being removed.
 *
 * Flushes deferred work (rst_timer_task/set_link_task were queued via
 * INIT_WORK at probe time), unregisters the net_device, frees the
 * driver's shared memory, releases the PCI regions, clears the driver
 * data pointer and finally disables the PCI device.
 *
 * NOTE(review): this listing is missing lines (e.g. the NULL guard whose
 * failure path prints "Driver Data is NULL!!", the assignment of 'sp',
 * and the else keyword before the release_mem_region calls) -- comments
 * below describe only the code that is visible.
 */
7297 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
7299 struct net_device *dev =
7300 (struct net_device *) pci_get_drvdata(pdev);
7301 struct s2io_nic *sp;
/* Reached when driver data is NULL (the guard line is missing here). */
7304 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
/* Make sure no deferred work item is still running before teardown. */
7308 flush_scheduled_work();
7311 unregister_netdev(dev);
7313 free_shared_mem(sp);
/* Non-MSI-X: regions were claimed with pci_request_regions ... */
7316 if (sp->intr_type != MSI_X)
7317 pci_release_regions(pdev);
/* ... presumably the MSI-X else-branch (else line missing from listing):
 * the two BAR memory regions were requested individually. */
7319 release_mem_region(pci_resource_start(pdev, 0),
7320 pci_resource_len(pdev, 0));
7321 release_mem_region(pci_resource_start(pdev, 2),
7322 pci_resource_len(pdev, 2));
7324 pci_set_drvdata(pdev, NULL);
7326 pci_disable_device(pdev);
7330 * s2io_starter - Entry point for the driver
7331 * Description: This function is the entry point for the driver. It verifies
7332 * the module loadable parameters and initializes PCI configuration space.
/*
 * s2io_starter - module init entry point.
 * Registers the s2io PCI driver with the PCI core; returns 0 on success
 * or the negative error code from pci_register_driver().
 */
7335 int __init s2io_starter(void)
7337 return pci_register_driver(&s2io_driver);
7341 * s2io_closer - Cleanup routine for the driver
7342 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
/*
 * s2io_closer - module exit routine: unregister the PCI driver and log
 * completion at init-debug level.
 */
7345 static __exit void s2io_closer(void)
7347 pci_unregister_driver(&s2io_driver);
7348 DBG_PRINT(INIT_DBG, "cleanup done\n");
/* Module entry/exit points registered with the kernel. */
7351 module_init(s2io_starter);
7352 module_exit(s2io_closer);
/*
 * check_L2_lro_capable - decide whether a received frame's L2 framing is
 * eligible for LRO and locate its IP and TCP headers.
 * @buffer: start of the received frame data.
 * @ip: out -- set to the start of the IP header.
 * @tcp: out -- set to the start of the TCP header.
 * @rxdp: Rx descriptor; bits 37..39 of Control_1 encode the L2 frame
 * type and RXD_FRAME_PROTO_TCP marks frames the card identified as TCP.
 *
 * NOTE(review): this listing is missing lines (the return statements,
 * the switch() header for l2_type, the default case, and anything
 * between lines 7382 and 7384) -- comments cover only visible code.
 */
7354 static int check_L2_lro_capable(u8 *buffer, struct iphdr **ip,
7355 struct tcphdr **tcp, struct RxD_t *rxdp)
/* L2 type is carried in bits 37..39 of the descriptor control word. */
7358 u8 l2_type = (u8)((rxdp->Control_1 >> 37) & 0x7), ip_len;
/* Only frames the card flagged as TCP are candidates for aggregation. */
7360 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) {
7361 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n",
7367 * By default the VLAN field in the MAC is stripped by the card, if this
7368 * feature is turned off in rx_pa_cfg register, then the ip_off field
7369 * has to be shifted by a further 2 bytes
7372 case 0: /* DIX type */
7373 case 4: /* DIX type with VLAN */
7374 ip_off = HEADER_ETHERNET_II_802_3_SIZE;
7376 /* LLC, SNAP etc are considered non-mergeable */
7381 *ip = (struct iphdr *)((u8 *)buffer + ip_off);
7382 ip_len = (u8)((*ip)->ihl);
/* NOTE(review): ihl counts 32-bit words; the byte scaling (<<2) is
 * presumably on the missing line 7383 -- confirm against full source. */
7384 *tcp = (struct tcphdr *)((unsigned long)*ip + ip_len);
/*
 * check_for_socket_match - does this packet belong to the given LRO
 * session?  Compares the IPv4 source/destination addresses and TCP
 * source/destination ports against the headers stored in the session.
 * NOTE(review): the tcp parameter line and the return statements are
 * missing from this listing; the visible caller treats a non-zero
 * return as "no match" -- confirm against the full source.
 */
7389 static int check_for_socket_match(struct lro *lro, struct iphdr *ip,
7392 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7393 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) ||
7394 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest))
7399 static inline int get_l4_pyld_length(struct iphdr *ip, struct tcphdr *tcp)
7401 return(ntohs(ip->tot_len) - (ip->ihl << 2) - (tcp->doff << 2));
/*
 * initiate_new_session - seed a fresh LRO object from the first packet
 * of a new flow: record the next expected sequence number, the peer's
 * acknowledged sequence and the IP total length, and capture the TCP
 * timestamp pair when the header is exactly 8 words (timestamp option).
 * NOTE(review): the lines storing the l2h/iph/tcph pointers and the
 * session bookkeeping flags are missing from this listing.
 */
7404 static void initiate_new_session(struct lro *lro, u8 *l2h,
7405 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len)
7407 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
/* Next in-order byte we expect = this packet's seq + its payload size. */
7411 lro->tcp_next_seq = tcp_pyld_len + ntohl(tcp->seq);
7412 lro->tcp_ack = ntohl(tcp->ack_seq);
7414 lro->total_len = ntohs(ip->tot_len);
7417 * check if we saw TCP timestamp. Other consistency checks have
7418 * already been done.
/* doff == 8 words: 20-byte header + 12 option bytes (timestamp). */
7420 if (tcp->doff == 8) {
7422 ptr = (u32 *)(tcp+1);
/* ptr+0 holds option kind/length bytes; value and echo follow. */
7424 lro->cur_tsval = *(ptr+1);
7425 lro->cur_tsecr = *(ptr+2);
/*
 * update_L3L4_header - rewrite the parent packet's IP/TCP headers so the
 * aggregated super-packet looks like one large frame: new IP total
 * length (checksum recomputed), newest ack and window, and newest
 * timestamp echo.  Also updates the aggregation statistics counters.
 * NOTE(review): the nchk declaration, the store of the recomputed IP
 * checksum, and the timestamp-enabled test are on lines missing from
 * this listing.
 */
7430 static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro)
7432 struct iphdr *ip = lro->iph;
7433 struct tcphdr *tcp = lro->tcph;
7435 struct stat_block *statinfo = sp->mac_control.stats_info;
7436 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7438 /* Update L3 header */
/* Length changed, so the IP header checksum must be recomputed. */
7439 ip->tot_len = htons(lro->total_len);
7441 nchk = ip_fast_csum((u8 *)lro->iph, ip->ihl);
7444 /* Update L4 header */
7445 tcp->ack_seq = lro->tcp_ack;
7446 tcp->window = lro->window;
7448 /* Update tsecr field if this session has timestamps enabled */
7450 u32 *ptr = (u32 *)(tcp + 1);
7451 *(ptr+2) = lro->cur_tsecr;
7454 /* Update counters required for calculation of
7455 * average no. of packets aggregated.
7457 statinfo->sw_stat.sum_avg_pkts_aggregated += lro->sg_num;
7458 statinfo->sw_stat.num_aggregations++;
/*
 * aggregate_new_rx - merge one more in-order packet into an existing
 * LRO session: grow the total/fragment byte counters, advance the next
 * expected sequence number, and refresh ack/window (and timestamps,
 * when present) from this newest packet.
 * NOTE(review): the sg_num increment, the timestamp-present test and
 * the ptr declaration are on lines missing from this listing.
 */
7461 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip,
7462 struct tcphdr *tcp, u32 l4_pyld)
7464 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7465 lro->total_len += l4_pyld;
7466 lro->frags_len += l4_pyld;
7467 lro->tcp_next_seq += l4_pyld;
7470 /* Update ack seq no. and window ad(from this pkt) in LRO object */
/* Stored in network byte order here, unlike initiate_new_session which
 * converts with ntohl -- NOTE(review): confirm consumers agree. */
7471 lro->tcp_ack = tcp->ack_seq;
7472 lro->window = tcp->window;
7476 /* Update tsecr and tsval from this packet */
7477 ptr = (u32 *) (tcp + 1);
7478 lro->cur_tsval = *(ptr + 1);
7479 lro->cur_tsecr = *(ptr + 2);
/*
 * verify_l3_l4_lro_capable - per-packet L3/L4 screening for LRO.
 * A packet is mergeable only if it carries payload, has a plain 20-byte
 * IP header (no options), shows no ECN CE codepoint, sets no TCP
 * control flag other than ACK, and carries either no TCP options or
 * exactly the timestamp option (with a monotonic tsval and a non-zero
 * echo when an existing session @l_lro is supplied).
 * NOTE(review): the return statements, the ptr declaration, and the
 * NULL check on l_lro before line 7529 are missing from this listing;
 * the visible caller treats non-zero as "not capable" -- confirm.
 */
7483 static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip,
7484 struct tcphdr *tcp, u32 tcp_pyld_len)
7488 DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__);
7490 if (!tcp_pyld_len) {
7491 /* Runt frame or a pure ack */
/* ihl != 5 means IP options are present; those frames never merge. */
7495 if (ip->ihl != 5) /* IP has options */
7498 /* If we see CE codepoint in IP header, packet is not mergeable */
7499 if (INET_ECN_is_ce(ipv4_get_dsfield(ip)))
7502 /* If we see ECE or CWR flags in TCP header, packet is not mergeable */
7503 if (tcp->urg || tcp->psh || tcp->rst || tcp->syn || tcp->fin ||
7504 tcp->ece || tcp->cwr || !tcp->ack) {
7506 * Currently recognize only the ack control word and
7507 * any other control field being set would result in
7508 * flushing the LRO session
7514 * Allow only one TCP timestamp option. Don't aggregate if
7515 * any other options are detected.
7517 if (tcp->doff != 5 && tcp->doff != 8)
/* doff == 8: 12 option bytes; skip NOP padding, expect a timestamp. */
7520 if (tcp->doff == 8) {
7521 ptr = (u8 *)(tcp + 1);
7522 while (*ptr == TCPOPT_NOP)
7524 if (*ptr != TCPOPT_TIMESTAMP || *(ptr+1) != TCPOLEN_TIMESTAMP)
7527 /* Ensure timestamp value increases monotonically */
7529 if (l_lro->cur_tsval > *((u32 *)(ptr+2)))
7532 /* timestamp echo reply should be non-zero */
7533 if (*((u32 *)(ptr+6)) == 0)
/*
 * s2io_club_tcp_session - LRO dispatcher for one received frame.
 * Classifies the frame against the per-NIC session table and tells the
 * caller what to do via the return code.  Codes visible in this
 * listing: 1 = aggregate into *lro, 2 = flush both, 3 = new session
 * begun, 4 = aggregated and session hit lro_max_aggr_per_sess so flush.
 * @buffer: frame data; @tcp/@tcp_len: out -- TCP header and payload
 * length; @lro: out -- matched or newly claimed session; @rxdp: Rx
 * descriptor for this frame.
 * NOTE(review): many control-flow lines (returns, else branches, the
 * switch on ret near the end) are missing -- comments below describe
 * only the visible skeleton.
 */
7541 s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro,
7542 struct RxD_t *rxdp, struct s2io_nic *sp)
7545 struct tcphdr *tcph;
/* First gate: L2 framing must be LRO-capable (also locates ip/tcp). */
7548 if (!(ret = check_L2_lro_capable(buffer, &ip, (struct tcphdr **)tcp,
7550 DBG_PRINT(INFO_DBG,"IP Saddr: %x Daddr: %x\n",
7551 ip->saddr, ip->daddr);
7556 tcph = (struct tcphdr *)*tcp;
7557 *tcp_len = get_l4_pyld_length(ip, tcph);
/* Search for an in-use session matching this packet's 4-tuple. */
7558 for (i=0; i<MAX_LRO_SESSIONS; i++) {
7559 struct lro *l_lro = &sp->lro0_n[i];
7560 if (l_lro->in_use) {
7561 if (check_for_socket_match(l_lro, ip, tcph))
7563 /* Sock pair matched */
/* Out-of-order segment: do not merge; count it and flush instead. */
7566 if ((*lro)->tcp_next_seq != ntohl(tcph->seq)) {
7567 DBG_PRINT(INFO_DBG, "%s:Out of order. expected "
7568 "0x%x, actual 0x%x\n", __FUNCTION__,
7569 (*lro)->tcp_next_seq,
7572 sp->mac_control.stats_info->
7573 sw_stat.outof_sequence_pkts++;
7578 if (!verify_l3_l4_lro_capable(l_lro, ip, tcph,*tcp_len))
7579 ret = 1; /* Aggregate */
7581 ret = 2; /* Flush both */
7587 /* Before searching for available LRO objects,
7588 * check if the pkt is L3/L4 aggregatable. If not
7589 * don't create new LRO session. Just send this
7592 if (verify_l3_l4_lro_capable(NULL, ip, tcph, *tcp_len)) {
/* No existing session matched: claim a free slot for the new flow. */
7596 for (i=0; i<MAX_LRO_SESSIONS; i++) {
7597 struct lro *l_lro = &sp->lro0_n[i];
7598 if (!(l_lro->in_use)) {
7600 ret = 3; /* Begin anew */
7606 if (ret == 0) { /* sessions exceeded */
7607 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n",
/* Act on the classification decided above. */
7615 initiate_new_session(*lro, buffer, ip, tcph, *tcp_len);
7618 update_L3L4_header(sp, *lro);
7621 aggregate_new_rx(*lro, ip, tcph, *tcp_len);
/* Session reached its aggregation cap: finalize headers and flush. */
7622 if ((*lro)->sg_num == sp->lro_max_aggr_per_sess) {
7623 update_L3L4_header(sp, *lro);
7624 ret = 4; /* Flush the LRO */
7628 DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n",
7636 static void clear_lro_session(struct lro *lro)
7638 static u16 lro_struct_size = sizeof(struct lro);
7640 memset(lro, 0, lro_struct_size);
/*
 * queue_rx_frame - hand a completed receive skb to the network stack.
 * Resolves the L2 protocol via eth_type_trans() and delivers the skb
 * with netif_receive_skb().
 * NOTE(review): lines are missing between 7647 and 7649 -- a non-NAPI
 * delivery branch may exist there; confirm against the full source.
 */
7643 static void queue_rx_frame(struct sk_buff *skb)
7645 struct net_device *dev = skb->dev;
7647 skb->protocol = eth_type_trans(skb, dev);
7649 netif_receive_skb(skb);
7654 static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
7655 struct sk_buff *skb,
7658 struct sk_buff *first = lro->parent;
7660 first->len += tcp_len;
7661 first->data_len = lro->frags_len;
7662 skb_pull(skb, (skb->len - tcp_len));
7663 if (skb_shinfo(first)->frag_list)
7664 lro->last_frag->next = skb;
7666 skb_shinfo(first)->frag_list = skb;
7667 first->truesize += skb->truesize;
7668 lro->last_frag = skb;
7669 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;