2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include "firmware_exports.h"
38 * t3_wait_op_done_val - wait until an operation is completed
39 * @adapter: the adapter performing the operation
40 * @reg: the register to check for completion
41 * @mask: a single-bit field within @reg that indicates completion
42 * @polarity: the value of the field when the operation is completed
43 * @attempts: number of check iterations
44 * @delay: delay in usecs between iterations
45 * @valp: where to store the value of the register at completion time
47 * Wait until an operation is completed by checking a bit in a register
48 * up to @attempts times. If @valp is not NULL the value of the register
49 * at the time it indicated completion is stored there. Returns 0 if the
50 * operation completes and -EAGAIN otherwise.
53 int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
54 int polarity, int attempts, int delay, u32 *valp)
57 u32 val = t3_read_reg(adapter, reg);
59 if (!!(val & mask) == polarity) {
72 * t3_write_regs - write a bunch of registers
73 * @adapter: the adapter to program
74 * @p: an array of register address/register value pairs
75 * @n: the number of address/value pairs
76 * @offset: register address offset
78 * Takes an array of register address/register value pairs and writes each
79 * value to the corresponding register. Register addresses are adjusted
80 * by the supplied offset.
82 void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
83 int n, unsigned int offset)
86 t3_write_reg(adapter, p->reg_addr + offset, p->val);
92 * t3_set_reg_field - set a register field to a value
93 * @adapter: the adapter to program
94 * @addr: the register address
95 * @mask: specifies the portion of the register to modify
96 * @val: the new value for the register field
98 * Sets a register field specified by the supplied mask to the
101 void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
104 u32 v = t3_read_reg(adapter, addr) & ~mask;
106 t3_write_reg(adapter, addr, v | val);
107 t3_read_reg(adapter, addr); /* flush */
111 * t3_read_indirect - read indirectly addressed registers
113 * @addr_reg: register holding the indirect address
114 * @data_reg: register holding the value of the indirect register
115 * @vals: where the read register values are stored
116 * @start_idx: index of first indirect register to read
117 * @nregs: how many indirect registers to read
119 * Reads registers that are accessed indirectly through an address/data
122 void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
123 unsigned int data_reg, u32 *vals, unsigned int nregs,
124 unsigned int start_idx)
127 t3_write_reg(adap, addr_reg, start_idx);
128 *vals++ = t3_read_reg(adap, data_reg);
134 * t3_mc7_bd_read - read from MC7 through backdoor accesses
135 * @mc7: identifies MC7 to read from
136 * @start: index of first 64-bit word to read
137 * @n: number of 64-bit words to read
138 * @buf: where to store the read result
140 * Read n 64-bit words from MC7 starting at word start, using backdoor
143 int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
146 static const int shift[] = { 0, 0, 16, 24 };
147 static const int step[] = { 0, 32, 16, 8 };
149 unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
150 struct adapter *adap = mc7->adapter;
152 if (start >= size64 || start + n > size64)
155 start *= (8 << mc7->width);
160 for (i = (1 << mc7->width) - 1; i >= 0; --i) {
164 t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
165 t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
166 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
167 while ((val & F_BUSY) && attempts--)
168 val = t3_read_reg(adap,
169 mc7->offset + A_MC7_BD_OP);
173 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
174 if (mc7->width == 0) {
175 val64 = t3_read_reg(adap,
178 val64 |= (u64) val << 32;
181 val >>= shift[mc7->width];
182 val64 |= (u64) val << (step[mc7->width] * i);
194 static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
196 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
197 u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
200 if (!(ai->caps & SUPPORTED_10000baseT_Full))
202 t3_write_reg(adap, A_MI1_CFG, val);
205 #define MDIO_ATTEMPTS 10
208 * MI1 read/write operations for direct-addressed PHYs.
210 static int mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
211 int reg_addr, unsigned int *valp)
214 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
219 mutex_lock(&adapter->mdio_lock);
220 t3_write_reg(adapter, A_MI1_ADDR, addr);
221 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
222 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
224 *valp = t3_read_reg(adapter, A_MI1_DATA);
225 mutex_unlock(&adapter->mdio_lock);
229 static int mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
230 int reg_addr, unsigned int val)
233 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
238 mutex_lock(&adapter->mdio_lock);
239 t3_write_reg(adapter, A_MI1_ADDR, addr);
240 t3_write_reg(adapter, A_MI1_DATA, val);
241 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
242 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
243 mutex_unlock(&adapter->mdio_lock);
247 static const struct mdio_ops mi1_mdio_ops = {
253 * MI1 read/write operations for indirect-addressed PHYs.
255 static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
256 int reg_addr, unsigned int *valp)
259 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
261 mutex_lock(&adapter->mdio_lock);
262 t3_write_reg(adapter, A_MI1_ADDR, addr);
263 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
264 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
265 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
267 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
268 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
271 *valp = t3_read_reg(adapter, A_MI1_DATA);
273 mutex_unlock(&adapter->mdio_lock);
277 static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
278 int reg_addr, unsigned int val)
281 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
283 mutex_lock(&adapter->mdio_lock);
284 t3_write_reg(adapter, A_MI1_ADDR, addr);
285 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
286 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
287 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
289 t3_write_reg(adapter, A_MI1_DATA, val);
290 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
291 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
294 mutex_unlock(&adapter->mdio_lock);
298 static const struct mdio_ops mi1_mdio_ext_ops = {
/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Changes the value of a PHY register by applying a mask to its current
 *	value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}
329 * t3_phy_reset - reset a PHY block
330 * @phy: the PHY to operate on
331 * @mmd: the device address of the PHY block to reset
332 * @wait: how long to wait for the reset to complete in 1ms increments
334 * Resets a PHY block and optionally waits for the reset to complete.
335 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
338 int t3_phy_reset(struct cphy *phy, int mmd, int wait)
343 err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
348 err = mdio_read(phy, mmd, MII_BMCR, &ctl);
354 } while (ctl && --wait);
360 * t3_phy_advertise - set the PHY advertisement registers for autoneg
361 * @phy: the PHY to operate on
362 * @advert: bitmap of capabilities the PHY should advertise
364 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
365 * requested capabilities.
367 int t3_phy_advertise(struct cphy *phy, unsigned int advert)
370 unsigned int val = 0;
372 err = mdio_read(phy, 0, MII_CTRL1000, &val);
376 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
377 if (advert & ADVERTISED_1000baseT_Half)
378 val |= ADVERTISE_1000HALF;
379 if (advert & ADVERTISED_1000baseT_Full)
380 val |= ADVERTISE_1000FULL;
382 err = mdio_write(phy, 0, MII_CTRL1000, val);
387 if (advert & ADVERTISED_10baseT_Half)
388 val |= ADVERTISE_10HALF;
389 if (advert & ADVERTISED_10baseT_Full)
390 val |= ADVERTISE_10FULL;
391 if (advert & ADVERTISED_100baseT_Half)
392 val |= ADVERTISE_100HALF;
393 if (advert & ADVERTISED_100baseT_Full)
394 val |= ADVERTISE_100FULL;
395 if (advert & ADVERTISED_Pause)
396 val |= ADVERTISE_PAUSE_CAP;
397 if (advert & ADVERTISED_Asym_Pause)
398 val |= ADVERTISE_PAUSE_ASYM;
399 return mdio_write(phy, 0, MII_ADVERTISE, val);
403 * t3_set_phy_speed_duplex - force PHY speed and duplex
404 * @phy: the PHY to operate on
405 * @speed: requested PHY speed
406 * @duplex: requested PHY duplex
408 * Force a 10/100/1000 PHY's speed and duplex. This also disables
409 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
411 int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
416 err = mdio_read(phy, 0, MII_BMCR, &ctl);
421 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
422 if (speed == SPEED_100)
423 ctl |= BMCR_SPEED100;
424 else if (speed == SPEED_1000)
425 ctl |= BMCR_SPEED1000;
428 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
429 if (duplex == DUPLEX_FULL)
430 ctl |= BMCR_FULLDPLX;
432 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
433 ctl |= BMCR_ANENABLE;
434 return mdio_write(phy, 0, MII_BMCR, ctl);
/*
 * Per-board static configuration: GPIO output-enable/value masks, GPIO
 * interrupt pins, link capabilities, MDIO ops, and product name.
 * NOTE(review): this extraction dropped interior lines of each entry (the
 * embedded original line numbers are discontinuous), so the leading scalar
 * fields of every initializer are missing here — restore this table from
 * the upstream driver before building.
 */
437 static const struct adapter_info t3_adap_info[] = {
439 F_GPIO2_OEN | F_GPIO4_OEN |
440 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
442 &mi1_mdio_ops, "Chelsio PE9000"},
444 F_GPIO2_OEN | F_GPIO4_OEN |
445 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
447 &mi1_mdio_ops, "Chelsio T302"},
449 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
450 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
451 SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
452 &mi1_mdio_ext_ops, "Chelsio T310"},
454 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
455 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
456 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
457 SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
458 &mi1_mdio_ext_ops, "Chelsio T320"},
462 * Return the adapter_info structure with a given index. Out-of-range indices
465 const struct adapter_info *t3_get_adapter_info(unsigned int id)
467 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
470 #define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
471 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
472 #define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)
/*
 * Per-PHY-type table: PHY prep function, supported-mode capability bits,
 * and human-readable description.  Entry index corresponds to the port
 * type read from VPD.
 * NOTE(review): interior lines of this initializer are missing from this
 * extraction (discontinuous embedded line numbers) — restore from the
 * upstream driver.
 */
474 static const struct port_type_info port_types[] = {
476 {t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
478 {t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
479 "10/100/1000BASE-T"},
480 {NULL, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
481 "10/100/1000BASE-T"},
482 {t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
483 {NULL, CAPS_10G, "10GBASE-KX4"},
484 {t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
485 {t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
487 {NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
/*
 * VPD_ENTRY expands to one VPD keyword record: 2-byte keyword tag, 1-byte
 * length, then the data bytes.
 * NOTE(review): the "struct t3_vpd {" header and its leading members were
 * dropped by this extraction (discontinuous embedded line numbers) —
 * restore the full struct definition from the upstream driver.
 */
493 #define VPD_ENTRY(name, len) \
494 u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
497 * Partial EEPROM Vital Product Data structure. Includes only the ID and
506 VPD_ENTRY(pn, 16); /* part number */
507 VPD_ENTRY(ec, 16); /* EC level */
508 VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
509 VPD_ENTRY(na, 12); /* MAC address base */
510 VPD_ENTRY(cclk, 6); /* core clock */
511 VPD_ENTRY(mclk, 6); /* mem clock */
512 VPD_ENTRY(uclk, 6); /* uP clk */
513 VPD_ENTRY(mdc, 6); /* MDIO clk */
514 VPD_ENTRY(mt, 2); /* mem timing */
515 VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
516 VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
517 VPD_ENTRY(port0, 2); /* PHY0 complex */
518 VPD_ENTRY(port1, 2); /* PHY1 complex */
519 VPD_ENTRY(port2, 2); /* PHY2 complex */
520 VPD_ENTRY(port3, 2); /* PHY3 complex */
521 VPD_ENTRY(rv, 1); /* csum */
522 u32 pad; /* for multiple-of-4 sizing and alignment */
525 #define EEPROM_MAX_POLL 4
526 #define EEPROM_STAT_ADDR 0x4000
527 #define VPD_BASE 0xc00
530 * t3_seeprom_read - read a VPD EEPROM location
531 * @adapter: adapter to read
532 * @addr: EEPROM address
533 * @data: where to store the read data
535 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
536 * VPD ROM capability. A zero is written to the flag bit when the
537 * addres is written to the control register. The hardware device will
538 * set the flag to 1 when 4 bytes have been read into the data register.
540 int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
543 int attempts = EEPROM_MAX_POLL;
544 unsigned int base = adapter->params.pci.vpd_cap_addr;
546 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
549 pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
552 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
553 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
555 if (!(val & PCI_VPD_ADDR_F)) {
556 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
559 pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, data);
560 *data = le32_to_cpu(*data);
565 * t3_seeprom_write - write a VPD EEPROM location
566 * @adapter: adapter to write
567 * @addr: EEPROM address
568 * @data: value to write
570 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
571 * VPD ROM capability.
573 int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
576 int attempts = EEPROM_MAX_POLL;
577 unsigned int base = adapter->params.pci.vpd_cap_addr;
579 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
582 pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
584 pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR,
585 addr | PCI_VPD_ADDR_F);
588 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
589 } while ((val & PCI_VPD_ADDR_F) && --attempts);
591 if (val & PCI_VPD_ADDR_F) {
592 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
599 * t3_seeprom_wp - enable/disable EEPROM write protection
600 * @adapter: the adapter
601 * @enable: 1 to enable write protection, 0 to disable it
603 * Enables or disables write protection on the serial EEPROM.
605 int t3_seeprom_wp(struct adapter *adapter, int enable)
607 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
/*
 * Convert a character holding a hex digit to a number.  The caller must
 * pass a valid hex digit ([0-9a-fA-F]); other inputs yield garbage.
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}
619 * get_vpd_params - read VPD parameters from VPD EEPROM
620 * @adapter: adapter to read
621 * @p: where to store the parameters
623 * Reads card parameters stored in VPD EEPROM.
625 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
631 * Card information is normally at VPD_BASE but some early cards had
634 ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
637 addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
639 for (i = 0; i < sizeof(vpd); i += 4) {
640 ret = t3_seeprom_read(adapter, addr + i,
641 (u32 *)((u8 *)&vpd + i));
646 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
647 p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
648 p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
649 p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
650 p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
651 memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
653 /* Old eeproms didn't have port information */
654 if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
655 p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
656 p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
658 p->port_type[0] = hex2int(vpd.port0_data[0]);
659 p->port_type[1] = hex2int(vpd.port1_data[0]);
660 p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
661 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
664 for (i = 0; i < 6; i++)
665 p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
666 hex2int(vpd.na_data[2 * i + 1]);
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x77ffc,	/* flash address holding FW version */
	FW_MIN_SIZE = 8		/* at least version and csum */
};
690 * sf1_read - read data from the serial flash
691 * @adapter: the adapter
692 * @byte_cnt: number of bytes to read
693 * @cont: whether another operation will be chained
694 * @valp: where to store the read data
696 * Reads up to 4 bytes of data from the serial flash. The location of
697 * the read needs to be specified prior to calling this by issuing the
698 * appropriate commands to the serial flash.
700 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
705 if (!byte_cnt || byte_cnt > 4)
707 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
709 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
710 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
712 *valp = t3_read_reg(adapter, A_SF_DATA);
717 * sf1_write - write data to the serial flash
718 * @adapter: the adapter
719 * @byte_cnt: number of bytes to write
720 * @cont: whether another operation will be chained
721 * @val: value to write
723 * Writes up to 4 bytes of data to the serial flash. The location of
724 * the write needs to be specified prior to calling this by issuing the
725 * appropriate commands to the serial flash.
727 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
730 if (!byte_cnt || byte_cnt > 4)
732 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
734 t3_write_reg(adapter, A_SF_DATA, val);
735 t3_write_reg(adapter, A_SF_OP,
736 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
737 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
741 * flash_wait_op - wait for a flash operation to complete
742 * @adapter: the adapter
743 * @attempts: max number of polls of the status register
744 * @delay: delay between polls in ms
746 * Wait for a flash operation to complete by polling the status register.
748 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
754 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
755 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
767 * t3_read_flash - read words from serial flash
768 * @adapter: the adapter
769 * @addr: the start address for the read
770 * @nwords: how many 32-bit words to read
771 * @data: where to store the read data
772 * @byte_oriented: whether to store data as bytes or as words
774 * Read the specified number of 32-bit words from the serial flash.
775 * If @byte_oriented is set the read data is stored as a byte array
776 * (i.e., big-endian), otherwise as 32-bit words in the platform's
779 int t3_read_flash(struct adapter *adapter, unsigned int addr,
780 unsigned int nwords, u32 *data, int byte_oriented)
784 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
787 addr = swab32(addr) | SF_RD_DATA_FAST;
789 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
790 (ret = sf1_read(adapter, 1, 1, data)) != 0)
793 for (; nwords; nwords--, data++) {
794 ret = sf1_read(adapter, 4, nwords > 1, data);
798 *data = htonl(*data);
804 * t3_write_flash - write up to a page of data to the serial flash
805 * @adapter: the adapter
806 * @addr: the start address to write
807 * @n: length of data to write
808 * @data: the data to write
810 * Writes up to a page of data (256 bytes) to the serial flash starting
811 * at the given address.
813 static int t3_write_flash(struct adapter *adapter, unsigned int addr,
814 unsigned int n, const u8 *data)
818 unsigned int i, c, left, val, offset = addr & 0xff;
820 if (addr + n > SF_SIZE || offset + n > 256)
823 val = swab32(addr) | SF_PROG_PAGE;
825 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
826 (ret = sf1_write(adapter, 4, 1, val)) != 0)
829 for (left = n; left; left -= c) {
831 for (val = 0, i = 0; i < c; ++i)
832 val = (val << 8) + *data++;
834 ret = sf1_write(adapter, c, c != left, val);
838 if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
841 /* Read the page to verify the write succeeded */
842 ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
846 if (memcmp(data - n, (u8 *) buf + offset, n))
852 * t3_get_tp_version - read the tp sram version
853 * @adapter: the adapter
854 * @vers: where to place the version
856 * Reads the protocol sram version from sram.
858 int t3_get_tp_version(struct adapter *adapter, u32 *vers)
862 /* Get version loaded in SRAM */
863 t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
864 ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
869 *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
875 * t3_check_tpsram_version - read the tp sram version
876 * @adapter: the adapter
877 * @must_load: set to 1 if loading a new microcode image is required
879 * Reads the protocol sram version from flash.
881 int t3_check_tpsram_version(struct adapter *adapter, int *must_load)
885 unsigned int major, minor;
887 if (adapter->params.rev == T3_REV_A)
892 ret = t3_get_tp_version(adapter, &vers);
896 major = G_TP_VERSION_MAJOR(vers);
897 minor = G_TP_VERSION_MINOR(vers);
899 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
902 if (major != TP_VERSION_MAJOR)
903 CH_ERR(adapter, "found wrong TP version (%u.%u), "
904 "driver needs version %d.%d\n", major, minor,
905 TP_VERSION_MAJOR, TP_VERSION_MINOR);
908 CH_ERR(adapter, "found wrong TP version (%u.%u), "
909 "driver compiled for version %d.%d\n", major, minor,
910 TP_VERSION_MAJOR, TP_VERSION_MINOR);
916 * t3_check_tpsram - check if provided protocol SRAM
917 * is compatible with this driver
918 * @adapter: the adapter
919 * @tp_sram: the firmware image to write
922 * Checks if an adapter's tp sram is compatible with the driver.
923 * Returns 0 if the versions are compatible, a negative error otherwise.
925 int t3_check_tpsram(struct adapter *adapter, u8 *tp_sram, unsigned int size)
929 const u32 *p = (const u32 *)tp_sram;
931 /* Verify checksum */
932 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
934 if (csum != 0xffffffff) {
935 CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
943 enum fw_version_type {
949 * t3_get_fw_version - read the firmware version
950 * @adapter: the adapter
951 * @vers: where to place the version
953 * Reads the FW version from flash.
955 int t3_get_fw_version(struct adapter *adapter, u32 *vers)
957 return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
961 * t3_check_fw_version - check if the FW is compatible with this driver
962 * @adapter: the adapter
963 * @must_load: set to 1 if loading a new FW image is required
965 * Checks if an adapter's FW is compatible with the driver. Returns 0
966 * if the versions are compatible, a negative error otherwise.
968 int t3_check_fw_version(struct adapter *adapter, int *must_load)
972 unsigned int type, major, minor;
975 ret = t3_get_fw_version(adapter, &vers);
979 type = G_FW_VERSION_TYPE(vers);
980 major = G_FW_VERSION_MAJOR(vers);
981 minor = G_FW_VERSION_MINOR(vers);
983 if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
984 minor == FW_VERSION_MINOR)
987 if (major != FW_VERSION_MAJOR)
988 CH_ERR(adapter, "found wrong FW version(%u.%u), "
989 "driver needs version %u.%u\n", major, minor,
990 FW_VERSION_MAJOR, FW_VERSION_MINOR);
993 CH_WARN(adapter, "found wrong FW minor version(%u.%u), "
994 "driver compiled for version %u.%u\n", major, minor,
995 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1002 * t3_flash_erase_sectors - erase a range of flash sectors
1003 * @adapter: the adapter
1004 * @start: the first sector to erase
1005 * @end: the last sector to erase
1007 * Erases the sectors in the given range.
1009 static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
1011 while (start <= end) {
1014 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1015 (ret = sf1_write(adapter, 4, 0,
1016 SF_ERASE_SECTOR | (start << 8))) != 0 ||
1017 (ret = flash_wait_op(adapter, 5, 500)) != 0)
1025 * t3_load_fw - download firmware
1026 * @adapter: the adapter
1027 * @fw_data: the firmware image to write
1030 * Write the supplied firmware image to the card's serial flash.
1031 * The FW image has the following sections: @size - 8 bytes of code and
1032 * data, followed by 4 bytes of FW version, followed by the 32-bit
1033 * 1's complement checksum of the whole image.
1035 int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
1039 const u32 *p = (const u32 *)fw_data;
1040 int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
1042 if ((size & 3) || size < FW_MIN_SIZE)
1044 if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
1047 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1048 csum += ntohl(p[i]);
1049 if (csum != 0xffffffff) {
1050 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1055 ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
1059 size -= 8; /* trim off version and checksum */
1060 for (addr = FW_FLASH_BOOT_ADDR; size;) {
1061 unsigned int chunk_size = min(size, 256U);
1063 ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
1068 fw_data += chunk_size;
1072 ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
1075 CH_ERR(adapter, "firmware download failed, error %d\n", ret);
1079 #define CIM_CTL_BASE 0x2000
1082 * t3_cim_ctl_blk_read - read a block from CIM control region
1084 * @adap: the adapter
1085 * @addr: the start address within the CIM control region
1086 * @n: number of words to read
1087 * @valp: where to store the result
1089 * Reads a block of 4-byte words from the CIM control region.
1091 int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
1092 unsigned int n, unsigned int *valp)
1096 if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1099 for ( ; !ret && n--; addr += 4) {
1100 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1101 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1104 *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1111 * t3_link_changed - handle interface link changes
1112 * @adapter: the adapter
1113 * @port_id: the port index that changed link state
1115 * Called when a port's link settings change to propagate the new values
1116 * to the associated PHY and MAC. After performing the common tasks it
1117 * invokes an OS-specific handler.
1119 void t3_link_changed(struct adapter *adapter, int port_id)
1121 int link_ok, speed, duplex, fc;
1122 struct port_info *pi = adap2pinfo(adapter, port_id);
1123 struct cphy *phy = &pi->phy;
1124 struct cmac *mac = &pi->mac;
1125 struct link_config *lc = &pi->link_config;
1127 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1129 if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1130 uses_xaui(adapter)) {
1133 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1134 link_ok ? F_TXACTENABLE | F_RXEN : 0);
1136 lc->link_ok = link_ok;
1137 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1138 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1139 if (lc->requested_fc & PAUSE_AUTONEG)
1140 fc &= lc->requested_fc;
1142 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1144 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1145 /* Set MAC speed, duplex, and flow control to match PHY. */
1146 t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1150 t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
1154 * t3_link_start - apply link configuration to MAC/PHY
1155 * @phy: the PHY to setup
1156 * @mac: the MAC to setup
1157 * @lc: the requested link configuration
1159 * Set up a port's MAC and PHY according to a desired link configuration.
1160 * - If the PHY can auto-negotiate first decide what to advertise, then
1161 * enable/disable auto-negotiation as desired, and reset.
1162 * - If the PHY does not auto-negotiate just reset it.
1163 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1164 * otherwise do it later based on the outcome of auto-negotiation.
1166 int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1168 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1171 if (lc->supported & SUPPORTED_Autoneg) {
1172 lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1174 lc->advertising |= ADVERTISED_Asym_Pause;
1176 lc->advertising |= ADVERTISED_Pause;
1178 phy->ops->advertise(phy, lc->advertising);
1180 if (lc->autoneg == AUTONEG_DISABLE) {
1181 lc->speed = lc->requested_speed;
1182 lc->duplex = lc->requested_duplex;
1183 lc->fc = (unsigned char)fc;
1184 t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1186 /* Also disables autoneg */
1187 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1188 phy->ops->reset(phy, 0);
1190 phy->ops->autoneg_enable(phy);
1192 t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1193 lc->fc = (unsigned char)fc;
1194 phy->ops->reset(phy, 0);
1200 * t3_set_vlan_accel - control HW VLAN extraction
1201 * @adapter: the adapter
1202 * @ports: bitmap of adapter ports to operate on
1203 * @on: enable (1) or disable (0) HW VLAN extraction
1205 * Enables or disables HW extraction of VLAN tags for the given port.
1207 void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1209 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1210 ports << S_VLANEXTRACTIONENABLE,
1211 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
/* One entry of the table-driven interrupt handler (t3_handle_intr_status) */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal:1;	/* whether the condition reported is fatal */
};
1222 * t3_handle_intr_status - table driven interrupt handler
1223 * @adapter: the adapter that generated the interrupt
1224 * @reg: the interrupt status register to process
1225 * @mask: a mask to apply to the interrupt status
1226 * @acts: table of interrupt actions
1227 * @stats: statistics counters tracking interrupt occurrences
1229 * A table driven interrupt handler that applies a set of masks to an
1230 * interrupt status word and performs the corresponding actions if the
1231 * interrupts described by the mask have occurred. The actions include
1232 * optionally printing a warning or alert message, and optionally
1233 * incrementing a stat counter. The table is terminated by an entry
1234 * specifying mask 0. Returns the number of fatal interrupt conditions.
1236 static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1238 const struct intr_info *acts,
1239 unsigned long *stats)
/* Snapshot the status register, restricted to bits the caller cares about. */
1242 unsigned int status = t3_read_reg(adapter, reg) & mask;
1244 for (; acts->mask; ++acts) {
1245 if (!(status & acts->mask))
/* Fatal conditions are reported at ALERT level; others at WARN.
 * (The fatal-branch guard line is not shown in this excerpt.) */
1249 CH_ALERT(adapter, "%s (0x%x)\n",
1250 acts->msg, status & acts->mask);
1251 } else if (acts->msg)
1252 CH_WARN(adapter, "%s (0x%x)\n",
1253 acts->msg, status & acts->mask);
1254 if (acts->stat_idx >= 0)
1255 stats[acts->stat_idx]++;
1257 if (status) /* clear processed interrupts */
1258 t3_write_reg(adapter, reg, status);
/* Per-module interrupt enable masks written by t3_intr_enable() and used by
 * the handlers below.  Commented-out terms (e.g. MSI-X parity errors) are
 * deliberately excluded.  No comments are inserted between the lines below
 * because the multi-line macros use backslash continuations. */
1262 #define SGE_INTR_MASK (F_RSPQDISABLED)
1263 #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1264 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1266 #define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1267 #define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1268 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1269 F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
1270 #define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1271 F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1272 F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1273 F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1274 V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1275 V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1276 #define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1277 F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1278 /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1279 V_BISTERR(M_BISTERR) | F_PEXERR)
1280 #define ULPRX_INTR_MASK F_PARERR
1281 #define ULPTX_INTR_MASK 0
1282 #define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \
1283 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1284 F_ZERO_SWITCH_ERROR)
1285 #define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1286 F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1287 F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1288 F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT)
1289 #define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1290 V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1291 V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1292 #define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1293 V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1294 V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1295 #define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1296 V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1297 V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1298 V_MCAPARERRENB(M_MCAPARERRENB))
1299 #define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1300 F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1301 F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1302 F_MPS0 | F_CPL_SWITCH)
1305 * Interrupt handler for the PCIX1 module.
1307 static void pci_intr_handler(struct adapter *adapter)
/* Decode table: stat_idx -1 means no counter; fatal=1 brings the card down. */
1309 static const struct intr_info pcix1_intr_info[] = {
1310 {F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
1311 {F_SIGTARABT, "PCI signaled target abort", -1, 1},
1312 {F_RCVTARABT, "PCI received target abort", -1, 1},
1313 {F_RCVMSTABT, "PCI received master abort", -1, 1},
1314 {F_SIGSYSERR, "PCI signaled system error", -1, 1},
1315 {F_DETPARERR, "PCI detected parity error", -1, 1},
1316 {F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
1317 {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
1318 {F_RCVSPLCMPERR, "PCI received split completion error", -1,
/* Correctable ECC errors are counted but are not fatal. */
1320 {F_DETCORECCERR, "PCI correctable ECC error",
1321 STAT_PCI_CORR_ECC, 0},
1322 {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
1323 {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1324 {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1326 {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1328 {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1330 {V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
/* Any fatal table hit stops the adapter. */
1335 if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1336 pcix1_intr_info, adapter->irq_stats))
1337 t3_fatal_err(adapter);
1341 * Interrupt handler for the PCIE module.
1343 static void pcie_intr_handler(struct adapter *adapter)
/* All PCIe causes decoded here are considered fatal. */
1345 static const struct intr_info pcie_intr_info[] = {
1346 {F_PEXERR, "PCI PEX error", -1, 1},
1348 "PCI unexpected split completion DMA read error", -1, 1},
1350 "PCI unexpected split completion DMA command error", -1, 1},
1351 {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1352 {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
1353 {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
1354 {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
1355 {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1356 "PCI MSI-X table/PBA parity error", -1, 1},
1357 {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
/* Any fatal table hit stops the adapter. */
1361 if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1362 pcie_intr_info, adapter->irq_stats))
1363 t3_fatal_err(adapter);
1367 * TP interrupt handler.
1369 static void tp_intr_handler(struct adapter *adapter)
1371 static const struct intr_info tp_intr_info[] = {
1372 {0xffffff, "TP parity error", -1, 1},
1373 {0x1000000, "TP out of Rx pages", -1, 1},
1374 {0x2000000, "TP out of Tx pages", -1, 1},
1378 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1379 tp_intr_info, NULL))
1380 t3_fatal_err(adapter);
1384 * CIM interrupt handler.
1386 static void cim_intr_handler(struct adapter *adapter)
/* All CIM access-violation causes decoded here are fatal. */
1388 static const struct intr_info cim_intr_info[] = {
1389 {F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
1390 {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
1391 {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
1392 {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
1393 {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
1394 {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
1395 {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
1396 {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
1397 {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
1398 {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
1399 {F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
1400 {F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
/* Any fatal table hit stops the adapter. */
1404 if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1405 cim_intr_info, NULL))
1406 t3_fatal_err(adapter);
1410 * ULP RX interrupt handler.
1412 static void ulprx_intr_handler(struct adapter *adapter)
1414 static const struct intr_info ulprx_intr_info[] = {
1415 {F_PARERR, "ULP RX parity error", -1, 1},
1419 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1420 ulprx_intr_info, NULL))
1421 t3_fatal_err(adapter);
1425 * ULP TX interrupt handler.
1427 static void ulptx_intr_handler(struct adapter *adapter)
/* PBL out-of-bounds events are counted per channel but are not fatal. */
1429 static const struct intr_info ulptx_intr_info[] = {
1430 {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1431 STAT_ULP_CH0_PBL_OOB, 0},
1432 {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1433 STAT_ULP_CH1_PBL_OOB, 0},
/* Any fatal table hit stops the adapter. */
1437 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1438 ulptx_intr_info, adapter->irq_stats))
1439 t3_fatal_err(adapter);
/* Aggregate framing-error bits for the PM TX SPI interfaces (ingress-command
 * and output-egress).  Backslash continuations: no comments inserted below. */
1442 #define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1443 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1444 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1445 F_ICSPI1_TX_FRAMING_ERROR)
1446 #define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1447 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1448 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1449 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1452 * PM TX interrupt handler.
1454 static void pmtx_intr_handler(struct adapter *adapter)
/* All PM TX causes decoded here are fatal. */
1456 static const struct intr_info pmtx_intr_info[] = {
1457 {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
1458 {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
1459 {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
1460 {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1461 "PMTX ispi parity error", -1, 1},
1462 {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1463 "PMTX ospi parity error", -1, 1},
/* Any fatal table hit stops the adapter. */
1467 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1468 pmtx_intr_info, NULL))
1469 t3_fatal_err(adapter);
/* Aggregate framing-error bits for the PM RX SPI interfaces (ingress-egress
 * and output-command).  Backslash continuations: no comments inserted below. */
1472 #define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1473 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1474 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1475 F_IESPI1_TX_FRAMING_ERROR)
1476 #define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1477 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1478 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1479 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1482 * PM RX interrupt handler.
1484 static void pmrx_intr_handler(struct adapter *adapter)
/* All PM RX causes decoded here are fatal. */
1486 static const struct intr_info pmrx_intr_info[] = {
1487 {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1488 {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1489 {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1490 {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1491 "PMRX ispi parity error", -1, 1},
1492 {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1493 "PMRX ospi parity error", -1, 1},
/* Any fatal table hit stops the adapter. */
1497 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1498 pmrx_intr_info, NULL))
1499 t3_fatal_err(adapter);
1503 * CPL switch interrupt handler.
1505 static void cplsw_intr_handler(struct adapter *adapter)
/* CIM overflow is deliberately excluded from the decode table. */
1507 static const struct intr_info cplsw_intr_info[] = {
1508 /* { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 }, */
1509 {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1510 {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1511 {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1512 {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
/* Any fatal table hit stops the adapter. */
1516 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1517 cplsw_intr_info, NULL))
1518 t3_fatal_err(adapter);
1522 * MPS interrupt handler.
1524 static void mps_intr_handler(struct adapter *adapter)
/* Single entry: any of the low 9 cause bits means an MPS parity error. */
1526 static const struct intr_info mps_intr_info[] = {
1527 {0x1ff, "MPS parity error", -1, 1},
/* Any fatal table hit stops the adapter. */
1531 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1532 mps_intr_info, NULL))
1533 t3_fatal_err(adapter);
/* MC7 causes that require stopping the adapter: uncorrectable errors,
 * parity errors, and address errors. */
1536 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1539 * MC7 interrupt handler.
1541 static void mc7_intr_handler(struct mc7 *mc7)
1543 struct adapter *adapter = mc7->adapter;
1544 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
/* Correctable-error branch (its guard line is not shown in this excerpt):
 * count it and log address/data at WARN level. */
1547 mc7->stats.corr_err++;
1548 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1549 "data 0x%x 0x%x 0x%x\n", mc7->name,
1550 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1551 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1552 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1553 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
/* Uncorrectable-error branch: count and log at ALERT level. */
1557 mc7->stats.uncorr_err++;
1558 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1559 "data 0x%x 0x%x 0x%x\n", mc7->name,
1560 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1561 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1562 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1563 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
/* Parity-error branch. */
1567 mc7->stats.parity_err++;
1568 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1569 mc7->name, G_PE(cause));
/* Address-error branch; the error address register only exists on
 * revisions after rev 0. */
1575 if (adapter->params.rev > 0)
1576 addr = t3_read_reg(adapter,
1577 mc7->offset + A_MC7_ERR_ADDR);
1578 mc7->stats.addr_err++;
1579 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1583 if (cause & MC7_INTR_FATAL)
1584 t3_fatal_err(adapter);
/* Acknowledge everything we saw. */
1586 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
/* XGMAC causes that require stopping the adapter: TX/RX FIFO parity errors. */
1589 #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1590 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1592 * XGMAC interrupt handler.
1594 static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1596 struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1597 u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
1599 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1600 mac->stats.tx_fifo_parity_err++;
1601 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1603 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1604 mac->stats.rx_fifo_parity_err++;
1605 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
/* Remaining causes are only counted, not logged. */
1607 if (cause & F_TXFIFO_UNDERRUN)
1608 mac->stats.tx_fifo_urun++;
1609 if (cause & F_RXFIFO_OVERFLOW)
1610 mac->stats.rx_fifo_ovfl++;
1611 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1612 mac->stats.serdes_signal_loss++;
1613 if (cause & F_XAUIPCSCTCERR)
1614 mac->stats.xaui_pcs_ctc_err++;
1615 if (cause & F_XAUIPCSALIGNCHANGE)
1616 mac->stats.xaui_pcs_align_change++;
/* Acknowledge, then escalate fatal causes (the action taken on the
 * XGM_INTR_FATAL hit is past the end of this excerpt). */
1618 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1619 if (cause & XGM_INTR_FATAL)
1625 * Interrupt handler for PHY events.
1627 int t3_phy_intr_handler(struct adapter *adapter)
1629 u32 mask, gpi = adapter_info(adapter)->gpio_intr;
1630 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1632 for_each_port(adapter, i) {
1633 struct port_info *p = adap2pinfo(adapter, i);
/* Isolate the lowest set GPIO bit; each port consumes one bit. */
1635 mask = gpi - (gpi & (gpi - 1));
/* Skip PHYs that do not signal interrupts. */
1638 if (!(p->port_type->caps & SUPPORTED_IRQ))
/* Let the PHY driver decode its own causes and act on them. */
1642 int phy_cause = p->phy.ops->intr_handler(&p->phy);
1644 if (phy_cause & cphy_cause_link_change)
1645 t3_link_changed(adapter, i);
1646 if (phy_cause & cphy_cause_fifo_error)
1647 p->phy.fifo_errors++;
/* Acknowledge the GPIO causes we processed. */
1651 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1656 * T3 slow path (non-data) interrupt handler.
1658 int t3_slow_intr_handler(struct adapter *adapter)
1660 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
/* Only handle causes we actually enabled. */
1662 cause &= adapter->slow_intr_mask;
/* Dispatch each top-level cause bit to its module handler. */
1665 if (cause & F_PCIM0) {
1666 if (is_pcie(adapter))
1667 pcie_intr_handler(adapter);
1669 pci_intr_handler(adapter);
1672 t3_sge_err_intr_handler(adapter);
1673 if (cause & F_MC7_PMRX)
1674 mc7_intr_handler(&adapter->pmrx);
1675 if (cause & F_MC7_PMTX)
1676 mc7_intr_handler(&adapter->pmtx);
1677 if (cause & F_MC7_CM)
1678 mc7_intr_handler(&adapter->cm);
1680 cim_intr_handler(adapter);
1682 tp_intr_handler(adapter);
1683 if (cause & F_ULP2_RX)
1684 ulprx_intr_handler(adapter);
1685 if (cause & F_ULP2_TX)
1686 ulptx_intr_handler(adapter);
1687 if (cause & F_PM1_RX)
1688 pmrx_intr_handler(adapter);
1689 if (cause & F_PM1_TX)
1690 pmtx_intr_handler(adapter);
1691 if (cause & F_CPL_SWITCH)
1692 cplsw_intr_handler(adapter);
1694 mps_intr_handler(adapter);
1696 t3_mc5_intr_handler(&adapter->mc5);
1697 if (cause & F_XGMAC0_0)
1698 mac_intr_handler(adapter, 0);
1699 if (cause & F_XGMAC0_1)
1700 mac_intr_handler(adapter, 1);
1701 if (cause & F_T3DBG)
1702 t3_os_ext_intr_handler(adapter);
1704 /* Clear the interrupts just processed. */
1705 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1706 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1711 * t3_intr_enable - enable interrupts
1712 * @adapter: the adapter whose interrupts should be enabled
1714 * Enable interrupts by setting the interrupt enable registers of the
1715 * various HW modules and then enabling the top-level interrupt
1718 void t3_intr_enable(struct adapter *adapter)
/* Bulk table of per-module interrupt-enable register writes.  The PMTX
 * and CM MC7 enables are addressed relative to the PMRX MC7 block. */
1720 static const struct addr_val_pair intr_en_avp[] = {
1721 {A_SG_INT_ENABLE, SGE_INTR_MASK},
1722 {A_MC7_INT_ENABLE, MC7_INTR_MASK},
1723 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1725 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1727 {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1728 {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1729 {A_TP_INT_ENABLE, 0x3bfffff},
1730 {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1731 {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1732 {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1733 {A_MPS_INT_ENABLE, MPS_INTR_MASK},
/* Remember the enabled top-level causes for t3_slow_intr_handler(). */
1736 adapter->slow_intr_mask = PL_INTR_MASK;
1738 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
/* Later revisions enable extra CPL-switch and ULP TX causes. */
1740 if (adapter->params.rev > 0) {
1741 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1742 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1743 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1744 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1745 F_PBL_BOUND_ERR_CH1);
1747 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1748 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
/* Route the board's PHY GPIO interrupt lines. */
1751 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
1752 adapter_info(adapter)->gpio_intr);
1753 t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
1754 adapter_info(adapter)->gpio_intr);
1755 if (is_pcie(adapter))
1756 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1758 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
/* Finally open the top-level concentrator and flush the write. */
1759 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1760 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1764 * t3_intr_disable - disable a card's interrupts
1765 * @adapter: the adapter whose interrupts should be disabled
1767 * Disable interrupts. We only disable the top-level interrupt
1768 * concentrator and the SGE data interrupts.
1770 void t3_intr_disable(struct adapter *adapter)
1772 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1773 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1774 adapter->slow_intr_mask = 0;
1778 * t3_intr_clear - clear all interrupts
1779 * @adapter: the adapter whose interrupts should be cleared
1781 * Clears all interrupts.
1783 void t3_intr_clear(struct adapter *adapter)
/* Table of per-module cause registers to wipe.  NOTE(review): this
 * excerpt shows only part of the table; more entries exist in the
 * original source. */
1785 static const unsigned int cause_reg_addr[] = {
1787 A_SG_RSPQ_FL_STATUS,
1790 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1791 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1792 A_CIM_HOST_INT_CAUSE,
1805 /* Clear PHY and MAC interrupts for each port. */
1806 for_each_port(adapter, i)
1807 t3_port_intr_clear(adapter, i);
/* Write all-ones to each cause register to acknowledge everything. */
1809 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
1810 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
1812 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
1813 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1817 * t3_port_intr_enable - enable port-specific interrupts
1818 * @adapter: associated adapter
1819 * @idx: index of port whose interrupts should be enabled
1821 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
1824 void t3_port_intr_enable(struct adapter *adapter, int idx)
1826 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1828 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
1829 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1830 phy->ops->intr_enable(phy);
1834 * t3_port_intr_disable - disable port-specific interrupts
1835 * @adapter: associated adapter
1836 * @idx: index of port whose interrupts should be disabled
1838 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
1841 void t3_port_intr_disable(struct adapter *adapter, int idx)
1843 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1845 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
1846 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1847 phy->ops->intr_disable(phy);
1851 * t3_port_intr_clear - clear port-specific interrupts
1852 * @adapter: associated adapter
1853 * @idx: index of port whose interrupts to clear
1855 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
1858 void t3_port_intr_clear(struct adapter *adapter, int idx)
1860 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
/* Acknowledge all latched MAC causes, flush, then clear the PHY. */
1862 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
1863 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
1864 phy->ops->intr_clear(phy);
1868 * t3_sge_write_context - write an SGE context
1869 * @adapter: the adapter
1870 * @id: the context id
1871 * @type: the context type
1873 * Program an SGE context with the values already loaded in the
1874 * CONTEXT_DATA? registers.
1876 static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
/* All-ones masks select every bit of all four context words for writing. */
1879 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
1880 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
1881 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
1882 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
/* Opcode 1 = context write; then poll until the command completes. */
1883 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1884 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
1885 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1890 * t3_sge_init_ecntxt - initialize an SGE egress context
1891 * @adapter: the adapter to configure
1892 * @id: the context id
1893 * @gts_enable: whether to enable GTS for the context
1894 * @type: the egress context type
1895 * @respq: associated response queue
1896 * @base_addr: base address of queue
1897 * @size: number of queue entries
1899 * @gen: initial generation value for the context
1900 * @cidx: consumer pointer
1902 * Initialize an SGE egress context and make it ready for use. If the
1903 * platform allows concurrent context operations, the caller is
1904 * responsible for appropriate locking.
1906 int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
1907 enum sge_context_type type, int respq, u64 base_addr,
1908 unsigned int size, unsigned int token, int gen,
/* Offload queues start without FW credits; others get FW_WR_NUM. */
1911 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
1913 if (base_addr & 0xfff) /* must be 4K aligned */
1915 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Stage the four context words, then commit via t3_sge_write_context. */
1919 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
1920 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
1921 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
1922 V_EC_BASE_LO(base_addr & 0xffff));
1924 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
1926 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1927 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
1928 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
1930 return t3_sge_write_context(adapter, id, F_EGRESS);
1934 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
1935 * @adapter: the adapter to configure
1936 * @id: the context id
1937 * @gts_enable: whether to enable GTS for the context
1938 * @base_addr: base address of queue
1939 * @size: number of queue entries
1940 * @bsize: size of each buffer for this queue
1941 * @cong_thres: threshold to signal congestion to upstream producers
1942 * @gen: initial generation value for the context
1943 * @cidx: consumer pointer
1945 * Initialize an SGE free list context and make it ready for use. The
1946 * caller is responsible for ensuring only one context operation occurs
1949 int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
1950 int gts_enable, u64 base_addr, unsigned int size,
1951 unsigned int bsize, unsigned int cong_thres, int gen,
1954 if (base_addr & 0xfff) /* must be 4K aligned */
1956 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Stage the four context words; cidx and bsize are split across
 * lo/hi fields, then commit via t3_sge_write_context. */
1960 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
1962 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
1963 V_FL_BASE_HI((u32) base_addr) |
1964 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
1965 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
1966 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
1967 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
1968 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1969 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
1970 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
1971 return t3_sge_write_context(adapter, id, F_FREELIST);
1975 * t3_sge_init_rspcntxt - initialize an SGE response queue context
1976 * @adapter: the adapter to configure
1977 * @id: the context id
1978 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
1979 * @base_addr: base address of queue
1980 * @size: number of queue entries
1981 * @fl_thres: threshold for selecting the normal or jumbo free list
1982 * @gen: initial generation value for the context
1983 * @cidx: consumer pointer
1985 * Initialize an SGE response queue context and make it ready for use.
1986 * The caller is responsible for ensuring only one context operation
1989 int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
1990 int irq_vec_idx, u64 base_addr, unsigned int size,
1991 unsigned int fl_thres, int gen, unsigned int cidx)
/* Interrupt field stays 0 when no IRQ was requested (irq_vec_idx < 0). */
1993 unsigned int intr = 0;
1995 if (base_addr & 0xfff) /* must be 4K aligned */
1997 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2001 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2003 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2005 if (irq_vec_idx >= 0)
2006 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2007 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2008 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
2009 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2010 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2014 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2015 * @adapter: the adapter to configure
2016 * @id: the context id
2017 * @base_addr: base address of queue
2018 * @size: number of queue entries
2019 * @rspq: response queue for async notifications
2020 * @ovfl_mode: CQ overflow mode
2021 * @credits: completion queue credits
2022 * @credit_thres: the credit threshold
2024 * Initialize an SGE completion queue context and make it ready for use.
2025 * The caller is responsible for ensuring only one context operation
2028 int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2029 unsigned int size, int rspq, int ovfl_mode,
2030 unsigned int credits, unsigned int credit_thres)
2032 if (base_addr & 0xfff) /* must be 4K aligned */
2034 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Stage the four context words (generation starts at 1), then commit. */
2038 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2039 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2041 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2042 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
2043 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode));
2044 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2045 V_CQ_CREDIT_THRES(credit_thres));
2046 return t3_sge_write_context(adapter, id, F_CQ);
2050 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2051 * @adapter: the adapter
2052 * @id: the egress context id
2053 * @enable: enable (1) or disable (0) the context
2055 * Enable or disable an SGE egress context. The caller is responsible for
2056 * ensuring only one context operation occurs at a time.
2058 int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
2060 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Mask so that only the EC_VALID bit of context word 3 is rewritten. */
2063 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2064 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2065 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2066 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2067 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
/* Opcode 1 = context write; poll for completion. */
2068 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2069 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2070 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2075 * t3_sge_disable_fl - disable an SGE free-buffer list
2076 * @adapter: the adapter
2077 * @id: the free list context id
2079 * Disable an SGE free-buffer list. The caller is responsible for
2080 * ensuring only one context operation occurs at a time.
2082 int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
2084 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Rewrite only the FL_SIZE field of word 2, setting it to 0. */
2087 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2088 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2089 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2090 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2091 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2092 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2093 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2094 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2099 * t3_sge_disable_rspcntxt - disable an SGE response queue
2100 * @adapter: the adapter
2101 * @id: the response queue context id
2103 * Disable an SGE response queue. The caller is responsible for
2104 * ensuring only one context operation occurs at a time.
2106 int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2108 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Rewrite only the CQ_SIZE field of word 0, setting it to 0. */
2111 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2112 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2113 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2114 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2115 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2116 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2117 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2118 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2123 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2124 * @adapter: the adapter
2125 * @id: the completion queue context id
2127 * Disable an SGE completion queue. The caller is responsible for
2128 * ensuring only one context operation occurs at a time.
2130 int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2132 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Rewrite only the CQ_SIZE field of word 0, setting it to 0. */
2135 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2136 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2137 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2138 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2139 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2140 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2141 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2142 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2147 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2148 * @adapter: the adapter
2149 * @id: the context id
2150 * @op: the operation to perform
2152 * Perform the selected operation on an SGE completion queue context.
2153 * The caller is responsible for ensuring only one context operation
2156 int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2157 unsigned int credits)
2161 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Credits are carried in the upper half of context word 0. */
2164 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2165 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2166 V_CONTEXT(id) | F_CQ);
2167 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
/* Ops 2-6 return the CQ index.  On rev 0 parts the command result is
 * unreliable, so re-read the context (opcode 0) to get the index. */
2171 if (op >= 2 && op < 7) {
2172 if (adapter->params.rev > 0)
2173 return G_CQ_INDEX(val);
2175 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2176 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2177 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2178 F_CONTEXT_CMD_BUSY, 0, 5, 1))
2180 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2186 * t3_sge_read_context - read an SGE context
2187 * @type: the context type
2188 * @adapter: the adapter
2189 * @id: the context id
2190 * @data: holds the retrieved context
2192 * Read an SGE egress context. The caller is responsible for ensuring
2193 * only one context operation occurs at a time.
2195 static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2196 unsigned int id, u32 data[4])
2198 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Opcode 0 = context read; wait, then collect all four data words. */
2201 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2202 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2203 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2206 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2207 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2208 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2209 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2214 * t3_sge_read_ecntxt - read an SGE egress context
2215 * @adapter: the adapter
2216 * @id: the context id
2217 * @data: holds the retrieved context
2219 * Read an SGE egress context. The caller is responsible for ensuring
2220 * only one context operation occurs at a time.
/* Read an SGE egress context; thin wrapper selecting type F_EGRESS. */
2222 int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2226 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2230 * t3_sge_read_cq - read an SGE CQ context
2231 * @adapter: the adapter
2232 * @id: the context id
2233 * @data: holds the retrieved context
2235 * Read an SGE CQ context. The caller is responsible for ensuring
2236 * only one context operation occurs at a time.
/* Read an SGE completion-queue context; thin wrapper selecting type F_CQ. */
2238 int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2242 return t3_sge_read_context(F_CQ, adapter, id, data);
2246 * t3_sge_read_fl - read an SGE free-list context
2247 * @adapter: the adapter
2248 * @id: the context id
2249 * @data: holds the retrieved context
2251 * Read an SGE free-list context. The caller is responsible for ensuring
2252 * only one context operation occurs at a time.
/*
 * Read an SGE free-list context.  Each qset owns two free lists, hence
 * the id bound of SGE_QSETS * 2.
 */
2254 int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2256 if (id >= SGE_QSETS * 2)
2258 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2262 * t3_sge_read_rspq - read an SGE response queue context
2263 * @adapter: the adapter
2264 * @id: the context id
2265 * @data: holds the retrieved context
2267 * Read an SGE response queue context. The caller is responsible for
2268 * ensuring only one context operation occurs at a time.
/* Read an SGE response-queue context; one response queue per qset. */
2270 int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2272 if (id >= SGE_QSETS)
2274 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2278 * t3_config_rss - configure Rx packet steering
2279 * @adapter: the adapter
2280 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2281 * @cpus: values for the CPU lookup table (0xff terminated)
2282 * @rspq: values for the response queue lookup table (0xffff terminated)
2284 * Programs the receive packet steering logic. @cpus and @rspq provide
2285 * the values for the CPU and response queue lookup tables. If they
2286 * provide fewer values than the size of the tables the supplied values
2287 * are used repeatedly until the tables are fully populated.
/*
 * Program the RSS CPU lookup table, the response-queue map table and the
 * RSS configuration register.  Short @cpus/@rspq arrays are terminated
 * by 0xff / 0xffff and wrap so that the whole table is populated.
 * NOTE(review): the index-reset lines after the terminator checks are
 * not visible in this chunk.
 */
2289 void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2290 const u8 * cpus, const u16 *rspq)
2292 int i, j, cpu_idx = 0, q_idx = 0;
/* Lookup table: pack two 6-bit CPU values per table entry. */
2295 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2298 for (j = 0; j < 2; ++j) {
2299 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
/* 0xff marks the end of the supplied @cpus values. */
2300 if (cpus[cpu_idx] == 0xff)
2303 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
/* Map table: entry index in bits 16+, queue id in the low half-word. */
2307 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2308 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2309 (i << 16) | rspq[q_idx++]);
/* 0xffff marks the end of the supplied @rspq values. */
2310 if (rspq[q_idx] == 0xffff)
2314 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2318 * t3_read_rss - read the contents of the RSS tables
2319 * @adapter: the adapter
2320 * @lkup: holds the contents of the RSS lookup table
2321 * @map: holds the contents of the RSS map table
2323 * Reads the contents of the receive packet steering tables.
/*
 * Read back the RSS lookup and map tables.  Each entry is fetched by
 * writing its index to the table register and reading the same register
 * back; bit 31 set in the result indicates a valid read.
 */
2325 int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
2331 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2332 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2334 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
/* Bit 31 clear: the entry could not be read back. */
2335 if (!(val & 0x80000000))
2338 *lkup++ = (val >> 8);
/* Same read-back protocol for the response-queue map table. */
2342 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2343 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2345 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2346 if (!(val & 0x80000000))
2354 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2355 * @adap: the adapter
2356 * @enable: 1 to select offload mode, 0 for regular NIC
2358 * Switches TP to NIC/offload mode.
/*
 * Switch TP between plain NIC mode and offload mode by flipping the
 * NICMODE field.  Only offload-capable adapters may leave NIC mode;
 * disabling offload (@enable == 0) is always allowed.
 */
2360 void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2362 if (is_offload(adap) || !enable)
2363 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2364 V_NICMODE(!enable));
2368 * pm_num_pages - calculate the number of pages of the payload memory
2369 * @mem_size: the size of the payload memory
2370 * @pg_size: the size of each payload memory page
2372 * Calculate the number of pages, each of the given size, that fit in a
2373 * memory of the specified size, respecting the HW requirement that the
2374 * number of pages must be a multiple of 24.
/*
 * Number of pg_size pages that fit in mem_size.  Per the kernel-doc
 * above, the result must be a multiple of 24 (HW requirement); the
 * rounding lines of the body are not visible in this chunk.
 */
2376 static inline unsigned int pm_num_pages(unsigned int mem_size,
2377 unsigned int pg_size)
2379 unsigned int n = mem_size / pg_size;
/*
 * Program a TP/CM memory-region base register with the running offset
 * @start.  The continuation of the macro (which advances @start by
 * @size) is not visible in this chunk.
 */
2384 #define mem_region(adap, start, size, reg) \
2385 t3_write_reg((adap), A_ ## reg, (start)); \
2389 * partition_mem - partition memory and configure TP memory settings
2390 * @adap: the adapter
2391 * @p: the TP parameters
2393 * Partitions context and payload memory and configures TP's memory
/*
 * Partition context memory (CM) and payload memory between the TP
 * sub-blocks: PMM Tx/Rx regions, pstruct pool, SGE context areas, timers,
 * free lists, and the CIM SDRAM window.  The leftover CM space determines
 * how many connections can be supported; any TCAM surplus is given to
 * the server region.
 */
2396 static void partition_mem(struct adapter *adap, const struct tp_params *p)
2398 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2399 unsigned int timers = 0, timers_shift = 22;
/* Rev > 0: scale the timer region with the number of supported TIDs. */
2401 if (adap->params.rev > 0) {
2402 if (tids <= 16 * 1024) {
2405 } else if (tids <= 64 * 1024) {
2408 } else if (tids <= 256 * 1024) {
/* Per-channel Rx size in the low half, Tx size in the high half. */
2414 t3_write_reg(adap, A_TP_PMM_SIZE,
2415 p->chan_rx_size | (p->chan_tx_size >> 16));
/* Tx payload-memory layout. */
2417 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2418 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2419 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
/* TXDATAACKIDX encodes log2 of the Tx page size relative to 4KB. */
2420 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2421 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
/* Rx payload-memory layout. */
2423 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2424 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2425 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
/* One pstruct per payload page, plus headroom (added on a hidden line). */
2427 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2428 /* Add a bit of headroom and make multiple of 24 */
2430 pstructs -= pstructs % 24;
2431 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
/* CM layout: TCBs first, then SGE contexts, timers and free lists. */
2433 m = tids * TCB_SIZE;
2434 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2435 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2436 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2437 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2438 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2439 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2440 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2441 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
/* Round up to a 4KB boundary; the rest of CM goes to the CIM. */
2443 m = (m + 4095) & ~0xfff;
2444 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2445 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
/* Derive the supportable connection count from the remaining CM space. */
2447 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2448 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2449 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
/* Give any TCAM surplus to the server region. */
2451 adap->params.mc5.nservers += m - tids;
/* Write a TP register through the indirect PIO address/data register pair. */
2454 static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2457 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2458 t3_write_reg(adap, A_TP_PIO_DATA, val);
/*
 * One-time static TP configuration: checksum offload, TCP options
 * (MTU discovery, window scaling, SACK), delayed-ACK behavior, congestion
 * handling, and — on rev > 0 parts — pacing and per-queue Tx modulation
 * weights.
 */
2461 static void tp_config(struct adapter *adap, const struct tp_params *p)
/* Enable Tx pacing, path-MTU discovery and IP/UDP/TCP checksum offload. */
2463 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2464 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2465 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
/* TCP option defaults: 576-byte MTU fallback, window scaling, SACK. */
2466 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2467 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2468 V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
/* Delayed-ACK automaton thresholds. */
2469 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2470 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2471 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2472 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
/* Start in NIC mode with IPv6 enabled. */
2473 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_IPV6ENABLE | F_NICMODE,
2474 F_IPV6ENABLE | F_NICMODE);
2475 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2476 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
/* ESND is only functional on rev > 0 silicon. */
2477 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2478 adap->params.rev > 0 ? F_ENABLEESND :
2481 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2483 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
2484 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2485 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0);
2486 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2487 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
/* Rev > 0 extras: egress rewrite, auto/strict pacing, TID locking. */
2489 if (adap->params.rev > 0) {
2490 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2491 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2493 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2494 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
/* Rev 0 falls back to fixed pacing. */
2496 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
/* Clear Tx modulation weights/limits to their defaults. */
2498 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2499 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2500 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2501 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2504 /* Desired TP timer resolution in usec */
2505 #define TP_TMR_RES 50
2507 /* TCP timer values in ms */
/* Delayed-ACK timer. */
2508 #define TP_DACK_TIMER 50
/* Minimum retransmission timeout. */
2509 #define TP_RTO_MIN 250
2512 * tp_set_timers - set TP timing parameters
2513 * @adap: the adapter to set
2514 * @core_clk: the core clock frequency in Hz
2516 * Set TP's timing parameters, such as the various timer resolutions and
2517 * the TCP timer values.
/*
 * Program TP's timer resolutions and the TCP timer registers.  All timer
 * resolutions are expressed as core-clock shift amounts; "tps" is the
 * resulting ticks-per-second of the main timer, and the SECONDS macro
 * converts a value in seconds into ticks.
 */
2519 static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
/* Shift giving ~TP_TMR_RES us per tick. */
2521 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2522 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2523 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2524 unsigned int tps = core_clk >> tre;
2526 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2527 V_DELAYEDACKRESOLUTION(dack_re) |
2528 V_TIMESTAMPRESOLUTION(tstamp_re));
2529 t3_write_reg(adap, A_TP_DACK_TIMER,
2530 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
/* Exponential backoff shift table, 16 entries packed 4 per register. */
2531 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2532 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2533 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2534 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
/* Maximum retransmit/persist/keepalive shift counts. */
2535 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2536 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2537 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
/* Convert seconds to timer ticks: "2 SECONDS" reads as 2 * tps. */
2540 #define SECONDS * tps
2542 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2543 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2544 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2545 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2546 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2547 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2548 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2549 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2550 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2556 * t3_tp_set_coalescing_size - set receive coalescing size
2557 * @adap: the adapter
2558 * @size: the receive coalescing size
2559 * @psh: whether a set PSH bit should deliver coalesced data
2561 * Set the receive coalescing size and PSH bit handling.
/*
 * Set the Rx coalescing size (0 disables coalescing) and whether a set
 * PSH bit flushes coalesced data.  The requested size must not exceed
 * MAX_RX_COALESCING_LEN.
 */
2563 int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2567 if (size > MAX_RX_COALESCING_LEN)
/* Rebuild the enable/PSH bits from scratch in PARA_REG3. */
2570 val = t3_read_reg(adap, A_TP_PARA_REG3);
2571 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2574 val |= F_RXCOALESCEENABLE;
2576 val |= F_RXCOALESCEPSHEN;
2577 size = min(MAX_RX_COALESCING_LEN, size);
2578 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2579 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2581 t3_write_reg(adap, A_TP_PARA_REG3, val);
2586 * t3_tp_set_max_rxsize - set the max receive size
2587 * @adap: the adapter
2588 * @size: the max receive size
2590 * Set TP's max receive size. This is the limit that applies when
2591 * receive coalescing is disabled.
/*
 * Set TP's maximum receive size (applies when coalescing is disabled);
 * the same limit is programmed for both PM transfer-length fields.
 */
2593 void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2595 t3_write_reg(adap, A_TP_PARA_REG7,
2596 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
/*
 * Populate the default MTU table.  The actual table values are on lines
 * not visible in this chunk.
 */
2599 static void __devinit init_mtus(unsigned short mtus[])
2602 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2603 * it can accommodate max size TCP/IP headers when SACK and timestamps
2604 * are enabled and still have at least 8 bytes of payload.
2625 * Initial congestion control parameters.
/*
 * Fill the default congestion-control alpha (a[]) and beta (b[]) tables.
 * Additional table entries are assigned on lines not visible in this
 * chunk.
 */
2627 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2629 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2654 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2657 b[13] = b[14] = b[15] = b[16] = 3;
2658 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2659 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2664 /* The minimum additive increment value for the congestion control table */
2665 #define CC_MIN_INCR 2U
2668 * t3_load_mtus - write the MTU and congestion control HW tables
2669 * @adap: the adapter
2670 * @mtus: the unrestricted values for the MTU table
2671 * @alpha: the values for the congestion control alpha parameter
2672 * @beta: the values for the congestion control beta parameter
2673 * @mtu_cap: the maximum permitted effective MTU
2675 * Write the MTU table with the supplied MTUs capping each at &mtu_cap.
2676 * Update the high-speed congestion control table with the supplied alpha,
/*
 * Write the HW MTU table (each entry capped at @mtu_cap and rounded via
 * its log2) and, for every MTU/window pair, the congestion-control
 * additive increment derived from alpha and the per-window average
 * packet count.
 */
2679 void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2680 unsigned short alpha[NCCTRL_WIN],
2681 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
/* Average packets per congestion window, indexed by window. */
2683 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2684 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2685 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2686 28672, 40960, 57344, 81920, 114688, 163840, 229376
2691 for (i = 0; i < NMTUS; ++i) {
2692 unsigned int mtu = min(mtus[i], mtu_cap);
2693 unsigned int log2 = fls(mtu);
/* Round log2 down when the MTU is closer to the lower power of 2. */
2695 if (!(mtu & ((1 << log2) >> 2))) /* round */
/* Table entry: index, log2(MTU) and the MTU itself packed together. */
2697 t3_write_reg(adap, A_TP_MTU_TABLE,
2698 (i << 24) | (log2 << 16) | mtu);
2700 for (w = 0; w < NCCTRL_WIN; ++w) {
/* Increment ~ payload * alpha / avg-pkts, floored at CC_MIN_INCR. */
2703 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2706 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2707 (w << 16) | (beta[w] << 13) | inc);
2713 * t3_read_hw_mtus - returns the values in the HW MTU table
2714 * @adap: the adapter
2715 * @mtus: where to store the HW MTU values
2717 * Reads the HW MTU table.
/*
 * Read back the HW MTU table: select each entry with the 0xff000000
 * read command and mask the 14-bit MTU out of the result.
 */
2719 void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
2723 for (i = 0; i < NMTUS; ++i) {
2726 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2727 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2728 mtus[i] = val & 0x3fff;
2733 * t3_get_cong_cntl_tab - reads the congestion control table
2734 * @adap: the adapter
2735 * @incr: where to store the alpha values
2737 * Reads the additive increments programmed into the HW congestion
/*
 * Read back the congestion-control additive increments for every
 * (MTU, window) pair.  Each entry is selected by writing the 0xffff0000
 * read command plus its index and reading the table register.
 */
2740 void t3_get_cong_cntl_tab(struct adapter *adap,
2741 unsigned short incr[NMTUS][NCCTRL_WIN])
2743 unsigned int mtu, w;
2745 for (mtu = 0; mtu < NMTUS; ++mtu)
2746 for (w = 0; w < NCCTRL_WIN; ++w) {
2747 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2748 0xffff0000 | (mtu << 5) | w);
2749 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
2755 * t3_tp_get_mib_stats - read TP's MIB counters
2756 * @adap: the adapter
2757 * @tps: holds the returned counter values
2759 * Returns the values of TP's MIB counters.
/*
 * Read all of TP's MIB counters into @tps via the indirect MIB
 * index/data register pair.
 */
2761 void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2763 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2764 sizeof(*tps) / sizeof(u32), 0);
/*
 * Program the lower/upper limits of a ULP Rx memory region and advance
 * the running offset @start (the advancing statement is on a macro
 * continuation line not visible in this chunk).
 */
2767 #define ulp_region(adap, name, start, len) \
2768 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2769 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2770 (start) + (len) - 1); \
/* Same for a ULP Tx memory region. */
2773 #define ulptx_region(adap, name, start, len) \
2774 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2775 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2776 (start) + (len) - 1)
/*
 * Carve the per-channel Rx payload memory into the ULP regions (iSCSI,
 * TDDP, TPT, STAG, RQ, PBL) and set the TDDP tag mask.  Note there are
 * separate PBL regions on the Tx (ulptx) and Rx (ulp) sides.
 */
2778 static void ulp_config(struct adapter *adap, const struct tp_params *p)
2780 unsigned int m = p->chan_rx_size;
2782 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2783 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2784 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2785 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2786 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2787 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2788 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
/* Accept all TDDP tags. */
2789 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2793 * t3_set_proto_sram - set the contents of the protocol sram
2794 * @adapter: the adapter
2795 * @data: the protocol image
2797 * Write the contents of the protocol SRAM.
/*
 * Load the protocol SRAM: each line is five 32-bit words written to the
 * EMBED_OP_FIELD registers (highest field first), then committed by a
 * write-command to FIELD0 whose completion is polled.
 */
2799 int t3_set_proto_sram(struct adapter *adap, u8 *data)
2802 u32 *buf = (u32 *)data;
2804 for (i = 0; i < PROTO_SRAM_LINES; i++) {
/* Stage one SRAM line: words are consumed big-endian, FIELD5..FIELD1. */
2805 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
2806 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
2807 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
2808 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
2809 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
/* Commit line i (bit 31 = write command) and wait for completion. */
2811 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2812 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2815 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
/*
 * Program one of the two packet trace filters.  The filter is expressed
 * as four key words and four mask words (src/dst IP and port, protocol,
 * VLAN, interface) written through TP's indirect PIO interface.  Filter
 * index 0 is the Tx filter, any other index selects the Rx filter.
 */
2820 void t3_config_trace_filter(struct adapter *adapter,
2821 const struct trace_params *tp, int filter_index,
2822 int invert, int enable)
2824 u32 addr, key[4], mask[4];
/* Pack the match key: sport/sip, sip/dport, (dip on a hidden line), proto/vlan/intf. */
2826 key[0] = tp->sport | (tp->sip << 16);
2827 key[1] = (tp->sip >> 16) | (tp->dport << 16);
2829 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
/* Pack the corresponding mask words the same way. */
2831 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2832 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2833 mask[2] = tp->dip_mask;
2834 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
/* Control bits in key[3]: bit 29 and bit 28 (presumably enable/invert
 * per the parameters — the guarding conditions are on hidden lines). */
2837 key[3] |= (1 << 29);
2839 key[3] |= (1 << 28);
2841 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
/* Key and mask words are interleaved at consecutive indirect addresses. */
2842 tp_wr_indirect(adapter, addr++, key[0]);
2843 tp_wr_indirect(adapter, addr++, mask[0]);
2844 tp_wr_indirect(adapter, addr++, key[1]);
2845 tp_wr_indirect(adapter, addr++, mask[1]);
2846 tp_wr_indirect(adapter, addr++, key[2]);
2847 tp_wr_indirect(adapter, addr++, mask[2]);
2848 tp_wr_indirect(adapter, addr++, key[3]);
2849 tp_wr_indirect(adapter, addr, mask[3]);
/* Read back to flush the posted writes. */
2850 t3_read_reg(adapter, A_TP_PIO_DATA);
2854 * t3_config_sched - configure a HW traffic scheduler
2855 * @adap: the adapter
2856 * @kbps: target rate in Kbps
2857 * @sched: the scheduler index
2859 * Configure a HW scheduler for the target rate
/*
 * Configure a HW traffic scheduler for a target rate.  The rate is
 * realized as "bpt bytes every cpt core-clock ticks"; the loop searches
 * all cpt values for the (cpt, bpt) pair whose achieved rate is closest
 * to the request, then programs the pair into the scheduler's half of
 * the shared rate-limit register.
 */
2861 int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
2863 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2864 unsigned int clk = adap->params.vpd.cclk * 1000;
2865 unsigned int selected_cpt = 0, selected_bpt = 0;
2868 kbps *= 125; /* -> bytes */
2869 for (cpt = 1; cpt <= 255; cpt++) {
/* Bytes per tick needed at this ticks-per-second, rounded to nearest. */
2871 bpt = (kbps + tps / 2) / tps;
2872 if (bpt > 0 && bpt <= 255) {
2874 delta = v >= kbps ? v - kbps : kbps - v;
/* Track the closest achievable rate so far. */
2875 if (delta <= mindelta) {
/* Once a selection exists, larger cpt values can only be worse. */
2880 } else if (selected_cpt)
/* Two schedulers share each rate-limit register. */
2886 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
2887 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
2888 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
/* Even schedulers use the high half-word, odd ones the low half-word. */
2890 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
2892 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
2893 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
/*
 * Runtime TP initialization: enable VLAN acceleration and, on offload
 * adapters, set the TP timers and kick off (then wait for) the free-list
 * initialization; plain NICs just reset TP.
 */
2897 static int tp_init(struct adapter *adap, const struct tp_params *p)
2902 t3_set_vlan_accel(adap, 3, 0);
2904 if (is_offload(adap)) {
2905 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
/* Start free-list init and poll for it to complete. */
2906 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
2907 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
2910 CH_ERR(adap, "TP initialization timed out\n");
2914 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
/*
 * Set the active ports in the MPS.  @port_mask must only contain bits
 * for ports the adapter actually has; the mask is shifted into the
 * PORT0ACTIVE/PORT1ACTIVE field of MPS_CFG.
 */
2918 int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
2920 if (port_mask & ~((1 << adap->params.nports) - 1))
2922 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
2923 port_mask << S_PORT0ACTIVE);
2928 * Perform the bits of HW initialization that are dependent on the number
2929 * of available ports.
/*
 * Port-count-dependent HW setup.  Single-port adapters disable the
 * ULP round-robin arbiters and enable only port 0; dual-port adapters
 * enable round-robin arbitration, equal DMA weights, both ports, and
 * the Tx modulation queue mapping.
 */
2931 static void init_hw_for_avail_ports(struct adapter *adap, int nports)
/* Single-port configuration: no round-robin, port 0 only. */
2936 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
2937 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
2938 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
2939 F_PORT0ACTIVE | F_ENFORCEPKT);
2940 t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
/* Dual-port configuration: round-robin arbitration, equal weights. */
2942 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
2943 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
2944 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
2945 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
2946 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
2947 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
2949 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
2950 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
/* Alternate Tx modulation queues between the two ports (0xaa pattern). */
2951 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
2952 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
2953 for (i = 0; i < 16; i++)
2954 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
2955 (i << 16) | 0x1010);
/*
 * Calibrate the XGMAC serdes impedance.  XAUI ports run up to five HW
 * auto-calibration attempts and latch the result; RGMII ports are
 * programmed with fixed pull-up/pull-down values instead.
 */
2959 static int calibrate_xgm(struct adapter *adapter)
2961 if (uses_xaui(adapter)) {
2964 for (i = 0; i < 5; ++i) {
/* Kick off a calibration cycle; the read flushes the posted write. */
2965 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
2966 t3_read_reg(adapter, A_XGM_XAUI_IMP);
2968 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
/* Done with no fault: latch the measured impedance value. */
2969 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
2970 t3_write_reg(adapter, A_XGM_XAUI_IMP,
2971 V_XAUIIMP(G_CALIMP(v) >> 2));
2975 CH_ERR(adapter, "MAC calibration failed\n");
/* RGMII: fixed impedance settings, then trigger the update. */
2978 t3_write_reg(adapter, A_XGM_RGMII_IMP,
2979 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
2980 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
2981 F_XGM_IMPSETUPDATE);
/*
 * T3B-specific RGMII impedance calibration: program the fixed values
 * with CALRESET asserted, release the reset, then pulse IMPSETUPDATE
 * and CALUPDATE to apply the settings.
 */
2986 static void calibrate_xgm_t3b(struct adapter *adapter)
2988 if (!uses_xaui(adapter)) {
2989 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
2990 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
2991 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
2992 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
2993 F_XGM_IMPSETUPDATE);
2994 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
2996 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
2997 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
/*
 * MC7 (memory controller) timing parameters, in controller clock cycles.
 * RefCyc is indexed by memory density.
 */
3001 struct mc7_timing_params {
3002 unsigned char ActToPreDly;
3003 unsigned char ActToRdWrDly;
3004 unsigned char PreCyc;
3005 unsigned char RefCyc[5];
3006 unsigned char BkCyc;
3007 unsigned char WrToRdDly;
3008 unsigned char RdToWrDly;
3012 * Write a value to a register and check that the write completed. These
3013 * writes normally complete in a cycle or two, so one read should suffice.
3014 * The very first read exists to flush the posted write to the device.
/*
 * Write an MC7 command register and verify completion.  The first read
 * flushes the posted write; the second checks that F_BUSY has cleared
 * (these writes normally complete within a cycle or two).
 */
3016 static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3018 t3_write_reg(adapter, addr, val);
3019 t3_read_reg(adapter, addr); /* flush */
3020 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3022 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
/*
 * Bring up an MC7 memory controller: interface enable, impedance
 * calibration, timing programming, DLL/clock enable, the JEDEC-style
 * DRAM mode-register initialization sequence, refresh setup, ECC
 * enable, and finally a full-range BIST pass before declaring the
 * memory ready.  @mem_type indexes the mode and timing tables.
 */
3026 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
/* DRAM mode-register values per memory type. */
3028 static const unsigned int mc7_mode[] = {
3029 0x632, 0x642, 0x652, 0x432, 0x442
3031 static const struct mc7_timing_params mc7_timings[] = {
3032 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
3033 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
3034 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
3035 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
3036 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
3040 unsigned int width, density, slow, attempts;
3041 struct adapter *adapter = mc7->adapter;
3042 const struct mc7_timing_params *p = &mc7_timings[mem_type];
/* Read back the strap-derived configuration. */
3047 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3048 slow = val & F_SLOW;
3049 width = G_WIDTH(val);
3050 density = G_DEN(val);
/* Enable the memory interface before touching anything else. */
3052 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3053 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
/* Single-shot impedance calibration; must finish without fault. */
3057 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3058 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3060 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3061 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3062 CH_ERR(adapter, "%s MC7 calibration timed out\n",
/* Program the DRAM timing parameters for this memory type/density. */
3068 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3069 V_ACTTOPREDLY(p->ActToPreDly) |
3070 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3071 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3072 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3074 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3075 val | F_CLKEN | F_TERM150);
3076 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3079 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
/* DRAM init sequence: precharge, extended mode registers, mode register. */
3084 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3085 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3086 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3087 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3091 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3092 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
/* Precharge, two refreshes, mode register, then DLL enable/disable. */
3096 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3097 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3098 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3099 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3100 mc7_mode[mem_type]) ||
3101 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3102 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3105 /* clock value is in KHz */
3106 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
3107 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
/* Enable periodic refresh at the computed divider. */
3109 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3110 F_PERREFEN | V_PREREFDIV(mc7_clock));
3111 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
/* Enable ECC generation/checking, then BIST the entire address range. */
3113 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
3114 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3115 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3116 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3117 (mc7->size << width) - 1);
3118 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3119 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
/* Poll until the BIST engine goes idle or we run out of attempts. */
3124 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3125 } while ((val & F_BUSY) && --attempts);
3127 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3131 /* Enable normal memory accesses. */
3132 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
/*
 * Tune PCIe ACK latency and replay timer limits.  The lookup tables are
 * indexed by [log2(link width)][max payload size]; the values are
 * further adjusted by the fast-training sequence counts, and by L0s
 * being enabled in the Link Control register.
 */
3139 static void config_pcie(struct adapter *adap)
/* ACK latency, indexed by [log2 width][payload-size code]. */
3141 static const u16 ack_lat[4][6] = {
3142 {237, 416, 559, 1071, 2095, 4143},
3143 {128, 217, 289, 545, 1057, 2081},
3144 {73, 118, 154, 282, 538, 1050},
3145 {67, 107, 86, 150, 278, 534}
/* Replay timer limits with the same indexing. */
3147 static const u16 rpl_tmr[4][6] = {
3148 {711, 1248, 1677, 3213, 6285, 12429},
3149 {384, 651, 867, 1635, 3171, 6243},
3150 {219, 354, 462, 846, 1614, 3150},
3151 {201, 321, 258, 450, 834, 1602}
3155 unsigned int log2_width, pldsize;
3156 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
/* Max payload size from the Device Control register. */
3158 pci_read_config_word(adap->pdev,
3159 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3161 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3162 pci_read_config_word(adap->pdev,
3163 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
/* Fast-training sequence counts; rev 0 lacks a separate Rx count. */
3166 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3167 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3168 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3169 log2_width = fls(adap->params.pci.width) - 1;
3170 acklat = ack_lat[log2_width][pldsize];
3171 if (val & 1) /* check LOsEnable */
3172 acklat += fst_trn_tx * 4;
3173 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
/* T3A (rev 0) uses a different ACK latency field layout. */
3175 if (adap->params.rev == 0)
3176 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3177 V_T3A_ACKLAT(M_T3A_ACKLAT),
3178 V_T3A_ACKLAT(acklat));
3180 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3183 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3184 V_REPLAYLMT(rpllmt));
/* Clear any stale PCIe errors and enable completion-ID decoding. */
3186 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3187 t3_set_reg_field(adap, A_PCIE_CFG, F_PCIE_CLIDECEN, F_PCIE_CLIDECEN);
3191 * Initialize and configure T3 HW modules. This performs the
3192 * initialization steps that need to be done once after a card is reset.
3193 * MAC and PHY initialization is handled separately whenever a port is enabled.
3195 * fw_params are passed to FW and their value is platform dependent. Only the
3196 * top 8 bits are available for use, the rest must be 0.
/*
 * One-time HW initialization after reset: XGMAC calibration, memory
 * partitioning, MC7 and MC5 bring-up, TP configuration, coalescing and
 * size limits, ULP regions, PCIe/PCI-X tuning, per-port setup, SGE
 * init, and finally booting the firmware from flash and waiting for the
 * embedded uP to come up.  Returns 0 on success, -EIO on any failure.
 */
3198 int t3_init_hw(struct adapter *adapter, u32 fw_params)
3200 int err = -EIO, attempts = 100;
3201 const struct vpd_params *vpd = &adapter->params.vpd;
/* Rev > 0 parts use the T3B calibration routine. */
3203 if (adapter->params.rev > 0)
3204 calibrate_xgm_t3b(adapter);
3205 else if (calibrate_xgm(adapter))
3209 partition_mem(adapter, &adapter->params.tp);
/* Bring up the three MC7 memories and the MC5 TCAM. */
3211 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3212 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3213 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3214 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3215 adapter->params.mc5.nfilters,
3216 adapter->params.mc5.nroutes))
3220 if (tp_init(adapter, &adapter->params.tp))
/* Clamp coalescing and max Rx size to what the SGE supports. */
3223 t3_tp_set_coalescing_size(adapter,
3224 min(adapter->params.sge.max_pkt_size,
3225 MAX_RX_COALESCING_LEN), 1);
3226 t3_tp_set_max_rxsize(adapter,
3227 min(adapter->params.sge.max_pkt_size, 16384U));
3228 ulp_config(adapter, &adapter->params.tp);
3230 if (is_pcie(adapter))
3231 config_pcie(adapter);
/* PCI-X path: just enable completion-ID decoding. */
3233 t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);
3235 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3236 t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3237 t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3238 init_hw_for_avail_ports(adapter, adapter->params.nports);
3239 t3_sge_init(adapter, &adapter->params.sge);
/* Hand the uP its clock/parameters and point it at the flash firmware. */
3241 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3242 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3243 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3244 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3246 do { /* wait for uP to initialize */
/* The uP clears HOST_ACC_DATA when it has finished initializing. */
3248 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3250 CH_ERR(adapter, "uP initialization timed out\n");
3260 * get_pci_mode - determine a card's PCI mode
3261 * @adapter: the adapter
3262 * @p: where to store the PCI settings
3264 * Determines a card's PCI mode and associated parameters, such as speed
/*
 * Detect whether the card sits on PCIe, PCI or PCI-X and record the
 * variant, bus speed and link/bus width in @p.  PCIe is detected via the
 * express capability; otherwise the PCI-X mode register classifies the
 * conventional variants.
 */
3267 static void __devinit get_pci_mode(struct adapter *adapter,
3268 struct pci_params *p)
3270 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3271 u32 pci_mode, pcie_cap;
3273 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
/* PCIe: record the capability offset and negotiated link width. */
3277 p->variant = PCI_VARIANT_PCIE;
3278 p->pcie_cap_addr = pcie_cap;
3279 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3281 p->width = (val >> 4) & 0x3f;
/* Conventional PCI / PCI-X: decode speed, width and init pattern. */
3285 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3286 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3287 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3288 pci_mode = G_PCIXINITPAT(pci_mode);
3290 p->variant = PCI_VARIANT_PCI;
3291 else if (pci_mode < 4)
3292 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3293 else if (pci_mode < 8)
3294 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3296 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3300 * init_link_config - initialize a link's SW state
3301 * @lc: structure holding the link state
3302 * @ai: information about the current card
3304 * Initializes the SW state maintained for each link, including the link's
3305 * capabilities and default speed/duplex/flow-control/autonegotiation
/*
 * Initialize a link's software state: capabilities from @caps, speed and
 * duplex unresolved, symmetric flow control requested.  Autoneg-capable
 * links advertise everything and negotiate flow control; others have
 * autoneg disabled.
 */
3308 static void __devinit init_link_config(struct link_config *lc,
3311 lc->supported = caps;
3312 lc->requested_speed = lc->speed = SPEED_INVALID;
3313 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3314 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3315 if (lc->supported & SUPPORTED_Autoneg) {
3316 lc->advertising = lc->supported;
3317 lc->autoneg = AUTONEG_ENABLE;
3318 lc->requested_fc |= PAUSE_AUTONEG;
3320 lc->advertising = 0;
3321 lc->autoneg = AUTONEG_DISABLE;
3326 * mc7_calc_size - calculate MC7 memory size
3327 * @cfg: the MC7 configuration
3329 * Calculates the size of an MC7 memory in bytes from the value of its
3330 * configuration register.
/*
 * Compute an MC7 memory's size (the visible arithmetic yields MBs) from
 * its configuration register: base 256 scaled by density and bank
 * count, divided by organization and width.
 */
3332 static unsigned int __devinit mc7_calc_size(u32 cfg)
3334 unsigned int width = G_WIDTH(cfg);
3335 unsigned int banks = !!(cfg & F_BKS) + 1;
3336 unsigned int org = !!(cfg & F_ORG) + 1;
3337 unsigned int density = G_DEN(cfg);
3338 unsigned int MBs = ((256 << density) * banks) / (org << width);
3343 static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3344 unsigned int base_addr, const char *name)
3348 mc7->adapter = adapter;
3350 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3351 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3352 mc7->size = mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3353 mc7->width = G_WIDTH(cfg);
3356 void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3358 mac->adapter = adapter;
3359 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3362 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3363 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3364 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3365 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3370 void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
3372 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3374 mi1_init(adapter, ai);
3375 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3376 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3377 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3378 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3379 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3381 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3384 /* Enable MAC clocks so we can access the registers */
3385 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3386 t3_read_reg(adapter, A_XGM_PORT_CFG);
3388 val |= F_CLKDIVRESET_;
3389 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3390 t3_read_reg(adapter, A_XGM_PORT_CFG);
3391 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3392 t3_read_reg(adapter, A_XGM_PORT_CFG);
3396 * Reset the adapter.
3397 * Older PCIe cards lose their config space during reset, PCI-X
3400 int t3_reset_adapter(struct adapter *adapter)
3402 int i, save_and_restore_pcie =
3403 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3406 if (save_and_restore_pcie)
3407 pci_save_state(adapter->pdev);
3408 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3411 * Delay. Give Some time to device to reset fully.
3412 * XXX The delay time should be modified.
3414 for (i = 0; i < 10; i++) {
3416 pci_read_config_word(adapter->pdev, 0x00, &devid);
3417 if (devid == 0x1425)
3421 if (devid != 0x1425)
3424 if (save_and_restore_pcie)
3425 pci_restore_state(adapter->pdev);
3430 * Initialize adapter SW state for the various HW modules, set initial values
3431 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3434 int __devinit t3_prep_adapter(struct adapter *adapter,
3435 const struct adapter_info *ai, int reset)
3438 unsigned int i, j = 0;
3440 get_pci_mode(adapter, &adapter->params.pci);
3442 adapter->params.info = ai;
3443 adapter->params.nports = ai->nports;
3444 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3445 adapter->params.linkpoll_period = 0;
3446 adapter->params.stats_update_period = is_10G(adapter) ?
3447 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3448 adapter->params.pci.vpd_cap_addr =
3449 pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3450 ret = get_vpd_params(adapter, &adapter->params.vpd);
3454 if (reset && t3_reset_adapter(adapter))
3457 t3_sge_prep(adapter, &adapter->params.sge);
3459 if (adapter->params.vpd.mclk) {
3460 struct tp_params *p = &adapter->params.tp;
3462 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3463 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3464 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3466 p->nchan = ai->nports;
3467 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3468 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3469 p->cm_size = t3_mc7_size(&adapter->cm);
3470 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3471 p->chan_tx_size = p->pmtx_size / p->nchan;
3472 p->rx_pg_size = 64 * 1024;
3473 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3474 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3475 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3476 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3477 adapter->params.rev > 0 ? 12 : 6;
3480 adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3481 t3_mc7_size(&adapter->pmtx) &&
3482 t3_mc7_size(&adapter->cm);
3484 if (is_offload(adapter)) {
3485 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3486 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3487 DEFAULT_NFILTERS : 0;
3488 adapter->params.mc5.nroutes = 0;
3489 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3491 init_mtus(adapter->params.mtus);
3492 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3495 early_hw_init(adapter, ai);
3497 for_each_port(adapter, i) {
3499 struct port_info *p = adap2pinfo(adapter, i);
3501 while (!adapter->params.vpd.port_type[j])
3504 p->port_type = &port_types[adapter->params.vpd.port_type[j]];
3505 p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3507 mac_prep(&p->mac, adapter, j);
3511 * The VPD EEPROM stores the base Ethernet address for the
3512 * card. A port's address is derived from the base by adding
3513 * the port's index to the base's low octet.
3515 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3516 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3518 memcpy(adapter->port[i]->dev_addr, hw_addr,
3520 memcpy(adapter->port[i]->perm_addr, hw_addr,
3522 init_link_config(&p->link_config, p->port_type->caps);
3523 p->phy.ops->power_down(&p->phy, 1);
3524 if (!(p->port_type->caps & SUPPORTED_IRQ))
3525 adapter->params.linkpoll_period = 10;
3531 void t3_led_ready(struct adapter *adapter)
3533 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,