2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include "firmware_exports.h"
38 * t3_wait_op_done_val - wait until an operation is completed
39 * @adapter: the adapter performing the operation
40 * @reg: the register to check for completion
41 * @mask: a single-bit field within @reg that indicates completion
42 * @polarity: the value of the field when the operation is completed
43 * @attempts: number of check iterations
44 * @delay: delay in usecs between iterations
45 * @valp: where to store the value of the register at completion time
47 * Wait until an operation is completed by checking a bit in a register
48 * up to @attempts times. If @valp is not NULL the value of the register
49 * at the time it indicated completion is stored there. Returns 0 if the
50 * operation completes and -EAGAIN otherwise.
53 int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
54 int polarity, int attempts, int delay, u32 *valp)
57 u32 val = t3_read_reg(adapter, reg);
59 if (!!(val & mask) == polarity) {
72 * t3_write_regs - write a bunch of registers
73 * @adapter: the adapter to program
74 * @p: an array of register address/register value pairs
75 * @n: the number of address/value pairs
76 * @offset: register address offset
78 * Takes an array of register address/register value pairs and writes each
79 * value to the corresponding register. Register addresses are adjusted
80 * by the supplied offset.
82 void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
83 int n, unsigned int offset)
86 t3_write_reg(adapter, p->reg_addr + offset, p->val);
92 * t3_set_reg_field - set a register field to a value
93 * @adapter: the adapter to program
94 * @addr: the register address
95 * @mask: specifies the portion of the register to modify
96 * @val: the new value for the register field
98 * Sets a register field specified by the supplied mask to the
101 void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
104 u32 v = t3_read_reg(adapter, addr) & ~mask;
106 t3_write_reg(adapter, addr, v | val);
107 t3_read_reg(adapter, addr); /* flush */
111 * t3_read_indirect - read indirectly addressed registers
113 * @addr_reg: register holding the indirect address
114 * @data_reg: register holding the value of the indirect register
115 * @vals: where the read register values are stored
116 * @start_idx: index of first indirect register to read
117 * @nregs: how many indirect registers to read
119 * Reads registers that are accessed indirectly through an address/data
122 void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
123 unsigned int data_reg, u32 *vals, unsigned int nregs,
124 unsigned int start_idx)
127 t3_write_reg(adap, addr_reg, start_idx);
128 *vals++ = t3_read_reg(adap, data_reg);
134 * t3_mc7_bd_read - read from MC7 through backdoor accesses
135 * @mc7: identifies MC7 to read from
136 * @start: index of first 64-bit word to read
137 * @n: number of 64-bit words to read
138 * @buf: where to store the read result
140 * Read n 64-bit words from MC7 starting at word start, using backdoor
143 int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
146 static const int shift[] = { 0, 0, 16, 24 };
147 static const int step[] = { 0, 32, 16, 8 };
149 unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
150 struct adapter *adap = mc7->adapter;
152 if (start >= size64 || start + n > size64)
155 start *= (8 << mc7->width);
160 for (i = (1 << mc7->width) - 1; i >= 0; --i) {
164 t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
165 t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
166 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
167 while ((val & F_BUSY) && attempts--)
168 val = t3_read_reg(adap,
169 mc7->offset + A_MC7_BD_OP);
173 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
174 if (mc7->width == 0) {
175 val64 = t3_read_reg(adap,
178 val64 |= (u64) val << 32;
181 val >>= shift[mc7->width];
182 val64 |= (u64) val << (step[mc7->width] * i);
194 static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
196 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
197 u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
200 if (!(ai->caps & SUPPORTED_10000baseT_Full))
202 t3_write_reg(adap, A_MI1_CFG, val);
205 #define MDIO_ATTEMPTS 10
208 * MI1 read/write operations for direct-addressed PHYs.
210 static int mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
211 int reg_addr, unsigned int *valp)
214 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
219 mutex_lock(&adapter->mdio_lock);
220 t3_write_reg(adapter, A_MI1_ADDR, addr);
221 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
222 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
224 *valp = t3_read_reg(adapter, A_MI1_DATA);
225 mutex_unlock(&adapter->mdio_lock);
229 static int mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
230 int reg_addr, unsigned int val)
233 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
238 mutex_lock(&adapter->mdio_lock);
239 t3_write_reg(adapter, A_MI1_ADDR, addr);
240 t3_write_reg(adapter, A_MI1_DATA, val);
241 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
242 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
243 mutex_unlock(&adapter->mdio_lock);
247 static const struct mdio_ops mi1_mdio_ops = {
253 * MI1 read/write operations for indirect-addressed PHYs.
255 static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
256 int reg_addr, unsigned int *valp)
259 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
261 mutex_lock(&adapter->mdio_lock);
262 t3_write_reg(adapter, A_MI1_ADDR, addr);
263 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
264 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
265 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
267 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
268 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
271 *valp = t3_read_reg(adapter, A_MI1_DATA);
273 mutex_unlock(&adapter->mdio_lock);
277 static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
278 int reg_addr, unsigned int val)
281 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
283 mutex_lock(&adapter->mdio_lock);
284 t3_write_reg(adapter, A_MI1_ADDR, addr);
285 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
286 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
287 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
289 t3_write_reg(adapter, A_MI1_DATA, val);
290 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
291 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
294 mutex_unlock(&adapter->mdio_lock);
298 static const struct mdio_ops mi1_mdio_ext_ops = {
/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Changes the value of a PHY register by applying a mask to its current
 *	value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}
329 * t3_phy_reset - reset a PHY block
330 * @phy: the PHY to operate on
331 * @mmd: the device address of the PHY block to reset
332 * @wait: how long to wait for the reset to complete in 1ms increments
334 * Resets a PHY block and optionally waits for the reset to complete.
335 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
338 int t3_phy_reset(struct cphy *phy, int mmd, int wait)
343 err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
348 err = mdio_read(phy, mmd, MII_BMCR, &ctl);
354 } while (ctl && --wait);
360 * t3_phy_advertise - set the PHY advertisement registers for autoneg
361 * @phy: the PHY to operate on
362 * @advert: bitmap of capabilities the PHY should advertise
364 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
365 * requested capabilities.
367 int t3_phy_advertise(struct cphy *phy, unsigned int advert)
370 unsigned int val = 0;
372 err = mdio_read(phy, 0, MII_CTRL1000, &val);
376 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
377 if (advert & ADVERTISED_1000baseT_Half)
378 val |= ADVERTISE_1000HALF;
379 if (advert & ADVERTISED_1000baseT_Full)
380 val |= ADVERTISE_1000FULL;
382 err = mdio_write(phy, 0, MII_CTRL1000, val);
387 if (advert & ADVERTISED_10baseT_Half)
388 val |= ADVERTISE_10HALF;
389 if (advert & ADVERTISED_10baseT_Full)
390 val |= ADVERTISE_10FULL;
391 if (advert & ADVERTISED_100baseT_Half)
392 val |= ADVERTISE_100HALF;
393 if (advert & ADVERTISED_100baseT_Full)
394 val |= ADVERTISE_100FULL;
395 if (advert & ADVERTISED_Pause)
396 val |= ADVERTISE_PAUSE_CAP;
397 if (advert & ADVERTISED_Asym_Pause)
398 val |= ADVERTISE_PAUSE_ASYM;
399 return mdio_write(phy, 0, MII_ADVERTISE, val);
403 * t3_set_phy_speed_duplex - force PHY speed and duplex
404 * @phy: the PHY to operate on
405 * @speed: requested PHY speed
406 * @duplex: requested PHY duplex
408 * Force a 10/100/1000 PHY's speed and duplex. This also disables
409 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
411 int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
416 err = mdio_read(phy, 0, MII_BMCR, &ctl);
421 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
422 if (speed == SPEED_100)
423 ctl |= BMCR_SPEED100;
424 else if (speed == SPEED_1000)
425 ctl |= BMCR_SPEED1000;
428 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
429 if (duplex == DUPLEX_FULL)
430 ctl |= BMCR_FULLDPLX;
432 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
433 ctl |= BMCR_ANENABLE;
434 return mdio_write(phy, 0, MII_BMCR, ctl);
437 static const struct adapter_info t3_adap_info[] = {
439 F_GPIO2_OEN | F_GPIO4_OEN |
440 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
442 &mi1_mdio_ops, "Chelsio PE9000"},
444 F_GPIO2_OEN | F_GPIO4_OEN |
445 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
447 &mi1_mdio_ops, "Chelsio T302"},
449 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
450 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
451 SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
452 &mi1_mdio_ext_ops, "Chelsio T310"},
454 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
455 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
456 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
457 SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
458 &mi1_mdio_ext_ops, "Chelsio T320"},
462 * Return the adapter_info structure with a given index. Out-of-range indices
465 const struct adapter_info *t3_get_adapter_info(unsigned int id)
467 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
470 #define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
471 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
472 #define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)
474 static const struct port_type_info port_types[] = {
476 {t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
478 {t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
479 "10/100/1000BASE-T"},
480 {NULL, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
481 "10/100/1000BASE-T"},
482 {t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
483 {NULL, CAPS_10G, "10GBASE-KX4"},
484 {t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
485 {t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
487 {NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
493 #define VPD_ENTRY(name, len) \
494 u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
497 * Partial EEPROM Vital Product Data structure. Includes only the ID and
506 VPD_ENTRY(pn, 16); /* part number */
507 VPD_ENTRY(ec, 16); /* EC level */
508 VPD_ENTRY(sn, 16); /* serial number */
509 VPD_ENTRY(na, 12); /* MAC address base */
510 VPD_ENTRY(cclk, 6); /* core clock */
511 VPD_ENTRY(mclk, 6); /* mem clock */
512 VPD_ENTRY(uclk, 6); /* uP clk */
513 VPD_ENTRY(mdc, 6); /* MDIO clk */
514 VPD_ENTRY(mt, 2); /* mem timing */
515 VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
516 VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
517 VPD_ENTRY(port0, 2); /* PHY0 complex */
518 VPD_ENTRY(port1, 2); /* PHY1 complex */
519 VPD_ENTRY(port2, 2); /* PHY2 complex */
520 VPD_ENTRY(port3, 2); /* PHY3 complex */
521 VPD_ENTRY(rv, 1); /* csum */
522 u32 pad; /* for multiple-of-4 sizing and alignment */
525 #define EEPROM_MAX_POLL 4
526 #define EEPROM_STAT_ADDR 0x4000
527 #define VPD_BASE 0xc00
530 * t3_seeprom_read - read a VPD EEPROM location
531 * @adapter: adapter to read
532 * @addr: EEPROM address
533 * @data: where to store the read data
535 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
536 * VPD ROM capability. A zero is written to the flag bit when the
537 * addres is written to the control register. The hardware device will
538 * set the flag to 1 when 4 bytes have been read into the data register.
540 int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data)
543 int attempts = EEPROM_MAX_POLL;
544 unsigned int base = adapter->params.pci.vpd_cap_addr;
546 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
549 pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
552 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
553 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
555 if (!(val & PCI_VPD_ADDR_F)) {
556 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
559 pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, data);
560 *data = le32_to_cpu(*data);
565 * t3_seeprom_write - write a VPD EEPROM location
566 * @adapter: adapter to write
567 * @addr: EEPROM address
568 * @data: value to write
570 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
571 * VPD ROM capability.
573 int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data)
576 int attempts = EEPROM_MAX_POLL;
577 unsigned int base = adapter->params.pci.vpd_cap_addr;
579 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
582 pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
584 pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR,
585 addr | PCI_VPD_ADDR_F);
588 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
589 } while ((val & PCI_VPD_ADDR_F) && --attempts);
591 if (val & PCI_VPD_ADDR_F) {
592 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
599 * t3_seeprom_wp - enable/disable EEPROM write protection
600 * @adapter: the adapter
601 * @enable: 1 to enable write protection, 0 to disable it
603 * Enables or disables write protection on the serial EEPROM.
605 int t3_seeprom_wp(struct adapter *adapter, int enable)
607 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
/*
 * Convert a character holding a hex digit to a number.
 * Assumes the input is a valid hex digit ('0'-'9', 'a'-'f', 'A'-'F').
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}
619 * get_vpd_params - read VPD parameters from VPD EEPROM
620 * @adapter: adapter to read
621 * @p: where to store the parameters
623 * Reads card parameters stored in VPD EEPROM.
625 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
631 * Card information is normally at VPD_BASE but some early cards had
634 ret = t3_seeprom_read(adapter, VPD_BASE, (u32 *)&vpd);
637 addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
639 for (i = 0; i < sizeof(vpd); i += 4) {
640 ret = t3_seeprom_read(adapter, addr + i,
641 (u32 *)((u8 *)&vpd + i));
646 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
647 p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
648 p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
649 p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
650 p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
652 /* Old eeproms didn't have port information */
653 if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
654 p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
655 p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
657 p->port_type[0] = hex2int(vpd.port0_data[0]);
658 p->port_type[1] = hex2int(vpd.port1_data[0]);
659 p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
660 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
663 for (i = 0; i < 6; i++)
664 p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
665 hex2int(vpd.na_data[2 * i + 1]);
669 /* serial flash and firmware constants */
671 SF_ATTEMPTS = 5, /* max retries for SF1 operations */
672 SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
673 SF_SIZE = SF_SEC_SIZE * 8, /* serial flash size */
675 /* flash command opcodes */
676 SF_PROG_PAGE = 2, /* program page */
677 SF_WR_DISABLE = 4, /* disable writes */
678 SF_RD_STATUS = 5, /* read status register */
679 SF_WR_ENABLE = 6, /* enable writes */
680 SF_RD_DATA_FAST = 0xb, /* read flash */
681 SF_ERASE_SECTOR = 0xd8, /* erase sector */
683 FW_FLASH_BOOT_ADDR = 0x70000, /* start address of FW in flash */
684 FW_VERS_ADDR = 0x77ffc, /* flash address holding FW version */
685 FW_MIN_SIZE = 8 /* at least version and csum */
689 * sf1_read - read data from the serial flash
690 * @adapter: the adapter
691 * @byte_cnt: number of bytes to read
692 * @cont: whether another operation will be chained
693 * @valp: where to store the read data
695 * Reads up to 4 bytes of data from the serial flash. The location of
696 * the read needs to be specified prior to calling this by issuing the
697 * appropriate commands to the serial flash.
699 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
704 if (!byte_cnt || byte_cnt > 4)
706 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
708 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
709 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
711 *valp = t3_read_reg(adapter, A_SF_DATA);
716 * sf1_write - write data to the serial flash
717 * @adapter: the adapter
718 * @byte_cnt: number of bytes to write
719 * @cont: whether another operation will be chained
720 * @val: value to write
722 * Writes up to 4 bytes of data to the serial flash. The location of
723 * the write needs to be specified prior to calling this by issuing the
724 * appropriate commands to the serial flash.
726 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
729 if (!byte_cnt || byte_cnt > 4)
731 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
733 t3_write_reg(adapter, A_SF_DATA, val);
734 t3_write_reg(adapter, A_SF_OP,
735 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
736 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
740 * flash_wait_op - wait for a flash operation to complete
741 * @adapter: the adapter
742 * @attempts: max number of polls of the status register
743 * @delay: delay between polls in ms
745 * Wait for a flash operation to complete by polling the status register.
747 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
753 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
754 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
766 * t3_read_flash - read words from serial flash
767 * @adapter: the adapter
768 * @addr: the start address for the read
769 * @nwords: how many 32-bit words to read
770 * @data: where to store the read data
771 * @byte_oriented: whether to store data as bytes or as words
773 * Read the specified number of 32-bit words from the serial flash.
774 * If @byte_oriented is set the read data is stored as a byte array
775 * (i.e., big-endian), otherwise as 32-bit words in the platform's
778 int t3_read_flash(struct adapter *adapter, unsigned int addr,
779 unsigned int nwords, u32 *data, int byte_oriented)
783 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
786 addr = swab32(addr) | SF_RD_DATA_FAST;
788 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
789 (ret = sf1_read(adapter, 1, 1, data)) != 0)
792 for (; nwords; nwords--, data++) {
793 ret = sf1_read(adapter, 4, nwords > 1, data);
797 *data = htonl(*data);
803 * t3_write_flash - write up to a page of data to the serial flash
804 * @adapter: the adapter
805 * @addr: the start address to write
806 * @n: length of data to write
807 * @data: the data to write
809 * Writes up to a page of data (256 bytes) to the serial flash starting
810 * at the given address.
812 static int t3_write_flash(struct adapter *adapter, unsigned int addr,
813 unsigned int n, const u8 *data)
817 unsigned int i, c, left, val, offset = addr & 0xff;
819 if (addr + n > SF_SIZE || offset + n > 256)
822 val = swab32(addr) | SF_PROG_PAGE;
824 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
825 (ret = sf1_write(adapter, 4, 1, val)) != 0)
828 for (left = n; left; left -= c) {
830 for (val = 0, i = 0; i < c; ++i)
831 val = (val << 8) + *data++;
833 ret = sf1_write(adapter, c, c != left, val);
837 if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
840 /* Read the page to verify the write succeeded */
841 ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
845 if (memcmp(data - n, (u8 *) buf + offset, n))
851 * t3_get_tp_version - read the tp sram version
852 * @adapter: the adapter
853 * @vers: where to place the version
855 * Reads the protocol sram version from sram.
857 int t3_get_tp_version(struct adapter *adapter, u32 *vers)
861 /* Get version loaded in SRAM */
862 t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
863 ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
868 *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
874 * t3_check_tpsram_version - read the tp sram version
875 * @adapter: the adapter
876 * @must_load: set to 1 if loading a new microcode image is required
878 * Reads the protocol sram version from flash.
880 int t3_check_tpsram_version(struct adapter *adapter, int *must_load)
884 unsigned int major, minor;
886 if (adapter->params.rev == T3_REV_A)
891 ret = t3_get_tp_version(adapter, &vers);
895 major = G_TP_VERSION_MAJOR(vers);
896 minor = G_TP_VERSION_MINOR(vers);
898 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
901 if (major != TP_VERSION_MAJOR)
902 CH_ERR(adapter, "found wrong TP version (%u.%u), "
903 "driver needs version %d.%d\n", major, minor,
904 TP_VERSION_MAJOR, TP_VERSION_MINOR);
907 CH_ERR(adapter, "found wrong TP version (%u.%u), "
908 "driver compiled for version %d.%d\n", major, minor,
909 TP_VERSION_MAJOR, TP_VERSION_MINOR);
915 * t3_check_tpsram - check if provided protocol SRAM
916 * is compatible with this driver
917 * @adapter: the adapter
918 * @tp_sram: the firmware image to write
921 * Checks if an adapter's tp sram is compatible with the driver.
922 * Returns 0 if the versions are compatible, a negative error otherwise.
924 int t3_check_tpsram(struct adapter *adapter, u8 *tp_sram, unsigned int size)
928 const u32 *p = (const u32 *)tp_sram;
930 /* Verify checksum */
931 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
933 if (csum != 0xffffffff) {
934 CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
942 enum fw_version_type {
948 * t3_get_fw_version - read the firmware version
949 * @adapter: the adapter
950 * @vers: where to place the version
952 * Reads the FW version from flash.
954 int t3_get_fw_version(struct adapter *adapter, u32 *vers)
956 return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
960 * t3_check_fw_version - check if the FW is compatible with this driver
961 * @adapter: the adapter
963 * Checks if an adapter's FW is compatible with the driver. Returns 0
964 * if the versions are compatible, a negative error otherwise.
966 int t3_check_fw_version(struct adapter *adapter)
970 unsigned int type, major, minor;
972 ret = t3_get_fw_version(adapter, &vers);
976 type = G_FW_VERSION_TYPE(vers);
977 major = G_FW_VERSION_MAJOR(vers);
978 minor = G_FW_VERSION_MINOR(vers);
980 if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
981 minor == FW_VERSION_MINOR)
984 CH_ERR(adapter, "found wrong FW version(%u.%u), "
985 "driver needs version %u.%u\n", major, minor,
986 FW_VERSION_MAJOR, FW_VERSION_MINOR);
991 * t3_flash_erase_sectors - erase a range of flash sectors
992 * @adapter: the adapter
993 * @start: the first sector to erase
994 * @end: the last sector to erase
996 * Erases the sectors in the given range.
998 static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
1000 while (start <= end) {
1003 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1004 (ret = sf1_write(adapter, 4, 0,
1005 SF_ERASE_SECTOR | (start << 8))) != 0 ||
1006 (ret = flash_wait_op(adapter, 5, 500)) != 0)
1014 * t3_load_fw - download firmware
1015 * @adapter: the adapter
1016 * @fw_data: the firmware image to write
1019 * Write the supplied firmware image to the card's serial flash.
1020 * The FW image has the following sections: @size - 8 bytes of code and
1021 * data, followed by 4 bytes of FW version, followed by the 32-bit
1022 * 1's complement checksum of the whole image.
1024 int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
1028 const u32 *p = (const u32 *)fw_data;
1029 int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
1031 if ((size & 3) || size < FW_MIN_SIZE)
1033 if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
1036 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1037 csum += ntohl(p[i]);
1038 if (csum != 0xffffffff) {
1039 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1044 ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
1048 size -= 8; /* trim off version and checksum */
1049 for (addr = FW_FLASH_BOOT_ADDR; size;) {
1050 unsigned int chunk_size = min(size, 256U);
1052 ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
1057 fw_data += chunk_size;
1061 ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
1064 CH_ERR(adapter, "firmware download failed, error %d\n", ret);
1068 #define CIM_CTL_BASE 0x2000
1071 * t3_cim_ctl_blk_read - read a block from CIM control region
1073 * @adap: the adapter
1074 * @addr: the start address within the CIM control region
1075 * @n: number of words to read
1076 * @valp: where to store the result
1078 * Reads a block of 4-byte words from the CIM control region.
1080 int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
1081 unsigned int n, unsigned int *valp)
1085 if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1088 for ( ; !ret && n--; addr += 4) {
1089 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1090 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1093 *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1100 * t3_link_changed - handle interface link changes
1101 * @adapter: the adapter
1102 * @port_id: the port index that changed link state
1104 * Called when a port's link settings change to propagate the new values
1105 * to the associated PHY and MAC. After performing the common tasks it
1106 * invokes an OS-specific handler.
1108 void t3_link_changed(struct adapter *adapter, int port_id)
1110 int link_ok, speed, duplex, fc;
1111 struct port_info *pi = adap2pinfo(adapter, port_id);
1112 struct cphy *phy = &pi->phy;
1113 struct cmac *mac = &pi->mac;
1114 struct link_config *lc = &pi->link_config;
1116 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1118 if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1119 uses_xaui(adapter)) {
1122 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1123 link_ok ? F_TXACTENABLE | F_RXEN : 0);
1125 lc->link_ok = link_ok;
1126 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1127 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1128 if (lc->requested_fc & PAUSE_AUTONEG)
1129 fc &= lc->requested_fc;
1131 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1133 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1134 /* Set MAC speed, duplex, and flow control to match PHY. */
1135 t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1139 t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
1143 * t3_link_start - apply link configuration to MAC/PHY
1144 * @phy: the PHY to setup
1145 * @mac: the MAC to setup
1146 * @lc: the requested link configuration
1148 * Set up a port's MAC and PHY according to a desired link configuration.
1149 * - If the PHY can auto-negotiate first decide what to advertise, then
1150 * enable/disable auto-negotiation as desired, and reset.
1151 * - If the PHY does not auto-negotiate just reset it.
1152 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1153 * otherwise do it later based on the outcome of auto-negotiation.
1155 int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1157 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1160 if (lc->supported & SUPPORTED_Autoneg) {
1161 lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1163 lc->advertising |= ADVERTISED_Asym_Pause;
1165 lc->advertising |= ADVERTISED_Pause;
1167 phy->ops->advertise(phy, lc->advertising);
1169 if (lc->autoneg == AUTONEG_DISABLE) {
1170 lc->speed = lc->requested_speed;
1171 lc->duplex = lc->requested_duplex;
1172 lc->fc = (unsigned char)fc;
1173 t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1175 /* Also disables autoneg */
1176 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1177 phy->ops->reset(phy, 0);
1179 phy->ops->autoneg_enable(phy);
1181 t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1182 lc->fc = (unsigned char)fc;
1183 phy->ops->reset(phy, 0);
1189 * t3_set_vlan_accel - control HW VLAN extraction
1190 * @adapter: the adapter
1191 * @ports: bitmap of adapter ports to operate on
1192 * @on: enable (1) or disable (0) HW VLAN extraction
1194 * Enables or disables HW extraction of VLAN tags for the given port.
1196 void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1198 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1199 ports << S_VLANEXTRACTIONENABLE,
1200 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1204 unsigned int mask; /* bits to check in interrupt status */
1205 const char *msg; /* message to print or NULL */
1206 short stat_idx; /* stat counter to increment or -1 */
1207 unsigned short fatal:1; /* whether the condition reported is fatal */
1211 * t3_handle_intr_status - table driven interrupt handler
1212 * @adapter: the adapter that generated the interrupt
1213 * @reg: the interrupt status register to process
1214 * @mask: a mask to apply to the interrupt status
1215 * @acts: table of interrupt actions
1216 * @stats: statistics counters tracking interrupt occurences
1218 * A table driven interrupt handler that applies a set of masks to an
1219 * interrupt status word and performs the corresponding actions if the
1220 * interrupts described by the mask have occured. The actions include
1221 * optionally printing a warning or alert message, and optionally
1222 * incrementing a stat counter. The table is terminated by an entry
1223 * specifying mask 0. Returns the number of fatal interrupt conditions.
1225 static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1227 const struct intr_info *acts,
1228 unsigned long *stats)
/* Read the cause register and keep only the bits the caller cares about. */
1231 unsigned int status = t3_read_reg(adapter, reg) & mask;
/* Walk the action table; an entry with mask 0 terminates it. */
1233 for (; acts->mask; ++acts) {
1234 if (!(status & acts->mask))
/* NOTE(review): the test selecting alert vs. warn (presumably acts->fatal)
 * is on a line not visible in this view — confirm against full source. */
1238 CH_ALERT(adapter, "%s (0x%x)\n",
1239 acts->msg, status & acts->mask);
1240 } else if (acts->msg)
1241 CH_WARN(adapter, "%s (0x%x)\n",
1242 acts->msg, status & acts->mask);
/* Optionally bump the caller-supplied statistics counter. */
1243 if (acts->stat_idx >= 0)
1244 stats[acts->stat_idx]++;
1246 if (status) /* clear processed interrupts */
1247 t3_write_reg(adapter, reg, status);
/*
 * Per-module interrupt masks: the cause/enable bits each handler below
 * cares about.  Bits left in comments are deliberately excluded.
 */
1251 #define SGE_INTR_MASK (F_RSPQDISABLED)
1252 #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1253 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1255 #define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1256 #define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1257 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1258 F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
1259 #define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1260 F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1261 F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1262 F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1263 V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1264 V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
1265 #define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1266 F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1267 /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1268 V_BISTERR(M_BISTERR) | F_PEXERR)
1269 #define ULPRX_INTR_MASK F_PARERR
1270 #define ULPTX_INTR_MASK 0
1271 #define CPLSW_INTR_MASK (F_TP_FRAMING_ERROR | \
1272 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1273 F_ZERO_SWITCH_ERROR)
1274 #define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1275 F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1276 F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1277 F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT)
1278 #define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1279 V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1280 V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1281 #define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1282 V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1283 V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1284 #define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1285 V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1286 V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1287 V_MCAPARERRENB(M_MCAPARERRENB))
/* Top-level (PL) cause bits: one per module whose handler is dispatched
 * from t3_slow_intr_handler(). */
1288 #define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1289 F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1290 F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1291 F_MPS0 | F_CPL_SWITCH)
1294 * Interrupt handler for the PCIX1 module.
1296 static void pci_intr_handler(struct adapter *adapter)
/* Cause-bit decode table for t3_handle_intr_status(); a zero-mask entry
 * (not visible in this view) terminates it. */
1298 static const struct intr_info pcix1_intr_info[] = {
1299 {F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
1300 {F_SIGTARABT, "PCI signaled target abort", -1, 1},
1301 {F_RCVTARABT, "PCI received target abort", -1, 1},
1302 {F_RCVMSTABT, "PCI received master abort", -1, 1},
1303 {F_SIGSYSERR, "PCI signaled system error", -1, 1},
1304 {F_DETPARERR, "PCI detected parity error", -1, 1},
1305 {F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
1306 {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
1307 {F_RCVSPLCMPERR, "PCI received split completion error", -1,
/* Correctable ECC errors only bump a statistic; they are not fatal. */
1309 {F_DETCORECCERR, "PCI correctable ECC error",
1310 STAT_PCI_CORR_ECC, 0},
1311 {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
1312 {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1313 {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1315 {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1317 {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1319 {V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
/* Decode and clear; any fatal condition takes the adapter down. */
1324 if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1325 pcix1_intr_info, adapter->irq_stats))
1326 t3_fatal_err(adapter);
1330 * Interrupt handler for the PCIE module.
1332 static void pcie_intr_handler(struct adapter *adapter)
/* PCIe cause-bit decode table; all listed conditions are fatal. */
1334 static const struct intr_info pcie_intr_info[] = {
1335 {F_PEXERR, "PCI PEX error", -1, 1},
1337 "PCI unexpected split completion DMA read error", -1, 1},
1339 "PCI unexpected split completion DMA command error", -1, 1},
1340 {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1341 {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
1342 {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
1343 {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
1344 {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1345 "PCI MSI-X table/PBA parity error", -1, 1},
1346 {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
/* Decode and clear; any fatal condition takes the adapter down. */
1350 if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1351 pcie_intr_info, adapter->irq_stats))
1352 t3_fatal_err(adapter);
1356 * TP interrupt handler.
1358 static void tp_intr_handler(struct adapter *adapter)
/* TP uses raw bit patterns rather than named F_/V_ macros here. */
1360 static const struct intr_info tp_intr_info[] = {
1361 {0xffffff, "TP parity error", -1, 1},
1362 {0x1000000, "TP out of Rx pages", -1, 1},
1363 {0x2000000, "TP out of Tx pages", -1, 1},
/* All TP cause bits are considered; every table entry is fatal. */
1367 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1368 tp_intr_info, NULL))
1369 t3_fatal_err(adapter);
1373 * CIM interrupt handler.
1375 static void cim_intr_handler(struct adapter *adapter)
/* CIM access-violation decode table; every entry is fatal. */
1377 static const struct intr_info cim_intr_info[] = {
1378 {F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
1379 {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
1380 {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
1381 {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
1382 {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
1383 {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
1384 {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
1385 {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
1386 {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
1387 {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
1388 {F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
1389 {F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
1393 if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1394 cim_intr_info, NULL))
1395 t3_fatal_err(adapter);
1399 * ULP RX interrupt handler.
1401 static void ulprx_intr_handler(struct adapter *adapter)
/* Only a single (fatal) parity-error condition for ULP RX. */
1403 static const struct intr_info ulprx_intr_info[] = {
1404 {F_PARERR, "ULP RX parity error", -1, 1},
1408 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1409 ulprx_intr_info, NULL))
1410 t3_fatal_err(adapter);
1414 * ULP TX interrupt handler.
1416 static void ulptx_intr_handler(struct adapter *adapter)
/* PBL out-of-bounds conditions are counted (irq_stats) but non-fatal. */
1418 static const struct intr_info ulptx_intr_info[] = {
1419 {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1420 STAT_ULP_CH0_PBL_OOB, 0},
1421 {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1422 STAT_ULP_CH1_PBL_OOB, 0},
1426 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1427 ulptx_intr_info, adapter->irq_stats))
1428 t3_fatal_err(adapter);
/* Aggregate framing-error bits for the PM TX SPI-4 interfaces. */
1431 #define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1432 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1433 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1434 F_ICSPI1_TX_FRAMING_ERROR)
1435 #define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1436 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1437 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1438 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1441 * PM TX interrupt handler.
1443 static void pmtx_intr_handler(struct adapter *adapter)
/* PM TX decode table: zero-length pcmd, framing and parity errors. */
1445 static const struct intr_info pmtx_intr_info[] = {
1446 {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
1447 {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
1448 {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
1449 {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1450 "PMTX ispi parity error", -1, 1},
1451 {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1452 "PMTX ospi parity error", -1, 1},
1456 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1457 pmtx_intr_info, NULL))
1458 t3_fatal_err(adapter);
/* Aggregate framing-error bits for the PM RX SPI-4 interfaces. */
1461 #define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1462 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1463 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1464 F_IESPI1_TX_FRAMING_ERROR)
1465 #define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1466 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1467 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1468 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1471 * PM RX interrupt handler.
1473 static void pmrx_intr_handler(struct adapter *adapter)
/* PM RX decode table: zero-length pcmd, framing and parity errors. */
1475 static const struct intr_info pmrx_intr_info[] = {
1476 {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1477 {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1478 {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1479 {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1480 "PMRX ispi parity error", -1, 1},
1481 {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1482 "PMRX ospi parity error", -1, 1},
1486 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1487 pmrx_intr_info, NULL))
1488 t3_fatal_err(adapter);
1492 * CPL switch interrupt handler.
1494 static void cplsw_intr_handler(struct adapter *adapter)
/* CPL switch decode table; CIM overflow is deliberately excluded. */
1496 static const struct intr_info cplsw_intr_info[] = {
1497 /* { F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1 }, */
1498 {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1499 {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1500 {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1501 {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1505 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1506 cplsw_intr_info, NULL))
1507 t3_fatal_err(adapter);
1511 * MPS interrupt handler.
1513 static void mps_intr_handler(struct adapter *adapter)
/* Any of the low 9 MPS cause bits is reported as a fatal parity error. */
1515 static const struct intr_info mps_intr_info[] = {
1516 {0x1ff, "MPS parity error", -1, 1},
1520 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1521 mps_intr_info, NULL))
1522 t3_fatal_err(adapter);
1525 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1528 * MC7 interrupt handler.
1530 static void mc7_intr_handler(struct mc7 *mc7)
1532 struct adapter *adapter = mc7->adapter;
1533 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
/* NOTE(review): the guard conditions for each branch below (presumably
 * tests of F_CE, F_UE, the PE field and F_AE in @cause) are on lines not
 * visible in this view — confirm against full source. */
/* Correctable error: count it and log the failing address/data. */
1536 mc7->stats.corr_err++;
1537 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1538 "data 0x%x 0x%x 0x%x\n", mc7->name,
1539 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1540 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1541 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1542 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
/* Uncorrectable error: count and alert with address/data. */
1546 mc7->stats.uncorr_err++;
1547 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1548 "data 0x%x 0x%x 0x%x\n", mc7->name,
1549 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1550 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1551 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1552 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
/* Parity error: count and report the PE field extracted from cause. */
1556 mc7->stats.parity_err++;
1557 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1558 mc7->name, G_PE(cause));
/* Address error: the error address register only exists on rev > 0. */
1564 if (adapter->params.rev > 0)
1565 addr = t3_read_reg(adapter,
1566 mc7->offset + A_MC7_ERR_ADDR);
1567 mc7->stats.addr_err++;
1568 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
/* Take the adapter down on any fatal MC7 condition, then ack cause. */
1572 if (cause & MC7_INTR_FATAL)
1573 t3_fatal_err(adapter);
1575 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
/* XGMAC conditions treated as fatal by mac_intr_handler(): FIFO parity. */
1578 #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1579 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1581 * XGMAC interrupt handler.
1583 static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1585 struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1586 u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
/* Parity errors are alerted; the remaining conditions only bump stats. */
1588 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1589 mac->stats.tx_fifo_parity_err++;
1590 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1592 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1593 mac->stats.rx_fifo_parity_err++;
1594 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1596 if (cause & F_TXFIFO_UNDERRUN)
1597 mac->stats.tx_fifo_urun++;
1598 if (cause & F_RXFIFO_OVERFLOW)
1599 mac->stats.rx_fifo_ovfl++;
1600 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1601 mac->stats.serdes_signal_loss++;
1602 if (cause & F_XAUIPCSCTCERR)
1603 mac->stats.xaui_pcs_ctc_err++;
1604 if (cause & F_XAUIPCSALIGNCHANGE)
1605 mac->stats.xaui_pcs_align_change++;
/* Ack the cause bits, then escalate FIFO parity errors.
 * NOTE(review): the return statements are on lines not visible here. */
1607 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1608 if (cause & XGM_INTR_FATAL)
1614 * Interrupt handler for PHY events.
1616 int t3_phy_intr_handler(struct adapter *adapter)
1618 u32 mask, gpi = adapter_info(adapter)->gpio_intr;
1619 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1621 for_each_port(adapter, i) {
1622 struct port_info *p = adap2pinfo(adapter, i);
/* Isolate the lowest set GPIO bit: each port consumes one GPIO line. */
1624 mask = gpi - (gpi & (gpi - 1));
/* Ports whose PHY cannot interrupt are skipped. */
1627 if (!(p->port_type->caps & SUPPORTED_IRQ))
/* Let the PHY driver decode its own cause and act on link/FIFO events. */
1631 int phy_cause = p->phy.ops->intr_handler(&p->phy);
1633 if (phy_cause & cphy_cause_link_change)
1634 t3_link_changed(adapter, i);
1635 if (phy_cause & cphy_cause_fifo_error)
1636 p->phy.fifo_errors++;
/* Ack the GPIO cause bits that were processed. */
1640 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1645 * T3 slow path (non-data) interrupt handler.
1647 int t3_slow_intr_handler(struct adapter *adapter)
1649 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
/* Only service modules the driver has enabled (see t3_intr_enable). */
1651 cause &= adapter->slow_intr_mask;
/* Fan the top-level cause bits out to the per-module handlers. */
1654 if (cause & F_PCIM0) {
1655 if (is_pcie(adapter))
1656 pcie_intr_handler(adapter);
1658 pci_intr_handler(adapter);
1661 t3_sge_err_intr_handler(adapter);
1662 if (cause & F_MC7_PMRX)
1663 mc7_intr_handler(&adapter->pmrx);
1664 if (cause & F_MC7_PMTX)
1665 mc7_intr_handler(&adapter->pmtx);
1666 if (cause & F_MC7_CM)
1667 mc7_intr_handler(&adapter->cm);
1669 cim_intr_handler(adapter);
1671 tp_intr_handler(adapter);
1672 if (cause & F_ULP2_RX)
1673 ulprx_intr_handler(adapter);
1674 if (cause & F_ULP2_TX)
1675 ulptx_intr_handler(adapter);
1676 if (cause & F_PM1_RX)
1677 pmrx_intr_handler(adapter);
1678 if (cause & F_PM1_TX)
1679 pmtx_intr_handler(adapter);
1680 if (cause & F_CPL_SWITCH)
1681 cplsw_intr_handler(adapter);
1683 mps_intr_handler(adapter);
1685 t3_mc5_intr_handler(&adapter->mc5);
1686 if (cause & F_XGMAC0_0)
1687 mac_intr_handler(adapter, 0);
1688 if (cause & F_XGMAC0_1)
1689 mac_intr_handler(adapter, 1);
/* T3DBG carries the external (GPIO/PHY) interrupt. */
1690 if (cause & F_T3DBG)
1691 t3_os_ext_intr_handler(adapter);
1693 /* Clear the interrupts just processed. */
1694 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1695 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1700 * t3_intr_enable - enable interrupts
1701 * @adapter: the adapter whose interrupts should be enabled
1703 * Enable interrupts by setting the interrupt enable registers of the
1704 * various HW modules and then enabling the top-level interrupt
1707 void t3_intr_enable(struct adapter *adapter)
/* Bulk list of (enable-register, mask) pairs written in one pass below.
 * The PMTX/CM MC7 instances reuse the PMRX register layout, hence the
 * base-address arithmetic. */
1709 static const struct addr_val_pair intr_en_avp[] = {
1710 {A_SG_INT_ENABLE, SGE_INTR_MASK},
1711 {A_MC7_INT_ENABLE, MC7_INTR_MASK},
1712 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1714 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1716 {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1717 {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1718 {A_TP_INT_ENABLE, 0x3bfffff},
1719 {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1720 {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1721 {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1722 {A_MPS_INT_ENABLE, MPS_INTR_MASK},
/* Remember which top-level causes are live for t3_slow_intr_handler(). */
1725 adapter->slow_intr_mask = PL_INTR_MASK;
1727 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
/* Rev > 0 silicon supports extra CPL/ULPTX conditions. */
1729 if (adapter->params.rev > 0) {
1730 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1731 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1732 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1733 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1734 F_PBL_BOUND_ERR_CH1);
1736 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1737 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
/* GPIO interrupts (PHYs) are active-low on this board's lines. */
1740 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
1741 adapter_info(adapter)->gpio_intr);
1742 t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
1743 adapter_info(adapter)->gpio_intr);
1744 if (is_pcie(adapter))
1745 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1747 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
/* Finally open the top-level concentrator and flush the posted write. */
1748 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1749 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1753 * t3_intr_disable - disable a card's interrupts
1754 * @adapter: the adapter whose interrupts should be disabled
1756 * Disable interrupts. We only disable the top-level interrupt
1757 * concentrator and the SGE data interrupts.
1759 void t3_intr_disable(struct adapter *adapter)
/* Mask everything at the top-level concentrator and flush the write. */
1761 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1762 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1763 adapter->slow_intr_mask = 0;
1767 * t3_intr_clear - clear all interrupts
1768 * @adapter: the adapter whose interrupts should be cleared
1770 * Clears all interrupts.
1772 void t3_intr_clear(struct adapter *adapter)
/* Cause registers to blanket-clear; the MC7 PMTX/CM entries are derived
 * from the PMRX register layout via base-address arithmetic. */
1774 static const unsigned int cause_reg_addr[] = {
1776 A_SG_RSPQ_FL_STATUS,
1779 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1780 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1781 A_CIM_HOST_INT_CAUSE,
1794 /* Clear PHY and MAC interrupts for each port. */
1795 for_each_port(adapter, i)
1796 t3_port_intr_clear(adapter, i);
/* Write all-ones to each per-module cause register, then the top level. */
1798 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
1799 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
1801 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
1802 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1806 * t3_port_intr_enable - enable port-specific interrupts
1807 * @adapter: associated adapter
1808 * @idx: index of port whose interrupts should be enabled
1810 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
1813 void t3_port_intr_enable(struct adapter *adapter, int idx)
1815 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
/* Enable the MAC interrupt mask, flush, then let the PHY enable its own. */
1817 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
1818 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1819 phy->ops->intr_enable(phy);
1823 * t3_port_intr_disable - disable port-specific interrupts
1824 * @adapter: associated adapter
1825 * @idx: index of port whose interrupts should be disabled
1827 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
1830 void t3_port_intr_disable(struct adapter *adapter, int idx)
1832 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
/* Mask the MAC interrupts, flush, then let the PHY disable its own. */
1834 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
1835 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1836 phy->ops->intr_disable(phy);
1840 * t3_port_intr_clear - clear port-specific interrupts
1841 * @adapter: associated adapter
1842 * @idx: index of port whose interrupts to clear
1844 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
1847 void t3_port_intr_clear(struct adapter *adapter, int idx)
1849 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
/* Ack all MAC cause bits, flush, then let the PHY clear its own. */
1851 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
1852 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
1853 phy->ops->intr_clear(phy);
1857 * t3_sge_write_context - write an SGE context
1858 * @adapter: the adapter
1859 * @id: the context id
1860 * @type: the context type
1862 * Program an SGE context with the values already loaded in the
1863 * CONTEXT_DATA? registers.
1865 static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
/* All-ones masks: write every bit of the preloaded CONTEXT_DATA0-3. */
1868 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
1869 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
1870 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
1871 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
/* Opcode 1 = context write; then poll for the busy bit to clear. */
1872 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1873 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
1874 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1879 * t3_sge_init_ecntxt - initialize an SGE egress context
1880 * @adapter: the adapter to configure
1881 * @id: the context id
1882 * @gts_enable: whether to enable GTS for the context
1883 * @type: the egress context type
1884 * @respq: associated response queue
1885 * @base_addr: base address of queue
1886 * @size: number of queue entries
1888 * @gen: initial generation value for the context
1889 * @cidx: consumer pointer
1891 * Initialize an SGE egress context and make it ready for use. If the
1892 * platform allows concurrent context operations, the caller is
1893 * responsible for appropriate locking.
1895 int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
1896 enum sge_context_type type, int respq, u64 base_addr,
1897 unsigned int size, unsigned int token, int gen,
/* Offload queues start with no credits; others get the FW WR quota. */
1900 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
1902 if (base_addr & 0xfff) /* must be 4K aligned */
/* Refuse to queue behind an in-flight context command. */
1904 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Pack the egress context across the four CONTEXT_DATA words. */
1908 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
1909 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
1910 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
1911 V_EC_BASE_LO(base_addr & 0xffff));
1913 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
1915 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1916 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
1917 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
1919 return t3_sge_write_context(adapter, id, F_EGRESS);
1923 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
1924 * @adapter: the adapter to configure
1925 * @id: the context id
1926 * @gts_enable: whether to enable GTS for the context
1927 * @base_addr: base address of queue
1928 * @size: number of queue entries
1929 * @bsize: size of each buffer for this queue
1930 * @cong_thres: threshold to signal congestion to upstream producers
1931 * @gen: initial generation value for the context
1932 * @cidx: consumer pointer
1934 * Initialize an SGE free list context and make it ready for use. The
1935 * caller is responsible for ensuring only one context operation occurs
1938 int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
1939 int gts_enable, u64 base_addr, unsigned int size,
1940 unsigned int bsize, unsigned int cong_thres, int gen,
1943 if (base_addr & 0xfff) /* must be 4K aligned */
/* Refuse to queue behind an in-flight context command. */
1945 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Pack the free-list context across the four CONTEXT_DATA words; the
 * consumer index and buffer size are split across word boundaries. */
1949 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
1951 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
1952 V_FL_BASE_HI((u32) base_addr) |
1953 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
1954 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
1955 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
1956 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
1957 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1958 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
1959 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
1960 return t3_sge_write_context(adapter, id, F_FREELIST);
1964 * t3_sge_init_rspcntxt - initialize an SGE response queue context
1965 * @adapter: the adapter to configure
1966 * @id: the context id
1967 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
1968 * @base_addr: base address of queue
1969 * @size: number of queue entries
1970 * @fl_thres: threshold for selecting the normal or jumbo free list
1971 * @gen: initial generation value for the context
1972 * @cidx: consumer pointer
1974 * Initialize an SGE response queue context and make it ready for use.
1975 * The caller is responsible for ensuring only one context operation
1978 int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
1979 int irq_vec_idx, u64 base_addr, unsigned int size,
1980 unsigned int fl_thres, int gen, unsigned int cidx)
1982 unsigned int intr = 0;
1984 if (base_addr & 0xfff) /* must be 4K aligned */
/* Refuse to queue behind an in-flight context command. */
1986 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1990 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
1992 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
/* irq_vec_idx < 0 means no IRQ for this queue; otherwise enable and
 * route to the given MSI-X vector. */
1994 if (irq_vec_idx >= 0)
1995 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
1996 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
1997 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
1998 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
1999 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2003 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2004 * @adapter: the adapter to configure
2005 * @id: the context id
2006 * @base_addr: base address of queue
2007 * @size: number of queue entries
2008 * @rspq: response queue for async notifications
2009 * @ovfl_mode: CQ overflow mode
2010 * @credits: completion queue credits
2011 * @credit_thres: the credit threshold
2013 * Initialize an SGE completion queue context and make it ready for use.
2014 * The caller is responsible for ensuring only one context operation
2017 int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2018 unsigned int size, int rspq, int ovfl_mode,
2019 unsigned int credits, unsigned int credit_thres)
2021 if (base_addr & 0xfff) /* must be 4K aligned */
/* Refuse to queue behind an in-flight context command. */
2023 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Pack the CQ context; generation always starts at 1. */
2027 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2028 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2030 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2031 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
2032 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode));
2033 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2034 V_CQ_CREDIT_THRES(credit_thres));
2035 return t3_sge_write_context(adapter, id, F_CQ);
2039 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2040 * @adapter: the adapter
2041 * @id: the egress context id
2042 * @enable: enable (1) or disable (0) the context
2044 * Enable or disable an SGE egress context. The caller is responsible for
2045 * ensuring only one context operation occurs at a time.
2047 int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
2049 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Masked write: only the EC_VALID bit of the egress context changes. */
2052 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2053 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2054 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2055 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2056 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2057 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2058 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2059 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2064 * t3_sge_disable_fl - disable an SGE free-buffer list
2065 * @adapter: the adapter
2066 * @id: the free list context id
2068 * Disable an SGE free-buffer list. The caller is responsible for
2069 * ensuring only one context operation occurs at a time.
2071 int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
2073 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Masked write: zero only the FL_SIZE field, which disables the list. */
2076 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2077 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2078 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2079 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2080 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2081 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2082 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2083 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2088 * t3_sge_disable_rspcntxt - disable an SGE response queue
2089 * @adapter: the adapter
2090 * @id: the response queue context id
2092 * Disable an SGE response queue. The caller is responsible for
2093 * ensuring only one context operation occurs at a time.
2095 int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2097 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Masked write: zero only the CQ_SIZE field, which disables the queue. */
2100 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2101 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2102 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2103 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2104 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2105 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2106 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2107 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2112 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2113 * @adapter: the adapter
2114 * @id: the completion queue context id
2116 * Disable an SGE completion queue. The caller is responsible for
2117 * ensuring only one context operation occurs at a time.
2119 int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2121 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Masked write: zero only the CQ_SIZE field, which disables the CQ. */
2124 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2125 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2126 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2127 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2128 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2129 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2130 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2131 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2136 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2137 * @adapter: the adapter
2138 * @id: the context id
2139 * @op: the operation to perform
2141 * Perform the selected operation on an SGE completion queue context.
2142 * The caller is responsible for ensuring only one context operation
2145 int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2146 unsigned int credits)
2150 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Credits travel in the upper half of DATA0 for CQ credit-return ops. */
2153 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16)
2154 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2155 V_CONTEXT(id) | F_CQ);
2156 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
/* Ops 2..6 return the CQ index.  Rev > 0 chips report it directly in
 * the command result; rev 0 needs an explicit context read-back. */
2160 if (op >= 2 && op < 7) {
2161 if (adapter->params.rev > 0)
2162 return G_CQ_INDEX(val);
2164 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2165 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2166 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2167 F_CONTEXT_CMD_BUSY, 0, 5, 1))
2169 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2175 * t3_sge_read_context - read an SGE context
2176 * @type: the context type
2177 * @adapter: the adapter
2178 * @id: the context id
2179 * @data: holds the retrieved context
2181 * Read an SGE egress context. The caller is responsible for ensuring
2182 * only one context operation occurs at a time.
2184 static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2185 unsigned int id, u32 data[4])
2187 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Opcode 0 = context read; poll, then harvest the four data words. */
2190 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2191 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2192 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2195 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2196 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2197 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2198 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2203 * t3_sge_read_ecntxt - read an SGE egress context
2204 * @adapter: the adapter
2205 * @id: the context id
2206 * @data: holds the retrieved context
2208 * Read an SGE egress context. The caller is responsible for ensuring
2209 * only one context operation occurs at a time.
2211 int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
/* NOTE(review): an id range check (and its error return) is on lines not
 * visible in this view — confirm against full source. */
2215 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2219 * t3_sge_read_cq - read an SGE CQ context
2220 * @adapter: the adapter
2221 * @id: the context id
2222 * @data: holds the retrieved context
2224 * Read an SGE CQ context. The caller is responsible for ensuring
2225 * only one context operation occurs at a time.
2227 int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
/* Thin wrapper: select the CQ context type and delegate. */
2231 return t3_sge_read_context(F_CQ, adapter, id, data);
2235 * t3_sge_read_fl - read an SGE free-list context
2236 * @adapter: the adapter
2237 * @id: the context id
2238 * @data: holds the retrieved context
2240 * Read an SGE free-list context. The caller is responsible for ensuring
2241 * only one context operation occurs at a time.
2243 int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
/* Two free lists per queue set, hence the SGE_QSETS * 2 bound. */
2245 if (id >= SGE_QSETS * 2)
2247 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2251 * t3_sge_read_rspq - read an SGE response queue context
2252 * @adapter: the adapter
2253 * @id: the context id
2254 * @data: holds the retrieved context
2256 * Read an SGE response queue context. The caller is responsible for
2257 * ensuring only one context operation occurs at a time.
2259 int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
/* One response queue per queue set. */
2261 if (id >= SGE_QSETS)
2263 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2267 * t3_config_rss - configure Rx packet steering
2268 * @adapter: the adapter
2269 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2270 * @cpus: values for the CPU lookup table (0xff terminated)
2271 * @rspq: values for the response queue lookup table (0xffff terminated)
2273 * Programs the receive packet steering logic. @cpus and @rspq provide
2274 * the values for the CPU and response queue lookup tables. If they
2275 * provide fewer values than the size of the tables the supplied values
2276 * are used repeatedly until the tables are fully populated.
2278 void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2279 const u8 * cpus, const u16 *rspq)
2281 int i, j, cpu_idx = 0, q_idx = 0;
/* Fill the CPU lookup table; each entry packs two 6-bit CPU values. */
2284 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2287 for (j = 0; j < 2; ++j) {
2288 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
/* 0xff terminator: wrap back to the start of @cpus (index reset elided). */
2289 if (cpus[cpu_idx] == 0xff)
2292 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
/* Fill the response queue map; 0xffff terminator wraps @rspq likewise. */
2296 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2297 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2298 (i << 16) | rspq[q_idx++]);
2299 if (rspq[q_idx] == 0xffff)
/* Finally commit the global RSS configuration. */
2303 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2307 * t3_read_rss - read the contents of the RSS tables
2308 * @adapter: the adapter
2309 * @lkup: holds the contents of the RSS lookup table
2310 * @map: holds the contents of the RSS map table
2312 * Reads the contents of the receive packet steering tables.
2314 int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
/* Read back the CPU lookup table one entry at a time. */
2320 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2321 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2323 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
/* Bit 31 appears to be a "valid" flag; error return path is elided here. */
2324 if (!(val & 0x80000000))
2327 *lkup++ = (val >> 8);
/* Read back the response queue map table the same way. */
2331 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2332 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2334 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2335 if (!(val & 0x80000000))
2343 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2344 * @adap: the adapter
2345 * @enable: 1 to select offload mode, 0 for regular NIC
2347 * Switches TP to NIC/offload mode.
2349 void t3_tp_set_offload_mode(struct adapter *adap, int enable)
/*
 * Only offload-capable adapters may leave NIC mode; any adapter may
 * switch back to NIC mode.  NICMODE is the inverse of @enable.
 */
2351 if (is_offload(adap) || !enable)
2352 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2353 V_NICMODE(!enable));
2357 * pm_num_pages - calculate the number of pages of the payload memory
2358 * @mem_size: the size of the payload memory
2359 * @pg_size: the size of each payload memory page
2361 * Calculate the number of pages, each of the given size, that fit in a
2362 * memory of the specified size, respecting the HW requirement that the
2363 * number of pages must be a multiple of 24.
2365 static inline unsigned int pm_num_pages(unsigned int mem_size,
2366 unsigned int pg_size)
/* Raw page count; the rounding down to a multiple of 24 is elided here. */
2368 unsigned int n = mem_size / pg_size;
/*
 * Program a memory region's base address register and advance the running
 * offset (the advance by (size) is on an elided continuation line).
 * Multi-statement macro; callers use it as a statement.
 */
2373 #define mem_region(adap, start, size, reg) \
2374 t3_write_reg((adap), A_ ## reg, (start)); \
2378 * partition_mem - partition memory and configure TP memory settings
2379 * @adap: the adapter
2380 * @p: the TP parameters
2382 * Partitions context and payload memory and configures TP's memory
2385 static void partition_mem(struct adapter *adap, const struct tp_params *p)
2387 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2388 unsigned int timers = 0, timers_shift = 22;
/* Rev > 0 parts scale the timer region with the TID count (values elided). */
2390 if (adap->params.rev > 0) {
2391 if (tids <= 16 * 1024) {
2394 } else if (tids <= 64 * 1024) {
2397 } else if (tids <= 256 * 1024) {
/* Per-channel Rx/Tx payload memory sizes share one register. */
2403 t3_write_reg(adap, A_TP_PMM_SIZE,
2404 p->chan_rx_size | (p->chan_tx_size >> 16))
2406 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2407 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2408 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
/* TXDATAACKIDX encodes log2 of the Tx page size, offset by 4K (2^12). */
2409 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2410 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2412 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2413 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2414 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2416 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2417 /* Add a bit of headroom and make multiple of 24 */
2419 pstructs -= pstructs % 24;
2420 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
/* Carve up CM memory: TCBs first, then SGE contexts, timers, pstructs. */
2422 m = tids * TCB_SIZE;
2423 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2424 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2425 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2426 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2427 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2428 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2429 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2430 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
/* The remainder (4K-aligned) goes to the CIM's SDRAM window. */
2432 m = (m + 4095) & ~0xfff;
2433 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2434 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
/* Recompute how many TIDs the leftover CM memory supports and give any
 * shortfall/surplus back to the server region of the MC5. */
2436 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2437 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2438 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2440 adap->params.mc5.nservers += m - tids;
/* Write a TP register through the indirect PIO address/data pair. */
2443 static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2446 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2447 t3_write_reg(adap, A_TP_PIO_DATA, val);
/* One-time TP (Transport Processor) register configuration after reset. */
2450 static void tp_config(struct adapter *adap, const struct tp_params *p)
/* Global options: Tx pacing, path-MTU, and full checksum offload. */
2452 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2453 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2454 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
/* TCP options: window scaling and SACK on, timestamps off. */
2455 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2456 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2457 V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
/* Delayed-ACK state machine thresholds. */
2458 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2459 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2460 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2461 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
/* Start in NIC mode with IPv6 enabled. */
2462 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_IPV6ENABLE | F_NICMODE,
2463 F_IPV6ENABLE | F_NICMODE);
2464 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2465 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2466 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2467 adap->params.rev > 0 ? F_ENABLEESND :
2470 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2472 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
2473 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2474 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0);
/* NOTE(review): PROXY_FLOW_CNTL is written twice with different values
 * back to back; the second write (1000) is the one that sticks. */
2475 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2476 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
/* Revision-specific tuning: auto pacing for rev > 0, fixed pacing for rev 0. */
2478 if (adap->params.rev > 0) {
2479 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2480 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2482 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2483 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2485 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
/* Zero the Tx modulation queue weights and cap the modulation rate. */
2487 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2488 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2489 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2490 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2493 /* Desired TP timer resolution in usec */
2494 #define TP_TMR_RES 50
2496 /* TCP timer values in ms */
2497 #define TP_DACK_TIMER 50
2498 #define TP_RTO_MIN 250
2501 * tp_set_timers - set TP timing parameters
2502 * @adap: the adapter to set
2503 * @core_clk: the core clock frequency in Hz
2505 * Set TP's timing parameters, such as the various timer resolutions and
2506 * the TCP timer values.
2508 static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
/* Resolutions are log2 clock dividers derived from the core clock. */
2510 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2511 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2512 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2513 unsigned int tps = core_clk >> tre; /* timer ticks per second */
2515 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2516 V_DELAYEDACKRESOLUTION(dack_re) |
2517 V_TIMESTAMPRESOLUTION(tstamp_re));
2518 t3_write_reg(adap, A_TP_DACK_TIMER,
2519 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
/* Exponential backoff table: shift counts 0..15 packed four per register. */
2520 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2521 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2522 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2523 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2524 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2525 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2526 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
/* Readability macro: "N SECONDS" expands to N * tps (timer ticks). */
2529 #define SECONDS * tps
2531 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2532 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2533 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2534 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2535 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2536 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2537 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2538 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2539 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2545 * t3_tp_set_coalescing_size - set receive coalescing size
2546 * @adap: the adapter
2547 * @size: the receive coalescing size
2548 * @psh: whether a set PSH bit should deliver coalesced data
2550 * Set the receive coalescing size and PSH bit handling.
2552 int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2556 if (size > MAX_RX_COALESCING_LEN)
/* Read-modify-write: clear both coalescing bits, then re-enable as needed. */
2559 val = t3_read_reg(adap, A_TP_PARA_REG3);
2560 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2563 val |= F_RXCOALESCEENABLE;
2565 val |= F_RXCOALESCEPSHEN;
2566 size = min(MAX_RX_COALESCING_LEN, size);
2567 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2568 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2570 t3_write_reg(adap, A_TP_PARA_REG3, val);
2575 * t3_tp_set_max_rxsize - set the max receive size
2576 * @adap: the adapter
2577 * @size: the max receive size
2579 * Set TP's max receive size. This is the limit that applies when
2580 * receive coalescing is disabled.
2582 void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
/* The same limit is programmed for both PM transfer-length fields. */
2584 t3_write_reg(adap, A_TP_PARA_REG7,
2585 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
/* Populate the default MTU table (table body elided in this view). */
2588 static void __devinit init_mtus(unsigned short mtus[])
2591 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2592 * it can accommodate max size TCP/IP headers when SACK and timestamps
2593 * are enabled and still have at least 8 bytes of payload.
2614 * Initial congestion control parameters.
/* @a: additive-increase (alpha) values; @b: backoff (beta) values.
 * Entries are indexed by congestion window bucket. */
2616 static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
2618 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2643 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2646 b[13] = b[14] = b[15] = b[16] = 3;
2647 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2648 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2653 /* The minimum additive increment value for the congestion control table */
2654 #define CC_MIN_INCR 2U
2657 * t3_load_mtus - write the MTU and congestion control HW tables
2658 * @adap: the adapter
2659 * @mtus: the unrestricted values for the MTU table
2660 * @alpha: the values for the congestion control alpha parameter
2661 * @beta: the values for the congestion control beta parameter
2662 * @mtu_cap: the maximum permitted effective MTU
2664 * Write the MTU table with the supplied MTUs capping each at &mtu_cap.
2665 * Update the high-speed congestion control table with the supplied alpha,
2668 void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2669 unsigned short alpha[NCCTRL_WIN],
2670 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
/* Average packets per congestion window, one entry per window bucket. */
2672 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2673 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2674 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2675 28672, 40960, 57344, 81920, 114688, 163840, 229376
2680 for (i = 0; i < NMTUS; ++i) {
2681 unsigned int mtu = min(mtus[i], mtu_cap);
2682 unsigned int log2 = fls(mtu);
/* Round log2 down when the next-lower bit isn't set, per HW encoding. */
2684 if (!(mtu & ((1 << log2) >> 2))) /* round */
2686 t3_write_reg(adap, A_TP_MTU_TABLE,
2687 (i << 24) | (log2 << 16) | mtu);
/* Per (MTU, window) additive increment, floored by CC_MIN_INCR;
 * 40 bytes = TCP/IP header overhead subtracted from the MTU. */
2689 for (w = 0; w < NCCTRL_WIN; ++w) {
2692 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2695 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2696 (w << 16) | (beta[w] << 13) | inc);
2702 * t3_read_hw_mtus - returns the values in the HW MTU table
2703 * @adap: the adapter
2704 * @mtus: where to store the HW MTU values
2706 * Reads the HW MTU table.
2708 void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
2712 for (i = 0; i < NMTUS; ++i) {
/* 0xff000000 selects read mode; the low bits select the table index. */
2715 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2716 val = t3_read_reg(adap, A_TP_MTU_TABLE);
/* The MTU value occupies the low 14 bits of the readback. */
2717 mtus[i] = val & 0x3fff;
2722 * t3_get_cong_cntl_tab - reads the congestion control table
2723 * @adap: the adapter
2724 * @incr: where to store the alpha values
2726 * Reads the additive increments programmed into the HW congestion
2729 void t3_get_cong_cntl_tab(struct adapter *adap,
2730 unsigned short incr[NMTUS][NCCTRL_WIN])
2732 unsigned int mtu, w;
/* 0xffff0000 selects read mode; address = (mtu index << 5) | window. */
2734 for (mtu = 0; mtu < NMTUS; ++mtu)
2735 for (w = 0; w < NCCTRL_WIN; ++w) {
2736 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2737 0xffff0000 | (mtu << 5) | w);
2738 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
2744 * t3_tp_get_mib_stats - read TP's MIB counters
2745 * @adap: the adapter
2746 * @tps: holds the returned counter values
2748 * Returns the values of TP's MIB counters.
2750 void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
/* Bulk-read the whole MIB block, one u32 word per indirect read. */
2752 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2753 sizeof(*tps) / sizeof(u32), 0);
/*
 * Program a ULP Rx region's low/high limits and advance the running start
 * offset (the advance is on an elided continuation line).
 */
2756 #define ulp_region(adap, name, start, len) \
2757 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2758 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2759 (start) + (len) - 1); \
/* Same as ulp_region but for ULP Tx registers; does not advance (start). */
2762 #define ulptx_region(adap, name, start, len) \
2763 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2764 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2765 (start) + (len) - 1)
/* Partition the per-channel Rx payload memory among the ULP consumers. */
2767 static void ulp_config(struct adapter *adap, const struct tp_params *p)
2769 unsigned int m = p->chan_rx_size;
/* iSCSI and TDDP each get 1/8; the RDMA regions each get 1/4. */
2771 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2772 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2773 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2774 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2775 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
/* ULPTX and ULPRX each have their own PBL region registers. */
2776 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2777 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2778 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2782 * t3_set_proto_sram - set the contents of the protocol sram
2783 * @adap: the adapter
2784 * @data: the protocol image
2786 * Write the contents of the protocol SRAM.
2788 int t3_set_proto_sram(struct adapter *adap, u8 *data)
2791 u32 *buf = (u32 *)data;
/* Each SRAM line is five 32-bit words, written high field to low. */
2793 for (i = 0; i < PROTO_SRAM_LINES; i++) {
2794 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
2795 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
2796 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
2797 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
2798 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
/* Bit 31 triggers the write of line i; wait for it to self-clear.
 * NOTE(review): `1 << 31` left-shifts into the sign bit of int —
 * consider 1U << 31 to avoid implementation-defined behavior. */
2800 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2801 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2804 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
/*
 * Program one of the two HW packet trace filters (Tx for index 0, Rx
 * otherwise) with the given match key/mask, invert, and enable settings.
 */
2809 void t3_config_trace_filter(struct adapter *adapter,
2810 const struct trace_params *tp, int invert, int enable)
2811 int invert, int enable)
2813 u32 addr, key[4], mask[4];
/* Pack sport/sip/dport/dip/proto/vlan/intf into four key words. */
2815 key[0] = tp->sport | (tp->sip << 16);
2816 key[1] = (tp->sip >> 16) | (tp->dport << 16);
2818 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2820 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2821 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2822 mask[2] = tp->dip_mask;
2823 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
/* Bits 29 and 28 of key[3] carry the invert/enable flags (guards elided). */
2826 key[3] |= (1 << 29);
2828 key[3] |= (1 << 28);
2830 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
/* Write key/mask pairs to consecutive indirect TP registers. */
2831 tp_wr_indirect(adapter, addr++, key[0]);
2832 tp_wr_indirect(adapter, addr++, mask[0]);
2833 tp_wr_indirect(adapter, addr++, key[1]);
2834 tp_wr_indirect(adapter, addr++, mask[1]);
2835 tp_wr_indirect(adapter, addr++, key[2]);
2836 tp_wr_indirect(adapter, addr++, mask[2]);
2837 tp_wr_indirect(adapter, addr++, key[3]);
2838 tp_wr_indirect(adapter, addr, mask[3]);
/* Read back to flush the posted writes. */
2839 t3_read_reg(adapter, A_TP_PIO_DATA);
2843 * t3_config_sched - configure a HW traffic scheduler
2844 * @adap: the adapter
2845 * @kbps: target rate in Kbps
2846 * @sched: the scheduler index
2848 * Configure a HW scheduler for the target rate
2850 int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
2852 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2853 unsigned int clk = adap->params.vpd.cclk * 1000;
2854 unsigned int selected_cpt = 0, selected_bpt = 0;
/* Exhaustively search (cycles-per-tick, bytes-per-tick) pairs for the
 * combination whose achievable rate is closest to the request. */
2857 kbps *= 125; /* -> bytes */
2858 for (cpt = 1; cpt <= 255; cpt++) {
2860 bpt = (kbps + tps / 2) / tps;
2861 if (bpt > 0 && bpt <= 255) {
2863 delta = v >= kbps ? v - kbps : kbps - v;
2864 if (delta <= mindelta) {
/* Once a match exists, deltas only grow — stop searching. */
2869 } else if (selected_cpt)
/* Two schedulers share one rate-limit register; the odd scheduler uses
 * the upper halfword, the even one the lower. */
2875 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
2876 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
2877 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2879 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
2881 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
2882 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
/* Configure TP after reset; starts the free-list init for offload parts. */
2886 static int tp_init(struct adapter *adap, const struct tp_params *p)
2891 t3_set_vlan_accel(adap, 3, 0);
2893 if (is_offload(adap)) {
2894 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
/* Kick off free-list initialization and wait for it to complete. */
2895 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
2896 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
2899 CH_ERR(adap, "TP initialization timed out\n");
/* Take TP out of reset (on success; surrounding guard elided). */
2903 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
/* Enable exactly the ports in @port_mask; reject bits beyond nports. */
2907 int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
2909 if (port_mask & ~((1 << adap->params.nports) - 1))
2911 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
2912 port_mask << S_PORT0ACTIVE);
2917 * Perform the bits of HW initialization that are dependent on the number
2918 * of available ports.
2920 static void init_hw_for_avail_ports(struct adapter *adap, int nports)
/* Single-port: no round-robin arbitration, port 0 only (guard elided). */
2925 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
2926 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
2927 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
2928 F_PORT0ACTIVE | F_ENFORCEPKT);
2929 t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
/* Dual-port: round-robin between ports with equal DMA weights. */
2931 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
2932 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
2933 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
2934 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
2935 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
2936 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
2938 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
2939 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
2940 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
2941 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
/* Alternate Tx modulation queues between the two channels. */
2942 for (i = 0; i < 16; i++)
2943 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
2944 (i << 16) | 0x1010);
/* Calibrate the XGMAC analog interface: XAUI impedance for 10G parts,
 * fixed RGMII impedance otherwise. Returns non-zero on failure. */
2948 static int calibrate_xgm(struct adapter *adapter)
2950 if (uses_xaui(adapter)) {
/* Retry the XAUI impedance calibration up to 5 times. */
2953 for (i = 0; i < 5; ++i) {
2954 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
2955 t3_read_reg(adapter, A_XGM_XAUI_IMP);
2957 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
2958 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
/* Feed the measured impedance back, scaled down by 4. */
2959 t3_write_reg(adapter, A_XGM_XAUI_IMP,
2960 V_XAUIIMP(G_CALIMP(v) >> 2));
2964 CH_ERR(adapter, "MAC calibration failed\n");
/* RGMII path: program fixed pull-down/pull-up values and latch them. */
2967 t3_write_reg(adapter, A_XGM_RGMII_IMP,
2968 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
2969 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
2970 F_XGM_IMPSETUPDATE);
/* T3B-specific RGMII calibration: pulse CALRESET, then toggle the
 * impedance-update and calibration-update latches. */
2975 static void calibrate_xgm_t3b(struct adapter *adapter)
2977 if (!uses_xaui(adapter)) {
2978 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
2979 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
2980 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
2981 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
2982 F_XGM_IMPSETUPDATE);
2983 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
2985 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
2986 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
/* DRAM timing parameters for one MC7 memory controller speed grade.
 * All values are in controller clock cycles. */
2990 struct mc7_timing_params {
2991 unsigned char ActToPreDly;      /* activate-to-precharge delay */
2992 unsigned char ActToRdWrDly;     /* activate-to-read/write delay */
2993 unsigned char PreCyc;           /* precharge cycles */
2994 unsigned char RefCyc[5];        /* refresh cycles, indexed by density */
2995 unsigned char BkCyc;            /* bank cycle time */
2996 unsigned char WrToRdDly;        /* write-to-read turnaround */
2997 unsigned char RdToWrDly;        /* read-to-write turnaround */
3001 * Write a value to a register and check that the write completed. These
3002 * writes normally complete in a cycle or two, so one read should suffice.
3003 * The very first read exists to flush the posted write to the device.
/* Returns 0 when BUSY has cleared; logs and fails otherwise (elided). */
3005 static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3007 t3_write_reg(adapter, addr, val);
3008 t3_read_reg(adapter, addr); /* flush */
3009 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3011 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
/* Bring up one MC7 DRAM controller: calibrate, program timing, run the
 * JEDEC init sequence, set the refresh rate, run BIST, and enable access. */
3015 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
/* Mode register values per memory type (DDR1/DDR2 speed grades). */
3017 static const unsigned int mc7_mode[] = {
3018 0x632, 0x642, 0x652, 0x432, 0x442
/* Timing sets matching mc7_mode, indexed by mem_type. */
3020 static const struct mc7_timing_params mc7_timings[] = {
3021 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
3022 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
3023 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
3024 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
3025 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
3029 unsigned int width, density, slow, attempts;
3030 struct adapter *adapter = mc7->adapter;
3031 const struct mc7_timing_params *p = &mc7_timings[mem_type];
/* Sample the strap-derived configuration before enabling the interface. */
3036 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3037 slow = val & F_SLOW;
3038 width = G_WIDTH(val);
3039 density = G_DEN(val);
3041 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3042 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
/* Single-shot calibration; must finish without BUSY or CAL_FAULT. */
3046 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3047 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3049 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3050 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3051 CH_ERR(adapter, "%s MC7 calibration timed out\n",
/* Program the DRAM timing parameters for this speed/density. */
3057 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3058 V_ACTTOPREDLY(p->ActToPreDly) |
3059 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3060 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3061 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3063 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3064 val | F_CLKEN | F_TERM150);
3065 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3068 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
/* JEDEC init: precharge-all, load extended mode registers, mode register. */
3073 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3074 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3075 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3076 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3080 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3081 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
/* Second pass: precharge, two refreshes, final mode register values. */
3085 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3086 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3087 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3088 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3089 mc7_mode[mem_type]) ||
3090 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3091 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
/* Convert the clock period to the refresh divider. */
3094 /* clock value is in KHz */
3095 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
3096 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
3098 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3099 F_PERREFEN | V_PREREFDIV(mc7_clock));
3100 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
/* Zero the memory via BIST with ECC generation/checking enabled. */
3102 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
3103 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3104 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3105 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3106 (mc7->size << width) - 1);
3107 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3108 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
3113 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3114 } while ((val & F_BUSY) && --attempts);
3116 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3120 /* Enable normal memory accesses. */
3121 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
/* Tune PCIe ACK latency and replay timer based on negotiated link width,
 * max payload size, and the fast-training sequence counts. */
3128 static void config_pcie(struct adapter *adap)
/* ACK latency table, indexed [log2(link width)][payload-size code]. */
3130 static const u16 ack_lat[4][6] = {
3131 {237, 416, 559, 1071, 2095, 4143},
3132 {128, 217, 289, 545, 1057, 2081},
3133 {73, 118, 154, 282, 538, 1050},
3134 {67, 107, 86, 150, 278, 534}
/* Replay timer table with the same indexing. */
3136 static const u16 rpl_tmr[4][6] = {
3137 {711, 1248, 1677, 3213, 6285, 12429},
3138 {384, 651, 867, 1635, 3171, 6243},
3139 {219, 354, 462, 846, 1614, 3150},
3140 {201, 321, 258, 450, 834, 1602}
3144 unsigned int log2_width, pldsize;
3145 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
/* Read the negotiated max payload size from Device Control. */
3147 pci_read_config_word(adap->pdev,
3148 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3150 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3151 pci_read_config_word(adap->pdev,
3152 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
/* Rev 0 lacks a separate Rx FTS count; reuse the Tx value. */
3155 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3156 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3157 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3158 log2_width = fls(adap->params.pci.width) - 1;
3159 acklat = ack_lat[log2_width][pldsize];
3160 if (val & 1) /* check LOsEnable */
3161 acklat += fst_trn_tx * 4;
3162 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
/* Rev 0 (T3A) uses a differently-placed ACK latency field. */
3164 if (adap->params.rev == 0)
3165 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3166 V_T3A_ACKLAT(M_T3A_ACKLAT),
3167 V_T3A_ACKLAT(acklat));
3169 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3172 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3173 V_REPLAYLMT(rpllmt));
/* Clear any stale PEX errors and enable CPL decoding. */
3175 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3176 t3_set_reg_field(adap, A_PCIE_CFG, F_PCIE_CLIDECEN, F_PCIE_CLIDECEN);
3180 * Initialize and configure T3 HW modules. This performs the
3181 * initialization steps that need to be done once after a card is reset.
3182 * MAC and PHY initialization is handled separately whenever a port is enabled.
3184 * fw_params are passed to FW and their value is platform dependent. Only the
3185 * top 8 bits are available for use, the rest must be 0.
3187 int t3_init_hw(struct adapter *adapter, u32 fw_params)
3189 int err = -EIO, attempts = 100;
3190 const struct vpd_params *vpd = &adapter->params.vpd;
/* MAC calibration differs between rev 0 and later silicon. */
3192 if (adapter->params.rev > 0)
3193 calibrate_xgm_t3b(adapter);
3194 else if (calibrate_xgm(adapter))
3198 partition_mem(adapter, &adapter->params.tp);
/* Bring up the three MC7 memories and the MC5 TCAM. */
3200 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3201 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3202 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3203 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3204 adapter->params.mc5.nfilters,
3205 adapter->params.mc5.nroutes))
3209 if (tp_init(adapter, &adapter->params.tp))
3212 t3_tp_set_coalescing_size(adapter,
3213 min(adapter->params.sge.max_pkt_size,
3214 MAX_RX_COALESCING_LEN), 1);
3215 t3_tp_set_max_rxsize(adapter,
3216 min(adapter->params.sge.max_pkt_size, 16384U));
3217 ulp_config(adapter, &adapter->params.tp);
3219 if (is_pcie(adapter))
3220 config_pcie(adapter);
3222 t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);
3224 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3225 init_hw_for_avail_ports(adapter, adapter->params.nports);
3226 t3_sge_init(adapter, &adapter->params.sge);
/* Hand fw_params to the microprocessor and point it at flash firmware. */
3228 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3229 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3230 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3231 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
/* The uP clears HOST_ACC_DATA when it has finished booting. */
3233 do { /* wait for uP to initialize */
3235 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3237 CH_ERR(adapter, "uP initialization timed out\n");
3247 * get_pci_mode - determine a card's PCI mode
3248 * @adapter: the adapter
3249 * @p: where to store the PCI settings
3251 * Determines a card's PCI mode and associated parameters, such as speed
3254 static void __devinit get_pci_mode(struct adapter *adapter,
3255 struct pci_params *p)
/* PCI-X clock range code -> bus speed in MHz. */
3257 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3258 u32 pci_mode, pcie_cap;
/* PCIe path: negotiated link width comes from the Link Status register. */
3260 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3264 p->variant = PCI_VARIANT_PCIE;
3265 p->pcie_cap_addr = pcie_cap;
3266 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3268 p->width = (val >> 4) & 0x3f;
/* PCI/PCI-X path: decode mode, speed, and bus width from PCIX_MODE. */
3272 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3273 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3274 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3275 pci_mode = G_PCIXINITPAT(pci_mode);
3277 p->variant = PCI_VARIANT_PCI;
3278 else if (pci_mode < 4)
3279 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3280 else if (pci_mode < 8)
3281 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3283 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3287 * init_link_config - initialize a link's SW state
3288 * @lc: structure holding the link state
3289 * @ai: information about the current card
3291 * Initializes the SW state maintained for each link, including the link's
3292 * capabilities and default speed/duplex/flow-control/autonegotiation
3295 static void __devinit init_link_config(struct link_config *lc,
3298 lc->supported = caps;
3299 lc->requested_speed = lc->speed = SPEED_INVALID;
3300 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3301 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
/* Autoneg-capable links advertise everything and negotiate pause;
 * fixed links advertise nothing. */
3302 if (lc->supported & SUPPORTED_Autoneg) {
3303 lc->advertising = lc->supported;
3304 lc->autoneg = AUTONEG_ENABLE;
3305 lc->requested_fc |= PAUSE_AUTONEG;
3307 lc->advertising = 0;
3308 lc->autoneg = AUTONEG_DISABLE;
3313 * mc7_calc_size - calculate MC7 memory size
3314 * @cfg: the MC7 configuration
3316 * Calculates the size of an MC7 memory in bytes from the value of its
3317 * configuration register.
3319 static unsigned int __devinit mc7_calc_size(u32 cfg)
3321 unsigned int width = G_WIDTH(cfg);
3322 unsigned int banks = !!(cfg & F_BKS) + 1;
3323 unsigned int org = !!(cfg & F_ORG) + 1;
3324 unsigned int density = G_DEN(cfg);
/* Base density 256 Mbit, scaled by banks and divided by organization. */
3325 unsigned int MBs = ((256 << density) * banks) / (org << width);
/* Initialize SW state for one MC7 controller from its config register. */
3330 static void __devinit mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3331 unsigned int base_addr, const char *name)
3335 mc7->adapter = adapter;
/* Register offsets are relative to the PMRX MC7 block. */
3337 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3338 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
/* NOTE(review): duplicated "mc7->size =" is a harmless redundant
 * assignment; consider cleaning it up. M_DEN means no memory present. */
3339 mc7->size = mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3340 mc7->width = G_WIDTH(cfg);
/* Initialize SW state for one XGMAC and apply rev-0 XAUI SERDES tuning. */
3343 void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3345 mac->adapter = adapter;
/* Each MAC's register block is offset by the inter-MAC stride. */
3346 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3349 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
/* SERDES control value depends on 10G vs 1G operation. */
3350 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3351 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3352 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
/*
 * early_hw_init - basic one-time HW setup done at probe time
 *
 * Initializes the MI1 (MDIO) interface, programs the I2C clock divider for
 * ~80 kHz operation derived from the core clock, drives the GPIOs to the
 * board-specific defaults, and clears the MC5 server index.  For rev > 0
 * adapters using XAUI it then pulses CLKDIVRESET on both XGMACs so their
 * clocks run and the MAC registers become accessible.
 */
3357 void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
/* PORTSPEED encoding: 3 = 10G port, 2 = 1G port */
3359 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3361 mi1_init(adapter, ai);
3362 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3363 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
/* GPIO0 is driven high in addition to the board's default outputs. */
3364 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3365 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3366 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
/*
 * Nothing more to do for rev-0 or non-XAUI adapters.
 * NOTE(review): the body of this 'if' — presumably a bare 'return;' — is
 * not visible in this chunk; confirm against the full file.
 */
3368 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3371 /* Enable MAC clocks so we can access the registers */
3372 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
/* read back — presumably to flush the posted write; confirm */
3373 t3_read_reg(adapter, A_XGM_PORT_CFG);
/* Now pulse CLKDIVRESET on XGMAC 0, then write the same value to XGMAC 1. */
3375 val |= F_CLKDIVRESET_;
3376 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3377 t3_read_reg(adapter, A_XGM_PORT_CFG);
3378 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3379 t3_read_reg(adapter, A_XGM_PORT_CFG);
3383 * Reset the adapter.
3384 * Older PCIe cards lose their config space during reset, PCI-X
/*
 * NOTE(review): the comment above is truncated in this chunk; per the
 * save/restore calls below, the intent is that pre-B2 PCIe parts lose their
 * PCI config space across the warm reset and so it must be saved beforehand
 * and restored afterwards.
 */
3387 int t3_reset_adapter(struct adapter *adapter)
3389 int i, save_and_restore_pcie =
3390 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3393 if (save_and_restore_pcie)
3394 pci_save_state(adapter->pdev);
/* Trigger the warm reset. */
3395 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3398 * Delay. Give Some time to device to reset fully.
3399 * XXX The delay time should be modified.
/*
 * Poll up to 10 times for the device to come back: once config space is
 * readable again, offset 0x00 returns the vendor ID.
 * NOTE(review): the declaration of 'devid' and the per-iteration delay are
 * not visible in this chunk — confirm against the full file.
 */
3401 for (i = 0; i < 10; i++) {
3403 pci_read_config_word(adapter->pdev, 0x00, &devid);
/* 0x1425 is the Chelsio PCI vendor ID */
3404 if (devid == 0x1425)
/* Device never reappeared — the error-return path is elided in this chunk. */
3408 if (devid != 0x1425)
3411 if (save_and_restore_pcie)
3412 pci_restore_state(adapter->pdev);
3417 * Initialize adapter SW state for the various HW modules, set initial values
3418 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
/*
 * NOTE(review): the comment above is truncated (the "MDIO ..." sentence
 * continues on a line not visible in this chunk).  Returns 0 on success;
 * error paths (e.g. after get_vpd_params or t3_reset_adapter failures) are
 * also elided from this view.
 */
3421 int __devinit t3_prep_adapter(struct adapter *adapter,
3422 const struct adapter_info *ai, int reset)
3425 unsigned int i, j = 0;
3427 get_pci_mode(adapter, &adapter->params.pci);
/* Cache board info, chip revision, and default tunables. */
3429 adapter->params.info = ai;
3430 adapter->params.nports = ai->nports;
3431 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3432 adapter->params.linkpoll_period = 0;
/* 10G MACs accumulate stats faster, so update 10x more often than 1G. */
3433 adapter->params.stats_update_period = is_10G(adapter) ?
3434 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3435 adapter->params.pci.vpd_cap_addr =
3436 pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3437 ret = get_vpd_params(adapter, &adapter->params.vpd);
/* Optionally reset the chip before further setup. */
3441 if (reset && t3_reset_adapter(adapter))
3444 t3_sge_prep(adapter, &adapter->params.sge);
/* A nonzero memory clock in the VPD means external memories are present. */
3446 if (adapter->params.vpd.mclk) {
3447 struct tp_params *p = &adapter->params.tp;
3449 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3450 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3451 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
/* Partition payload memory: Rx split across a fixed 2, Tx per channel. */
3453 p->nchan = ai->nports;
3454 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3455 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3456 p->cm_size = t3_mc7_size(&adapter->cm);
3457 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3458 p->chan_tx_size = p->pmtx_size / p->nchan;
3459 p->rx_pg_size = 64 * 1024;
3460 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3461 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3462 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
/* More timer queues on bigger CM memory or newer revisions. */
3463 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3464 adapter->params.rev > 0 ? 12 : 6;
/* Offload (TOE) requires all three external memories to be populated. */
3467 adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3468 t3_mc7_size(&adapter->pmtx) &&
3469 t3_mc7_size(&adapter->cm);
3471 if (is_offload(adapter)) {
3472 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
/* Rev-0 parts get no filters. */
3473 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3474 DEFAULT_NFILTERS : 0;
3475 adapter->params.mc5.nroutes = 0;
3476 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3478 init_mtus(adapter->params.mtus);
3479 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3482 early_hw_init(adapter, ai);
/* Per-port setup: bind PHY/MAC state and assign Ethernet addresses. */
3484 for_each_port(adapter, i) {
3486 struct port_info *p = adap2pinfo(adapter, i);
/* Skip empty VPD port slots; 'j' indexes the populated entries.
 * NOTE(review): the loop body advancing 'j' is not visible here. */
3488 while (!adapter->params.vpd.port_type[j])
3491 p->port_type = &port_types[adapter->params.vpd.port_type[j]];
/* NOTE(review): this call's trailing arguments are cut off in this chunk. */
3492 p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3494 mac_prep(&p->mac, adapter, j);
3498 * The VPD EEPROM stores the base Ethernet address for the
3499 * card. A port's address is derived from the base by adding
3500 * the port's index to the base's low octet.
3502 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3503 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
/* NOTE(review): the length arguments of these memcpy calls (presumably
 * ETH_ALEN) are on lines not visible in this chunk. */
3505 memcpy(adapter->port[i]->dev_addr, hw_addr,
3507 memcpy(adapter->port[i]->perm_addr, hw_addr,
3509 init_link_config(&p->link_config, p->port_type->caps);
/* Leave PHYs powered down until the interface is brought up. */
3510 p->phy.ops->power_down(&p->phy, 1);
/* Ports without link-change interrupts fall back to 10ms polling. */
3511 if (!(p->port_type->caps & SUPPORTED_IRQ))
3512 adapter->params.linkpoll_period = 10;
3518 void t3_led_ready(struct adapter *adapter)
3520 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,