2 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include "firmware_exports.h"
38 * t3_wait_op_done_val - wait until an operation is completed
39 * @adapter: the adapter performing the operation
40 * @reg: the register to check for completion
41 * @mask: a single-bit field within @reg that indicates completion
42 * @polarity: the value of the field when the operation is completed
43 * @attempts: number of check iterations
44 * @delay: delay in usecs between iterations
45 * @valp: where to store the value of the register at completion time
47 * Wait until an operation is completed by checking a bit in a register
48 * up to @attempts times. If @valp is not NULL the value of the register
49 * at the time it indicated completion is stored there. Returns 0 if the
50 * operation completes and -EAGAIN otherwise.
53 int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
54 int polarity, int attempts, int delay, u32 *valp)
57 u32 val = t3_read_reg(adapter, reg);
59 if (!!(val & mask) == polarity) {
72 * t3_write_regs - write a bunch of registers
73 * @adapter: the adapter to program
74 * @p: an array of register address/register value pairs
75 * @n: the number of address/value pairs
76 * @offset: register address offset
78 * Takes an array of register address/register value pairs and writes each
79 * value to the corresponding register. Register addresses are adjusted
80 * by the supplied offset.
82 void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
83 int n, unsigned int offset)
86 t3_write_reg(adapter, p->reg_addr + offset, p->val);
92 * t3_set_reg_field - set a register field to a value
93 * @adapter: the adapter to program
94 * @addr: the register address
95 * @mask: specifies the portion of the register to modify
96 * @val: the new value for the register field
98 * Sets a register field specified by the supplied mask to the
101 void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
104 u32 v = t3_read_reg(adapter, addr) & ~mask;
106 t3_write_reg(adapter, addr, v | val);
107 t3_read_reg(adapter, addr); /* flush */
111 * t3_read_indirect - read indirectly addressed registers
113 * @addr_reg: register holding the indirect address
114 * @data_reg: register holding the value of the indirect register
115 * @vals: where the read register values are stored
116 * @start_idx: index of first indirect register to read
117 * @nregs: how many indirect registers to read
119 * Reads registers that are accessed indirectly through an address/data
122 static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
123 unsigned int data_reg, u32 *vals,
124 unsigned int nregs, unsigned int start_idx)
127 t3_write_reg(adap, addr_reg, start_idx);
128 *vals++ = t3_read_reg(adap, data_reg);
134 * t3_mc7_bd_read - read from MC7 through backdoor accesses
135 * @mc7: identifies MC7 to read from
136 * @start: index of first 64-bit word to read
137 * @n: number of 64-bit words to read
138 * @buf: where to store the read result
140 * Read n 64-bit words from MC7 starting at word start, using backdoor
143 int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
146 static const int shift[] = { 0, 0, 16, 24 };
147 static const int step[] = { 0, 32, 16, 8 };
149 unsigned int size64 = mc7->size / 8; /* # of 64-bit words */
150 struct adapter *adap = mc7->adapter;
152 if (start >= size64 || start + n > size64)
155 start *= (8 << mc7->width);
160 for (i = (1 << mc7->width) - 1; i >= 0; --i) {
164 t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
165 t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
166 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
167 while ((val & F_BUSY) && attempts--)
168 val = t3_read_reg(adap,
169 mc7->offset + A_MC7_BD_OP);
173 val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
174 if (mc7->width == 0) {
175 val64 = t3_read_reg(adap,
178 val64 |= (u64) val << 32;
181 val >>= shift[mc7->width];
182 val64 |= (u64) val << (step[mc7->width] * i);
194 static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
196 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
197 u32 val = F_PREEN | V_CLKDIV(clkdiv);
199 t3_write_reg(adap, A_MI1_CFG, val);
202 #define MDIO_ATTEMPTS 20
205 * MI1 read/write operations for clause 22 PHYs.
207 static int t3_mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
208 int reg_addr, unsigned int *valp)
211 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
216 mutex_lock(&adapter->mdio_lock);
217 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
218 t3_write_reg(adapter, A_MI1_ADDR, addr);
219 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
220 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
222 *valp = t3_read_reg(adapter, A_MI1_DATA);
223 mutex_unlock(&adapter->mdio_lock);
227 static int t3_mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
228 int reg_addr, unsigned int val)
231 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
236 mutex_lock(&adapter->mdio_lock);
237 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
238 t3_write_reg(adapter, A_MI1_ADDR, addr);
239 t3_write_reg(adapter, A_MI1_DATA, val);
240 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
241 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
242 mutex_unlock(&adapter->mdio_lock);
246 static const struct mdio_ops mi1_mdio_ops = {
252 * Performs the address cycle for clause 45 PHYs.
253 * Must be called with the MDIO_LOCK held.
255 static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
258 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
260 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
261 t3_write_reg(adapter, A_MI1_ADDR, addr);
262 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
263 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
264 return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
269 * MI1 read/write operations for indirect-addressed PHYs.
271 static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
272 int reg_addr, unsigned int *valp)
276 mutex_lock(&adapter->mdio_lock);
277 ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
279 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
280 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
283 *valp = t3_read_reg(adapter, A_MI1_DATA);
285 mutex_unlock(&adapter->mdio_lock);
289 static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
290 int reg_addr, unsigned int val)
294 mutex_lock(&adapter->mdio_lock);
295 ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
297 t3_write_reg(adapter, A_MI1_DATA, val);
298 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
299 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
302 mutex_unlock(&adapter->mdio_lock);
306 static const struct mdio_ops mi1_mdio_ext_ops = {
/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Changes the value of a PHY register by applying a mask to its current
 *	value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}
337 * t3_phy_reset - reset a PHY block
338 * @phy: the PHY to operate on
339 * @mmd: the device address of the PHY block to reset
340 * @wait: how long to wait for the reset to complete in 1ms increments
342 * Resets a PHY block and optionally waits for the reset to complete.
343 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
346 int t3_phy_reset(struct cphy *phy, int mmd, int wait)
351 err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
356 err = mdio_read(phy, mmd, MII_BMCR, &ctl);
362 } while (ctl && --wait);
368 * t3_phy_advertise - set the PHY advertisement registers for autoneg
369 * @phy: the PHY to operate on
370 * @advert: bitmap of capabilities the PHY should advertise
372 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
373 * requested capabilities.
375 int t3_phy_advertise(struct cphy *phy, unsigned int advert)
378 unsigned int val = 0;
380 err = mdio_read(phy, 0, MII_CTRL1000, &val);
384 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
385 if (advert & ADVERTISED_1000baseT_Half)
386 val |= ADVERTISE_1000HALF;
387 if (advert & ADVERTISED_1000baseT_Full)
388 val |= ADVERTISE_1000FULL;
390 err = mdio_write(phy, 0, MII_CTRL1000, val);
395 if (advert & ADVERTISED_10baseT_Half)
396 val |= ADVERTISE_10HALF;
397 if (advert & ADVERTISED_10baseT_Full)
398 val |= ADVERTISE_10FULL;
399 if (advert & ADVERTISED_100baseT_Half)
400 val |= ADVERTISE_100HALF;
401 if (advert & ADVERTISED_100baseT_Full)
402 val |= ADVERTISE_100FULL;
403 if (advert & ADVERTISED_Pause)
404 val |= ADVERTISE_PAUSE_CAP;
405 if (advert & ADVERTISED_Asym_Pause)
406 val |= ADVERTISE_PAUSE_ASYM;
407 return mdio_write(phy, 0, MII_ADVERTISE, val);
411 * t3_set_phy_speed_duplex - force PHY speed and duplex
412 * @phy: the PHY to operate on
413 * @speed: requested PHY speed
414 * @duplex: requested PHY duplex
416 * Force a 10/100/1000 PHY's speed and duplex. This also disables
417 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
419 int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
424 err = mdio_read(phy, 0, MII_BMCR, &ctl);
429 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
430 if (speed == SPEED_100)
431 ctl |= BMCR_SPEED100;
432 else if (speed == SPEED_1000)
433 ctl |= BMCR_SPEED1000;
436 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
437 if (duplex == DUPLEX_FULL)
438 ctl |= BMCR_FULLDPLX;
440 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
441 ctl |= BMCR_ANENABLE;
442 return mdio_write(phy, 0, MII_BMCR, ctl);
445 static const struct adapter_info t3_adap_info[] = {
447 F_GPIO2_OEN | F_GPIO4_OEN |
448 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
450 &mi1_mdio_ops, "Chelsio PE9000"},
452 F_GPIO2_OEN | F_GPIO4_OEN |
453 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
455 &mi1_mdio_ops, "Chelsio T302"},
457 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
458 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
459 0, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
460 &mi1_mdio_ext_ops, "Chelsio T310"},
462 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
463 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
464 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
465 SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
466 &mi1_mdio_ext_ops, "Chelsio T320"},
470 * Return the adapter_info structure with a given index. Out-of-range indices
473 const struct adapter_info *t3_get_adapter_info(unsigned int id)
475 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
/* Per-port-type PHY setup hook, indexed by the VPD port type code. */
struct port_type_info {
	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *ops);
};
483 static const struct port_type_info port_types[] = {
485 { t3_ael1002_phy_prep },
486 { t3_vsc8211_phy_prep },
488 { t3_xaui_direct_phy_prep },
490 { t3_qt2045_phy_prep },
491 { t3_ael1006_phy_prep },
/* Expands to the three fields of one PCI VPD keyword entry:
 * 2-byte keyword, 1-byte length, and the data payload. */
#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
499 * Partial EEPROM Vital Product Data structure. Includes only the ID and
508 VPD_ENTRY(pn, 16); /* part number */
509 VPD_ENTRY(ec, 16); /* EC level */
510 VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
511 VPD_ENTRY(na, 12); /* MAC address base */
512 VPD_ENTRY(cclk, 6); /* core clock */
513 VPD_ENTRY(mclk, 6); /* mem clock */
514 VPD_ENTRY(uclk, 6); /* uP clk */
515 VPD_ENTRY(mdc, 6); /* MDIO clk */
516 VPD_ENTRY(mt, 2); /* mem timing */
517 VPD_ENTRY(xaui0cfg, 6); /* XAUI0 config */
518 VPD_ENTRY(xaui1cfg, 6); /* XAUI1 config */
519 VPD_ENTRY(port0, 2); /* PHY0 complex */
520 VPD_ENTRY(port1, 2); /* PHY1 complex */
521 VPD_ENTRY(port2, 2); /* PHY2 complex */
522 VPD_ENTRY(port3, 2); /* PHY3 complex */
523 VPD_ENTRY(rv, 1); /* csum */
524 u32 pad; /* for multiple-of-4 sizing and alignment */
#define EEPROM_MAX_POLL   4	/* max polls of the VPD flag bit */
#define EEPROM_STAT_ADDR  0x4000	/* write-protect status pseudo-address */
#define VPD_BASE          0xc00	/* normal offset of VPD data in EEPROM */
532 * t3_seeprom_read - read a VPD EEPROM location
533 * @adapter: adapter to read
534 * @addr: EEPROM address
535 * @data: where to store the read data
537 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
538 * VPD ROM capability. A zero is written to the flag bit when the
539 * addres is written to the control register. The hardware device will
540 * set the flag to 1 when 4 bytes have been read into the data register.
542 int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
545 int attempts = EEPROM_MAX_POLL;
547 unsigned int base = adapter->params.pci.vpd_cap_addr;
549 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
552 pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
555 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
556 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
558 if (!(val & PCI_VPD_ADDR_F)) {
559 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
562 pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
563 *data = cpu_to_le32(v);
568 * t3_seeprom_write - write a VPD EEPROM location
569 * @adapter: adapter to write
570 * @addr: EEPROM address
571 * @data: value to write
573 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
574 * VPD ROM capability.
576 int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
579 int attempts = EEPROM_MAX_POLL;
580 unsigned int base = adapter->params.pci.vpd_cap_addr;
582 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
585 pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
587 pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR,
588 addr | PCI_VPD_ADDR_F);
591 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
592 } while ((val & PCI_VPD_ADDR_F) && --attempts);
594 if (val & PCI_VPD_ADDR_F) {
595 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
602 * t3_seeprom_wp - enable/disable EEPROM write protection
603 * @adapter: the adapter
604 * @enable: 1 to enable write protection, 0 to disable it
606 * Enables or disables write protection on the serial EEPROM.
608 int t3_seeprom_wp(struct adapter *adapter, int enable)
610 return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
/*
 * Convert a character holding a hex digit to a number.  The caller must
 * pass a valid hex digit ('0'-'9', 'a'-'f', 'A'-'F'); other input yields
 * an unspecified value.
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}
622 * get_vpd_params - read VPD parameters from VPD EEPROM
623 * @adapter: adapter to read
624 * @p: where to store the parameters
626 * Reads card parameters stored in VPD EEPROM.
628 static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
634 * Card information is normally at VPD_BASE but some early cards had
637 ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
640 addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;
642 for (i = 0; i < sizeof(vpd); i += 4) {
643 ret = t3_seeprom_read(adapter, addr + i,
644 (__le32 *)((u8 *)&vpd + i));
649 p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
650 p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
651 p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
652 p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
653 p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
654 memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
656 /* Old eeproms didn't have port information */
657 if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
658 p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
659 p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
661 p->port_type[0] = hex2int(vpd.port0_data[0]);
662 p->port_type[1] = hex2int(vpd.port1_data[0]);
663 p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
664 p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
667 for (i = 0; i < 6; i++)
668 p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
669 hex2int(vpd.na_data[2 * i + 1]);
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,	/* flash address holding FW version */
	FW_MIN_SIZE = 8		/* at least version and csum */
};
693 * sf1_read - read data from the serial flash
694 * @adapter: the adapter
695 * @byte_cnt: number of bytes to read
696 * @cont: whether another operation will be chained
697 * @valp: where to store the read data
699 * Reads up to 4 bytes of data from the serial flash. The location of
700 * the read needs to be specified prior to calling this by issuing the
701 * appropriate commands to the serial flash.
703 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
708 if (!byte_cnt || byte_cnt > 4)
710 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
712 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
713 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
715 *valp = t3_read_reg(adapter, A_SF_DATA);
720 * sf1_write - write data to the serial flash
721 * @adapter: the adapter
722 * @byte_cnt: number of bytes to write
723 * @cont: whether another operation will be chained
724 * @val: value to write
726 * Writes up to 4 bytes of data to the serial flash. The location of
727 * the write needs to be specified prior to calling this by issuing the
728 * appropriate commands to the serial flash.
730 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
733 if (!byte_cnt || byte_cnt > 4)
735 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
737 t3_write_reg(adapter, A_SF_DATA, val);
738 t3_write_reg(adapter, A_SF_OP,
739 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
740 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
744 * flash_wait_op - wait for a flash operation to complete
745 * @adapter: the adapter
746 * @attempts: max number of polls of the status register
747 * @delay: delay between polls in ms
749 * Wait for a flash operation to complete by polling the status register.
751 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
757 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
758 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
770 * t3_read_flash - read words from serial flash
771 * @adapter: the adapter
772 * @addr: the start address for the read
773 * @nwords: how many 32-bit words to read
774 * @data: where to store the read data
775 * @byte_oriented: whether to store data as bytes or as words
777 * Read the specified number of 32-bit words from the serial flash.
778 * If @byte_oriented is set the read data is stored as a byte array
779 * (i.e., big-endian), otherwise as 32-bit words in the platform's
782 int t3_read_flash(struct adapter *adapter, unsigned int addr,
783 unsigned int nwords, u32 *data, int byte_oriented)
787 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
790 addr = swab32(addr) | SF_RD_DATA_FAST;
792 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
793 (ret = sf1_read(adapter, 1, 1, data)) != 0)
796 for (; nwords; nwords--, data++) {
797 ret = sf1_read(adapter, 4, nwords > 1, data);
801 *data = htonl(*data);
807 * t3_write_flash - write up to a page of data to the serial flash
808 * @adapter: the adapter
809 * @addr: the start address to write
810 * @n: length of data to write
811 * @data: the data to write
813 * Writes up to a page of data (256 bytes) to the serial flash starting
814 * at the given address.
816 static int t3_write_flash(struct adapter *adapter, unsigned int addr,
817 unsigned int n, const u8 *data)
821 unsigned int i, c, left, val, offset = addr & 0xff;
823 if (addr + n > SF_SIZE || offset + n > 256)
826 val = swab32(addr) | SF_PROG_PAGE;
828 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
829 (ret = sf1_write(adapter, 4, 1, val)) != 0)
832 for (left = n; left; left -= c) {
834 for (val = 0, i = 0; i < c; ++i)
835 val = (val << 8) + *data++;
837 ret = sf1_write(adapter, c, c != left, val);
841 if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
844 /* Read the page to verify the write succeeded */
845 ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
849 if (memcmp(data - n, (u8 *) buf + offset, n))
855 * t3_get_tp_version - read the tp sram version
856 * @adapter: the adapter
857 * @vers: where to place the version
859 * Reads the protocol sram version from sram.
861 int t3_get_tp_version(struct adapter *adapter, u32 *vers)
865 /* Get version loaded in SRAM */
866 t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
867 ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
872 *vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
878 * t3_check_tpsram_version - read the tp sram version
879 * @adapter: the adapter
880 * @must_load: set to 1 if loading a new microcode image is required
882 * Reads the protocol sram version from flash.
884 int t3_check_tpsram_version(struct adapter *adapter, int *must_load)
888 unsigned int major, minor;
890 if (adapter->params.rev == T3_REV_A)
895 ret = t3_get_tp_version(adapter, &vers);
899 major = G_TP_VERSION_MAJOR(vers);
900 minor = G_TP_VERSION_MINOR(vers);
902 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
905 if (major != TP_VERSION_MAJOR)
906 CH_ERR(adapter, "found wrong TP version (%u.%u), "
907 "driver needs version %d.%d\n", major, minor,
908 TP_VERSION_MAJOR, TP_VERSION_MINOR);
911 CH_ERR(adapter, "found wrong TP version (%u.%u), "
912 "driver compiled for version %d.%d\n", major, minor,
913 TP_VERSION_MAJOR, TP_VERSION_MINOR);
919 * t3_check_tpsram - check if provided protocol SRAM
920 * is compatible with this driver
921 * @adapter: the adapter
922 * @tp_sram: the firmware image to write
925 * Checks if an adapter's tp sram is compatible with the driver.
926 * Returns 0 if the versions are compatible, a negative error otherwise.
928 int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
933 const __be32 *p = (const __be32 *)tp_sram;
935 /* Verify checksum */
936 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
938 if (csum != 0xffffffff) {
939 CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
947 enum fw_version_type {
953 * t3_get_fw_version - read the firmware version
954 * @adapter: the adapter
955 * @vers: where to place the version
957 * Reads the FW version from flash.
959 int t3_get_fw_version(struct adapter *adapter, u32 *vers)
961 return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
965 * t3_check_fw_version - check if the FW is compatible with this driver
966 * @adapter: the adapter
967 * @must_load: set to 1 if loading a new FW image is required
969 * Checks if an adapter's FW is compatible with the driver. Returns 0
970 * if the versions are compatible, a negative error otherwise.
972 int t3_check_fw_version(struct adapter *adapter, int *must_load)
976 unsigned int type, major, minor;
979 ret = t3_get_fw_version(adapter, &vers);
983 type = G_FW_VERSION_TYPE(vers);
984 major = G_FW_VERSION_MAJOR(vers);
985 minor = G_FW_VERSION_MINOR(vers);
987 if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
988 minor == FW_VERSION_MINOR)
991 if (major != FW_VERSION_MAJOR)
992 CH_ERR(adapter, "found wrong FW version(%u.%u), "
993 "driver needs version %u.%u\n", major, minor,
994 FW_VERSION_MAJOR, FW_VERSION_MINOR);
995 else if (minor < FW_VERSION_MINOR) {
997 CH_WARN(adapter, "found old FW minor version(%u.%u), "
998 "driver compiled for version %u.%u\n", major, minor,
999 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1001 CH_WARN(adapter, "found newer FW version(%u.%u), "
1002 "driver compiled for version %u.%u\n", major, minor,
1003 FW_VERSION_MAJOR, FW_VERSION_MINOR);
1010 * t3_flash_erase_sectors - erase a range of flash sectors
1011 * @adapter: the adapter
1012 * @start: the first sector to erase
1013 * @end: the last sector to erase
1015 * Erases the sectors in the given range.
1017 static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
1019 while (start <= end) {
1022 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1023 (ret = sf1_write(adapter, 4, 0,
1024 SF_ERASE_SECTOR | (start << 8))) != 0 ||
1025 (ret = flash_wait_op(adapter, 5, 500)) != 0)
1033 * t3_load_fw - download firmware
1034 * @adapter: the adapter
1035 * @fw_data: the firmware image to write
1038 * Write the supplied firmware image to the card's serial flash.
1039 * The FW image has the following sections: @size - 8 bytes of code and
1040 * data, followed by 4 bytes of FW version, followed by the 32-bit
1041 * 1's complement checksum of the whole image.
1043 int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
1047 const __be32 *p = (const __be32 *)fw_data;
1048 int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;
1050 if ((size & 3) || size < FW_MIN_SIZE)
1052 if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
1055 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
1056 csum += ntohl(p[i]);
1057 if (csum != 0xffffffff) {
1058 CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
1063 ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
1067 size -= 8; /* trim off version and checksum */
1068 for (addr = FW_FLASH_BOOT_ADDR; size;) {
1069 unsigned int chunk_size = min(size, 256U);
1071 ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
1076 fw_data += chunk_size;
1080 ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
1083 CH_ERR(adapter, "firmware download failed, error %d\n", ret);
1087 #define CIM_CTL_BASE 0x2000
1090 * t3_cim_ctl_blk_read - read a block from CIM control region
1092 * @adap: the adapter
1093 * @addr: the start address within the CIM control region
1094 * @n: number of words to read
1095 * @valp: where to store the result
1097 * Reads a block of 4-byte words from the CIM control region.
1099 int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
1100 unsigned int n, unsigned int *valp)
1104 if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
1107 for ( ; !ret && n--; addr += 4) {
1108 t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
1109 ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
1112 *valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
1119 * t3_link_changed - handle interface link changes
1120 * @adapter: the adapter
1121 * @port_id: the port index that changed link state
1123 * Called when a port's link settings change to propagate the new values
1124 * to the associated PHY and MAC. After performing the common tasks it
1125 * invokes an OS-specific handler.
1127 void t3_link_changed(struct adapter *adapter, int port_id)
1129 int link_ok, speed, duplex, fc;
1130 struct port_info *pi = adap2pinfo(adapter, port_id);
1131 struct cphy *phy = &pi->phy;
1132 struct cmac *mac = &pi->mac;
1133 struct link_config *lc = &pi->link_config;
1135 phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
1137 if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
1138 uses_xaui(adapter)) {
1141 t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
1142 link_ok ? F_TXACTENABLE | F_RXEN : 0);
1144 lc->link_ok = link_ok;
1145 lc->speed = speed < 0 ? SPEED_INVALID : speed;
1146 lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
1147 if (lc->requested_fc & PAUSE_AUTONEG)
1148 fc &= lc->requested_fc;
1150 fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1152 if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
1153 /* Set MAC speed, duplex, and flow control to match PHY. */
1154 t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
1158 t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
1162 * t3_link_start - apply link configuration to MAC/PHY
1163 * @phy: the PHY to setup
1164 * @mac: the MAC to setup
1165 * @lc: the requested link configuration
1167 * Set up a port's MAC and PHY according to a desired link configuration.
1168 * - If the PHY can auto-negotiate first decide what to advertise, then
1169 * enable/disable auto-negotiation as desired, and reset.
1170 * - If the PHY does not auto-negotiate just reset it.
1171 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
1172 * otherwise do it later based on the outcome of auto-negotiation.
1174 int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
1176 unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
1179 if (lc->supported & SUPPORTED_Autoneg) {
1180 lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
1182 lc->advertising |= ADVERTISED_Asym_Pause;
1184 lc->advertising |= ADVERTISED_Pause;
1186 phy->ops->advertise(phy, lc->advertising);
1188 if (lc->autoneg == AUTONEG_DISABLE) {
1189 lc->speed = lc->requested_speed;
1190 lc->duplex = lc->requested_duplex;
1191 lc->fc = (unsigned char)fc;
1192 t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
1194 /* Also disables autoneg */
1195 phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
1196 phy->ops->reset(phy, 0);
1198 phy->ops->autoneg_enable(phy);
1200 t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
1201 lc->fc = (unsigned char)fc;
1202 phy->ops->reset(phy, 0);
1208 * t3_set_vlan_accel - control HW VLAN extraction
1209 * @adapter: the adapter
1210 * @ports: bitmap of adapter ports to operate on
1211 * @on: enable (1) or disable (0) HW VLAN extraction
1213 * Enables or disables HW extraction of VLAN tags for the given port.
1215 void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1217 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1218 ports << S_VLANEXTRACTIONENABLE,
1219 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
/* One entry in a table-driven interrupt handler's action table. */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
};
1230 * t3_handle_intr_status - table driven interrupt handler
1231 * @adapter: the adapter that generated the interrupt
1232 * @reg: the interrupt status register to process
1233 * @mask: a mask to apply to the interrupt status
1234 * @acts: table of interrupt actions
1235 * @stats: statistics counters tracking interrupt occurrences
1237 * A table driven interrupt handler that applies a set of masks to an
1238 * interrupt status word and performs the corresponding actions if the
1239 * interrupts described by the mask have occurred. The actions include
1240 * optionally printing a warning or alert message, and optionally
1241 * incrementing a stat counter. The table is terminated by an entry
1242 * specifying mask 0. Returns the number of fatal interrupt conditions.
1244 static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1246 const struct intr_info *acts,
1247 unsigned long *stats)
1250 unsigned int status = t3_read_reg(adapter, reg) & mask;
/* Walk the action table; a zero mask terminates it. */
1252 for (; acts->mask; ++acts) {
1253 if (!(status & acts->mask))
1257 CH_ALERT(adapter, "%s (0x%x)\n",
1258 acts->msg, status & acts->mask);
1259 } else if (acts->msg)
1260 CH_WARN(adapter, "%s (0x%x)\n",
1261 acts->msg, status & acts->mask);
/* NOTE(review): stats may be NULL for tables whose entries all use
 * stat_idx == -1 (several callers pass NULL); confirm no table mixes a
 * valid stat_idx with a NULL stats pointer. */
1262 if (acts->stat_idx >= 0)
1263 stats[acts->stat_idx]++;
1265 if (status) /* clear processed interrupts */
1266 t3_write_reg(adapter, reg, status);
/*
 * Per-module interrupt enable/cause masks, used both by the module
 * interrupt handlers below and by t3_intr_enable().
 * NOTE(review): this listing is elided — some continuation lines of
 * these macros are missing, so no comments are inserted mid-macro.
 */
1270 #define SGE_INTR_MASK (F_RSPQDISABLED | \
1271 F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
1272 F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
1273 F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
1274 V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
1275 F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
1277 #define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
1278 F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
1280 #define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
1281 #define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1282 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
1283 F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
/* PCI-X causes; MSI-X parity is deliberately left commented out. */
1284 #define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
1285 F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
1286 F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
1287 F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
1288 V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
1289 V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
/* PCIe causes; MSI-X parity likewise commented out. */
1290 #define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
1291 F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
1292 /* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
1293 F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
1294 F_TXPARERR | V_BISTERR(M_BISTERR))
1295 #define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
1296 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
1297 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
1298 #define ULPTX_INTR_MASK 0xfc
1299 #define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
1300 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
1301 F_ZERO_SWITCH_ERROR)
1302 #define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
1303 F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
1304 F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
1305 F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
1306 F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
1307 F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
1308 F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
1309 F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
1310 #define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
1311 V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
1312 V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
1313 #define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
1314 V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
1315 V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
1316 #define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
1317 V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
1318 V_RXTPPARERRENB(M_RXTPPARERRENB) | \
1319 V_MCAPARERRENB(M_MCAPARERRENB))
/* Top-level PL cause bits demultiplexed by t3_slow_intr_handler(). */
1320 #define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
1321 F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
1322 F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
1323 F_MPS0 | F_CPL_SWITCH)
1326 * Interrupt handler for the PCIX1 module.
1328 static void pci_intr_handler(struct adapter *adapter)
/* Decode table: { cause bits, message, stat index (-1 = none), fatal }
 * per struct intr_info. */
1330 static const struct intr_info pcix1_intr_info[] = {
1331 {F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
1332 {F_SIGTARABT, "PCI signaled target abort", -1, 1},
1333 {F_RCVTARABT, "PCI received target abort", -1, 1},
1334 {F_RCVMSTABT, "PCI received master abort", -1, 1},
1335 {F_SIGSYSERR, "PCI signaled system error", -1, 1},
1336 {F_DETPARERR, "PCI detected parity error", -1, 1},
1337 {F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
1338 {F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
1339 {F_RCVSPLCMPERR, "PCI received split completion error", -1,
1341 {F_DETCORECCERR, "PCI correctable ECC error",
1342 STAT_PCI_CORR_ECC, 0},
1343 {F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
1344 {F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1345 {V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
1347 {V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
1349 {V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
1351 {V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
/* Any fatal entry that fired escalates to t3_fatal_err(). */
1356 if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
1357 pcix1_intr_info, adapter->irq_stats))
1358 t3_fatal_err(adapter);
1362 * Interrupt handler for the PCIE module.
1364 static void pcie_intr_handler(struct adapter *adapter)
/* All PCIe causes are fatal and uncounted (stat_idx == -1). */
1366 static const struct intr_info pcie_intr_info[] = {
1367 {F_PEXERR, "PCI PEX error", -1, 1},
1369 "PCI unexpected split completion DMA read error", -1, 1},
1371 "PCI unexpected split completion DMA command error", -1, 1},
1372 {F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
1373 {F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
1374 {F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
1375 {F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
1376 {V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
1377 "PCI MSI-X table/PBA parity error", -1, 1},
1378 {F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
1379 {F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
1380 {F_RXPARERR, "PCI Rx parity error", -1, 1},
1381 {F_TXPARERR, "PCI Tx parity error", -1, 1},
1382 {V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
/* PEX errors carry an extra error code register worth logging first. */
1386 if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
1387 CH_ALERT(adapter, "PEX error code 0x%x\n",
1388 t3_read_reg(adapter, A_PCIE_PEX_ERR));
1390 if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
1391 pcie_intr_info, adapter->irq_stats))
1392 t3_fatal_err(adapter);
1396 * TP interrupt handler.
1398 static void tp_intr_handler(struct adapter *adapter)
/* Pre-T3C revisions: parity field in the low 24 bits. */
1400 static const struct intr_info tp_intr_info[] = {
1401 {0xffffff, "TP parity error", -1, 1},
1402 {0x1000000, "TP out of Rx pages", -1, 1},
1403 {0x2000000, "TP out of Tx pages", -1, 1},
/* T3C: wider parity field plus named FLM empty bits.
 * NOTE(review): unlike the table above this one is not const —
 * looks unintentional; confirm and add const. */
1407 static struct intr_info tp_intr_info_t3c[] = {
1408 {0x1fffffff, "TP parity error", -1, 1},
1409 {F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
1410 {F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
/* Pick the table that matches the chip revision. */
1414 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
1415 adapter->params.rev < T3_REV_C ?
1416 tp_intr_info : tp_intr_info_t3c, NULL))
1417 t3_fatal_err(adapter);
1421 * CIM interrupt handler.
1423 static void cim_intr_handler(struct adapter *adapter)
/* Every CIM cause is treated as fatal (address-range violations,
 * illegal flash/boot/CTL/PL accesses, and parity errors). */
1425 static const struct intr_info cim_intr_info[] = {
1426 {F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
1427 {F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
1428 {F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
1429 {F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
1430 {F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
1431 {F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
1432 {F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
1433 {F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
1434 {F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
1435 {F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
1436 {F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
1437 {F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
1438 {F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
1439 {F_ICACHEPARERR, "CIM icache parity error", -1, 1},
1440 {F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
1441 {F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
1442 {F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
1443 {F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
1444 {F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
1445 {F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
1446 {F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
1447 {F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
1448 {F_ITAGPARERR, "CIM itag parity error", -1, 1},
1449 {F_DTAGPARERR, "CIM dtag parity error", -1, 1},
1453 if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
1454 cim_intr_info, NULL))
1455 t3_fatal_err(adapter);
1459 * ULP RX interrupt handler.
1461 static void ulprx_intr_handler(struct adapter *adapter)
/* All ULP RX parity/framing causes are fatal and uncounted. */
1463 static const struct intr_info ulprx_intr_info[] = {
1464 {F_PARERRDATA, "ULP RX data parity error", -1, 1},
1465 {F_PARERRPCMD, "ULP RX command parity error", -1, 1},
1466 {F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
1467 {F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
1468 {F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
1469 {F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
1470 {F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
1471 {F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
1475 if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
1476 ulprx_intr_info, NULL))
1477 t3_fatal_err(adapter);
1481 * ULP TX interrupt handler.
1483 static void ulptx_intr_handler(struct adapter *adapter)
/* PBL out-of-bounds hits are counted but non-fatal; the 0xfc parity
 * bits (matching ULPTX_INTR_MASK) are fatal. */
1485 static const struct intr_info ulptx_intr_info[] = {
1486 {F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
1487 STAT_ULP_CH0_PBL_OOB, 0},
1488 {F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
1489 STAT_ULP_CH1_PBL_OOB, 0},
1490 {0xfc, "ULP TX parity error", -1, 1},
1494 if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
1495 ulptx_intr_info, adapter->irq_stats))
1496 t3_fatal_err(adapter);
/* PM TX SPI framing-error aggregates, used by pmtx_intr_handler()
 * and by PMTX_INTR_MASK above. */
1499 #define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
1500 F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
1501 F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
1502 F_ICSPI1_TX_FRAMING_ERROR)
1503 #define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
1504 F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
1505 F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
1506 F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)
1509 * PM TX interrupt handler.
1511 static void pmtx_intr_handler(struct adapter *adapter)
/* Zero-length pcmds plus the ICSPI/OESPI framing and parity
 * aggregates; all fatal. */
1513 static const struct intr_info pmtx_intr_info[] = {
1514 {F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
1515 {ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
1516 {OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
1517 {V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
1518 "PMTX ispi parity error", -1, 1},
1519 {V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
1520 "PMTX ospi parity error", -1, 1},
1524 if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
1525 pmtx_intr_info, NULL))
1526 t3_fatal_err(adapter);
/* PM RX SPI framing-error aggregates, used by pmrx_intr_handler()
 * and by PMRX_INTR_MASK above. */
1529 #define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
1530 F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
1531 F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
1532 F_IESPI1_TX_FRAMING_ERROR)
1533 #define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
1534 F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
1535 F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
1536 F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)
1539 * PM RX interrupt handler.
1541 static void pmrx_intr_handler(struct adapter *adapter)
/* Mirror of pmtx_intr_handler() for the RX page manager. */
1543 static const struct intr_info pmrx_intr_info[] = {
1544 {F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
1545 {IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
1546 {OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
1547 {V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
1548 "PMRX ispi parity error", -1, 1},
1549 {V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
1550 "PMRX ospi parity error", -1, 1},
1554 if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
1555 pmrx_intr_info, NULL))
1556 t3_fatal_err(adapter);
1560 * CPL switch interrupt handler.
1562 static void cplsw_intr_handler(struct adapter *adapter)
/* All CPL switch causes are fatal and uncounted. */
1564 static const struct intr_info cplsw_intr_info[] = {
1565 {F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
1566 {F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
1567 {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1568 {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1569 {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1570 {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1574 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1575 cplsw_intr_info, NULL))
1576 t3_fatal_err(adapter);
1580 * MPS interrupt handler.
1582 static void mps_intr_handler(struct adapter *adapter)
/* Single fatal entry covering all nine MPS parity bits (0x1ff). */
1584 static const struct intr_info mps_intr_info[] = {
1585 {0x1ff, "MPS parity error", -1, 1},
1589 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1590 mps_intr_info, NULL))
1591 t3_fatal_err(adapter);
/* Uncorrectable (UE), parity (PE) and address (AE) errors are fatal;
 * correctable ECC (F_CE) is deliberately excluded. */
1594 #define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1597 * MC7 interrupt handler.
1599 static void mc7_intr_handler(struct mc7 *mc7)
1601 struct adapter *adapter = mc7->adapter;
1602 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
/* Correctable ECC: warn + count only (its guarding cause test is
 * elided from this listing). */
1605 mc7->stats.corr_err++;
1606 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1607 "data 0x%x 0x%x 0x%x\n", mc7->name,
1608 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1609 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1610 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1611 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
/* Uncorrectable ECC: alert with the failing address and data. */
1615 mc7->stats.uncorr_err++;
1616 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1617 "data 0x%x 0x%x 0x%x\n", mc7->name,
1618 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1619 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1620 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1621 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
/* Parity error field. */
1625 mc7->stats.parity_err++;
1626 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1627 mc7->name, G_PE(cause));
/* Address error: the error-address register is only readable on
 * revision > 0 parts. */
1633 if (adapter->params.rev > 0)
1634 addr = t3_read_reg(adapter,
1635 mc7->offset + A_MC7_ERR_ADDR);
1636 mc7->stats.addr_err++;
1637 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1641 if (cause & MC7_INTR_FATAL)
1642 t3_fatal_err(adapter);
/* Acknowledge everything we observed. */
1644 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1647 #define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1648 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
/* Per-port XGMAC decode: FIFO parity errors are fatal (alerted),
 * everything else is just counted in the MAC stats. */
1650 * XGMAC interrupt handler.
1652 static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1654 struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1655 u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);
1657 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1658 mac->stats.tx_fifo_parity_err++;
1659 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1661 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1662 mac->stats.rx_fifo_parity_err++;
1663 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1665 if (cause & F_TXFIFO_UNDERRUN)
1666 mac->stats.tx_fifo_urun++;
1667 if (cause & F_RXFIFO_OVERFLOW)
1668 mac->stats.rx_fifo_ovfl++;
1669 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1670 mac->stats.serdes_signal_loss++;
1671 if (cause & F_XAUIPCSCTCERR)
1672 mac->stats.xaui_pcs_ctc_err++;
1673 if (cause & F_XAUIPCSALIGNCHANGE)
1674 mac->stats.xaui_pcs_align_change++;
1676 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
/* NOTE(review): the return statement(s) following this check are
 * elided from this listing. */
1677 if (cause & XGM_INTR_FATAL)
1683 * Interrupt handler for PHY events.
1685 int t3_phy_intr_handler(struct adapter *adapter)
1687 u32 mask, gpi = adapter_info(adapter)->gpio_intr;
1688 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1690 for_each_port(adapter, i) {
1691 struct port_info *p = adap2pinfo(adapter, i);
/* Isolate the lowest set bit of gpi: x & (x - 1) clears that bit,
 * so the subtraction leaves exactly the GPIO bit for this port. */
1693 mask = gpi - (gpi & (gpi - 1));
/* Ports without interrupt-capable PHYs are skipped. */
1696 if (!(p->phy.caps & SUPPORTED_IRQ))
1700 int phy_cause = p->phy.ops->intr_handler(&p->phy);
1702 if (phy_cause & cphy_cause_link_change)
1703 t3_link_changed(adapter, i);
1704 if (phy_cause & cphy_cause_fifo_error)
1705 p->phy.fifo_errors++;
/* Acknowledge the GPIO causes we serviced. */
1709 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1714 * T3 slow path (non-data) interrupt handler.
/* Demultiplexes PL_INT_CAUSE0 to the per-module handlers above.
 * NOTE(review): some guarding "if (cause & ...)" lines (e.g. for the
 * SGE, CIM, TP, MPS and MC5 dispatches) are elided from this listing. */
1716 int t3_slow_intr_handler(struct adapter *adapter)
1718 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
/* Only service sources we actually enabled. */
1720 cause &= adapter->slow_intr_mask;
1723 if (cause & F_PCIM0) {
1724 if (is_pcie(adapter))
1725 pcie_intr_handler(adapter);
1727 pci_intr_handler(adapter);
1730 t3_sge_err_intr_handler(adapter);
1731 if (cause & F_MC7_PMRX)
1732 mc7_intr_handler(&adapter->pmrx);
1733 if (cause & F_MC7_PMTX)
1734 mc7_intr_handler(&adapter->pmtx);
1735 if (cause & F_MC7_CM)
1736 mc7_intr_handler(&adapter->cm);
1738 cim_intr_handler(adapter);
1740 tp_intr_handler(adapter);
1741 if (cause & F_ULP2_RX)
1742 ulprx_intr_handler(adapter);
1743 if (cause & F_ULP2_TX)
1744 ulptx_intr_handler(adapter);
1745 if (cause & F_PM1_RX)
1746 pmrx_intr_handler(adapter);
1747 if (cause & F_PM1_TX)
1748 pmtx_intr_handler(adapter);
1749 if (cause & F_CPL_SWITCH)
1750 cplsw_intr_handler(adapter);
1752 mps_intr_handler(adapter);
1754 t3_mc5_intr_handler(&adapter->mc5);
1755 if (cause & F_XGMAC0_0)
1756 mac_intr_handler(adapter, 0);
1757 if (cause & F_XGMAC0_1)
1758 mac_intr_handler(adapter, 1);
1759 if (cause & F_T3DBG)
1760 t3_os_ext_intr_handler(adapter);
1762 /* Clear the interrupts just processed. */
1763 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1764 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1769 * t3_intr_enable - enable interrupts
1770 * @adapter: the adapter whose interrupts should be enabled
1772 * Enable interrupts by setting the interrupt enable registers of the
1773 * various HW modules and then enabling the top-level interrupt
1776 void t3_intr_enable(struct adapter *adapter)
/* Bulk of the per-module enables as (register, mask) pairs, written
 * in one pass by t3_write_regs(). */
1778 static const struct addr_val_pair intr_en_avp[] = {
1779 {A_SG_INT_ENABLE, SGE_INTR_MASK},
1780 {A_MC7_INT_ENABLE, MC7_INTR_MASK},
1781 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1783 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1785 {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1786 {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1787 {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1788 {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1789 {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1790 {A_MPS_INT_ENABLE, MPS_INTR_MASK},
1793 adapter->slow_intr_mask = PL_INTR_MASK;
1795 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
/* TP enable mask differs by chip revision. */
1796 t3_write_reg(adapter, A_TP_INT_ENABLE,
1797 adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
/* Revision > 0 parts support extra CPL overflow and ULPTX PBL
 * causes (the else for older parts follows; its brace lines are
 * elided from this listing). */
1799 if (adapter->params.rev > 0) {
1800 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1801 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1802 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1803 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1804 F_PBL_BOUND_ERR_CH1);
1806 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1807 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1810 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
1811 adapter_info(adapter)->gpio_intr);
1812 t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
1813 adapter_info(adapter)->gpio_intr);
1814 if (is_pcie(adapter))
1815 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1817 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
/* Finally open the top-level concentrator and flush the write. */
1818 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1819 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1823 * t3_intr_disable - disable a card's interrupts
1824 * @adapter: the adapter whose interrupts should be disabled
1826 * Disable interrupts. We only disable the top-level interrupt
1827 * concentrator and the SGE data interrupts.
1829 void t3_intr_disable(struct adapter *adapter)
/* Gate everything at PL level; the read-back flushes the posted
 * write before we forget the mask. */
1831 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1832 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1833 adapter->slow_intr_mask = 0;
1837 * t3_intr_clear - clear all interrupts
1838 * @adapter: the adapter whose interrupts should be cleared
1840 * Clears all interrupts.
1842 void t3_intr_clear(struct adapter *adapter)
/* Table of per-module cause registers to blanket-clear.
 * NOTE(review): many entries of this array are elided from this
 * listing (line numbers jump from 1851 to 1864). */
1844 static const unsigned int cause_reg_addr[] = {
1846 A_SG_RSPQ_FL_STATUS,
1849 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1850 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1851 A_CIM_HOST_INT_CAUSE,
1864 /* Clear PHY and MAC interrupts for each port. */
1865 for_each_port(adapter, i)
1866 t3_port_intr_clear(adapter, i);
1868 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
1869 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
/* PCIe keeps its PEX error code in a separate register. */
1871 if (is_pcie(adapter))
1872 t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
1873 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
1874 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1878 * t3_port_intr_enable - enable port-specific interrupts
1879 * @adapter: associated adapter
1880 * @idx: index of port whose interrupts should be enabled
1882 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
1885 void t3_port_intr_enable(struct adapter *adapter, int idx)
1887 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
/* MAC first (posted write flushed by the read-back), then the PHY. */
1889 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
1890 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1891 phy->ops->intr_enable(phy);
1895 * t3_port_intr_disable - disable port-specific interrupts
1896 * @adapter: associated adapter
1897 * @idx: index of port whose interrupts should be disabled
1899 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
1902 void t3_port_intr_disable(struct adapter *adapter, int idx)
1904 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
/* Mirror of t3_port_intr_enable(): MAC enable mask cleared, write
 * flushed, then the PHY's own disable hook. */
1906 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
1907 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1908 phy->ops->intr_disable(phy);
1912 * t3_port_intr_clear - clear port-specific interrupts
1913 * @adapter: associated adapter
1914 * @idx: index of port whose interrupts to clear
1916 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
1919 void t3_port_intr_clear(struct adapter *adapter, int idx)
1921 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
/* Writing all-ones acknowledges every pending MAC cause bit. */
1923 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
1924 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
1925 phy->ops->intr_clear(phy);
/* Maximum poll iterations for an SGE context command to complete. */
1928 #define SG_CONTEXT_CMD_ATTEMPTS 100
1931 * t3_sge_write_context - write an SGE context
1932 * @adapter: the adapter
1933 * @id: the context id
1934 * @type: the context type
1936 * Program an SGE context with the values already loaded in the
1937 * CONTEXT_DATA? registers.
1939 static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
/* All-ones masks: commit every bit of all four data words. */
1942 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
1943 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
1944 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
1945 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
1946 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1947 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
/* Poll until BUSY deasserts: up to SG_CONTEXT_CMD_ATTEMPTS tries,
 * 1 usec apart. */
1948 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
1949 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
/* Zero all four CONTEXT_DATA words and write them to context @id of
 * the given @type. Returns t3_sge_write_context()'s result. */
1952 static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
1955 t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
1956 t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
1957 t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
1958 t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
1959 return t3_sge_write_context(adap, id, type);
1963 * t3_sge_init_ecntxt - initialize an SGE egress context
1964 * @adapter: the adapter to configure
1965 * @id: the context id
1966 * @gts_enable: whether to enable GTS for the context
1967 * @type: the egress context type
1968 * @respq: associated response queue
1969 * @base_addr: base address of queue
1970 * @size: number of queue entries
1972 * @gen: initial generation value for the context
1973 * @cidx: consumer pointer
1975 * Initialize an SGE egress context and make it ready for use. If the
1976 * platform allows concurrent context operations, the caller is
1977 * responsible for appropriate locking.
1979 int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
1980 enum sge_context_type type, int respq, u64 base_addr,
1981 unsigned int size, unsigned int token, int gen,
/* Offload contexts start with no credits; others get FW_WR_NUM. */
1984 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
1986 if (base_addr & 0xfff) /* must be 4K aligned */
1988 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Pack the context into the four data words. */
1992 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
1993 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
1994 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
1995 V_EC_BASE_LO(base_addr & 0xffff));
/* NOTE(review): the shifts that move base_addr's remaining bits into
 * position are on lines elided from this listing. */
1997 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
1999 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2000 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
2001 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2003 return t3_sge_write_context(adapter, id, F_EGRESS);
2007 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2008 * @adapter: the adapter to configure
2009 * @id: the context id
2010 * @gts_enable: whether to enable GTS for the context
2011 * @base_addr: base address of queue
2012 * @size: number of queue entries
2013 * @bsize: size of each buffer for this queue
2014 * @cong_thres: threshold to signal congestion to upstream producers
2015 * @gen: initial generation value for the context
2016 * @cidx: consumer pointer
2018 * Initialize an SGE free list context and make it ready for use. The
2019 * caller is responsible for ensuring only one context operation occurs
2022 int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
2023 int gts_enable, u64 base_addr, unsigned int size,
2024 unsigned int bsize, unsigned int cong_thres, int gen,
2027 if (base_addr & 0xfff) /* must be 4K aligned */
2029 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* NOTE(review): the base_addr shift between DATA0 and DATA1 is on a
 * line elided from this listing. */
2033 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
2035 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2036 V_FL_BASE_HI((u32) base_addr) |
2037 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2038 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2039 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2040 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
/* Entry size is split across DATA2/DATA3 at bit S_FL_ENTRY_SIZE_LO. */
2041 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2042 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2043 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2044 return t3_sge_write_context(adapter, id, F_FREELIST);
2048 * t3_sge_init_rspcntxt - initialize an SGE response queue context
2049 * @adapter: the adapter to configure
2050 * @id: the context id
2051 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2052 * @base_addr: base address of queue
2053 * @size: number of queue entries
2054 * @fl_thres: threshold for selecting the normal or jumbo free list
2055 * @gen: initial generation value for the context
2056 * @cidx: consumer pointer
2058 * Initialize an SGE response queue context and make it ready for use.
2059 * The caller is responsible for ensuring only one context operation
2062 int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
2063 int irq_vec_idx, u64 base_addr, unsigned int size,
2064 unsigned int fl_thres, int gen, unsigned int cidx)
2066 unsigned int intr = 0;
2068 if (base_addr & 0xfff) /* must be 4K aligned */
2070 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2074 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2076 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
/* irq_vec_idx < 0 means no interrupt: leave intr == 0. */
2078 if (irq_vec_idx >= 0)
2079 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2080 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2081 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
2082 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2083 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2087 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2088 * @adapter: the adapter to configure
2089 * @id: the context id
2090 * @base_addr: base address of queue
2091 * @size: number of queue entries
2092 * @rspq: response queue for async notifications
2093 * @ovfl_mode: CQ overflow mode
2094 * @credits: completion queue credits
2095 * @credit_thres: the credit threshold
2097 * Initialize an SGE completion queue context and make it ready for use.
2098 * The caller is responsible for ensuring only one context operation
2101 int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2102 unsigned int size, int rspq, int ovfl_mode,
2103 unsigned int credits, unsigned int credit_thres)
2105 if (base_addr & 0xfff) /* must be 4K aligned */
2107 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2111 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2112 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
/* ovfl_mode is written both as the mode and as the initial ERR bit. */
2114 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2115 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
2116 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2117 V_CQ_ERR(ovfl_mode));
2118 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2119 V_CQ_CREDIT_THRES(credit_thres));
2120 return t3_sge_write_context(adapter, id, F_CQ);
2124 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2125 * @adapter: the adapter
2126 * @id: the egress context id
2127 * @enable: enable (1) or disable (0) the context
2129 * Enable or disable an SGE egress context. The caller is responsible for
2130 * ensuring only one context operation occurs at a time.
2132 int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
2134 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Masks select only EC_VALID, so this read-modify-writes a single
 * bit of the context; everything else is left untouched. */
2137 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2138 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2139 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2140 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2141 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2142 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2143 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2144 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2145 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2149 * t3_sge_disable_fl - disable an SGE free-buffer list
2150 * @adapter: the adapter
2151 * @id: the free list context id
2153 * Disable an SGE free-buffer list. The caller is responsible for
2154 * ensuring only one context operation occurs at a time.
2156 int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
2158 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Only the FL_SIZE field is selected; writing it as 0 disables the
 * free list without disturbing the rest of the context. */
2161 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2162 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2163 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2164 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2165 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2166 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2167 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2168 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2169 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2173 * t3_sge_disable_rspcntxt - disable an SGE response queue
2174 * @adapter: the adapter
2175 * @id: the response queue context id
2177 * Disable an SGE response queue. The caller is responsible for
2178 * ensuring only one context operation occurs at a time.
2180 int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2182 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Select only the CQ_SIZE field and zero it to disable the queue. */
2185 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2186 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2187 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2188 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2189 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2190 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2191 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2192 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2193 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2197 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2198 * @adapter: the adapter
2199 * @id: the completion queue context id
2201 * Disable an SGE completion queue. The caller is responsible for
2202 * ensuring only one context operation occurs at a time.
2204 int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2206 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Same masked-write pattern as the response-queue disable, but targeting
 * a CQ context (F_CQ): zero the CQ_SIZE field only. */
2209 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2210 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2211 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2212 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2213 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2214 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2215 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2216 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2217 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2221 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2222 * @adapter: the adapter
2223 * @id: the context id
2224 * @op: the operation to perform
2226 * Perform the selected operation on an SGE completion queue context.
2227 * The caller is responsible for ensuring only one context operation
2230 int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2231 unsigned int credits)
2235 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Credits are passed in the upper 16 bits of DATA0. */
2238 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2239 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2240 V_CONTEXT(id) | F_CQ);
2241 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2242 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
/* Ops 2..6 return the CQ index.  On rev > 0 silicon the index comes back
 * in the command completion value itself; older (rev 0) parts need an
 * explicit context read (opcode 0) to fetch it from DATA0. */
2245 if (op >= 2 && op < 7) {
2246 if (adapter->params.rev > 0)
2247 return G_CQ_INDEX(val);
2249 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2250 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2251 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2252 F_CONTEXT_CMD_BUSY, 0,
2253 SG_CONTEXT_CMD_ATTEMPTS, 1))
2255 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2261 * t3_sge_read_context - read an SGE context
2262 * @type: the context type
2263 * @adapter: the adapter
2264 * @id: the context id
2265 * @data: holds the retrieved context
2267 * Read an SGE egress context. The caller is responsible for ensuring
2268 * only one context operation occurs at a time.
2270 static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2271 unsigned int id, u32 data[4])
2273 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Opcode 0 = read context; @type is one of F_EGRESS/F_CQ/F_FREELIST/
 * F_RESPONSEQ as selected by the thin wrappers below. */
2276 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2277 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2278 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
2279 SG_CONTEXT_CMD_ATTEMPTS, 1))
/* The 128-bit context is returned through the four DATA registers. */
2281 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2282 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2283 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2284 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2289 * t3_sge_read_ecntxt - read an SGE egress context
2290 * @adapter: the adapter
2291 * @id: the context id
2292 * @data: holds the retrieved context
2294 * Read an SGE egress context. The caller is responsible for ensuring
2295 * only one context operation occurs at a time.
2297 int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
/* NOTE(review): source numbering jumps 2297->2301; an id-range check likely
 * sits in the gap — confirm against the full file. */
2301 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2305 * t3_sge_read_cq - read an SGE CQ context
2306 * @adapter: the adapter
2307 * @id: the context id
2308 * @data: holds the retrieved context
2310 * Read an SGE CQ context. The caller is responsible for ensuring
2311 * only one context operation occurs at a time.
2313 int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
/* Thin wrapper over t3_sge_read_context() selecting the CQ context type. */
2317 return t3_sge_read_context(F_CQ, adapter, id, data);
2321 * t3_sge_read_fl - read an SGE free-list context
2322 * @adapter: the adapter
2323 * @id: the context id
2324 * @data: holds the retrieved context
2326 * Read an SGE free-list context. The caller is responsible for ensuring
2327 * only one context operation occurs at a time.
2329 int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
/* Two free lists per queue set, hence the SGE_QSETS * 2 bound. */
2331 if (id >= SGE_QSETS * 2)
2333 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2337 * t3_sge_read_rspq - read an SGE response queue context
2338 * @adapter: the adapter
2339 * @id: the context id
2340 * @data: holds the retrieved context
2342 * Read an SGE response queue context. The caller is responsible for
2343 * ensuring only one context operation occurs at a time.
2345 int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
/* One response queue per queue set. */
2347 if (id >= SGE_QSETS)
2349 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2353 * t3_config_rss - configure Rx packet steering
2354 * @adapter: the adapter
2355 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2356 * @cpus: values for the CPU lookup table (0xff terminated)
2357 * @rspq: values for the response queue lookup table (0xffff terminated)
2359 * Programs the receive packet steering logic. @cpus and @rspq provide
2360 * the values for the CPU and response queue lookup tables. If they
2361 * provide fewer values than the size of the tables the supplied values
2362 * are used repeatedly until the tables are fully populated.
2364 void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2365 const u8 * cpus, const u16 *rspq)
2367 int i, j, cpu_idx = 0, q_idx = 0;
/* Pack two 6-bit CPU values per LKP table entry; hitting the 0xff
 * sentinel wraps cpu_idx back to 0 (wrap-around code is in the dropped
 * lines between source 2375 and 2378 — verify against full file). */
2370 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2373 for (j = 0; j < 2; ++j) {
2374 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2375 if (cpus[cpu_idx] == 0xff)
2378 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
/* The MAP table write encodes the entry index in bits 16+; 0xffff
 * terminates/wraps the rspq list the same way. */
2382 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2383 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2384 (i << 16) | rspq[q_idx++]);
2385 if (rspq[q_idx] == 0xffff)
2389 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2393 * t3_read_rss - read the contents of the RSS tables
2394 * @adapter: the adapter
2395 * @lkup: holds the contents of the RSS lookup table
2396 * @map: holds the contents of the RSS map table
2398 * Reads the contents of the receive packet steering tables.
2400 int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
/* Read-back protocol: write the entry index (with a read-request flag in
 * the dropped line after source 2407), then read the same register; bit 31
 * set in the result indicates a valid read. */
2406 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2407 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2409 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2410 if (!(val & 0x80000000))
2413 *lkup++ = (val >> 8);
2417 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2418 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2420 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2421 if (!(val & 0x80000000))
2429 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2430 * @adap: the adapter
2431 * @enable: 1 to select offload mode, 0 for regular NIC
2433 * Switches TP to NIC/offload mode.
2435 void t3_tp_set_offload_mode(struct adapter *adap, int enable)
/* Offload mode is only entered on offload-capable adapters; disabling
 * (back to NIC mode) is always allowed.  NICMODE is the inverse of
 * offload, hence V_NICMODE(!enable). */
2437 if (is_offload(adap) || !enable)
2438 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2439 V_NICMODE(!enable));
2443 * pm_num_pages - calculate the number of pages of the payload memory
2444 * @mem_size: the size of the payload memory
2445 * @pg_size: the size of each payload memory page
2447 * Calculate the number of pages, each of the given size, that fit in a
2448 * memory of the specified size, respecting the HW requirement that the
2449 * number of pages must be a multiple of 24.
2451 static inline unsigned int pm_num_pages(unsigned int mem_size,
2452 unsigned int pg_size)
2454 unsigned int n = mem_size / pg_size;
/* NOTE(review): the rounding to a multiple of 24 and the return are in
 * source lines dropped by the extraction (after 2454). */
/* Helper for partition_mem(): writes the base address of a memory region
 * into the given A_<reg> register.  The continuation line advancing the
 * running offset was dropped by the extraction — see the full file. */
2459 #define mem_region(adap, start, size, reg) \
2460 t3_write_reg((adap), A_ ## reg, (start)); \
2464 * partition_mem - partition memory and configure TP memory settings
2465 * @adap: the adapter
2466 * @p: the TP parameters
2468 * Partitions context and payload memory and configures TP's memory
2471 static void partition_mem(struct adapter *adap, const struct tp_params *p)
2473 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2474 unsigned int timers = 0, timers_shift = 22;
/* Scale the number of timer queues/shift with the TID count on rev > 0
 * parts (the assignments in each branch were dropped by the extraction). */
2476 if (adap->params.rev > 0) {
2477 if (tids <= 16 * 1024) {
2480 } else if (tids <= 64 * 1024) {
2483 } else if (tids <= 256 * 1024) {
2489 t3_write_reg(adap, A_TP_PMM_SIZE,
2490 p->chan_rx_size | (p->chan_tx_size >> 16))SENTINEL;
2492 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2493 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2494 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2495 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2496 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2498 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2499 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2500 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2502 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2503 /* Add a bit of headroom and make multiple of 24 */
2505 pstructs -= pstructs % 24;
2506 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
/* Lay out CM memory: TCBs first, then egress/CQ context regions, timers,
 * and the pstruct/free-list regions; mem_region() advances m as it goes. */
2508 m = tids * TCB_SIZE;
2509 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2510 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2511 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2512 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2513 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2514 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2515 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2516 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
/* Round up to a 4KB boundary for the CIM SDRAM window. */
2518 m = (m + 4095) & ~0xfff;
2519 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2520 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
/* Whatever CM memory remains bounds the usable TID count; fold any excess
 * MC5 entries into the server region. */
2522 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2523 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2524 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2526 adap->params.mc5.nservers += m - tids;
/* Write a TP register through the PIO indirect-access window:
 * address first, then data. */
2529 static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2532 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2533 t3_write_reg(adap, A_TP_PIO_DATA, val);
/* One-time TP (Transport Processor) configuration: checksum offload,
 * TCP options, delayed-ACK behavior, congestion settings and, for
 * rev > 0 silicon, pacing fix-ups.  Called once after reset. */
2536 static void tp_config(struct adapter *adap, const struct tp_params *p)
2538 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2539 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2540 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2541 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2542 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2543 V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
2544 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2545 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2546 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2547 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
2548 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2549 F_IPV6ENABLE | F_NICMODE);
2550 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2551 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2552 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2553 adap->params.rev > 0 ? F_ENABLEESND :
2556 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2558 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
2559 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2560 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2561 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2562 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
/* Two successive writes to the same proxy flow-control register; presumably
 * high/low watermarks latched per write — confirm against the TP docs. */
2563 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2564 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
/* rev > 0 parts get automatic TX pacing; rev 0 uses fixed pacing. */
2566 if (adap->params.rev > 0) {
2567 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2568 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2570 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2571 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2573 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2575 if (adap->params.rev == T3_REV_C)
2576 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2577 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2578 V_TABLELATENCYDELTA(4));
2580 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2581 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2582 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2583 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2586 /* Desired TP timer resolution in usec */
2587 #define TP_TMR_RES 50
2589 /* TCP timer values in ms */
2590 #define TP_DACK_TIMER 50
2591 #define TP_RTO_MIN 250
2594 * tp_set_timers - set TP timing parameters
2595 * @adap: the adapter to set
2596 * @core_clk: the core clock frequency in Hz
2598 * Set TP's timing parameters, such as the various timer resolutions and
2599 * the TCP timer values.
2601 static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
/* Each resolution is a log2 divider of the core clock chosen so one tick
 * approximates the target period (TP_TMR_RES us, 200us DACK, 1ms tstamp). */
2603 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2604 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2605 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2606 unsigned int tps = core_clk >> tre;
2608 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2609 V_DELAYEDACKRESOLUTION(dack_re) |
2610 V_TIMESTAMPRESOLUTION(tstamp_re));
2611 t3_write_reg(adap, A_TP_DACK_TIMER,
2612 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
/* Exponential-backoff shift tables: entry i backs off by 2^table[i]. */
2613 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2614 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2615 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2616 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2617 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2618 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2619 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
/* "n SECONDS" expands to n * tps, i.e. n seconds in timer ticks. */
2622 #define SECONDS * tps
2624 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2625 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2626 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2627 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2628 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2629 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2630 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2631 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2632 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2638 * t3_tp_set_coalescing_size - set receive coalescing size
2639 * @adap: the adapter
2640 * @size: the receive coalescing size
2641 * @psh: whether a set PSH bit should deliver coalesced data
2643 * Set the receive coalescing size and PSH bit handling.
2645 int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2649 if (size > MAX_RX_COALESCING_LEN)
/* Read-modify-write PARA_REG3: clear both coalescing bits, then re-enable
 * them (size != 0 enables; psh additionally enables PSH delivery — the
 * size/psh conditionals are in lines dropped by the extraction). */
2652 val = t3_read_reg(adap, A_TP_PARA_REG3);
2653 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2656 val |= F_RXCOALESCEENABLE;
2658 val |= F_RXCOALESCEPSHEN;
2659 size = min(MAX_RX_COALESCING_LEN, size);
2660 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2661 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2663 t3_write_reg(adap, A_TP_PARA_REG3, val);
2668 * t3_tp_set_max_rxsize - set the max receive size
2669 * @adap: the adapter
2670 * @size: the max receive size
2672 * Set TP's max receive size. This is the limit that applies when
2673 * receive coalescing is disabled.
2675 void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
/* Apply the same limit to both PM transfer-length fields. */
2677 t3_write_reg(adap, A_TP_PARA_REG7,
2678 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
/* Populate the default MTU table.  The table entries themselves are in
 * source lines 2687-2705, dropped by the extraction. */
2681 static void init_mtus(unsigned short mtus[])
2684 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2685 * it can accomodate max size TCP/IP headers when SACK and timestamps
2686 * are enabled and still have at least 8 bytes of payload.
2707 * Initial congestion control parameters.
2709 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
/* a[] = additive-increase alphas, b[] = backoff betas, indexed by
 * congestion window bucket; entries for indices 9-12 and 28+ are in
 * lines dropped by the extraction. */
2711 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2736 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2739 b[13] = b[14] = b[15] = b[16] = 3;
2740 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2741 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2746 /* The minimum additive increment value for the congestion control table */
2747 #define CC_MIN_INCR 2U
2750 * t3_load_mtus - write the MTU and congestion control HW tables
2751 * @adap: the adapter
2752 * @mtus: the unrestricted values for the MTU table
2753 * @alphs: the values for the congestion control alpha parameter
2754 * @beta: the values for the congestion control beta parameter
2755 * @mtu_cap: the maximum permitted effective MTU
2757 * Write the MTU table with the supplied MTUs capping each at &mtu_cap.
2758 * Update the high-speed congestion control table with the supplied alpha,
2761 void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2762 unsigned short alpha[NCCTRL_WIN],
2763 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
/* Average packets in flight per congestion window bucket, used to scale
 * alpha into a per-window additive increment. */
2765 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2766 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2767 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2768 28672, 40960, 57344, 81920, 114688, 163840, 229376
2773 for (i = 0; i < NMTUS; ++i) {
2774 unsigned int mtu = min(mtus[i], mtu_cap);
2775 unsigned int log2 = fls(mtu);
/* Round log2(mtu) to the nearest power of two for the HW's log field. */
2777 if (!(mtu & ((1 << log2) >> 2))) /* round */
2779 t3_write_reg(adap, A_TP_MTU_TABLE,
2780 (i << 24) | (log2 << 16) | mtu);
/* Increment = alpha * (mtu - 40-byte headers) / avg pkts, floored at
 * CC_MIN_INCR. */
2782 for (w = 0; w < NCCTRL_WIN; ++w) {
2785 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2788 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2789 (w << 16) | (beta[w] << 13) | inc);
2795 * t3_read_hw_mtus - returns the values in the HW MTU table
2796 * @adap: the adapter
2797 * @mtus: where to store the HW MTU values
2799 * Reads the HW MTU table.
2801 void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
/* 0xff000000 | i selects table entry i for read-back; the MTU value is
 * in the low 14 bits of the returned word. */
2805 for (i = 0; i < NMTUS; ++i) {
2808 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2809 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2810 mtus[i] = val & 0x3fff;
2815 * t3_get_cong_cntl_tab - reads the congestion control table
2816 * @adap: the adapter
2817 * @incr: where to store the alpha values
2819 * Reads the additive increments programmed into the HW congestion
2822 void t3_get_cong_cntl_tab(struct adapter *adap,
2823 unsigned short incr[NMTUS][NCCTRL_WIN])
2825 unsigned int mtu, w;
/* 0xffff0000 selects read-back mode; the entry index is (mtu << 5) | w. */
2827 for (mtu = 0; mtu < NMTUS; ++mtu)
2828 for (w = 0; w < NCCTRL_WIN; ++w) {
2829 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2830 0xffff0000 | (mtu << 5) | w);
2831 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
2837 * t3_tp_get_mib_stats - read TP's MIB counters
2838 * @adap: the adapter
2839 * @tps: holds the returned counter values
2841 * Returns the values of TP's MIB counters.
2843 void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
/* Bulk-read the whole counter block through the MIB indirect window
 * directly into the stats struct, treated as an array of u32s. */
2845 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2846 sizeof(*tps) / sizeof(u32), 0);
/* Program a ULP RX region's lower/upper limit registers from a running
 * start offset and length.  The ulp_region variant has a trailing
 * continuation (dropped by the extraction) that presumably advances the
 * offset; ulptx_region is the ULP TX counterpart. */
2849 #define ulp_region(adap, name, start, len) \
2850 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2851 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2852 (start) + (len) - 1); \
2855 #define ulptx_region(adap, name, start, len) \
2856 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2857 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2858 (start) + (len) - 1)
/* Carve the per-channel RX payload memory into ULP regions (iSCSI, TDDP,
 * TPT, STAG, RQ, PBL).  PBL appears twice: once via ulptx_region (ULP TX
 * side) and once via ulp_region (ULP RX side) — intentional, not a
 * duplicate. */
2860 static void ulp_config(struct adapter *adap, const struct tp_params *p)
2862 unsigned int m = p->chan_rx_size;
2864 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2865 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2866 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2867 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2868 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2869 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2870 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2871 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2875 * t3_set_proto_sram - set the contents of the protocol sram
2876 * @adapter: the adapter
2877 * @data: the protocol image
2879 * Write the contents of the protocol SRAM.
2881 int t3_set_proto_sram(struct adapter *adap, const u8 *data)
2884 const __be32 *buf = (const __be32 *)data;
/* Each SRAM line is five big-endian words, written FIELD5..FIELD1, then
 * committed by writing FIELD0 with the line index and the go bit. */
2886 for (i = 0; i < PROTO_SRAM_LINES; i++) {
2887 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
2888 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
2889 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
2890 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
2891 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
/* NOTE(review): `1 << 31` left-shifts into the sign bit of a signed int,
 * which is undefined behavior in C — should be `1U << 31`. */
2893 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2894 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2897 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
/* Program one of the two packet-trace filters (filter 0 = TX trace keys,
 * filter 1 = RX trace keys) from the 5-tuple/vlan/interface match in @tp,
 * with per-field masks, an invert bit and an enable bit. */
2902 void t3_config_trace_filter(struct adapter *adapter,
2903 const struct trace_params *tp, int filter_index,
2904 int invert, int enable)
2906 u32 addr, key[4], mask[4];
/* Pack the match fields into four key words; NOTE(review): the key[2]
 * assignment (destination IP, source line 2910) was dropped by the
 * extraction — verify against the full file. */
2908 key[0] = tp->sport | (tp->sip << 16);
2909 key[1] = (tp->sip >> 16) | (tp->dport << 16);
2911 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2913 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2914 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2915 mask[2] = tp->dip_mask;
2916 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
/* Bit 29 = enable, bit 28 = invert (each guarded by the dropped `if`s). */
2919 key[3] |= (1 << 29);
2921 key[3] |= (1 << 28);
2923 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2924 tp_wr_indirect(adapter, addr++, key[0]);
2925 tp_wr_indirect(adapter, addr++, mask[0]);
2926 tp_wr_indirect(adapter, addr++, key[1]);
2927 tp_wr_indirect(adapter, addr++, mask[1]);
2928 tp_wr_indirect(adapter, addr++, key[2]);
2929 tp_wr_indirect(adapter, addr++, mask[2]);
2930 tp_wr_indirect(adapter, addr++, key[3]);
2931 tp_wr_indirect(adapter, addr, mask[3]);
/* Read back to flush the posted indirect writes. */
2932 t3_read_reg(adapter, A_TP_PIO_DATA);
2936 * t3_config_sched - configure a HW traffic scheduler
2937 * @adap: the adapter
2938 * @kbps: target rate in Kbps
2939 * @sched: the scheduler index
2941 * Configure a HW scheduler for the target rate
2943 int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
2945 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2946 unsigned int clk = adap->params.vpd.cclk * 1000;
2947 unsigned int selected_cpt = 0, selected_bpt = 0;
/* Exhaustively search (clocks-per-tick, bytes-per-tick) pairs for the
 * combination whose achievable rate is closest to the requested rate. */
2950 kbps *= 125; /* -> bytes */
2951 for (cpt = 1; cpt <= 255; cpt++) {
2953 bpt = (kbps + tps / 2) / tps;
2954 if (bpt > 0 && bpt <= 255) {
2956 delta = v >= kbps ? v - kbps : kbps - v;
2957 if (delta <= mindelta) {
/* Once a candidate exists and the error starts growing again, stop. */
2962 } else if (selected_cpt)
/* Two schedulers share each rate-limit register: the selected pair goes
 * into the high half for odd schedulers, the low half for even ones. */
2968 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
2969 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
2970 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2972 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
2974 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
2975 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
/* TP bring-up after reset: configure, enable VLAN acceleration, set the
 * timers, and kick the free-list initializer (offload adapters only),
 * waiting for it to finish. */
2979 static int tp_init(struct adapter *adap, const struct tp_params *p)
2984 t3_set_vlan_accel(adap, 3, 0);
2986 if (is_offload(adap)) {
2987 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
2988 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
2989 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
2992 CH_ERR(adap, "TP initialization timed out\n");
/* Only release TP from reset if the free-list init completed. */
2996 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
/* Activate the given set of ports in the MPS.  Rejects bits beyond the
 * adapter's actual port count; the mask maps 1:1 onto the PORTnACTIVE
 * field starting at S_PORT0ACTIVE. */
3000 int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
3002 if (port_mask & ~((1 << adap->params.nports) - 1))
3004 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3005 port_mask << S_PORT0ACTIVE);
3010 * Perform the bits of HW initialization that are dependent on the number
3011 * of available ports.
3013 static void init_hw_for_avail_ports(struct adapter *adap, int nports)
/* Single-port: disable round-robin arbitration and give port 0 all TX
 * buffering.  (The `if (nports == 1)` guard is in a line dropped by the
 * extraction.) */
3018 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3019 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3020 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
3021 F_PORT0ACTIVE | F_ENFORCEPKT);
3022 t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
/* Dual-port: round-robin arbitration, equal DMA weights, both ports
 * active, and TX buffering split evenly (0x80008000). */
3024 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3025 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3026 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3027 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3028 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3029 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3031 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3032 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3033 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3034 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
/* Alternate TX modulation queues between the two channels. */
3035 for (i = 0; i < 16; i++)
3036 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3037 (i << 16) | 0x1010);
/* Calibrate the XGMAC I/O impedance.  XAUI interfaces retry the HW
 * auto-calibration up to five times; RGMII interfaces get fixed
 * pull-up/pull-down values instead. */
3041 static int calibrate_xgm(struct adapter *adapter)
3043 if (uses_xaui(adapter)) {
3046 for (i = 0; i < 5; ++i) {
3047 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
/* Flush the write, then (after the dropped delay) check result. */
3048 t3_read_reg(adapter, A_XGM_XAUI_IMP);
3050 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3051 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3052 t3_write_reg(adapter, A_XGM_XAUI_IMP,
3053 V_XAUIIMP(G_CALIMP(v) >> 2));
3057 CH_ERR(adapter, "MAC calibration failed\n");
3060 t3_write_reg(adapter, A_XGM_RGMII_IMP,
3061 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3062 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3063 F_XGM_IMPSETUPDATE);
/* T3B-specific RGMII impedance calibration: reset the calibration engine,
 * pulse IMPSETUPDATE, then pulse CALUPDATE to latch the result.  XAUI
 * parts need no action here. */
3068 static void calibrate_xgm_t3b(struct adapter *adapter)
3070 if (!uses_xaui(adapter)) {
3071 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3072 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3073 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3074 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3075 F_XGM_IMPSETUPDATE);
3076 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3078 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3079 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
/* DRAM timing parameters for one MC7 memory controller speed grade;
 * values are in controller clock cycles.  RefCyc is indexed by the
 * density read from the MC7_CFG register. */
3083 struct mc7_timing_params {
3084 unsigned char ActToPreDly;
3085 unsigned char ActToRdWrDly;
3086 unsigned char PreCyc;
3087 unsigned char RefCyc[5];
3088 unsigned char BkCyc;
3089 unsigned char WrToRdDly;
3090 unsigned char RdToWrDly;
3094 * Write a value to a register and check that the write completed. These
3095 * writes normally complete in a cycle or two, so one read should suffice.
3096 * The very first read exists to flush the posted write to the device.
3098 static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3100 t3_write_reg(adapter, addr, val);
3101 t3_read_reg(adapter, addr); /* flush */
/* BUSY clear on the second read means the command already completed. */
3102 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3104 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
/* Bring up one MC7 DRAM controller: calibrate, program timings, run the
 * JEDEC-style mode-register init sequence, enable periodic refresh and
 * ECC, then BIST-clear the memory and mark the controller ready. */
3108 static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3110 static const unsigned int mc7_mode[] = {
3111 0x632, 0x642, 0x652, 0x432, 0x442
3113 static const struct mc7_timing_params mc7_timings[] = {
3114 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
3115 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
3116 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
3117 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
3118 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
3122 unsigned int width, density, slow, attempts;
3123 struct adapter *adapter = mc7->adapter;
3124 const struct mc7_timing_params *p = &mc7_timings[mem_type];
/* Snapshot geometry/speed from the strapped CFG register, then enable
 * the memory interface. */
3129 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3130 slow = val & F_SLOW;
3131 width = G_WIDTH(val);
3132 density = G_DEN(val);
3134 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3135 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
/* Single-shot I/O calibration; BUSY/CAL_FAULT still set afterwards means
 * the calibration did not complete. */
3139 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3140 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3142 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3143 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3144 CH_ERR(adapter, "%s MC7 calibration timed out\n",
3150 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3151 V_ACTTOPREDLY(p->ActToPreDly) |
3152 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3153 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3154 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3156 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3157 val | F_CLKEN | F_TERM150);
3158 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3161 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
/* JEDEC init: precharge-all, extended mode registers, DLL reset via the
 * mode register, then precharge + two refreshes + final mode writes. */
3166 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3167 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3168 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3169 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3173 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3174 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
3178 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3179 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3180 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3181 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3182 mc7_mode[mem_type]) ||
3183 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3184 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3187 /* clock value is in KHz */
3188 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
3189 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
3191 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3192 F_PERREFEN | V_PREREFDIV(mc7_clock));
3193 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
/* Zero the whole array via BIST (op 1 = write) so ECC state is valid. */
3195 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
3196 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3197 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3198 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3199 (mc7->size << width) - 1);
3200 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3201 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
3206 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3207 } while ((val & F_BUSY) && --attempts);
3209 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3213 /* Enable normal memory accesses. */
3214 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
/* Tune PCIe ACK latency and replay timer limits based on negotiated link
 * width (table row) and max payload size (table column), per the PCIe
 * spec's recommended values, then enable link-down reset handling. */
3221 static void config_pcie(struct adapter *adap)
3223 static const u16 ack_lat[4][6] = {
3224 {237, 416, 559, 1071, 2095, 4143},
3225 {128, 217, 289, 545, 1057, 2081},
3226 {73, 118, 154, 282, 538, 1050},
3227 {67, 107, 86, 150, 278, 534}
3229 static const u16 rpl_tmr[4][6] = {
3230 {711, 1248, 1677, 3213, 6285, 12429},
3231 {384, 651, 867, 1635, 3171, 6243},
3232 {219, 354, 462, 846, 1614, 3150},
3233 {201, 321, 258, 450, 834, 1602}
3237 unsigned int log2_width, pldsize;
3238 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
/* Max payload size from Device Control; L0s state from Link Control. */
3240 pci_read_config_word(adap->pdev,
3241 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3243 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3244 pci_read_config_word(adap->pdev,
3245 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3248 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3249 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3250 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3251 log2_width = fls(adap->params.pci.width) - 1;
3252 acklat = ack_lat[log2_width][pldsize];
3253 if (val & 1) /* check LOsEnable */
3254 acklat += fst_trn_tx * 4;
3255 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
/* Rev 0 silicon has the ACK-latency field at a different position. */
3257 if (adap->params.rev == 0)
3258 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3259 V_T3A_ACKLAT(M_T3A_ACKLAT),
3260 V_T3A_ACKLAT(acklat));
3262 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3265 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3266 V_REPLAYLMT(rpllmt));
/* Clear any latched PCIe errors, then enable link-down reset/DMA stop. */
3268 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
3269 t3_set_reg_field(adap, A_PCIE_CFG, 0,
3270 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
3271 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
3275 * Initialize and configure T3 HW modules. This performs the
3276 * initialization steps that need to be done once after a card is reset.
3277 * MAC and PHY initialization is handled separarely whenever a port is enabled.
3279 * fw_params are passed to FW and their value is platform dependent. Only the
3280 * top 8 bits are available for use, the rest must be 0.
3282 int t3_init_hw(struct adapter *adapter, u32 fw_params)
3284 int err = -EIO, attempts, i;
3285 const struct vpd_params *vpd = &adapter->params.vpd;
/* XGMAC impedance calibration differs between rev 0 and later silicon. */
3287 if (adapter->params.rev > 0)
3288 calibrate_xgm_t3b(adapter);
3289 else if (calibrate_xgm(adapter))
3293 partition_mem(adapter, &adapter->params.tp);
/* Bring up the three MC7 memories (PM RX, PM TX, CM) and the MC5 TCAM. */
3295 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3296 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3297 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3298 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3299 adapter->params.mc5.nfilters,
3300 adapter->params.mc5.nroutes))
3303 for (i = 0; i < 32; i++)
3304 if (clear_sge_ctxt(adapter, i, F_CQ))
3308 if (tp_init(adapter, &adapter->params.tp))
3311 t3_tp_set_coalescing_size(adapter,
3312 min(adapter->params.sge.max_pkt_size,
3313 MAX_RX_COALESCING_LEN), 1);
3314 t3_tp_set_max_rxsize(adapter,
3315 min(adapter->params.sge.max_pkt_size, 16384U));
3316 ulp_config(adapter, &adapter->params.tp);
/* Bus-specific tuning: PCIe gets ACK/replay tuning, PCI-X gets the
 * equivalent DMA-stop/CLIDEC enables. */
3318 if (is_pcie(adapter))
3319 config_pcie(adapter);
3321 t3_set_reg_field(adapter, A_PCIX_CFG, 0,
3322 F_DMASTOPEN | F_CLIDECEN);
3324 if (adapter->params.rev == T3_REV_C)
3325 t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
3326 F_CFG_CQE_SOP_MASK);
3328 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3329 t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3330 t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3331 init_hw_for_avail_ports(adapter, adapter->params.nports);
3332 t3_sge_init(adapter, &adapter->params.sge);
/* Boot the embedded microprocessor from flash and wait for it to clear
 * the ACC_DATA handshake register. */
3334 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3335 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3336 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3337 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3340 do { /* wait for uP to initialize */
3342 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3344 CH_ERR(adapter, "uP initialization timed out\n");
3354 * get_pci_mode - determine a card's PCI mode
3355 * @adapter: the adapter
3356 * @p: where to store the PCI settings
3358 * Determines a card's PCI mode and associated parameters, such as speed
3361 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
/* Map G_PCLKRANGE() values to PCI/PCI-X bus clock speeds in MHz. */
3363 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3364 u32 pci_mode, pcie_cap;
/* PCIe card: record the capability offset and the negotiated link width
 * from the Link Status register. */
3366 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3370 p->variant = PCI_VARIANT_PCIE;
3371 p->pcie_cap_addr = pcie_cap;
3372 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
/* Negotiated link width is bits 9:4 of PCI_EXP_LNKSTA. */
3374 p->width = (val >> 4) & 0x3f;
/* PCI/PCI-X card: decode speed, bus width and variant from the chip's
 * A_PCIX_MODE register instead. */
3378 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3379 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3380 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3381 pci_mode = G_PCIXINITPAT(pci_mode);
/* Initialization pattern selects the variant: plain PCI, PCI-X mode 1
 * (parity or ECC), or PCI-X 266 mode 2. */
3383 p->variant = PCI_VARIANT_PCI;
3384 else if (pci_mode < 4)
3385 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3386 else if (pci_mode < 8)
3387 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3389 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3393 * init_link_config - initialize a link's SW state
3394 * @lc: structure holding the link state
3395 * @ai: information about the current card
3397 * Initializes the SW state maintained for each link, including the link's
3398 * capabilities and default speed/duplex/flow-control/autonegotiation
3401 static void init_link_config(struct link_config *lc, unsigned int caps)
3403 lc->supported = caps;
/* No fixed speed/duplex yet; default flow control is Rx + Tx pause. */
3404 lc->requested_speed = lc->speed = SPEED_INVALID;
3405 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3406 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
/* If the PHY supports autonegotiation, advertise everything it supports
 * and let autoneg also resolve the pause settings; otherwise (the else
 * line is not visible in this chunk) disable autoneg entirely. */
3407 if (lc->supported & SUPPORTED_Autoneg) {
3408 lc->advertising = lc->supported;
3409 lc->autoneg = AUTONEG_ENABLE;
3410 lc->requested_fc |= PAUSE_AUTONEG;
3412 lc->advertising = 0;
3413 lc->autoneg = AUTONEG_DISABLE;
3418 * mc7_calc_size - calculate MC7 memory size
3419 * @cfg: the MC7 configuration
3421 * Calculates the size of an MC7 memory in bytes from the value of its
3422 * configuration register.
3424 static unsigned int mc7_calc_size(u32 cfg)
/* Decode the MC7 CFG register: data width, bank count (1 or 2 via F_BKS),
 * organization (1 or 2 via F_ORG) and density field. */
3426 unsigned int width = G_WIDTH(cfg);
3427 unsigned int banks = !!(cfg & F_BKS) + 1;
3428 unsigned int org = !!(cfg & F_ORG) + 1;
3429 unsigned int density = G_DEN(cfg);
/* Size in MB: 256 MB base scaled up by density and banks, scaled down by
 * organization and width.  (The return statement converting MBs to bytes
 * is not visible in this chunk.) */
3430 unsigned int MBs = ((256 << density) * banks) / (org << width);
/*
 * mc7_prep - initialize the SW state of an MC7 memory controller
 * @adapter: the adapter owning this MC7 instance
 * @mc7: the MC7 state to initialize
 * @base_addr: register base address of this MC7 instance
 * @name: name of the memory (e.g. "PMRX")
 *
 * Records the adapter, register offset, size and width of an MC7 memory
 * as read back from its CFG register.  A density field equal to M_DEN
 * means the memory is not populated, so its size is recorded as 0.
 */
3435 static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3436 unsigned int base_addr, const char *name)
3440 mc7->adapter = adapter;
/* Register addresses are kept as offsets from the PMRX MC7 instance. */
3442 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3443 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
/* Fixed redundant double assignment: original read "mc7->size = mc7->size = ...". */
3444 mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3445 mc7->width = G_WIDTH(cfg);
3448 void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3450 mac->adapter = adapter;
/* Per-port XGMAC register blocks are spaced a fixed stride apart, so the
 * port's offset is the stride times its index. */
3451 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
/* Rev-0 silicon using XAUI needs a SerDes control workaround; the magic
 * value differs between 10G and 1G configurations. */
3454 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3455 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3456 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3457 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3462 void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
/* Port-speed field for the XGMAC config: 3 = 10G, 2 = 1G. */
3464 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3466 mi1_init(adapter, ai);
/* Divide the core clock down to roughly 80 kHz for the I2C bus. */
3467 t3_write_reg(adapter, A_I2C_CFG, /* set for 80KHz */
3468 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
/* Board-specific GPIO outputs, plus GPIO0 driven high. */
3469 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3470 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
3471 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
3472 t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
/* Guard for the MAC clock sequencing below; the guarded statement
 * (presumably an early return) is not visible in this chunk. */
3474 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3477 /* Enable MAC clocks so we can access the registers */
3478 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3479 t3_read_reg(adapter, A_XGM_PORT_CFG);
/* Pulse the clock-divider reset on both XGMAC instances; each read
 * flushes the preceding write before the next step. */
3481 val |= F_CLKDIVRESET_;
3482 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3483 t3_read_reg(adapter, A_XGM_PORT_CFG);
3484 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3485 t3_read_reg(adapter, A_XGM_PORT_CFG);
3489 * Reset the adapter.
3490 * Older PCIe cards lose their config space during reset, PCI-X
3493 int t3_reset_adapter(struct adapter *adapter)
/* Pre-B2 PCIe parts lose their PCI config space across the warm reset,
 * so save it beforehand and restore it afterwards. */
3495 int i, save_and_restore_pcie =
3496 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3499 if (save_and_restore_pcie)
3500 pci_save_state(adapter->pdev);
/* Trigger the chip's warm reset. */
3501 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3504 * Delay.  Give the device some time to reset fully.
3505 * XXX The delay time should be modified.
/* Poll the PCI vendor ID (config offset 0x00) until it reads back
 * Chelsio's 0x1425, i.e. the device has returned from reset; give up
 * after 10 tries. */
3507 for (i = 0; i < 10; i++) {
3509 pci_read_config_word(adapter->pdev, 0x00, &devid);
3510 if (devid == 0x1425)
3514 if (devid != 0x1425)
3517 if (save_and_restore_pcie)
3518 pci_restore_state(adapter->pdev);
3522 static int init_parity(struct adapter *adap)
/* Bail out early if the SGE context command interface is busy. */
3526 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
/* Clear SGE contexts so their parity is initialized: the first 16
 * egress contexts, the top 16 egress contexts (0xfff0-0xffff), and one
 * response-queue context per queue set; stop on the first error. */
3529 for (err = i = 0; !err && i < 16; i++)
3530 err = clear_sge_ctxt(adap, i, F_EGRESS);
3531 for (i = 0xfff0; !err && i <= 0xffff; i++)
3532 err = clear_sge_ctxt(adap, i, F_EGRESS);
3533 for (i = 0; !err && i < SGE_QSETS; i++)
3534 err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
/* Write zeros through the CIM IBQ debug interface to initialize the
 * parity of every address of all four ingress queues, waiting for each
 * debug write to complete. */
3538 t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3539 for (i = 0; i < 4; i++)
3540 for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3541 t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3542 F_IBQDBGWR | V_IBQDBGQID(i) |
3543 V_IBQDBGADDR(addr));
3544 err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3545 F_IBQDBGBUSY, 0, 2, 1);
3553 * Initialize adapter SW state for the various HW modules, set initial values
3554 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3557 int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
3561 unsigned int i, j = -1;
3563 get_pci_mode(adapter, &adapter->params.pci);
/* Record basic adapter parameters: board info, port count, silicon rev,
 * link polling (off by default) and stats accumulation period. */
3565 adapter->params.info = ai;
3566 adapter->params.nports = ai->nports;
3567 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3568 adapter->params.linkpoll_period = 0;
3569 adapter->params.stats_update_period = is_10G(adapter) ?
3570 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3571 adapter->params.pci.vpd_cap_addr =
3572 pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3573 ret = get_vpd_params(adapter, &adapter->params.vpd);
3577 if (reset && t3_reset_adapter(adapter))
3580 t3_sge_prep(adapter, &adapter->params.sge);
/* A non-zero memory clock in the VPD means the card has on-board memory:
 * prepare the three MC7 controllers and size the TP memory partitions. */
3582 if (adapter->params.vpd.mclk) {
3583 struct tp_params *p = &adapter->params.tp;
3585 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3586 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3587 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3589 p->nchan = ai->nports;
3590 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3591 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3592 p->cm_size = t3_mc7_size(&adapter->cm);
3593 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3594 p->chan_tx_size = p->pmtx_size / p->nchan;
3595 p->rx_pg_size = 64 * 1024;
3596 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3597 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3598 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
/* More timer queues when CM is >= 128MB or on rev > 0 silicon. */
3599 p->ntimer_qs = p->cm_size >= (128 << 20) ||
3600 adapter->params.rev > 0 ? 12 : 6;
/* Offload is possible only if all three memories are present. */
3603 adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3604 t3_mc7_size(&adapter->pmtx) &&
3605 t3_mc7_size(&adapter->cm);
3607 if (is_offload(adapter)) {
3608 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
/* Rev-0 silicon gets no filters. */
3609 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3610 DEFAULT_NFILTERS : 0;
3611 adapter->params.mc5.nroutes = 0;
3612 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3614 init_mtus(adapter->params.mtus);
3615 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3618 early_hw_init(adapter, ai);
3619 ret = init_parity(adapter);
/* Per-port setup: find each port's VPD entry, prepare its PHY and MAC,
 * derive its Ethernet address and initialize its link state. */
3623 for_each_port(adapter, i) {
3625 const struct port_type_info *pti;
3626 struct port_info *p = adap2pinfo(adapter, i);
/* j scans vpd.port_type[] for the next non-empty slot; it starts at
 * -1 so the first ++j probes index 0. */
3628 while (!adapter->params.vpd.port_type[++j])
3631 pti = &port_types[adapter->params.vpd.port_type[j]];
3632 ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3636 mac_prep(&p->mac, adapter, j);
3639 * The VPD EEPROM stores the base Ethernet address for the
3640 * card. A port's address is derived from the base by adding
3641 * the port's index to the base's low octet.
3643 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3644 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3646 memcpy(adapter->port[i]->dev_addr, hw_addr,
3648 memcpy(adapter->port[i]->perm_addr, hw_addr,
3650 init_link_config(&p->link_config, p->phy.caps);
3651 p->phy.ops->power_down(&p->phy, 1);
/* PHYs without interrupt support fall back to 10-unit link polling. */
3652 if (!(p->phy.caps & SUPPORTED_IRQ))
3653 adapter->params.linkpoll_period = 10;
3659 void t3_led_ready(struct adapter *adapter)
/* Updates the GPIO0 output-value bit to signal adapter readiness; the
 * continuation line carrying the new field value is not visible in this
 * chunk — TODO confirm against the full source. */
3661 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3665 int t3_replay_prep_adapter(struct adapter *adapter)
3667 const struct adapter_info *ai = adapter->params.info;
3668 unsigned int i, j = -1;
3671 early_hw_init(adapter, ai);
3672 ret = init_parity(adapter);
3676 for_each_port(adapter, i) {
3677 const struct port_type_info *pti;
3678 struct port_info *p = adap2pinfo(adapter, i);
3680 while (!adapter->params.vpd.port_type[++j])
3683 pti = &port_types[adapter->params.vpd.port_type[j]];
3684 ret = pti->phy_prep(&p->phy, adapter, p->phy.addr, NULL);
3687 p->phy.ops->power_down(&p->phy, 1);