/*
 * Copyright (c) 2003-2007 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "firmware_exports.h"

/**
 * t3_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times.  If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there.  Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */

int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t3_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
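
/*
 * Usage sketch (illustrative, not part of the original driver): poll a
 * busy bit until it deasserts and keep the final register value.  The
 * t3_wait_op_done() calls throughout this file are the same helper with
 * @valp == NULL.
 *
 *	u32 val;
 *
 *	if (t3_wait_op_done_val(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS,
 *				10, &val))
 *		return -EAGAIN;		// flash never went idle
 *	// val holds A_SF_OP as sampled when F_BUSY cleared
 */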

/**
 * t3_write_regs - write a bunch of registers
 * @adapter: the adapter to program
 * @p: an array of register address/register value pairs
 * @n: the number of address/value pairs
 * @offset: register address offset
 *
 * Takes an array of register address/register value pairs and writes each
 * value to the corresponding register.  Register addresses are adjusted
 * by the supplied offset.
 */
void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
		   int n, unsigned int offset)
{
	while (n--) {
		t3_write_reg(adapter, p->reg_addr + offset, p->val);
		p++;
	}
}

/**
 * t3_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t3_read_reg(adapter, addr) & ~mask;

	t3_write_reg(adapter, addr, v | val);
	t3_read_reg(adapter, addr);	/* flush */
}
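
/*
 * Example (sketch; uses the V_xxx/M_xxx field convention seen throughout
 * this file, so treat M_CLKDIV as an assumption): update only the MDIO
 * clock divider field of A_MI1_CFG, preserving all other bits.
 *
 *	t3_set_reg_field(adapter, A_MI1_CFG,
 *			 V_CLKDIV(M_CLKDIV),	// bits to clear
 *			 V_CLKDIV(clkdiv));	// bits to set
 *
 * The read-back at the end of t3_set_reg_field() flushes the posted write
 * so the update reaches the chip before the caller proceeds.
 */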

/**
 * t3_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @start_idx: index of first indirect register to read
 * @nregs: how many indirect registers to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
			     unsigned int data_reg, u32 *vals,
			     unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t3_write_reg(adap, addr_reg, start_idx);
		*vals++ = t3_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 * t3_mc7_bd_read - read from MC7 through backdoor accesses
 * @mc7: identifies MC7 to read from
 * @start: index of first 64-bit word to read
 * @n: number of 64-bit words to read
 * @buf: where to store the read result
 *
 * Read n 64-bit words from MC7 starting at word start, using backdoor
 * accesses.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	static const int shift[] = { 0, 0, 16, 24 };
	static const int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	struct adapter *adap = mc7->adapter;

	if (start >= size64 || start + n > size64)
		return -EINVAL;

	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				val64 = t3_read_reg(adap,
						    mc7->offset +
						    A_MC7_BD_DATA0);
				val64 |= (u64) val << 32;
			} else {
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64) val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}
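
/*
 * Worked example of the address math above (derived from the code, not a
 * separate hardware claim): with mc7->width == 1 a 64-bit word arrives as
 * two 32-bit halves, so the inner loop runs for i = 1, 0 and places each
 * half at bit offset step[1] * i, i.e. bits 63:32 then 31:0.  Each
 * backdoor access advances the byte address by 8, two accesses per word,
 * which is why @start is scaled by (8 << width) up front.
 */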

/*
 * Initialize MI1.
 */
static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
{
	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
	u32 val = F_PREEN | V_MDIINV(ai->mdiinv) | V_MDIEN(ai->mdien) |
		  V_CLKDIV(clkdiv);

	if (!(ai->caps & SUPPORTED_10000baseT_Full))
		val |= V_ST(1);
	t3_write_reg(adap, A_MI1_CFG, val);
}

#define MDIO_ATTEMPTS 10

/*
 * MI1 read/write operations for direct-addressed PHYs.
 */
static int mi1_read(struct adapter *adapter, int phy_addr, int mmd_addr,
		    int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret)
		*valp = t3_read_reg(adapter, A_MI1_DATA);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int mi1_write(struct adapter *adapter, int phy_addr, int mmd_addr,
		     int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	if (mmd_addr)
		return -EINVAL;

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ops = {
	mi1_read,
	mi1_write
};

/*
 * MI1 read/write operations for indirect-addressed PHYs.
 */
static int mi1_ext_read(struct adapter *adapter, int phy_addr, int mmd_addr,
			int reg_addr, unsigned int *valp)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 20);
		if (!ret)
			*valp = t3_read_reg(adapter, A_MI1_DATA);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int mi1_ext_write(struct adapter *adapter, int phy_addr, int mmd_addr,
			 int reg_addr, unsigned int val)
{
	int ret;
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 20);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 20);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ext_ops = {
	mi1_ext_read,
	mi1_ext_write
};

/**
 * t3_mdio_change_bits - modify the value of a PHY register
 * @phy: the PHY to operate on
 * @mmd: the device address
 * @reg: the register address
 * @clear: what part of the register value to mask off
 * @set: what part of the register value to set
 *
 * Changes the value of a PHY register by applying a mask to its current
 * value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}

/**
 * t3_phy_reset - reset a PHY block
 * @phy: the PHY to operate on
 * @mmd: the device address of the PHY block to reset
 * @wait: how long to wait for the reset to complete in 1ms increments
 *
 * Resets a PHY block and optionally waits for the reset to complete.
 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 * for 10G PHYs.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_change_bits(phy, mmd, MII_BMCR, BMCR_PDOWN, BMCR_RESET);
	if (err || !wait)
		return err;

	do {
		err = mdio_read(phy, mmd, MII_BMCR, &ctl);
		if (err)
			return err;
		ctl &= BMCR_RESET;
		if (ctl)
			msleep(1);
	} while (ctl && --wait);

	return ctl ? -1 : 0;
}

/**
 * t3_phy_advertise - set the PHY advertisement registers for autoneg
 * @phy: the PHY to operate on
 * @advert: bitmap of capabilities the PHY should advertise
 *
 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
 * requested capabilities.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
	int err;
	unsigned int val = 0;

	err = mdio_read(phy, 0, MII_CTRL1000, &val);
	if (err)
		return err;

	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;

	err = mdio_write(phy, 0, MII_CTRL1000, val);
	if (err)
		return err;

	val = 1;
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return mdio_write(phy, 0, MII_ADVERTISE, val);
}

/**
 * t3_set_phy_speed_duplex - force PHY speed and duplex
 * @phy: the PHY to operate on
 * @speed: requested PHY speed
 * @duplex: requested PHY duplex
 *
 * Force a 10/100/1000 PHY's speed and duplex.  This also disables
 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
	int err;
	unsigned int ctl;

	err = mdio_read(phy, 0, MII_BMCR, &ctl);
	if (err)
		return err;

	if (speed >= 0) {
		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
		if (speed == SPEED_100)
			ctl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			ctl |= BMCR_SPEED1000;
	}
	if (duplex >= 0) {
		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
		if (duplex == DUPLEX_FULL)
			ctl |= BMCR_FULLDPLX;
	}
	if (ctl & BMCR_SPEED1000)	/* auto-negotiation required for GigE */
		ctl |= BMCR_ANENABLE;
	return mdio_write(phy, 0, MII_BMCR, ctl);
}

static const struct adapter_info t3_adap_info[] = {
	{2, 0, 0, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
	 0,
	 &mi1_mdio_ops, "Chelsio PE9000"},
	{2, 0, 0, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, F_GPIO3 | F_GPIO5,
	 0,
	 &mi1_mdio_ops, "Chelsio T302"},
	{1, 0, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 0, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
	{2, 0, 0, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL, 0,
	 SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T320"},
};

/*
 * Return the adapter_info structure with a given index.  Out-of-range indices
 * return NULL.
 */
const struct adapter_info *t3_get_adapter_info(unsigned int id)
{
	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
}

#define CAPS_1G (SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full | \
		 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_MII)
#define CAPS_10G (SUPPORTED_10000baseT_Full | SUPPORTED_AUI)

static const struct port_type_info port_types[] = {
	{NULL},
	{t3_ael1002_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
	 "10GBASE-XR"},
	{t3_vsc8211_phy_prep, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
	 "10/100/1000BASE-T"},
	{NULL, CAPS_1G | SUPPORTED_TP | SUPPORTED_IRQ,
	 "10/100/1000BASE-T"},
	{t3_xaui_direct_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
	{NULL, CAPS_10G, "10GBASE-KX4"},
	{t3_qt2045_phy_prep, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
	{t3_ael1006_phy_prep, CAPS_10G | SUPPORTED_FIBRE,
	 "10GBASE-SR"},
	{NULL, CAPS_10G | SUPPORTED_TP, "10GBASE-CX4"},
};

#undef CAPS_1G
#undef CAPS_10G

#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);	/* part number */
	VPD_ENTRY(ec, 16);	/* EC level */
	VPD_ENTRY(sn, SERNUM_LEN); /* serial number */
	VPD_ENTRY(na, 12);	/* MAC address base */
	VPD_ENTRY(cclk, 6);	/* core clock */
	VPD_ENTRY(mclk, 6);	/* mem clock */
	VPD_ENTRY(uclk, 6);	/* uP clk */
	VPD_ENTRY(mdc, 6);	/* MDIO clk */
	VPD_ENTRY(mt, 2);	/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
	VPD_ENTRY(port0, 2);	/* PHY0 complex */
	VPD_ENTRY(port1, 2);	/* PHY1 complex */
	VPD_ENTRY(port2, 2);	/* PHY2 complex */
	VPD_ENTRY(port3, 2);	/* PHY3 complex */
	VPD_ENTRY(rv, 1);	/* csum */
	u32 pad;		/* for multiple-of-4 sizing and alignment */
};

#define EEPROM_MAX_POLL   4
#define EEPROM_STAT_ADDR  0x4000
#define VPD_BASE          0xc00

/**
 * t3_seeprom_read - read a VPD EEPROM location
 * @adapter: adapter to read
 * @addr: EEPROM address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 * VPD ROM capability.  A zero is written to the flag bit when the
 * address is written to the control register.  The hardware device will
 * set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	u32 v;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
	do {
		udelay(10);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
	*data = cpu_to_le32(v);
	return 0;
}

/**
 * t3_seeprom_write - write a VPD EEPROM location
 * @adapter: adapter to write
 * @addr: EEPROM address
 * @data: value to write
 *
 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 * VPD ROM capability.
 */
int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
			       le32_to_cpu(data));
	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
			      addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

/**
 * t3_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: 1 to enable write protection, 0 to disable it
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t3_seeprom_wp(struct adapter *adapter, int enable)
{
	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

/*
 * Convert a character holding a hex digit to a number.
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}
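
/*
 * Example: hex2int('3') == 3 and hex2int('b') == 11.  get_vpd_params()
 * below relies on this to turn the 12-character "na" VPD field into the
 * 6-byte Ethernet base address, two nibbles per byte.
 */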

/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
	if (ret)
		return ret;
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (__le32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = hex2int(vpd.port0_data[0]);
		p->port_type[1] = hex2int(vpd.port1_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x77ffc,	/* flash address holding FW version */
	FW_MIN_SIZE = 8		/* at least version and csum */
};

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash.  The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash.  The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}
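
/*
 * Sketch of how @cont chains flash operations (mirrors t3_read_flash()
 * below; the sequence here is illustrative): a fast-read is issued as a
 * 4-byte opcode+address write with cont set, followed by chained reads,
 * the last of which clears cont to end the transaction.
 *
 *	sf1_write(adapter, 4, 1, swab32(addr) | SF_RD_DATA_FAST);
 *	sf1_read(adapter, 1, 1, &tmp);		// dummy cycle
 *	sf1_read(adapter, 4, 0, &word);		// final op ends the chain
 */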

/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 * t3_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
int t3_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
		return ret;

	for (; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}
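
/*
 * Example (sketch): read the 4-byte firmware version word in host order,
 * as t3_get_fw_version() below does.  Passing byte_oriented = 1 instead
 * keeps the bytes in flash (big-endian) order, which is what
 * t3_write_flash() wants when it verifies a freshly programmed page.
 *
 *	u32 vers;
 *	ret = t3_read_flash(adapter, FW_VERS_ADDR, 1, &vers, 0);
 */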

/**
 * t3_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.
 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *) buf + offset, n))
		return -EIO;
	return 0;
}
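
/*
 * Worked example of the page constraint above (hypothetical address): a
 * write starting at 0x10f0 has offset 0xf0 within its 256-byte page, so
 * at most 16 bytes fit before "offset + n > 256" rejects the request.
 * Callers that stream larger images, such as t3_load_fw() below, chunk
 * writes to 256 bytes on page-aligned addresses.
 */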

/**
 * t3_get_tp_version - read the tp sram version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the protocol sram version from sram.
 */
int t3_get_tp_version(struct adapter *adapter, u32 *vers)
{
	int ret;

	/* Get version loaded in SRAM */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
			      1, 1, 5, 1);
	if (ret)
		return ret;

	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	return 0;
}

/**
 * t3_check_tpsram_version - read the tp sram version
 * @adapter: the adapter
 * @must_load: set to 1 if loading a new microcode image is required
 *
 * Reads the protocol sram version from flash.
 */
int t3_check_tpsram_version(struct adapter *adapter, int *must_load)
{
	int ret;
	u32 vers;
	unsigned int major, minor;

	if (adapter->params.rev == T3_REV_A)
		return 0;

	*must_load = 1;

	ret = t3_get_tp_version(adapter, &vers);
	if (ret)
		return ret;

	major = G_TP_VERSION_MAJOR(vers);
	minor = G_TP_VERSION_MINOR(vers);

	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
		return 0;

	if (major != TP_VERSION_MAJOR)
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
		       "driver needs version %d.%d\n", major, minor,
		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	else {
		*must_load = 0;
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
		       "driver compiled for version %d.%d\n", major, minor,
		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	}
	return -EINVAL;
}

/**
 * t3_check_tpsram - check if provided protocol SRAM is compatible with
 * this driver
 * @adapter: the adapter
 * @tp_sram: the firmware image to write
 * @size: image size
 *
 * Checks if an adapter's tp sram is compatible with the driver.
 * Returns 0 if the versions are compatible, a negative error otherwise.
 */
int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
		    unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)tp_sram;

	/* Verify checksum */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	return 0;
}
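
/*
 * Checksum note (follows from the loop above): an image is valid when its
 * big-endian 32-bit words sum to 0xffffffff modulo 2^32, i.e. the final
 * word is the one's-complement checksum of the rest.  Toy example: a body
 * of {0x1, 0x2} needs a trailing word of 0xfffffffc.
 */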

enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};

/**
 * t3_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
int t3_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}

/**
 * t3_check_fw_version - check if the FW is compatible with this driver
 * @adapter: the adapter
 * @must_load: set to 1 if loading a new FW image is required
 *
 * Checks if an adapter's FW is compatible with the driver.  Returns 0
 * if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(struct adapter *adapter, int *must_load)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	*must_load = 1;
	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
		return 0;

	if (major != FW_VERSION_MAJOR)
		CH_ERR(adapter, "found wrong FW version(%u.%u), "
		       "driver needs version %u.%u\n", major, minor,
		       FW_VERSION_MAJOR, FW_VERSION_MINOR);
	else if (minor < FW_VERSION_MINOR) {
		*must_load = 0;
		CH_WARN(adapter, "found old FW minor version(%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
	} else {
		CH_WARN(adapter, "found newer FW version(%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
		return 0;
	}
	return -EINVAL;
}

/**
 * t3_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given range.
 */
static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	while (start <= end) {
		int ret;

		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
			return ret;
		start++;
	}
	return 0;
}
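
/*
 * Example: the firmware image lives at FW_FLASH_BOOT_ADDR (0x70000); with
 * 64KB sectors that is sector 0x70000 >> 16 == 7, so t3_load_fw() below
 * erases exactly the range 7..7 before programming.
 */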

/*
 * t3_load_fw - download firmware
 * @adapter: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 * The FW image has the following sections: @size - 8 bytes of code and
 * data, followed by 4 bytes of FW version, followed by the 32-bit
 * 1's complement checksum of the whole image.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
		return -EFBIG;

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;		/* trim off version and checksum */
	for (addr = FW_FLASH_BOOT_ADDR; size;) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}
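
/*
 * Flash layout implied by t3_load_fw() (addresses from the enum earlier
 * in this file): the image body occupies FW_FLASH_BOOT_ADDR onward, the
 * 4-byte version is programmed separately at FW_VERS_ADDR (0x77ffc), and
 * the trailing 4-byte checksum is verified but never written to flash,
 * which is why 8 bytes are trimmed before the chunked write loop.
 */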

#define CIM_CTL_BASE 0x2000

/**
 * t3_cim_ctl_blk_read - read a block from CIM control region
 *
 * @adap: the adapter
 * @addr: the start address within the CIM control region
 * @n: number of words to read
 * @valp: where to store the result
 *
 * Reads a block of 4-byte words from the CIM control region.
 */
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
			unsigned int n, unsigned int *valp)
{
	int ret = 0;

	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

/**
 * t3_link_changed - handle interface link changes
 * @adapter: the adapter
 * @port_id: the port index that changed link state
 *
 * Called when a port's link settings change to propagate the new values
 * to the associated PHY and MAC.  After performing the common tasks it
 * invokes an OS-specific handler.
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
	int link_ok, speed, duplex, fc;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
	    uses_xaui(adapter)) {
		if (link_ok)
			t3b_pcs_reset(mac);
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
	}
	lc->link_ok = link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
		/* Set MAC speed, duplex, and flow control to match PHY. */
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
		lc->fc = fc;
	}

	t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
}

/**
 * t3_link_start - apply link configuration to MAC/PHY
 * @phy: the PHY to setup
 * @mac: the MAC to setup
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}
		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
			phy->ops->reset(phy, 0);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}

/**
 * t3_set_vlan_accel - control HW VLAN extraction
 * @adapter: the adapter
 * @ports: bitmap of adapter ports to operate on
 * @on: enable (1) or disable (0) HW VLAN extraction
 *
 * Enables or disables HW extraction of VLAN tags for the given ports.
 */
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
{
	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
			 ports << S_VLANEXTRACTIONENABLE,
			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
}
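
/*
 * Example (sketch): enable HW VLAN extraction on ports 0 and 1 of a
 * two-port adapter by passing the port bitmap 0x3.
 *
 *	t3_set_vlan_accel(adapter, 0x3, 1);
 */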

struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal:1;	/* whether the condition reported is fatal */
};

/**
 * t3_handle_intr_status - table driven interrupt handler
 * @adapter: the adapter that generated the interrupt
 * @reg: the interrupt status register to process
 * @mask: a mask to apply to the interrupt status
 * @acts: table of interrupt actions
 * @stats: statistics counters tracking interrupt occurrences
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred.  The actions include
 * optionally printing a warning or alert message, and optionally
 * incrementing a stat counter.  The table is terminated by an entry
 * specifying mask 0.  Returns the number of fatal interrupt conditions.
 */
static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 unsigned int mask,
				 const struct intr_info *acts,
				 unsigned long *stats)
{
	int fatal = 0;
	unsigned int status = t3_read_reg(adapter, reg) & mask;

	for (; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
		} else if (acts->msg)
			CH_WARN(adapter, "%s (0x%x)\n",
				acts->msg, status & acts->mask);
		if (acts->stat_idx >= 0)
			stats[acts->stat_idx]++;
	}
	if (status)		/* clear processed interrupts */
		t3_write_reg(adapter, reg, status);
	return fatal;
}
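
/*
 * Shape of an action table (minimal sketch; F_SOME_* and STAT_SOME_EVENT
 * are hypothetical names): the mask-0 entry is the terminator, a stat_idx
 * of -1 means no counter, and fatal = 1 makes the caller escalate via
 * t3_fatal_err().  The tables below all follow this pattern.
 *
 *	static const struct intr_info example_intr_info[] = {
 *		{F_SOME_PARITY_ERR, "parity error", -1, 1},
 *		{F_SOME_SOFT_EVENT, "soft event", STAT_SOME_EVENT, 0},
 *		{0}
 *	};
 */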

#define SGE_INTR_MASK (F_RSPQDISABLED | \
		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		       F_HIRCQPARITYERROR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN | F_RXFIFO_OVERFLOW)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
			F_TXPARERR | V_BISTERR(M_BISTERR))
#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
#define ULPTX_INTR_MASK 0xfc
#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)

/*
 * Interrupt handler for the PCIX1 module.
 */
static void pci_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcix1_intr_info[] = {
		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
		{F_RCVTARABT, "PCI received target abort", -1, 1},
		{F_RCVMSTABT, "PCI received master abort", -1, 1},
		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
		{F_DETPARERR, "PCI detected parity error", -1, 1},
		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
		{F_RCVSPLCMPERR, "PCI received split completion error", -1, 1},
		{F_DETCORECCERR, "PCI correctable ECC error",
		 STAT_PCI_CORR_ECC, 0},
		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1, 1},
		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1, 1},
		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error",
		 -1, 1},
		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
		 "error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
				  pcix1_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcie_intr_info[] = {
		{F_PEXERR, "PCI PEX error", -1, 1},
		{F_UNXSPLCPLERRR,
		 "PCI unexpected split completion DMA read error", -1, 1},
		{F_UNXSPLCPLERRC,
		 "PCI unexpected split completion DMA command error", -1, 1},
		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
		 "PCI MSI-X table/PBA parity error", -1, 1},
		{F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
		{F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
		{F_RXPARERR, "PCI Rx parity error", -1, 1},
		{F_TXPARERR, "PCI Tx parity error", -1, 1},
		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
		{0}
	};

	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
		CH_ALERT(adapter, "PEX error code 0x%x\n",
			 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
				  pcie_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{0xffffff, "TP parity error", -1, 1},
		{0x1000000, "TP out of Rx pages", -1, 1},
		{0x2000000, "TP out of Tx pages", -1, 1},
		{0}
	};

	static struct intr_info tp_intr_info_t3c[] = {
		{0x1fffffff, "TP parity error", -1, 1},
		{F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
		{F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
				  adapter->params.rev < T3_REV_C ?
				  tp_intr_info : tp_intr_info_t3c, NULL))
		t3_fatal_err(adapter);
}

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
		{F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
		{F_ICACHEPARERR, "CIM icache parity error", -1, 1},
		{F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
		{F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
		{F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
		{F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
		{F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
		{F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
		{F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
		{F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
		{F_ITAGPARERR, "CIM itag parity error", -1, 1},
		{F_DTAGPARERR, "CIM dtag parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
				  cim_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{F_PARERRDATA, "ULP RX data parity error", -1, 1},
		{F_PARERRPCMD, "ULP RX command parity error", -1, 1},
		{F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
		{F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
		{F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
		{F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
		{F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
		{F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
				  ulprx_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
		 STAT_ULP_CH0_PBL_OOB, 0},
		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
		 STAT_ULP_CH1_PBL_OOB, 0},
		{0xfc, "ULP TX parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
				  ulptx_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
		 "PMTX ispi parity error", -1, 1},
		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
		 "PMTX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
				  pmtx_intr_info, NULL))
		t3_fatal_err(adapter);
}

#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
		 "PMRX ispi parity error", -1, 1},
		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
		 "PMRX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
				  pmrx_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
		{F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
				  cplsw_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_intr_info[] = {
		{0x1ff, "MPS parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
				  mps_intr_info, NULL))
		t3_fatal_err(adapter);
}

#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)

/*
 * MC7 interrupt handler.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
	struct adapter *adapter = mc7->adapter;
	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

	if (cause & F_CE) {
		mc7->stats.corr_err++;
		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
			"data 0x%x 0x%x 0x%x\n", mc7->name,
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
	}

	if (cause & F_UE) {
		mc7->stats.uncorr_err++;
		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
			 "data 0x%x 0x%x 0x%x\n", mc7->name,
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
	}

	if (G_PE(cause)) {
		mc7->stats.parity_err++;
		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
			 mc7->name, G_PE(cause));
	}

	if (cause & F_AE) {
		u32 addr = 0;

		if (adapter->params.rev > 0)
			addr = t3_read_reg(adapter,
					   mc7->offset + A_MC7_ERR_ADDR);
		mc7->stats.addr_err++;
		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
			 mc7->name, addr);
	}

	if (cause & MC7_INTR_FATAL)
		t3_fatal_err(adapter);

	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}

#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
/*
 * XGMAC interrupt handler.
 */
static int mac_intr_handler(struct adapter *adap, unsigned int idx)
{
	struct cmac *mac = &adap2pinfo(adap, idx)->mac;
	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset);

	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
		mac->stats.tx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
	}
	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
		mac->stats.rx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
	}
	if (cause & F_TXFIFO_UNDERRUN)
		mac->stats.tx_fifo_urun++;
	if (cause & F_RXFIFO_OVERFLOW)
		mac->stats.rx_fifo_ovfl++;
	if (cause & V_SERDES_LOS(M_SERDES_LOS))
		mac->stats.serdes_signal_loss++;
	if (cause & F_XAUIPCSCTCERR)
		mac->stats.xaui_pcs_ctc_err++;
	if (cause & F_XAUIPCSALIGNCHANGE)
		mac->stats.xaui_pcs_align_change++;

	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
	if (cause & XGM_INTR_FATAL)
		t3_fatal_err(adap);
	return cause != 0;
}
1679
1680/*
1681 * Interrupt handler for PHY events.
1682 */
1683int t3_phy_intr_handler(struct adapter *adapter)
1684{
1ca03cbc 1685 u32 mask, gpi = adapter_info(adapter)->gpio_intr;
1686 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1687
1688 for_each_port(adapter, i) {
1689 struct port_info *p = adap2pinfo(adapter, i);
1690
1691 mask = gpi - (gpi & (gpi - 1));
1692 gpi -= mask;
1693
1694 if (!(p->port_type->caps & SUPPORTED_IRQ))
1695 continue;
1696
1697 if (cause & mask) {
1698 int phy_cause = p->phy.ops->intr_handler(&p->phy);
1699
1700 if (phy_cause & cphy_cause_link_change)
1701 t3_link_changed(adapter, i);
1702 if (phy_cause & cphy_cause_fifo_error)
1ca03cbc 1703 p->phy.fifo_errors++;
1704 }
1705 }
1706
1707 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1708 return 0;
1709}
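/*
 * Illustrative sketch, not part of the driver: the mask computation in
 * t3_phy_intr_handler() above, gpi - (gpi & (gpi - 1)), isolates the
 * lowest set bit of gpi, so each loop iteration consumes one port's GPIO
 * interrupt bit.  A standalone demonstration (function name is ours):
 */
static unsigned int walk_gpio_bits(unsigned int gpi)
{
	unsigned int nports = 0;

	while (gpi) {
		unsigned int mask = gpi - (gpi & (gpi - 1)); /* lowest set bit */

		gpi -= mask;	/* consume this port's bit */
		nports++;	/* one port handled per iteration */
	}
	return nports;
}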
1710
1711/*
1712 * T3 slow path (non-data) interrupt handler.
1713 */
1714int t3_slow_intr_handler(struct adapter *adapter)
1715{
1716 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1717
1718 cause &= adapter->slow_intr_mask;
1719 if (!cause)
1720 return 0;
1721 if (cause & F_PCIM0) {
1722 if (is_pcie(adapter))
1723 pcie_intr_handler(adapter);
1724 else
1725 pci_intr_handler(adapter);
1726 }
1727 if (cause & F_SGE3)
1728 t3_sge_err_intr_handler(adapter);
1729 if (cause & F_MC7_PMRX)
1730 mc7_intr_handler(&adapter->pmrx);
1731 if (cause & F_MC7_PMTX)
1732 mc7_intr_handler(&adapter->pmtx);
1733 if (cause & F_MC7_CM)
1734 mc7_intr_handler(&adapter->cm);
1735 if (cause & F_CIM)
1736 cim_intr_handler(adapter);
1737 if (cause & F_TP1)
1738 tp_intr_handler(adapter);
1739 if (cause & F_ULP2_RX)
1740 ulprx_intr_handler(adapter);
1741 if (cause & F_ULP2_TX)
1742 ulptx_intr_handler(adapter);
1743 if (cause & F_PM1_RX)
1744 pmrx_intr_handler(adapter);
1745 if (cause & F_PM1_TX)
1746 pmtx_intr_handler(adapter);
1747 if (cause & F_CPL_SWITCH)
1748 cplsw_intr_handler(adapter);
1749 if (cause & F_MPS0)
1750 mps_intr_handler(adapter);
1751 if (cause & F_MC5A)
1752 t3_mc5_intr_handler(&adapter->mc5);
1753 if (cause & F_XGMAC0_0)
1754 mac_intr_handler(adapter, 0);
1755 if (cause & F_XGMAC0_1)
1756 mac_intr_handler(adapter, 1);
1757 if (cause & F_T3DBG)
1758 t3_os_ext_intr_handler(adapter);
1759
1760 /* Clear the interrupts just processed. */
1761 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1762 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1763 return 1;
1764}
1765
1766/**
1767 * t3_intr_enable - enable interrupts
1768 * @adapter: the adapter whose interrupts should be enabled
1769 *
1770 * Enable interrupts by setting the interrupt enable registers of the
1771 * various HW modules and then enabling the top-level interrupt
1772 * concentrator.
1773 */
1774void t3_intr_enable(struct adapter *adapter)
1775{
1776 static const struct addr_val_pair intr_en_avp[] = {
1777 {A_SG_INT_ENABLE, SGE_INTR_MASK},
1778 {A_MC7_INT_ENABLE, MC7_INTR_MASK},
1779 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1780 MC7_INTR_MASK},
1781 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1782 MC7_INTR_MASK},
1783 {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1784 {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
1785 {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1786 {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1787 {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1788 {A_MPS_INT_ENABLE, MPS_INTR_MASK},
1789 };
1790
1791 adapter->slow_intr_mask = PL_INTR_MASK;
1792
1793 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
1794 t3_write_reg(adapter, A_TP_INT_ENABLE,
1795 adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
1796
1797 if (adapter->params.rev > 0) {
1798 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1799 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1800 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1801 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1802 F_PBL_BOUND_ERR_CH1);
1803 } else {
1804 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1805 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1806 }
1807
1808 t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW,
1809 adapter_info(adapter)->gpio_intr);
1810 t3_write_reg(adapter, A_T3DBG_INT_ENABLE,
1811 adapter_info(adapter)->gpio_intr);
1812 if (is_pcie(adapter))
1813 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
1814 else
1815 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
1816 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
1817 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1818}
1819
1820/**
1821 * t3_intr_disable - disable a card's interrupts
1822 * @adapter: the adapter whose interrupts should be disabled
1823 *
1824 * Disable interrupts. We only disable the top-level interrupt
1825 * concentrator and the SGE data interrupts.
1826 */
1827void t3_intr_disable(struct adapter *adapter)
1828{
1829 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
1830 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
1831 adapter->slow_intr_mask = 0;
1832}
1833
1834/**
1835 * t3_intr_clear - clear all interrupts
1836 * @adapter: the adapter whose interrupts should be cleared
1837 *
1838 * Clears all interrupts.
1839 */
1840void t3_intr_clear(struct adapter *adapter)
1841{
1842 static const unsigned int cause_reg_addr[] = {
1843 A_SG_INT_CAUSE,
1844 A_SG_RSPQ_FL_STATUS,
1845 A_PCIX_INT_CAUSE,
1846 A_MC7_INT_CAUSE,
1847 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1848 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1849 A_CIM_HOST_INT_CAUSE,
1850 A_TP_INT_CAUSE,
1851 A_MC5_DB_INT_CAUSE,
1852 A_ULPRX_INT_CAUSE,
1853 A_ULPTX_INT_CAUSE,
1854 A_CPL_INTR_CAUSE,
1855 A_PM1_TX_INT_CAUSE,
1856 A_PM1_RX_INT_CAUSE,
1857 A_MPS_INT_CAUSE,
1858 A_T3DBG_INT_CAUSE,
1859 };
1860 unsigned int i;
1861
1862 /* Clear PHY and MAC interrupts for each port. */
1863 for_each_port(adapter, i)
1864 t3_port_intr_clear(adapter, i);
1865
1866 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
1867 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
1868
1869 if (is_pcie(adapter))
1870 t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
1871 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
1872 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1873}
1874
1875/**
1876 * t3_port_intr_enable - enable port-specific interrupts
1877 * @adapter: associated adapter
1878 * @idx: index of port whose interrupts should be enabled
1879 *
1880 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
1881 * adapter port.
1882 */
1883void t3_port_intr_enable(struct adapter *adapter, int idx)
1884{
1885 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1886
1887 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
1888 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1889 phy->ops->intr_enable(phy);
1890}
1891
1892/**
1893 * t3_port_intr_disable - disable port-specific interrupts
1894 * @adapter: associated adapter
1895 * @idx: index of port whose interrupts should be disabled
1896 *
1897 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
1898 * adapter port.
1899 */
1900void t3_port_intr_disable(struct adapter *adapter, int idx)
1901{
1902 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1903
1904 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
1905 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
1906 phy->ops->intr_disable(phy);
1907}
1908
1909/**
1910 * t3_port_intr_clear - clear port-specific interrupts
1911 * @adapter: associated adapter
1912 * @idx: index of port whose interrupts to clear
1913 *
1914 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
1915 * adapter port.
1916 */
1917void t3_port_intr_clear(struct adapter *adapter, int idx)
1918{
1919 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
1920
1921 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
1922 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
1923 phy->ops->intr_clear(phy);
1924}
1925
1926#define SG_CONTEXT_CMD_ATTEMPTS 100
1927
1928/**
1929 * t3_sge_write_context - write an SGE context
1930 * @adapter: the adapter
1931 * @id: the context id
1932 * @type: the context type
1933 *
1934 * Program an SGE context with the values already loaded in the
1935 * CONTEXT_DATA? registers.
1936 */
1937static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
1938 unsigned int type)
1939{
1940 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
1941 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
1942 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
1943 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
1944 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
1945 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
1946 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 1947 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
1948}
1949
1950static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
1951 unsigned int type)
1952{
1953 t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
1954 t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
1955 t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
1956 t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
1957 return t3_sge_write_context(adap, id, type);
1958}
1959
1960/**
1961 * t3_sge_init_ecntxt - initialize an SGE egress context
1962 * @adapter: the adapter to configure
1963 * @id: the context id
1964 * @gts_enable: whether to enable GTS for the context
1965 * @type: the egress context type
1966 * @respq: associated response queue
1967 * @base_addr: base address of queue
1968 * @size: number of queue entries
1969 * @token: uP token
1970 * @gen: initial generation value for the context
1971 * @cidx: consumer pointer
1972 *
1973 * Initialize an SGE egress context and make it ready for use. If the
1974 * platform allows concurrent context operations, the caller is
1975 * responsible for appropriate locking.
1976 */
1977int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
1978 enum sge_context_type type, int respq, u64 base_addr,
1979 unsigned int size, unsigned int token, int gen,
1980 unsigned int cidx)
1981{
1982 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
1983
1984 if (base_addr & 0xfff) /* must be 4K aligned */
1985 return -EINVAL;
1986 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
1987 return -EBUSY;
1988
1989 base_addr >>= 12;
1990 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
1991 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
1992 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
1993 V_EC_BASE_LO(base_addr & 0xffff));
1994 base_addr >>= 16;
1995 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
1996 base_addr >>= 32;
1997 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
1998 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
1999 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2000 F_EC_VALID);
2001 return t3_sge_write_context(adapter, id, F_EGRESS);
2002}
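/*
 * Illustrative sketch, not part of the driver: t3_sge_init_ecntxt() above
 * stores the 64-bit, 4K-aligned base address as a 52-bit page number
 * split across the CONTEXT_DATA registers: 16 bits into DATA1
 * (V_EC_BASE_LO), the next 32 into DATA2, and the top 4 into DATA3
 * (V_EC_BASE_HI).  The split in isolation (names below are ours):
 */
#include <stdint.h>

static int split_ec_base(uint64_t base_addr, uint32_t *lo16,
			 uint32_t *mid32, uint32_t *hi4)
{
	if (base_addr & 0xfff)			/* must be 4K aligned */
		return -1;
	base_addr >>= 12;			/* 52-bit page number */
	*lo16 = base_addr & 0xffff;		/* -> DATA1 */
	base_addr >>= 16;
	*mid32 = (uint32_t)base_addr;		/* -> DATA2 */
	base_addr >>= 32;
	*hi4 = (uint32_t)base_addr & 0xf;	/* -> DATA3 */
	return 0;
}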
2003
2004/**
2005 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2006 * @adapter: the adapter to configure
2007 * @id: the context id
2008 * @gts_enable: whether to enable GTS for the context
2009 * @base_addr: base address of queue
2010 * @size: number of queue entries
2011 * @bsize: size of each buffer for this queue
2012 * @cong_thres: threshold to signal congestion to upstream producers
2013 * @gen: initial generation value for the context
2014 * @cidx: consumer pointer
2015 *
2016 * Initialize an SGE free list context and make it ready for use. The
2017 * caller is responsible for ensuring only one context operation occurs
2018 * at a time.
2019 */
2020int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
2021 int gts_enable, u64 base_addr, unsigned int size,
2022 unsigned int bsize, unsigned int cong_thres, int gen,
2023 unsigned int cidx)
2024{
2025 if (base_addr & 0xfff) /* must be 4K aligned */
2026 return -EINVAL;
2027 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2028 return -EBUSY;
2029
2030 base_addr >>= 12;
2031 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
2032 base_addr >>= 32;
2033 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2034 V_FL_BASE_HI((u32) base_addr) |
2035 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2036 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2037 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2038 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2039 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2040 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2041 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2042 return t3_sge_write_context(adapter, id, F_FREELIST);
2043}
2044
2045/**
2046 * t3_sge_init_rspcntxt - initialize an SGE response queue context
2047 * @adapter: the adapter to configure
2048 * @id: the context id
2049 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2050 * @base_addr: base address of queue
2051 * @size: number of queue entries
2052 * @fl_thres: threshold for selecting the normal or jumbo free list
2053 * @gen: initial generation value for the context
2054 * @cidx: consumer pointer
2055 *
2056 * Initialize an SGE response queue context and make it ready for use.
2057 * The caller is responsible for ensuring only one context operation
2058 * occurs at a time.
2059 */
2060int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
2061 int irq_vec_idx, u64 base_addr, unsigned int size,
2062 unsigned int fl_thres, int gen, unsigned int cidx)
2063{
2064 unsigned int intr = 0;
2065
2066 if (base_addr & 0xfff) /* must be 4K aligned */
2067 return -EINVAL;
2068 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2069 return -EBUSY;
2070
2071 base_addr >>= 12;
2072 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2073 V_CQ_INDEX(cidx));
2074 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2075 base_addr >>= 32;
2076 if (irq_vec_idx >= 0)
2077 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2078 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2079 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
2080 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2081 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2082}
2083
2084/**
2085 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2086 * @adapter: the adapter to configure
2087 * @id: the context id
2088 * @base_addr: base address of queue
2089 * @size: number of queue entries
2090 * @rspq: response queue for async notifications
2091 * @ovfl_mode: CQ overflow mode
2092 * @credits: completion queue credits
2093 * @credit_thres: the credit threshold
2094 *
2095 * Initialize an SGE completion queue context and make it ready for use.
2096 * The caller is responsible for ensuring only one context operation
2097 * occurs at a time.
2098 */
2099int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2100 unsigned int size, int rspq, int ovfl_mode,
2101 unsigned int credits, unsigned int credit_thres)
2102{
2103 if (base_addr & 0xfff) /* must be 4K aligned */
2104 return -EINVAL;
2105 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2106 return -EBUSY;
2107
2108 base_addr >>= 12;
2109 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2110 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2111 base_addr >>= 32;
2112 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2113 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
2114 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2115 V_CQ_ERR(ovfl_mode));
2116 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2117 V_CQ_CREDIT_THRES(credit_thres));
2118 return t3_sge_write_context(adapter, id, F_CQ);
2119}
2120
2121/**
2122 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2123 * @adapter: the adapter
2124 * @id: the egress context id
2125 * @enable: enable (1) or disable (0) the context
2126 *
2127 * Enable or disable an SGE egress context. The caller is responsible for
2128 * ensuring only one context operation occurs at a time.
2129 */
2130int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
2131{
2132 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2133 return -EBUSY;
2134
2135 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2136 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2137 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2138 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2139 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2140 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2141 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2142 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2143 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2144}
2145
2146/**
2147 * t3_sge_disable_fl - disable an SGE free-buffer list
2148 * @adapter: the adapter
2149 * @id: the free list context id
2150 *
2151 * Disable an SGE free-buffer list. The caller is responsible for
2152 * ensuring only one context operation occurs at a time.
2153 */
2154int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
2155{
2156 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2157 return -EBUSY;
2158
2159 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2160 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2161 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2162 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2163 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2164 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2165 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2166 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2167 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2168}
2169
2170/**
2171 * t3_sge_disable_rspcntxt - disable an SGE response queue
2172 * @adapter: the adapter
2173 * @id: the response queue context id
2174 *
2175 * Disable an SGE response queue. The caller is responsible for
2176 * ensuring only one context operation occurs at a time.
2177 */
2178int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2179{
2180 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2181 return -EBUSY;
2182
2183 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2184 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2185 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2186 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2187 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2188 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2189 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2190 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2191 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2192}
2193
2194/**
2195 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2196 * @adapter: the adapter
2197 * @id: the completion queue context id
2198 *
2199 * Disable an SGE completion queue. The caller is responsible for
2200 * ensuring only one context operation occurs at a time.
2201 */
2202int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2203{
2204 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2205 return -EBUSY;
2206
2207 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2208 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2209 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2210 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2211 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2212 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2213 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2214 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2215 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
2216}
2217
2218/**
2219 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2220 * @adapter: the adapter
2221 * @id: the context id
2222 * @op: the operation to perform
 * @credits: credits to supply with the operation, where applicable
2223 *
2224 * Perform the selected operation on an SGE completion queue context.
2225 * The caller is responsible for ensuring only one context operation
2226 * occurs at a time.
2227 */
2228int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2229 unsigned int credits)
2230{
2231 u32 val;
2232
2233 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2234 return -EBUSY;
2235
2236 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2237 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2238 V_CONTEXT(id) | F_CQ);
2239 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2240 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
2241 return -EIO;
2242
2243 if (op >= 2 && op < 7) {
2244 if (adapter->params.rev > 0)
2245 return G_CQ_INDEX(val);
2246
2247 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2248 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2249 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
2250 F_CONTEXT_CMD_BUSY, 0,
2251 SG_CONTEXT_CMD_ATTEMPTS, 1))
2252 return -EIO;
2253 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2254 }
2255 return 0;
2256}
2257
2258/**
2259 * t3_sge_read_context - read an SGE context
2260 * @type: the context type
2261 * @adapter: the adapter
2262 * @id: the context id
2263 * @data: holds the retrieved context
2264 *
2265 * Read an SGE context of the given type. The caller is responsible for ensuring
2266 * only one context operation occurs at a time.
2267 */
2268static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2269 unsigned int id, u32 data[4])
2270{
2271 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2272 return -EBUSY;
2273
2274 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2275 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2276 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
bb9366af 2277 SG_CONTEXT_CMD_ATTEMPTS, 1))
2278 return -EIO;
2279 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2280 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2281 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2282 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2283 return 0;
2284}
2285
2286/**
2287 * t3_sge_read_ecntxt - read an SGE egress context
2288 * @adapter: the adapter
2289 * @id: the context id
2290 * @data: holds the retrieved context
2291 *
2292 * Read an SGE egress context. The caller is responsible for ensuring
2293 * only one context operation occurs at a time.
2294 */
2295int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2296{
2297 if (id >= 65536)
2298 return -EINVAL;
2299 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2300}
2301
2302/**
2303 * t3_sge_read_cq - read an SGE CQ context
2304 * @adapter: the adapter
2305 * @id: the context id
2306 * @data: holds the retrieved context
2307 *
2308 * Read an SGE CQ context. The caller is responsible for ensuring
2309 * only one context operation occurs at a time.
2310 */
2311int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2312{
2313 if (id >= 65536)
2314 return -EINVAL;
2315 return t3_sge_read_context(F_CQ, adapter, id, data);
2316}
2317
2318/**
2319 * t3_sge_read_fl - read an SGE free-list context
2320 * @adapter: the adapter
2321 * @id: the context id
2322 * @data: holds the retrieved context
2323 *
2324 * Read an SGE free-list context. The caller is responsible for ensuring
2325 * only one context operation occurs at a time.
2326 */
2327int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2328{
2329 if (id >= SGE_QSETS * 2)
2330 return -EINVAL;
2331 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2332}
2333
2334/**
2335 * t3_sge_read_rspq - read an SGE response queue context
2336 * @adapter: the adapter
2337 * @id: the context id
2338 * @data: holds the retrieved context
2339 *
2340 * Read an SGE response queue context. The caller is responsible for
2341 * ensuring only one context operation occurs at a time.
2342 */
2343int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2344{
2345 if (id >= SGE_QSETS)
2346 return -EINVAL;
2347 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2348}
2349
2350/**
2351 * t3_config_rss - configure Rx packet steering
2352 * @adapter: the adapter
2353 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2354 * @cpus: values for the CPU lookup table (0xff terminated)
2355 * @rspq: values for the response queue lookup table (0xffff terminated)
2356 *
2357 * Programs the receive packet steering logic. @cpus and @rspq provide
2358 * the values for the CPU and response queue lookup tables. If they
2359 * provide fewer values than the size of the tables the supplied values
2360 * are used repeatedly until the tables are fully populated.
2361 */
2362void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2363 const u8 *cpus, const u16 *rspq)
2364{
2365 int i, j, cpu_idx = 0, q_idx = 0;
2366
2367 if (cpus)
2368 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2369 u32 val = i << 16;
2370
2371 for (j = 0; j < 2; ++j) {
2372 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2373 if (cpus[cpu_idx] == 0xff)
2374 cpu_idx = 0;
2375 }
2376 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2377 }
2378
2379 if (rspq)
2380 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2381 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2382 (i << 16) | rspq[q_idx++]);
2383 if (rspq[q_idx] == 0xffff)
2384 q_idx = 0;
2385 }
2386
2387 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2388}
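/*
 * Illustrative sketch, not part of the driver: t3_config_rss() above
 * replays a 0xff-terminated list of CPU values into the lookup table,
 * restarting from the beginning whenever the next value would be the
 * terminator.  The wrap-around fill in isolation (the table size and
 * 6-bit entry mask are assumptions of the example):
 */
#define EXAMPLE_RSS_TABLE_SIZE 64

static void fill_wrapping(unsigned char table[EXAMPLE_RSS_TABLE_SIZE],
			  const unsigned char *vals) /* 0xff-terminated */
{
	int i, idx = 0;

	for (i = 0; i < EXAMPLE_RSS_TABLE_SIZE; i++) {
		table[i] = vals[idx++] & 0x3f;	/* 6-bit entries */
		if (vals[idx] == 0xff)		/* wrap before the terminator */
			idx = 0;
	}
}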
2389
2390/**
2391 * t3_read_rss - read the contents of the RSS tables
2392 * @adapter: the adapter
2393 * @lkup: holds the contents of the RSS lookup table
2394 * @map: holds the contents of the RSS map table
2395 *
2396 * Reads the contents of the receive packet steering tables.
2397 */
2398 int t3_read_rss(struct adapter *adapter, u8 *lkup, u16 *map)
2399{
2400 int i;
2401 u32 val;
2402
2403 if (lkup)
2404 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2405 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2406 0xffff0000 | i);
2407 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2408 if (!(val & 0x80000000))
2409 return -EAGAIN;
2410 *lkup++ = val;
2411 *lkup++ = (val >> 8);
2412 }
2413
2414 if (map)
2415 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2416 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2417 0xffff0000 | i);
2418 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2419 if (!(val & 0x80000000))
2420 return -EAGAIN;
2421 *map++ = val;
2422 }
2423 return 0;
2424}
2425
2426/**
2427 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2428 * @adap: the adapter
2429 * @enable: 1 to select offload mode, 0 for regular NIC
2430 *
2431 * Switches TP to NIC/offload mode.
2432 */
2433void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2434{
2435 if (is_offload(adap) || !enable)
2436 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2437 V_NICMODE(!enable));
2438}
2439
2440/**
2441 * pm_num_pages - calculate the number of pages of the payload memory
2442 * @mem_size: the size of the payload memory
2443 * @pg_size: the size of each payload memory page
2444 *
2445 * Calculate the number of pages, each of the given size, that fit in a
2446 * memory of the specified size, respecting the HW requirement that the
2447 * number of pages must be a multiple of 24.
2448 */
2449static inline unsigned int pm_num_pages(unsigned int mem_size,
2450 unsigned int pg_size)
2451{
2452 unsigned int n = mem_size / pg_size;
2453
2454 return n - n % 24;
2455}
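/*
 * Worked example (illustrative numbers): with 64 MB of payload memory
 * and 16 KB pages, mem_size / pg_size = 4096; 4096 % 24 = 16, so
 * pm_num_pages() returns 4080, the largest multiple of 24 that fits.
 */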
2456
2457#define mem_region(adap, start, size, reg) \
2458 t3_write_reg((adap), A_ ## reg, (start)); \
2459 start += size
2460
b881955b 2461/**
2462 * partition_mem - partition memory and configure TP memory settings
2463 * @adap: the adapter
2464 * @p: the TP parameters
2465 *
2466 * Partitions context and payload memory and configures TP's memory
2467 * registers.
2468 */
2469static void partition_mem(struct adapter *adap, const struct tp_params *p)
2470{
2471 unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
2472 unsigned int timers = 0, timers_shift = 22;
2473
2474 if (adap->params.rev > 0) {
2475 if (tids <= 16 * 1024) {
2476 timers = 1;
2477 timers_shift = 16;
2478 } else if (tids <= 64 * 1024) {
2479 timers = 2;
2480 timers_shift = 18;
2481 } else if (tids <= 256 * 1024) {
2482 timers = 3;
2483 timers_shift = 20;
2484 }
2485 }
2486
2487 t3_write_reg(adap, A_TP_PMM_SIZE,
2488 p->chan_rx_size | (p->chan_tx_size >> 16));
2489
2490 t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
2491 t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
2492 t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
2493 t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
2494 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));
2495
2496 t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
2497 t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
2498 t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);
2499
2500 pstructs = p->rx_num_pgs + p->tx_num_pgs;
2501 /* Add a bit of headroom and round down to a multiple of 24 */
2502 pstructs += 48;
2503 pstructs -= pstructs % 24;
2504 t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);
2505
2506 m = tids * TCB_SIZE;
2507 mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
2508 mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
2509 t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
2510 m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
2511 mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
2512 mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
2513 mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
2514 mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);
2515
2516 m = (m + 4095) & ~0xfff;
2517 t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
2518 t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);
2519
2520 tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
2521 m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
2522 adap->params.mc5.nfilters - adap->params.mc5.nroutes;
2523 if (tids < m)
2524 adap->params.mc5.nservers += m - tids;
2525}
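/*
 * Illustrative sketch, not part of the driver: the mem_region() calls in
 * partition_mem() above implement a simple bump allocator -- each call
 * records the current offset in a base-address register and advances the
 * offset by the region's size.  The pattern in isolation:
 */
static unsigned int alloc_region(unsigned int *next_free, unsigned int size)
{
	unsigned int start = *next_free;	/* this region's base */

	*next_free += size;			/* bump for the next region */
	return start;
}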
2526
2527static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
2528 u32 val)
2529{
2530 t3_write_reg(adap, A_TP_PIO_ADDR, addr);
2531 t3_write_reg(adap, A_TP_PIO_DATA, val);
2532}
2533
2534static void tp_config(struct adapter *adap, const struct tp_params *p)
2535{
2536 t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
2537 F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
2538 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2539 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2540 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
8a9fab22 2541 V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
2542 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2543 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2544 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
2545 F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
b881955b 2546 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
2547 F_IPV6ENABLE | F_NICMODE);
2548 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2549 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2550 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2551 adap->params.rev > 0 ? F_ENABLEESND :
2552 F_T3A_ENABLEESND);
4d22de3e 2553
3b1d307b 2554 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2555 F_ENABLEEPCMDAFULL,
2556 F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
2557 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2558 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
2559 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
2560 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
2561 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2562 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
2eab17ab 2563
2564 if (adap->params.rev > 0) {
2565 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2566 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
2567 F_TXPACEAUTO);
2568 t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
2569 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
2570 } else
2571 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2572
2573 if (adap->params.rev == T3_REV_C)
2574 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2575 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
2576 V_TABLELATENCYDELTA(4));
2577
2578 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2579 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2580 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2581 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2582}
2583
2584/* Desired TP timer resolution in usec */
2585#define TP_TMR_RES 50
2586
2587/* TCP timer values in ms */
2588#define TP_DACK_TIMER 50
2589#define TP_RTO_MIN 250
2590
2591/**
2592 * tp_set_timers - set TP timing parameters
2593 * @adap: the adapter to set
2594 * @core_clk: the core clock frequency in Hz
2595 *
2596 * Set TP's timing parameters, such as the various timer resolutions and
2597 * the TCP timer values.
2598 */
2599static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
2600{
2601 unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
2602 unsigned int dack_re = fls(core_clk / 5000) - 1; /* 200us */
2603 unsigned int tstamp_re = fls(core_clk / 1000); /* 1ms, at least */
2604 unsigned int tps = core_clk >> tre;
2605
2606 t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
2607 V_DELAYEDACKRESOLUTION(dack_re) |
2608 V_TIMESTAMPRESOLUTION(tstamp_re));
2609 t3_write_reg(adap, A_TP_DACK_TIMER,
2610 (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
2611 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
2612 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
2613 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
2614 t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
2615 t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
2616 V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
2617 V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
2618 V_KEEPALIVEMAX(9));
2619
2620#define SECONDS * tps
2621
2622 t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
2623 t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
2624 t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
2625 t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
2626 t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
2627 t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
2628 t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
2629 t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
2630 t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);
2631
2632#undef SECONDS
2633}
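/*
 * Worked example (illustrative): with a 200 MHz core clock, tre in
 * tp_set_timers() above is fls(200000000 / (1000000 / 50)) - 1 =
 * fls(10000) - 1 = 13, giving tps = 200000000 >> 13 ~= 24414 ticks per
 * second, i.e. a ~41 us tick -- the coarsest power-of-2 resolution still
 * at or under the 50 us TP_TMR_RES target.  "7200 SECONDS" then expands
 * to 7200 * tps ticks for registers such as A_TP_KEEP_IDLE.
 */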
2634
2635/**
2636 * t3_tp_set_coalescing_size - set receive coalescing size
2637 * @adap: the adapter
2638 * @size: the receive coalescing size
2639 * @psh: whether a set PSH bit should deliver coalesced data
2640 *
2641 * Set the receive coalescing size and PSH bit handling.
2642 */
2643int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2644{
2645 u32 val;
2646
2647 if (size > MAX_RX_COALESCING_LEN)
2648 return -EINVAL;
2649
2650 val = t3_read_reg(adap, A_TP_PARA_REG3);
2651 val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);
2652
2653 if (size) {
2654 val |= F_RXCOALESCEENABLE;
2655 if (psh)
2656 val |= F_RXCOALESCEPSHEN;
8a9fab22 2657 size = min(MAX_RX_COALESCING_LEN, size);
2658 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2659 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2660 }
2661 t3_write_reg(adap, A_TP_PARA_REG3, val);
2662 return 0;
2663}
2664
2665/**
2666 * t3_tp_set_max_rxsize - set the max receive size
2667 * @adap: the adapter
2668 * @size: the max receive size
2669 *
2670 * Set TP's max receive size. This is the limit that applies when
2671 * receive coalescing is disabled.
2672 */
2673void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
2674{
2675 t3_write_reg(adap, A_TP_PARA_REG7,
2676 V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
2677}
2678
7b9b0943 2679static void init_mtus(unsigned short mtus[])
2680{
2681 /*
2682 * See draft-mathis-plpmtud-00.txt for the values. The min is 88 so
2683 * it can accommodate max size TCP/IP headers when SACK and timestamps
2684 * are enabled and still have at least 8 bytes of payload.
2685 */
75758e8a 2686 mtus[0] = 88;
2687 mtus[1] = 88;
2688 mtus[2] = 256;
2689 mtus[3] = 512;
2690 mtus[4] = 576;
2691 mtus[5] = 1024;
2692 mtus[6] = 1280;
2693 mtus[7] = 1492;
2694 mtus[8] = 1500;
2695 mtus[9] = 2002;
2696 mtus[10] = 2048;
2697 mtus[11] = 4096;
2698 mtus[12] = 4352;
2699 mtus[13] = 8192;
2700 mtus[14] = 9000;
2701 mtus[15] = 9600;
2702}
2703
2704/*
2705 * Initial congestion control parameters.
2706 */
7b9b0943 2707static void init_cong_ctrl(unsigned short *a, unsigned short *b)
2708{
2709 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
2710 a[9] = 2;
2711 a[10] = 3;
2712 a[11] = 4;
2713 a[12] = 5;
2714 a[13] = 6;
2715 a[14] = 7;
2716 a[15] = 8;
2717 a[16] = 9;
2718 a[17] = 10;
2719 a[18] = 14;
2720 a[19] = 17;
2721 a[20] = 21;
2722 a[21] = 25;
2723 a[22] = 30;
2724 a[23] = 35;
2725 a[24] = 45;
2726 a[25] = 60;
2727 a[26] = 80;
2728 a[27] = 100;
2729 a[28] = 200;
2730 a[29] = 300;
2731 a[30] = 400;
2732 a[31] = 500;
2733
2734 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
2735 b[9] = b[10] = 1;
2736 b[11] = b[12] = 2;
2737 b[13] = b[14] = b[15] = b[16] = 3;
2738 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
2739 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
2740 b[28] = b[29] = 6;
2741 b[30] = b[31] = 7;
2742}
2743
2744/* The minimum additive increment value for the congestion control table */
2745#define CC_MIN_INCR 2U
2746
2747/**
2748 * t3_load_mtus - write the MTU and congestion control HW tables
2749 * @adap: the adapter
2750 * @mtus: the unrestricted values for the MTU table
2751 * @alpha: the values for the congestion control alpha parameter
2752 * @beta: the values for the congestion control beta parameter
2753 * @mtu_cap: the maximum permitted effective MTU
2754 *
2755 * Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
2756 * Update the high-speed congestion control table with the supplied alpha,
2757 * beta, and MTUs.
2758 */
2759void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
2760 unsigned short alpha[NCCTRL_WIN],
2761 unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
2762{
2763 static const unsigned int avg_pkts[NCCTRL_WIN] = {
2764 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
2765 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
2766 28672, 40960, 57344, 81920, 114688, 163840, 229376
2767 };
2768
2769 unsigned int i, w;
2770
2771 for (i = 0; i < NMTUS; ++i) {
2772 unsigned int mtu = min(mtus[i], mtu_cap);
2773 unsigned int log2 = fls(mtu);
2774
2775 if (!(mtu & ((1 << log2) >> 2))) /* round */
2776 log2--;
2777 t3_write_reg(adap, A_TP_MTU_TABLE,
2778 (i << 24) | (log2 << 16) | mtu);
2779
2780 for (w = 0; w < NCCTRL_WIN; ++w) {
2781 unsigned int inc;
2782
2783 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
2784 CC_MIN_INCR);
2785
2786 t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
2787 (w << 16) | (beta[w] << 13) | inc);
2788 }
2789 }
2790}
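/*
 * Worked example (illustrative): for mtu = 1500, fls(1500) = 11 and bit
 * (1 << 11) >> 2 = 512 is clear in 1500, so log2 drops to 10 and the
 * entry is tagged with the nearer power of two (1 << 10 = 1024); for
 * mtu = 1536 the 512 bit is set and the tag stays at 2048.  Each
 * congestion-control increment is then (mtu - 40) * alpha[w] /
 * avg_pkts[w] -- 40 bytes of TCP/IP header excluded -- floored at
 * CC_MIN_INCR = 2.
 */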
2791
2792/**
2793 * t3_read_hw_mtus - returns the values in the HW MTU table
2794 * @adap: the adapter
2795 * @mtus: where to store the HW MTU values
2796 *
2797 * Reads the HW MTU table.
2798 */
2799void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
2800{
2801 int i;
2802
2803 for (i = 0; i < NMTUS; ++i) {
2804 unsigned int val;
2805
2806 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
2807 val = t3_read_reg(adap, A_TP_MTU_TABLE);
2808 mtus[i] = val & 0x3fff;
2809 }
2810}
2811
2812/**
2813 * t3_get_cong_cntl_tab - reads the congestion control table
2814 * @adap: the adapter
2815 * @incr: where to store the additive increment values
2816 *
2817 * Reads the additive increments programmed into the HW congestion
2818 * control table.
2819 */
2820void t3_get_cong_cntl_tab(struct adapter *adap,
2821 unsigned short incr[NMTUS][NCCTRL_WIN])
2822{
2823 unsigned int mtu, w;
2824
2825 for (mtu = 0; mtu < NMTUS; ++mtu)
2826 for (w = 0; w < NCCTRL_WIN; ++w) {
2827 t3_write_reg(adap, A_TP_CCTRL_TABLE,
2828 0xffff0000 | (mtu << 5) | w);
2829 incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
2830 0x1fff;
2831 }
2832}
2833
2834/**
2835 * t3_tp_get_mib_stats - read TP's MIB counters
2836 * @adap: the adapter
2837 * @tps: holds the returned counter values
2838 *
2839 * Returns the values of TP's MIB counters.
2840 */
2841void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
2842{
2843 t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
2844 sizeof(*tps) / sizeof(u32), 0);
2845}
2846
2847#define ulp_region(adap, name, start, len) \
2848 t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
2849 t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
2850 (start) + (len) - 1); \
2851 start += len
2852
2853#define ulptx_region(adap, name, start, len) \
2854 t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
2855 t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
2856 (start) + (len) - 1)
2857
2858static void ulp_config(struct adapter *adap, const struct tp_params *p)
2859{
2860 unsigned int m = p->chan_rx_size;
2861
2862 ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
2863 ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
2864 ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
2865 ulp_region(adap, STAG, m, p->chan_rx_size / 4);
2866 ulp_region(adap, RQ, m, p->chan_rx_size / 4);
2867 ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
2868 ulp_region(adap, PBL, m, p->chan_rx_size / 4);
2869 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2870}
2871
2872/**
2873 * t3_set_proto_sram - set the contents of the protocol sram
2874 * @adapter: the adapter
2875 * @data: the protocol image
2876 *
2877 * Write the contents of the protocol SRAM.
2878 */
2c733a16 2879int t3_set_proto_sram(struct adapter *adap, const u8 *data)
2880{
2881 int i;
2c733a16 2882 const __be32 *buf = (const __be32 *)data;
2883
2884 for (i = 0; i < PROTO_SRAM_LINES; i++) {
2885 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
2886 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
2887 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
2888 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
2889 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
2eab17ab 2890
2891 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2892 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2893 return -EIO;
2894 }
2895 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
2896
2897 return 0;
2898}
2899
2900void t3_config_trace_filter(struct adapter *adapter,
2901 const struct trace_params *tp, int filter_index,
2902 int invert, int enable)
2903{
2904 u32 addr, key[4], mask[4];
2905
2906 key[0] = tp->sport | (tp->sip << 16);
2907 key[1] = (tp->sip >> 16) | (tp->dport << 16);
2908 key[2] = tp->dip;
2909 key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);
2910
2911 mask[0] = tp->sport_mask | (tp->sip_mask << 16);
2912 mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
2913 mask[2] = tp->dip_mask;
2914 mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);
2915
2916 if (invert)
2917 key[3] |= (1 << 29);
2918 if (enable)
2919 key[3] |= (1 << 28);
2920
2921 addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
2922 tp_wr_indirect(adapter, addr++, key[0]);
2923 tp_wr_indirect(adapter, addr++, mask[0]);
2924 tp_wr_indirect(adapter, addr++, key[1]);
2925 tp_wr_indirect(adapter, addr++, mask[1]);
2926 tp_wr_indirect(adapter, addr++, key[2]);
2927 tp_wr_indirect(adapter, addr++, mask[2]);
2928 tp_wr_indirect(adapter, addr++, key[3]);
2929 tp_wr_indirect(adapter, addr, mask[3]);
2930 t3_read_reg(adapter, A_TP_PIO_DATA);
2931}
2932
2933/**
2934 * t3_config_sched - configure a HW traffic scheduler
2935 * @adap: the adapter
2936 * @kbps: target rate in Kbps
2937 * @sched: the scheduler index
2938 *
2939 * Configure a HW scheduler for the target rate
2940 */
2941int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
2942{
2943 unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
2944 unsigned int clk = adap->params.vpd.cclk * 1000;
2945 unsigned int selected_cpt = 0, selected_bpt = 0;
2946
2947 if (kbps > 0) {
2948 kbps *= 125; /* -> bytes */
2949 for (cpt = 1; cpt <= 255; cpt++) {
2950 tps = clk / cpt;
2951 bpt = (kbps + tps / 2) / tps;
2952 if (bpt > 0 && bpt <= 255) {
2953 v = bpt * tps;
2954 delta = v >= kbps ? v - kbps : kbps - v;
2955 if (delta <= mindelta) {
2956 mindelta = delta;
2957 selected_cpt = cpt;
2958 selected_bpt = bpt;
2959 }
2960 } else if (selected_cpt)
2961 break;
2962 }
2963 if (!selected_cpt)
2964 return -EINVAL;
2965 }
2966 t3_write_reg(adap, A_TP_TM_PIO_ADDR,
2967 A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
2968 v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
2969 if (sched & 1)
2970 v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
2971 else
2972 v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
2973 t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
2974 return 0;
2975}
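/*
 * Illustrative sketch, not part of the driver: t3_config_sched() above
 * searches for a (cycles-per-tick, bytes-per-tick) pair whose product
 * best approximates the target rate.  The search in isolation (function
 * and parameter names are ours; target_bps is the rate in bytes/sec,
 * i.e. kbps * 125):
 */
static int pick_sched_params(unsigned int target_bps, unsigned int clk_hz,
			     unsigned int *cpt_out, unsigned int *bpt_out)
{
	unsigned int cpt, best_err = ~0U;

	*cpt_out = 0;
	for (cpt = 1; cpt <= 255; cpt++) {
		unsigned int tps = clk_hz / cpt;	/* ticks per second */
		unsigned int bpt = (target_bps + tps / 2) / tps;

		if (bpt > 0 && bpt <= 255) {
			unsigned int rate = bpt * tps;
			unsigned int err = rate > target_bps ?
					   rate - target_bps :
					   target_bps - rate;

			if (err <= best_err) {
				best_err = err;
				*cpt_out = cpt;
				*bpt_out = bpt;
			}
		} else if (*cpt_out) {
			break;	/* bpt only grows from here on */
		}
	}
	return *cpt_out ? 0 : -1;
}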
2976
2977static int tp_init(struct adapter *adap, const struct tp_params *p)
2978{
2979 int busy = 0;
2980
2981 tp_config(adap, p);
2982 t3_set_vlan_accel(adap, 3, 0);
2983
2984 if (is_offload(adap)) {
2985 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
2986 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
2987 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
2988 0, 1000, 5);
2989 if (busy)
2990 CH_ERR(adap, "TP initialization timed out\n");
2991 }
2992
2993 if (!busy)
2994 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
2995 return busy;
2996}
2997
2998int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
2999{
3000 if (port_mask & ~((1 << adap->params.nports) - 1))
3001 return -EINVAL;
3002 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3003 port_mask << S_PORT0ACTIVE);
3004 return 0;
3005}
3006
3007/*
3008 * Perform the bits of HW initialization that are dependent on the number
3009 * of available ports.
3010 */
3011static void init_hw_for_avail_ports(struct adapter *adap, int nports)
3012{
3013 int i;
3014
3015 if (nports == 1) {
3016 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3017 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3018 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
3019 F_PORT0ACTIVE | F_ENFORCEPKT);
8a9fab22 3020 t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
3021 } else {
3022 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3023 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3024 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3025 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3026 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3027 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3028 F_ENFORCEPKT);
3029 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3030 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3031 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3032 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3033 for (i = 0; i < 16; i++)
3034 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3035 (i << 16) | 0x1010);
3036 }
3037}
3038
3039static int calibrate_xgm(struct adapter *adapter)
3040{
3041 if (uses_xaui(adapter)) {
3042 unsigned int v, i;
3043
3044 for (i = 0; i < 5; ++i) {
3045 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3046 t3_read_reg(adapter, A_XGM_XAUI_IMP);
3047 msleep(1);
3048 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3049 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3050 t3_write_reg(adapter, A_XGM_XAUI_IMP,
3051 V_XAUIIMP(G_CALIMP(v) >> 2));
3052 return 0;
3053 }
3054 }
3055 CH_ERR(adapter, "MAC calibration failed\n");
3056 return -1;
3057 } else {
3058 t3_write_reg(adapter, A_XGM_RGMII_IMP,
3059 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3060 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3061 F_XGM_IMPSETUPDATE);
3062 }
3063 return 0;
3064}
3065
3066static void calibrate_xgm_t3b(struct adapter *adapter)
3067{
3068 if (!uses_xaui(adapter)) {
3069 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3070 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3071 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3072 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3073 F_XGM_IMPSETUPDATE);
3074 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3075 0);
3076 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3077 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
3078 }
3079}
3080
3081struct mc7_timing_params {
3082 unsigned char ActToPreDly;
3083 unsigned char ActToRdWrDly;
3084 unsigned char PreCyc;
3085 unsigned char RefCyc[5];
3086 unsigned char BkCyc;
3087 unsigned char WrToRdDly;
3088 unsigned char RdToWrDly;
3089};
3090
3091/*
3092 * Write a value to a register and check that the write completed. These
3093 * writes normally complete in a cycle or two, so one read should suffice.
3094 * The very first read exists to flush the posted write to the device.
3095 */
3096static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
3097{
3098 t3_write_reg(adapter, addr, val);
3099 t3_read_reg(adapter, addr); /* flush */
3100 if (!(t3_read_reg(adapter, addr) & F_BUSY))
3101 return 0;
3102 CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
3103 return -EIO;
3104}
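/*
 * Illustrative sketch, not part of the driver: wrreg_wait() above uses
 * the standard MMIO posted-write idiom -- write, read back once purely
 * to flush the posted write to the device, then read again for the
 * status that matters.  In pattern form (names are ours):
 */
static int write_then_check(volatile unsigned int *reg, unsigned int val,
			    unsigned int busy_bit)
{
	*reg = val;		/* posted write */
	(void)*reg;		/* first read: flushes the write */
	return (*reg & busy_bit) ? -1 : 0; /* second read: real status */
}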
3105
3106static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
3107{
3108 static const unsigned int mc7_mode[] = {
3109 0x632, 0x642, 0x652, 0x432, 0x442
3110 };
3111 static const struct mc7_timing_params mc7_timings[] = {
3112 {12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
3113 {12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
3114 {12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
3115 {9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
3116 {9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
3117 };
3118
3119 u32 val;
3120 unsigned int width, density, slow, attempts;
3121 struct adapter *adapter = mc7->adapter;
3122 const struct mc7_timing_params *p = &mc7_timings[mem_type];
3123
3124 if (!mc7->size)
3125 return 0;
3126
3127 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
3128 slow = val & F_SLOW;
3129 width = G_WIDTH(val);
3130 density = G_DEN(val);
3131
3132 t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
3133 val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3134 msleep(1);
3135
3136 if (!slow) {
3137 t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
3138 t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
3139 msleep(1);
3140 if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
3141 (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
3142 CH_ERR(adapter, "%s MC7 calibration timed out\n",
3143 mc7->name);
3144 goto out_fail;
3145 }
3146 }
3147
3148 t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
3149 V_ACTTOPREDLY(p->ActToPreDly) |
3150 V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
3151 V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
3152 V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));
3153
3154 t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
3155 val | F_CLKEN | F_TERM150);
3156 t3_read_reg(adapter, mc7->offset + A_MC7_CFG); /* flush */
3157
3158 if (!slow)
3159 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
3160 F_DLLENB);
3161 udelay(1);
3162
3163 val = slow ? 3 : 6;
3164 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3165 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
3166 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
3167 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3168 goto out_fail;
3169
3170 if (!slow) {
3171 t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
3172 t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
3173 udelay(5);
3174 }
3175
3176 if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
3177 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3178 wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
3179 wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
3180 mc7_mode[mem_type]) ||
3181 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
3182 wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
3183 goto out_fail;
3184
3185 /* clock value is in KHz */
3186 mc7_clock = mc7_clock * 7812 + mc7_clock / 2; /* ns */
3187 mc7_clock /= 1000000; /* KHz->MHz, ns->us */
3188
3189 t3_write_reg(adapter, mc7->offset + A_MC7_REF,
3190 F_PERREFEN | V_PREREFDIV(mc7_clock));
3191 t3_read_reg(adapter, mc7->offset + A_MC7_REF); /* flush */
3192
3193 t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
3194 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
3195 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
3196 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
3197 (mc7->size << width) - 1);
3198 t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
3199 t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP); /* flush */
3200
3201 attempts = 50;
3202 do {
3203 msleep(250);
3204 val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
3205 } while ((val & F_BUSY) && --attempts);
3206 if (val & F_BUSY) {
3207 CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
3208 goto out_fail;
3209 }
3210
3211 /* Enable normal memory accesses. */
3212 t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
3213 return 0;
3214
3215out_fail:
3216 return -1;
3217}
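/*
 * Worked example (illustrative; our interpretation of the constant): the
 * "* 7812 + / 2" step in mc7_init() above multiplies the clock, given in
 * kHz, by 7812.5, which reads as a 7.8125 us DRAM refresh interval
 * expressed in nanoseconds.  For a 200 MHz memory clock:
 * 200000 * 7812.5 / 1000000 = 1562, so V_PREREFDIV is programmed with
 * roughly 1562 clock cycles per refresh period.
 */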
3218
3219static void config_pcie(struct adapter *adap)
3220{
3221 static const u16 ack_lat[4][6] = {
3222 {237, 416, 559, 1071, 2095, 4143},
3223 {128, 217, 289, 545, 1057, 2081},
3224 {73, 118, 154, 282, 538, 1050},
3225 {67, 107, 86, 150, 278, 534}
3226 };
3227 static const u16 rpl_tmr[4][6] = {
3228 {711, 1248, 1677, 3213, 6285, 12429},
3229 {384, 651, 867, 1635, 3171, 6243},
3230 {219, 354, 462, 846, 1614, 3150},
3231 {201, 321, 258, 450, 834, 1602}
3232 };
3233
3234 u16 val;
3235 unsigned int log2_width, pldsize;
3236 unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;
3237
3238 pci_read_config_word(adap->pdev,
3239 adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
3240 &val);
3241 pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;
3242 pci_read_config_word(adap->pdev,
3243 adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
3244 &val);
3245
3246 fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
3247 fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
3248 G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
3249 log2_width = fls(adap->params.pci.width) - 1;
3250 acklat = ack_lat[log2_width][pldsize];
3251 if (val & 1) /* check LOsEnable */
3252 acklat += fst_trn_tx * 4;
3253 rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;
3254
3255 if (adap->params.rev == 0)
3256 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
3257 V_T3A_ACKLAT(M_T3A_ACKLAT),
3258 V_T3A_ACKLAT(acklat));
3259 else
3260 t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
3261 V_ACKLAT(acklat));
3262
3263 t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
3264 V_REPLAYLMT(rpllmt));
3265
3266 t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
b881955b 3267 t3_set_reg_field(adap, A_PCIE_CFG, 0,
204e2f98 3268 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
b881955b 3269 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
3270}
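/*
 * Worked example (illustrative): in config_pcie() above, a x8 link gives
 * log2_width = fls(8) - 1 = 3, selecting the last row of ack_lat[] and
 * rpl_tmr[], while the max-payload field from PCI_EXP_DEVCTL selects the
 * column; e.g. a 512-byte payload (pldsize = 2) yields acklat = 86
 * before the fast-training adjustment is added.
 */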
3271
3272/*
3273 * Initialize and configure T3 HW modules. This performs the
3274 * initialization steps that need to be done once after a card is reset.
3275 * MAC and PHY initialization is handled separately whenever a port is enabled.
3276 *
3277 * fw_params are passed to FW and their value is platform dependent. Only the
3278 * top 8 bits are available for use, the rest must be 0.
3279 */
3280int t3_init_hw(struct adapter *adapter, u32 fw_params)
3281{
b881955b 3282 int err = -EIO, attempts, i;
3283 const struct vpd_params *vpd = &adapter->params.vpd;
3284
3285 if (adapter->params.rev > 0)
3286 calibrate_xgm_t3b(adapter);
3287 else if (calibrate_xgm(adapter))
3288 goto out_err;
3289
3290 if (vpd->mclk) {
3291 partition_mem(adapter, &adapter->params.tp);
3292
3293 if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
3294 mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
3295 mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
3296 t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
3297 adapter->params.mc5.nfilters,
3298 adapter->params.mc5.nroutes))
3299 goto out_err;
3300
3301 for (i = 0; i < 32; i++)
3302 if (clear_sge_ctxt(adapter, i, F_CQ))
3303 goto out_err;
3304 }
3305
3306 if (tp_init(adapter, &adapter->params.tp))
3307 goto out_err;
3308
3309 t3_tp_set_coalescing_size(adapter,
3310 min(adapter->params.sge.max_pkt_size,
3311 MAX_RX_COALESCING_LEN), 1);
3312 t3_tp_set_max_rxsize(adapter,
3313 min(adapter->params.sge.max_pkt_size, 16384U));
3314 ulp_config(adapter, &adapter->params.tp);
3315
3316 if (is_pcie(adapter))
3317 config_pcie(adapter);
3318 else
3319 t3_set_reg_field(adapter, A_PCIX_CFG, 0,
3320 F_DMASTOPEN | F_CLIDECEN);
4d22de3e 3321
3322 if (adapter->params.rev == T3_REV_C)
3323 t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
3324 F_CFG_CQE_SOP_MASK);
3325
8a9fab22 3326 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3327 t3_write_reg(adapter, A_PM1_RX_MODE, 0);
3328 t3_write_reg(adapter, A_PM1_TX_MODE, 0);
3329 init_hw_for_avail_ports(adapter, adapter->params.nports);
3330 t3_sge_init(adapter, &adapter->params.sge);
3331
3332 t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
3333 t3_write_reg(adapter, A_CIM_BOOT_CFG,
3334 V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
3335 t3_read_reg(adapter, A_CIM_BOOT_CFG); /* flush */
3336
b881955b 3337 attempts = 100;
3338 do { /* wait for uP to initialize */
3339 msleep(20);
3340 } while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
3341 if (!attempts) {
3342 CH_ERR(adapter, "uP initialization timed out\n");
4d22de3e 3343 goto out_err;
8ac3ba68 3344 }
3345
3346 err = 0;
3347out_err:
3348 return err;
3349}
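/*
 * Minimal usage sketch (hypothetical caller, not part of the driver):
 * platform data for the firmware goes in the top 8 bits of fw_params;
 * all other bits must stay zero.
 */
static int example_init(struct adapter *adapter)
{
	u32 fw_params = 0x5aU << 24;	/* arbitrary platform byte, bits 31:24 */

	return t3_init_hw(adapter, fw_params);
}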
3350
3351/**
3352 * get_pci_mode - determine a card's PCI mode
3353 * @adapter: the adapter
3354 * @p: where to store the PCI settings
3355 *
3356 * Determines a card's PCI mode and associated parameters, such as speed
3357 * and width.
3358 */
7b9b0943 3359static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
3360{
3361 static unsigned short speed_map[] = { 33, 66, 100, 133 };
3362 u32 pci_mode, pcie_cap;
3363
3364 pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
3365 if (pcie_cap) {
3366 u16 val;
3367
3368 p->variant = PCI_VARIANT_PCIE;
3369 p->pcie_cap_addr = pcie_cap;
3370 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
3371 &val);
3372 p->width = (val >> 4) & 0x3f;
3373 return;
3374 }
3375
3376 pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
3377 p->speed = speed_map[G_PCLKRANGE(pci_mode)];
3378 p->width = (pci_mode & F_64BIT) ? 64 : 32;
3379 pci_mode = G_PCIXINITPAT(pci_mode);
3380 if (pci_mode == 0)
3381 p->variant = PCI_VARIANT_PCI;
3382 else if (pci_mode < 4)
3383 p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
3384 else if (pci_mode < 8)
3385 p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
3386 else
3387 p->variant = PCI_VARIANT_PCIX_266_MODE2;
3388}
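/*
 * Worked example (illustrative): PCI_EXP_LNKSTA carries the negotiated
 * link width in bits 9:4, so a hypothetical status value of 0x0081
 * decodes as (0x0081 >> 4) & 0x3f = 8, i.e. a x8 link.  On the PCI-X
 * path, G_PCIXINITPAT values 1-3 map to Mode 1 parity, 4-7 to Mode 1
 * ECC, and 8 and up to 266 MHz Mode 2, matching the tests above.
 */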
3389
3390/**
3391 * init_link_config - initialize a link's SW state
3392 * @lc: structure holding the link state
3393 * @caps: the link's capabilities (a bitmap of SUPPORTED_* values)
3394 *
3395 * Initializes the SW state maintained for each link, including the link's
3396 * capabilities and default speed/duplex/flow-control/autonegotiation
3397 * settings.
3398 */
7b9b0943 3399static void init_link_config(struct link_config *lc, unsigned int caps)
3400{
3401 lc->supported = caps;
3402 lc->requested_speed = lc->speed = SPEED_INVALID;
3403 lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
3404 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
3405 if (lc->supported & SUPPORTED_Autoneg) {
3406 lc->advertising = lc->supported;
3407 lc->autoneg = AUTONEG_ENABLE;
3408 lc->requested_fc |= PAUSE_AUTONEG;
3409 } else {
3410 lc->advertising = 0;
3411 lc->autoneg = AUTONEG_DISABLE;
3412 }
3413}
3414
3415/**
3416 * mc7_calc_size - calculate MC7 memory size
3417 * @cfg: the MC7 configuration
3418 *
3419 * Calculates the size of an MC7 memory in bytes from the value of its
3420 * configuration register.
3421 */
7b9b0943 3422static unsigned int mc7_calc_size(u32 cfg)
3423{
3424 unsigned int width = G_WIDTH(cfg);
3425 unsigned int banks = !!(cfg & F_BKS) + 1;
3426 unsigned int org = !!(cfg & F_ORG) + 1;
3427 unsigned int density = G_DEN(cfg);
3428 unsigned int MBs = ((256 << density) * banks) / (org << width);
3429
3430 return MBs << 20;
3431}
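/*
 * Worked example (illustrative): a hypothetical cfg with density 2,
 * two banks (F_BKS set), org 1 (F_ORG clear) and width 1 yields
 * MBs = ((256 << 2) * 2) / (1 << 1) = 1024, so mc7_calc_size() reports
 * 1024 MB (1 GB) for that configuration.
 */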
3432
3433static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3434 unsigned int base_addr, const char *name)
3435{
3436 u32 cfg;
3437
3438 mc7->adapter = adapter;
3439 mc7->name = name;
3440 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3441 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
8ac3ba68 3442 mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
3443 mc7->width = G_WIDTH(cfg);
3444}
3445
3446void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
3447{
3448 mac->adapter = adapter;
3449 mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
3450 mac->nucast = 1;
3451
3452 if (adapter->params.rev == 0 && uses_xaui(adapter)) {
3453 t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
3454 is_10G(adapter) ? 0x2901c04 : 0x2301c04);
3455 t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
3456 F_ENRGMII, 0);
3457 }
3458}
3459
3460void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
3461{
3462 u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);
3463
3464 mi1_init(adapter, ai);
3465 t3_write_reg(adapter, A_I2C_CFG, /* set for 80 kHz */
3466 V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
3467 t3_write_reg(adapter, A_T3DBG_GPIO_EN,
3468 ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
8ac3ba68 3469 t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
b881955b 3470 t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));
3471
3472 if (adapter->params.rev == 0 || !uses_xaui(adapter))
3473 val |= F_ENRGMII;
3474
3475 /* Enable MAC clocks so we can access the registers */
3476 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3477 t3_read_reg(adapter, A_XGM_PORT_CFG);
3478
3479 val |= F_CLKDIVRESET_;
3480 t3_write_reg(adapter, A_XGM_PORT_CFG, val);
3481 t3_read_reg(adapter, A_XGM_PORT_CFG);
3482 t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
3483 t3_read_reg(adapter, A_XGM_PORT_CFG);
3484}
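/*
 * Note on the I2C divider above (an assumption, based on how cclk is
 * used elsewhere in the driver): vpd.cclk appears to be the core clock
 * in kHz, so a hypothetical 200 MHz core clock would program
 * V_I2C_CLKDIV(200000 / 80 - 1) = V_I2C_CLKDIV(2499) for the 80 kHz
 * I2C target.
 */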
3485
3486/*
2eab17ab 3487 * Reset the adapter.
e4d08359 3488 * Older PCIe cards lose their config space during reset; PCI-X
3489 * ones don't.
3490 */
9265fabf 3491static int t3_reset_adapter(struct adapter *adapter)
4d22de3e 3492{
2eab17ab 3493 int i, save_and_restore_pcie =
e4d08359 3494 adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
3495 uint16_t devid = 0;
3496
e4d08359 3497 if (save_and_restore_pcie)
3498 pci_save_state(adapter->pdev);
3499 t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);
3500
3501 /*
3502 * Give the device time to come fully out of reset: poll until the
3503 * Chelsio vendor ID (0x1425) reads back. XXX tune the delay budget.
3504 */
3505 for (i = 0; i < 10; i++) {
3506 msleep(50);
3507 pci_read_config_word(adapter->pdev, 0x00, &devid);
3508 if (devid == 0x1425)
3509 break;
3510 }
3511
3512 if (devid != 0x1425)
3513 return -1;
3514
e4d08359 3515 if (save_and_restore_pcie)
3516 pci_restore_state(adapter->pdev);
3517 return 0;
3518}
3519
7b9b0943 3520static int init_parity(struct adapter *adap)
3521{
3522 int i, err, addr;
3523
3524 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3525 return -EBUSY;
3526
3527 for (err = i = 0; !err && i < 16; i++)
3528 err = clear_sge_ctxt(adap, i, F_EGRESS);
3529 for (i = 0xfff0; !err && i <= 0xffff; i++)
3530 err = clear_sge_ctxt(adap, i, F_EGRESS);
3531 for (i = 0; !err && i < SGE_QSETS; i++)
3532 err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
3533 if (err)
3534 return err;
3535
3536 t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3537 for (i = 0; i < 4; i++)
3538 for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3539 t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3540 F_IBQDBGWR | V_IBQDBGQID(i) |
3541 V_IBQDBGADDR(addr));
3542 err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3543 F_IBQDBGBUSY, 0, 2, 1);
3544 if (err)
3545 return err;
3546 }
3547 return 0;
3548}
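/*
 * Note on init_parity() (descriptive, added for clarity): the SGE loops
 * zero the first 16 and last 16 egress contexts plus every response-queue
 * context, and the CIM loop writes a zero data word to each of the
 * M_IBQDBGADDR + 1 locations of all four ingress queues, polling
 * F_IBQDBGBUSY after each write, so every location starts with valid
 * parity.
 */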
3549
3550/*
3551 * Initialize adapter SW state for the various HW modules, set initial values
3552 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
3553 * interface.
3554 */
3555int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
3556 int reset)
3557{
3558 int ret;
3559 unsigned int i, j = 0;
3560
3561 get_pci_mode(adapter, &adapter->params.pci);
3562
3563 adapter->params.info = ai;
3564 adapter->params.nports = ai->nports;
3565 adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
3566 adapter->params.linkpoll_period = 0;
3567 adapter->params.stats_update_period = is_10G(adapter) ?
3568 MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
3569 adapter->params.pci.vpd_cap_addr =
3570 pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
3571 ret = get_vpd_params(adapter, &adapter->params.vpd);
3572 if (ret < 0)
3573 return ret;
3574
3575 if (reset && t3_reset_adapter(adapter))
3576 return -1;
3577
3578 t3_sge_prep(adapter, &adapter->params.sge);
3579
3580 if (adapter->params.vpd.mclk) {
3581 struct tp_params *p = &adapter->params.tp;
3582
3583 mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
3584 mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
3585 mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
3586
3587 p->nchan = ai->nports;
3588 p->pmrx_size = t3_mc7_size(&adapter->pmrx);
3589 p->pmtx_size = t3_mc7_size(&adapter->pmtx);
3590 p->cm_size = t3_mc7_size(&adapter->cm);
3591 p->chan_rx_size = p->pmrx_size / 2; /* only 1 Rx channel */
3592 p->chan_tx_size = p->pmtx_size / p->nchan;
3593 p->rx_pg_size = 64 * 1024;
3594 p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
3595 p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
3596 p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
3597 p->ntimer_qs = (p->cm_size >= (128 << 20) ||
3598 adapter->params.rev > 0) ? 12 : 6;
3599 }
3600
3601 adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
3602 t3_mc7_size(&adapter->pmtx) &&
3603 t3_mc7_size(&adapter->cm);
4d22de3e 3604
8ac3ba68 3605 if (is_offload(adapter)) {
3606 adapter->params.mc5.nservers = DEFAULT_NSERVERS;
3607 adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
3608 DEFAULT_NFILTERS : 0;
3609 adapter->params.mc5.nroutes = 0;
3610 t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);
3611
3612 init_mtus(adapter->params.mtus);
3613 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
3614 }
3615
3616 early_hw_init(adapter, ai);
3617 ret = init_parity(adapter);
3618 if (ret)
3619 return ret;
3620
3621 for_each_port(adapter, i) {
3622 u8 hw_addr[6];
3623 struct port_info *p = adap2pinfo(adapter, i);
3624
3625 while (!adapter->params.vpd.port_type[j])
3626 ++j;
3627
3628 p->port_type = &port_types[adapter->params.vpd.port_type[j]];
3629 p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3630 ai->mdio_ops);
3631 mac_prep(&p->mac, adapter, j);
3632 ++j;
3633
3634 /*
3635 * The VPD EEPROM stores the base Ethernet address for the
3636 * card. A port's address is derived from the base by adding the
3637 * port's index to the base's low octet, so port 1 gets base + 1.
3638 */
3639 memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
3640 hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
3641
3642 memcpy(adapter->port[i]->dev_addr, hw_addr,
3643 ETH_ALEN);
3644 memcpy(adapter->port[i]->perm_addr, hw_addr,
3645 ETH_ALEN);
3646 init_link_config(&p->link_config, p->port_type->caps);
3647 p->phy.ops->power_down(&p->phy, 1);
3648 if (!(p->port_type->caps & SUPPORTED_IRQ))
3649 adapter->params.linkpoll_period = 10;
3650 }
3651
3652 return 0;
3653}
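/*
 * Hypothetical probe-time sketch (illustrative only): prepare the SW
 * state once, resetting the card, then bring the HW modules up.  The ai
 * pointer would come from the PCI ID table; error unwinding is elided.
 */
static int example_probe(struct adapter *adapter, const struct adapter_info *ai)
{
	int err;

	err = t3_prep_adapter(adapter, ai, 1);	/* 1 => reset the card */
	if (err)
		return err;
	return t3_init_hw(adapter, 0);		/* no platform fw_params */
}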
3654
3655void t3_led_ready(struct adapter *adapter)
3656{
3657 t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
3658 F_GPIO0_OUT_VAL);
3659}
3660
3661int t3_replay_prep_adapter(struct adapter *adapter)
3662{
3663 const struct adapter_info *ai = adapter->params.info;
3664 unsigned int i, j = 0;
3665 int ret;
3666
3667 early_hw_init(adapter, ai);
3668 ret = init_parity(adapter);
3669 if (ret)
3670 return ret;
3671
3672 for_each_port(adapter, i) {
3673 struct port_info *p = adap2pinfo(adapter, i);
3674 while (!adapter->params.vpd.port_type[j])
3675 ++j;
3676
3677 p->port_type->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
3678 ai->mdio_ops);
3679
3680 p->phy.ops->power_down(&p->phy, 1);
3681 ++j;
3682 }
3683
3684 return 0;
3685}
3686