/* drivers/net/cxgb3/t3_hw.c — Chelsio T3 Ethernet adapter hardware support */
4d22de3e 1/*
a02d44a0 2 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
4d22de3e 3 *
1d68e93d
DLR
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
4d22de3e 9 *
1d68e93d
DLR
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
4d22de3e 31 */
4d22de3e
DLR
32#include "common.h"
33#include "regs.h"
34#include "sge_defs.h"
35#include "firmware_exports.h"
36
f2c6879e
DLR
37/**
38 * t3_wait_op_done_val - wait until an operation is completed
39 * @adapter: the adapter performing the operation
40 * @reg: the register to check for completion
41 * @mask: a single-bit field within @reg that indicates completion
42 * @polarity: the value of the field when the operation is completed
43 * @attempts: number of check iterations
44 * @delay: delay in usecs between iterations
45 * @valp: where to store the value of the register at completion time
46 *
47 * Wait until an operation is completed by checking a bit in a register
48 * up to @attempts times. If @valp is not NULL the value of the register
49 * at the time it indicated completion is stored there. Returns 0 if the
50 * operation completes and -EAGAIN otherwise.
51 */
4d22de3e
DLR
52
53int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
54 int polarity, int attempts, int delay, u32 *valp)
55{
56 while (1) {
57 u32 val = t3_read_reg(adapter, reg);
58
59 if (!!(val & mask) == polarity) {
60 if (valp)
61 *valp = val;
62 return 0;
63 }
64 if (--attempts == 0)
b881955b 65 return -EAGAIN;
4d22de3e
DLR
66 if (delay)
67 udelay(delay);
68 }
69}
70
71/**
72 * t3_write_regs - write a bunch of registers
73 * @adapter: the adapter to program
74 * @p: an array of register address/register value pairs
75 * @n: the number of address/value pairs
76 * @offset: register address offset
77 *
78 * Takes an array of register address/register value pairs and writes each
79 * value to the corresponding register. Register addresses are adjusted
80 * by the supplied offset.
81 */
82void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
83 int n, unsigned int offset)
84{
85 while (n--) {
86 t3_write_reg(adapter, p->reg_addr + offset, p->val);
87 p++;
88 }
89}
90
91/**
92 * t3_set_reg_field - set a register field to a value
93 * @adapter: the adapter to program
94 * @addr: the register address
95 * @mask: specifies the portion of the register to modify
96 * @val: the new value for the register field
97 *
98 * Sets a register field specified by the supplied mask to the
99 * given value.
100 */
101void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
102 u32 val)
103{
104 u32 v = t3_read_reg(adapter, addr) & ~mask;
105
106 t3_write_reg(adapter, addr, v | val);
107 t3_read_reg(adapter, addr); /* flush */
108}
109
110/**
111 * t3_read_indirect - read indirectly addressed registers
112 * @adap: the adapter
113 * @addr_reg: register holding the indirect address
114 * @data_reg: register holding the value of the indirect register
115 * @vals: where the read register values are stored
116 * @start_idx: index of first indirect register to read
117 * @nregs: how many indirect registers to read
118 *
119 * Reads registers that are accessed indirectly through an address/data
120 * register pair.
121 */
9265fabf
SH
122static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
123 unsigned int data_reg, u32 *vals,
124 unsigned int nregs, unsigned int start_idx)
4d22de3e
DLR
125{
126 while (nregs--) {
127 t3_write_reg(adap, addr_reg, start_idx);
128 *vals++ = t3_read_reg(adap, data_reg);
129 start_idx++;
130 }
131}
132
/**
 *	t3_mc7_bd_read - read from MC7 through backdoor accesses
 *	@mc7: identifies MC7 to read from
 *	@start: index of first 64-bit word to read
 *	@n: number of 64-bit words to read
 *	@buf: where to store the read result
 *
 *	Read n 64-bit words from MC7 starting at word start, using backdoor
 *	accesses.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	/*
	 * Per-width extraction tables, indexed by mc7->width: how far the
	 * DATA1 value is shifted and how many bits each backdoor read
	 * contributes to the assembled 64-bit word.
	 */
	static const int shift[] = { 0, 0, 16, 24 };
	static const int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	struct adapter *adap = mc7->adapter;

	/* reject reads that start or run past the end of the memory */
	if (start >= size64 || start + n > size64)
		return -EINVAL;

	/* convert 64-bit word index to a backdoor address */
	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		/* each 64-bit word takes (1 << width) backdoor reads */
		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			/* issue the backdoor read and poll for completion */
			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				/* full-width memory: DATA0/DATA1 hold the
				 * low/high halves of the 64-bit word */
				val64 = t3_read_reg(adap,
						    mc7->offset +
						    A_MC7_BD_DATA0);
				val64 |= (u64) val << 32;
			} else {
				/* narrow memory: take the partial word from
				 * DATA1 and merge it at its position i */
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64) val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}
190
191/*
192 * Initialize MI1.
193 */
194static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
195{
196 u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
04497982 197 u32 val = F_PREEN | V_CLKDIV(clkdiv);
4d22de3e 198
4d22de3e
DLR
199 t3_write_reg(adap, A_MI1_CFG, val);
200}
201
04497982 202#define MDIO_ATTEMPTS 20
4d22de3e
DLR
203
204/*
04497982 205 * MI1 read/write operations for clause 22 PHYs.
4d22de3e 206 */
0f07c4ee
BH
207static int t3_mi1_read(struct net_device *dev, int phy_addr, int mmd_addr,
208 u16 reg_addr)
4d22de3e 209{
0f07c4ee
BH
210 struct port_info *pi = netdev_priv(dev);
211 struct adapter *adapter = pi->adapter;
4d22de3e
DLR
212 int ret;
213 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
214
4d22de3e 215 mutex_lock(&adapter->mdio_lock);
04497982 216 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
4d22de3e
DLR
217 t3_write_reg(adapter, A_MI1_ADDR, addr);
218 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
04497982 219 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
4d22de3e 220 if (!ret)
0f07c4ee 221 ret = t3_read_reg(adapter, A_MI1_DATA);
4d22de3e
DLR
222 mutex_unlock(&adapter->mdio_lock);
223 return ret;
224}
225
0f07c4ee
BH
226static int t3_mi1_write(struct net_device *dev, int phy_addr, int mmd_addr,
227 u16 reg_addr, u16 val)
4d22de3e 228{
0f07c4ee
BH
229 struct port_info *pi = netdev_priv(dev);
230 struct adapter *adapter = pi->adapter;
4d22de3e
DLR
231 int ret;
232 u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);
233
4d22de3e 234 mutex_lock(&adapter->mdio_lock);
04497982 235 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
4d22de3e
DLR
236 t3_write_reg(adapter, A_MI1_ADDR, addr);
237 t3_write_reg(adapter, A_MI1_DATA, val);
238 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
04497982 239 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
4d22de3e
DLR
240 mutex_unlock(&adapter->mdio_lock);
241 return ret;
242}
243
/* MDIO operations used for clause-22 PHYs (direct register addressing) */
static const struct mdio_ops mi1_mdio_ops = {
	.read = t3_mi1_read,
	.write = t3_mi1_write,
	.mode_support = MDIO_SUPPORTS_C22
};
249
04497982
DLR
250/*
251 * Performs the address cycle for clause 45 PHYs.
252 * Must be called with the MDIO_LOCK held.
253 */
254static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
255 int reg_addr)
256{
257 u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);
258
259 t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
260 t3_write_reg(adapter, A_MI1_ADDR, addr);
261 t3_write_reg(adapter, A_MI1_DATA, reg_addr);
262 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
263 return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
264 MDIO_ATTEMPTS, 10);
265}
266
4d22de3e
DLR
267/*
268 * MI1 read/write operations for indirect-addressed PHYs.
269 */
0f07c4ee
BH
270static int mi1_ext_read(struct net_device *dev, int phy_addr, int mmd_addr,
271 u16 reg_addr)
4d22de3e 272{
0f07c4ee
BH
273 struct port_info *pi = netdev_priv(dev);
274 struct adapter *adapter = pi->adapter;
4d22de3e 275 int ret;
4d22de3e
DLR
276
277 mutex_lock(&adapter->mdio_lock);
04497982 278 ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
4d22de3e
DLR
279 if (!ret) {
280 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
281 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
04497982 282 MDIO_ATTEMPTS, 10);
4d22de3e 283 if (!ret)
0f07c4ee 284 ret = t3_read_reg(adapter, A_MI1_DATA);
4d22de3e
DLR
285 }
286 mutex_unlock(&adapter->mdio_lock);
287 return ret;
288}
289
0f07c4ee
BH
290static int mi1_ext_write(struct net_device *dev, int phy_addr, int mmd_addr,
291 u16 reg_addr, u16 val)
4d22de3e 292{
0f07c4ee
BH
293 struct port_info *pi = netdev_priv(dev);
294 struct adapter *adapter = pi->adapter;
4d22de3e 295 int ret;
4d22de3e
DLR
296
297 mutex_lock(&adapter->mdio_lock);
04497982 298 ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
4d22de3e
DLR
299 if (!ret) {
300 t3_write_reg(adapter, A_MI1_DATA, val);
301 t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
302 ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
04497982 303 MDIO_ATTEMPTS, 10);
4d22de3e
DLR
304 }
305 mutex_unlock(&adapter->mdio_lock);
306 return ret;
307}
308
/* MDIO operations used for clause-45 PHYs, with clause-22 emulation */
static const struct mdio_ops mi1_mdio_ext_ops = {
	.read = mi1_ext_read,
	.write = mi1_ext_write,
	.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22
};
314
/**
 *	t3_mdio_change_bits - modify the value of a PHY register
 *	@phy: the PHY to operate on
 *	@mmd: the device address
 *	@reg: the register address
 *	@clear: what part of the register value to mask off
 *	@set: what part of the register value to set
 *
 *	Changes the value of a PHY register by applying a mask to its current
 *	value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	unsigned int val;
	int err = t3_mdio_read(phy, mmd, reg, &val);

	if (err)
		return err;
	/* read-modify-write: drop @clear bits, then OR in @set */
	return t3_mdio_write(phy, mmd, reg, (val & ~clear) | set);
}
339
340/**
341 * t3_phy_reset - reset a PHY block
342 * @phy: the PHY to operate on
343 * @mmd: the device address of the PHY block to reset
344 * @wait: how long to wait for the reset to complete in 1ms increments
345 *
346 * Resets a PHY block and optionally waits for the reset to complete.
347 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
348 * for 10G PHYs.
349 */
350int t3_phy_reset(struct cphy *phy, int mmd, int wait)
351{
352 int err;
353 unsigned int ctl;
354
0f07c4ee
BH
355 err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
356 MDIO_CTRL1_RESET);
4d22de3e
DLR
357 if (err || !wait)
358 return err;
359
360 do {
0f07c4ee 361 err = t3_mdio_read(phy, mmd, MDIO_CTRL1, &ctl);
4d22de3e
DLR
362 if (err)
363 return err;
0f07c4ee 364 ctl &= MDIO_CTRL1_RESET;
4d22de3e
DLR
365 if (ctl)
366 msleep(1);
367 } while (ctl && --wait);
368
369 return ctl ? -1 : 0;
370}
371
372/**
373 * t3_phy_advertise - set the PHY advertisement registers for autoneg
374 * @phy: the PHY to operate on
375 * @advert: bitmap of capabilities the PHY should advertise
376 *
377 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
378 * requested capabilities.
379 */
380int t3_phy_advertise(struct cphy *phy, unsigned int advert)
381{
382 int err;
383 unsigned int val = 0;
384
0f07c4ee 385 err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_CTRL1000, &val);
4d22de3e
DLR
386 if (err)
387 return err;
388
389 val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
390 if (advert & ADVERTISED_1000baseT_Half)
391 val |= ADVERTISE_1000HALF;
392 if (advert & ADVERTISED_1000baseT_Full)
393 val |= ADVERTISE_1000FULL;
394
0f07c4ee 395 err = t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_CTRL1000, val);
4d22de3e
DLR
396 if (err)
397 return err;
398
399 val = 1;
400 if (advert & ADVERTISED_10baseT_Half)
401 val |= ADVERTISE_10HALF;
402 if (advert & ADVERTISED_10baseT_Full)
403 val |= ADVERTISE_10FULL;
404 if (advert & ADVERTISED_100baseT_Half)
405 val |= ADVERTISE_100HALF;
406 if (advert & ADVERTISED_100baseT_Full)
407 val |= ADVERTISE_100FULL;
408 if (advert & ADVERTISED_Pause)
409 val |= ADVERTISE_PAUSE_CAP;
410 if (advert & ADVERTISED_Asym_Pause)
411 val |= ADVERTISE_PAUSE_ASYM;
0f07c4ee 412 return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
4d22de3e
DLR
413}
414
0ce2f03b
DLR
415/**
416 * t3_phy_advertise_fiber - set fiber PHY advertisement register
417 * @phy: the PHY to operate on
418 * @advert: bitmap of capabilities the PHY should advertise
419 *
420 * Sets a fiber PHY's advertisement register to advertise the
421 * requested capabilities.
422 */
423int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
424{
425 unsigned int val = 0;
426
427 if (advert & ADVERTISED_1000baseT_Half)
428 val |= ADVERTISE_1000XHALF;
429 if (advert & ADVERTISED_1000baseT_Full)
430 val |= ADVERTISE_1000XFULL;
431 if (advert & ADVERTISED_Pause)
432 val |= ADVERTISE_1000XPAUSE;
433 if (advert & ADVERTISED_Asym_Pause)
434 val |= ADVERTISE_1000XPSE_ASYM;
0f07c4ee 435 return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
0ce2f03b
DLR
436}
437
4d22de3e
DLR
438/**
439 * t3_set_phy_speed_duplex - force PHY speed and duplex
440 * @phy: the PHY to operate on
441 * @speed: requested PHY speed
442 * @duplex: requested PHY duplex
443 *
444 * Force a 10/100/1000 PHY's speed and duplex. This also disables
445 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
446 */
447int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
448{
449 int err;
450 unsigned int ctl;
451
0f07c4ee 452 err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_BMCR, &ctl);
4d22de3e
DLR
453 if (err)
454 return err;
455
456 if (speed >= 0) {
457 ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
458 if (speed == SPEED_100)
459 ctl |= BMCR_SPEED100;
460 else if (speed == SPEED_1000)
461 ctl |= BMCR_SPEED1000;
462 }
463 if (duplex >= 0) {
464 ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
465 if (duplex == DUPLEX_FULL)
466 ctl |= BMCR_FULLDPLX;
467 }
468 if (ctl & BMCR_SPEED1000) /* auto-negotiation required for GigE */
469 ctl |= BMCR_ANENABLE;
0f07c4ee 470 return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_BMCR, ctl);
4d22de3e
DLR
471}
472
9b1e3656
DLR
/* Enable the LASI link-status-alarm interrupt in the PHY's PMA/PMD MMD. */
int t3_phy_lasi_intr_enable(struct cphy *phy)
{
	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
			     MDIO_PMA_LASI_LSALARM);
}
478
/* Disable all LASI interrupts by clearing the LASI control register. */
int t3_phy_lasi_intr_disable(struct cphy *phy)
{
	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
}
483
/*
 * Clear a pending LASI interrupt by reading the status register; the value
 * read is discarded (presumably the register latches and clears on read —
 * confirm against the PHY datasheet).
 */
int t3_phy_lasi_intr_clear(struct cphy *phy)
{
	u32 val;

	return t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
}
490
491int t3_phy_lasi_intr_handler(struct cphy *phy)
492{
493 unsigned int status;
64318334
BH
494 int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT,
495 &status);
9b1e3656
DLR
496
497 if (err)
498 return err;
64318334 499 return (status & MDIO_PMA_LASI_LSALARM) ? cphy_cause_link_change : 0;
9b1e3656
DLR
500}
501
/*
 * Per-board configuration table, indexed by the adapter id passed to
 * t3_get_adapter_info().  Each entry supplies GPIO output-enable/value
 * masks, GPIO interrupt sources, extra supported link modes, the MDIO ops
 * (clause 22 vs clause 45) and the board name.  Empty entries are
 * placeholder ids with no supported board.
 */
static const struct adapter_info t3_adap_info[] = {
	{1, 1, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio PE9000"},
	{1, 1, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio T302"},
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
	{1, 1, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T320"},
	{},
	{},
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310" },
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
};
535
536/*
537 * Return the adapter_info structure with a given index. Out-of-range indices
538 * return NULL.
539 */
540const struct adapter_info *t3_get_adapter_info(unsigned int id)
541{
542 return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
543}
544
04497982
DLR
/* Binds a PHY driver's prep routine to a VPD port-type code. */
struct port_type_info {
	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *ops);
};

/*
 * PHY prep routines indexed by the port type read from VPD (see
 * get_vpd_params()); NULL entries correspond to unsupported type codes.
 */
static const struct port_type_info port_types[] = {
	{ NULL },
	{ t3_ael1002_phy_prep },
	{ t3_vsc8211_phy_prep },
	{ NULL},
	{ t3_xaui_direct_phy_prep },
	{ t3_ael2005_phy_prep },
	{ t3_qt2045_phy_prep },
	{ t3_ael1006_phy_prep },
	{ NULL },
	{ t3_aq100x_phy_prep },
	{ t3_ael2020_phy_prep },
};
563
4d22de3e
DLR
/* Lays out one VPD keyword: 2-byte key, 1-byte length, then the data. */
#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R sections.  The field order must match the on-EEPROM layout exactly,
 * since get_vpd_params() reads the raw bytes straight into this struct.
 */
struct t3_vpd {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);	/* part number */
	VPD_ENTRY(ec, 16);	/* EC level */
	VPD_ENTRY(sn, SERNUM_LEN);	/* serial number */
	VPD_ENTRY(na, 12);	/* MAC address base */
	VPD_ENTRY(cclk, 6);	/* core clock */
	VPD_ENTRY(mclk, 6);	/* mem clock */
	VPD_ENTRY(uclk, 6);	/* uP clk */
	VPD_ENTRY(mdc, 6);	/* MDIO clk */
	VPD_ENTRY(mt, 2);	/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);	/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);	/* XAUI1 config */
	VPD_ENTRY(port0, 2);	/* PHY0 complex */
	VPD_ENTRY(port1, 2);	/* PHY1 complex */
	VPD_ENTRY(port2, 2);	/* PHY2 complex */
	VPD_ENTRY(port3, 2);	/* PHY3 complex */
	VPD_ENTRY(rv, 1);	/* csum */
	u32 pad;		/* for multiple-of-4 sizing and alignment */
};
595
9f64306b 596#define EEPROM_MAX_POLL 40
4d22de3e
DLR
597#define EEPROM_STAT_ADDR 0x4000
598#define VPD_BASE 0xc00
599
600/**
601 * t3_seeprom_read - read a VPD EEPROM location
602 * @adapter: adapter to read
603 * @addr: EEPROM address
604 * @data: where to store the read data
605 *
606 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
607 * VPD ROM capability. A zero is written to the flag bit when the
608 * addres is written to the control register. The hardware device will
609 * set the flag to 1 when 4 bytes have been read into the data register.
610 */
05e5c116 611int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
4d22de3e
DLR
612{
613 u16 val;
614 int attempts = EEPROM_MAX_POLL;
05e5c116 615 u32 v;
4d22de3e
DLR
616 unsigned int base = adapter->params.pci.vpd_cap_addr;
617
618 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
619 return -EINVAL;
620
621 pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
622 do {
623 udelay(10);
624 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
625 } while (!(val & PCI_VPD_ADDR_F) && --attempts);
626
627 if (!(val & PCI_VPD_ADDR_F)) {
628 CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
629 return -EIO;
630 }
05e5c116
AV
631 pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
632 *data = cpu_to_le32(v);
4d22de3e
DLR
633 return 0;
634}
635
636/**
637 * t3_seeprom_write - write a VPD EEPROM location
638 * @adapter: adapter to write
639 * @addr: EEPROM address
640 * @data: value to write
641 *
642 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
643 * VPD ROM capability.
644 */
05e5c116 645int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
4d22de3e
DLR
646{
647 u16 val;
648 int attempts = EEPROM_MAX_POLL;
649 unsigned int base = adapter->params.pci.vpd_cap_addr;
650
651 if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
652 return -EINVAL;
653
654 pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
05e5c116 655 le32_to_cpu(data));
4d22de3e
DLR
656 pci_write_config_word(adapter->pdev,base + PCI_VPD_ADDR,
657 addr | PCI_VPD_ADDR_F);
658 do {
659 msleep(1);
660 pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
661 } while ((val & PCI_VPD_ADDR_F) && --attempts);
662
663 if (val & PCI_VPD_ADDR_F) {
664 CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
665 return -EIO;
666 }
667 return 0;
668}
669
/**
 *	t3_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: 1 to enable write protection, 0 to disable it
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t3_seeprom_wp(struct adapter *adapter, int enable)
{
	/* 0xc presumably sets the block-protect bits in the EEPROM status
	 * word — magic value; confirm against the EEPROM datasheet */
	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}
681
/*
 * Convert a character holding a hex digit ('0'-'9', 'a'-'f', 'A'-'F')
 * to its numeric value.
 */
static unsigned int hex2int(unsigned char c)
{
	if (isdigit(c))
		return c - '0';
	return toupper(c) - 'A' + 10;
}
689
/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
	if (ret)
		return ret;
	/* 0x82 is the VPD ID-string tag; its presence tells us which base */
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	/* pull the whole structure in, one 32-bit word at a time */
	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (__le32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	/* clock fields are stored as decimal ASCII strings */
	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		/* port types are single hex digits indexing port_types[] */
		p->port_type[0] = hex2int(vpd.port0_data[0]);
		p->port_type[1] = hex2int(vpd.port1_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	/* MAC address base is stored as 12 hex-ASCII characters */
	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}
741
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,	/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes (standard SPI-NOR command set) */
	SF_PROG_PAGE = 2,	/* program page */
	SF_WR_DISABLE = 4,	/* disable writes */
	SF_RD_STATUS = 5,	/* read status register */
	SF_WR_ENABLE = 6,	/* enable writes */
	SF_RD_DATA_FAST = 0xb,	/* read flash */
	SF_ERASE_SECTOR = 0xd8,	/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,	/* flash address holding FW version */
	FW_MIN_SIZE = 8	/* at least version and csum */
};
760
761/**
762 * sf1_read - read data from the serial flash
763 * @adapter: the adapter
764 * @byte_cnt: number of bytes to read
765 * @cont: whether another operation will be chained
766 * @valp: where to store the read data
767 *
768 * Reads up to 4 bytes of data from the serial flash. The location of
769 * the read needs to be specified prior to calling this by issuing the
770 * appropriate commands to the serial flash.
771 */
772static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
773 u32 *valp)
774{
775 int ret;
776
777 if (!byte_cnt || byte_cnt > 4)
778 return -EINVAL;
779 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
780 return -EBUSY;
781 t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
782 ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
783 if (!ret)
784 *valp = t3_read_reg(adapter, A_SF_DATA);
785 return ret;
786}
787
788/**
789 * sf1_write - write data to the serial flash
790 * @adapter: the adapter
791 * @byte_cnt: number of bytes to write
792 * @cont: whether another operation will be chained
793 * @val: value to write
794 *
795 * Writes up to 4 bytes of data to the serial flash. The location of
796 * the write needs to be specified prior to calling this by issuing the
797 * appropriate commands to the serial flash.
798 */
799static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
800 u32 val)
801{
802 if (!byte_cnt || byte_cnt > 4)
803 return -EINVAL;
804 if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
805 return -EBUSY;
806 t3_write_reg(adapter, A_SF_DATA, val);
807 t3_write_reg(adapter, A_SF_OP,
808 V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
809 return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
810}
811
812/**
813 * flash_wait_op - wait for a flash operation to complete
814 * @adapter: the adapter
815 * @attempts: max number of polls of the status register
816 * @delay: delay between polls in ms
817 *
818 * Wait for a flash operation to complete by polling the status register.
819 */
820static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
821{
822 int ret;
823 u32 status;
824
825 while (1) {
826 if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
827 (ret = sf1_read(adapter, 1, 0, &status)) != 0)
828 return ret;
829 if (!(status & 1))
830 return 0;
831 if (--attempts == 0)
832 return -EAGAIN;
833 if (delay)
834 msleep(delay);
835 }
836}
837
838/**
839 * t3_read_flash - read words from serial flash
840 * @adapter: the adapter
841 * @addr: the start address for the read
842 * @nwords: how many 32-bit words to read
843 * @data: where to store the read data
844 * @byte_oriented: whether to store data as bytes or as words
845 *
846 * Read the specified number of 32-bit words from the serial flash.
847 * If @byte_oriented is set the read data is stored as a byte array
848 * (i.e., big-endian), otherwise as 32-bit words in the platform's
849 * natural endianess.
850 */
851int t3_read_flash(struct adapter *adapter, unsigned int addr,
852 unsigned int nwords, u32 *data, int byte_oriented)
853{
854 int ret;
855
856 if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
857 return -EINVAL;
858
859 addr = swab32(addr) | SF_RD_DATA_FAST;
860
861 if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
862 (ret = sf1_read(adapter, 1, 1, data)) != 0)
863 return ret;
864
865 for (; nwords; nwords--, data++) {
866 ret = sf1_read(adapter, 4, nwords > 1, data);
867 if (ret)
868 return ret;
869 if (byte_oriented)
870 *data = htonl(*data);
871 }
872 return 0;
873}
874
/**
 *	t3_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.
 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* the write must fit in the flash and must not cross a 256-byte page */
	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	/* PAGE_PROGRAM opcode in the low byte, byte-swapped address above */
	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	/* stream the payload 4 bytes at a time, big-endian within each word */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	/* @data was advanced past the payload above, so rewind by n here */
	if (memcmp(data - n, (u8 *) buf + offset, n))
		return -EIO;
	return 0;
}
922
/**
 *	t3_get_tp_version - read the tp sram version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the protocol sram version from sram.
 */
int t3_get_tp_version(struct adapter *adapter, u32 *vers)
{
	int ret;

	/* Get version loaded in SRAM */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	/* completion is signalled by bit 0 of FIELD0 going to 1 */
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
			      1, 1, 5, 1);
	if (ret)
		return ret;

	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	return 0;
}
945
946/**
947 * t3_check_tpsram_version - read the tp sram version
948 * @adapter: the adapter
47330077
DLR
949 *
950 * Reads the protocol sram version from flash.
951 */
8207befa 952int t3_check_tpsram_version(struct adapter *adapter)
47330077
DLR
953{
954 int ret;
955 u32 vers;
956 unsigned int major, minor;
957
958 if (adapter->params.rev == T3_REV_A)
959 return 0;
960
47330077
DLR
961
962 ret = t3_get_tp_version(adapter, &vers);
963 if (ret)
964 return ret;
480fe1a3
DLR
965
966 major = G_TP_VERSION_MAJOR(vers);
967 minor = G_TP_VERSION_MINOR(vers);
968
2eab17ab 969 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
480fe1a3 970 return 0;
47330077 971 else {
47330077
DLR
972 CH_ERR(adapter, "found wrong TP version (%u.%u), "
973 "driver compiled for version %d.%d\n", major, minor,
974 TP_VERSION_MAJOR, TP_VERSION_MINOR);
975 }
480fe1a3
DLR
976 return -EINVAL;
977}
978
979/**
2eab17ab 980 * t3_check_tpsram - check if provided protocol SRAM
480fe1a3
DLR
981 * is compatible with this driver
982 * @adapter: the adapter
983 * @tp_sram: the firmware image to write
984 * @size: image size
985 *
986 * Checks if an adapter's tp sram is compatible with the driver.
987 * Returns 0 if the versions are compatible, a negative error otherwise.
988 */
2c733a16
DW
989int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
990 unsigned int size)
480fe1a3
DLR
991{
992 u32 csum;
993 unsigned int i;
05e5c116 994 const __be32 *p = (const __be32 *)tp_sram;
480fe1a3
DLR
995
996 /* Verify checksum */
997 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
998 csum += ntohl(p[i]);
999 if (csum != 0xffffffff) {
1000 CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
1001 csum);
1002 return -EINVAL;
1003 }
1004
1005 return 0;
1006}
1007
/* Firmware flavor encoded in the type field of the FW version word. */
enum fw_version_type {
	FW_VERSION_N3,		/* N3-flavor firmware image */
	FW_VERSION_T3		/* T3-flavor firmware image (expected here) */
};
1012
/**
 *	t3_get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.  The version is the single 32-bit
 *	word stored at FW_VERS_ADDR.
 */
int t3_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}
1024
1025/**
1026 * t3_check_fw_version - check if the FW is compatible with this driver
1027 * @adapter: the adapter
8207befa 1028 *
4d22de3e
DLR
1029 * Checks if an adapter's FW is compatible with the driver. Returns 0
1030 * if the versions are compatible, a negative error otherwise.
1031 */
8207befa 1032int t3_check_fw_version(struct adapter *adapter)
4d22de3e
DLR
1033{
1034 int ret;
1035 u32 vers;
4aac3899 1036 unsigned int type, major, minor;
4d22de3e
DLR
1037
1038 ret = t3_get_fw_version(adapter, &vers);
1039 if (ret)
1040 return ret;
1041
4aac3899
DLR
1042 type = G_FW_VERSION_TYPE(vers);
1043 major = G_FW_VERSION_MAJOR(vers);
1044 minor = G_FW_VERSION_MINOR(vers);
4d22de3e 1045
75d8626f
DLR
1046 if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
1047 minor == FW_VERSION_MINOR)
4d22de3e 1048 return 0;
8207befa 1049 else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
273fa904
DLR
1050 CH_WARN(adapter, "found old FW minor version(%u.%u), "
1051 "driver compiled for version %u.%u\n", major, minor,
1052 FW_VERSION_MAJOR, FW_VERSION_MINOR);
8207befa 1053 else {
273fa904 1054 CH_WARN(adapter, "found newer FW version(%u.%u), "
a5a3b460
DLR
1055 "driver compiled for version %u.%u\n", major, minor,
1056 FW_VERSION_MAJOR, FW_VERSION_MINOR);
273fa904 1057 return 0;
a5a3b460 1058 }
4d22de3e
DLR
1059 return -EINVAL;
1060}
1061
1062/**
1063 * t3_flash_erase_sectors - erase a range of flash sectors
1064 * @adapter: the adapter
1065 * @start: the first sector to erase
1066 * @end: the last sector to erase
1067 *
1068 * Erases the sectors in the given range.
1069 */
1070static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
1071{
1072 while (start <= end) {
1073 int ret;
1074
1075 if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
1076 (ret = sf1_write(adapter, 4, 0,
1077 SF_ERASE_SECTOR | (start << 8))) != 0 ||
1078 (ret = flash_wait_op(adapter, 5, 500)) != 0)
1079 return ret;
1080 start++;
1081 }
1082 return 0;
1083}
1084
/*
 *	t3_load_fw - download firmware
 *	@adapter: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 *	The FW image has the following sections: @size - 8 bytes of code and
 *	data, followed by 4 bytes of FW version, followed by the 32-bit
 *	1's complement checksum of the whole image.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	/* Image must be word-aligned and at least the minimum FW size ... */
	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	/* ... and must fit in the flash region reserved for firmware. */
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
		return -EFBIG;

	/* One's-complement sum of all big-endian words must be all-ones. */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	/* The whole image lives in a single flash sector. */
	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;		/* trim off version and checksum */
	for (addr = FW_FLASH_BOOT_ADDR; size;) {
		/* Program at most one 256-byte flash page per call. */
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	/* Write the version word last, after the code/data sections. */
	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}
1139
#define CIM_CTL_BASE 0x2000

/**
 *	t3_cim_ctl_blk_read - read a block from CIM control region
 *
 *	@adap: the adapter
 *	@addr: the start address within the CIM control region
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM control region.
 */
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
			unsigned int n, unsigned int *valp)
{
	int ret = 0;

	/* Another host access is still in flight; caller must retry. */
	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	/* One word per iteration: post the address, wait, fetch the data. */
	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}
1169
/*
 * Temporarily shut off all Rx traffic on a MAC, saving the current Rx
 * configuration so t3_open_rx_traffic() can restore it afterwards.
 */
static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
			       u32 *rx_hash_high, u32 *rx_hash_low)
{
	/* stop Rx unicast traffic */
	t3_mac_disable_exact_filters(mac);

	/* stop broadcast, multicast, promiscuous mode traffic */
	*rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 F_DISBCAST);

	/* save and clear the Rx hash registers (restored by the caller) */
	*rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);

	*rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);

	/* Leave time to drain max RX fifo */
	msleep(1);
}
1191
/*
 * Re-open Rx traffic on a MAC, restoring the configuration previously
 * saved by t3_gate_rx_traffic().
 */
static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
			       u32 rx_hash_high, u32 rx_hash_low)
{
	t3_mac_enable_exact_filters(mac);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 rx_cfg);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
}
4d22de3e
DLR
1202
/**
 *	t3_link_changed - handle interface link changes
 *	@adapter: the adapter
 *	@port_id: the port index that changed link state
 *
 *	Called when a port's link settings change to propagate the new values
 *	to the associated PHY and MAC.  After performing the common tasks it
 *	invokes an OS-specific handler.
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
	int link_ok, speed, duplex, fc;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	/* Link transitioned down->up: cycle the Rx path with traffic gated
	 * and check whether a link fault was latched during the bounce. */
	if (!lc->link_ok && link_ok) {
		u32 rx_cfg, rx_hash_high, rx_hash_low;
		u32 status;

		t3_xgm_intr_enable(adapter, port_id);
		t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
		t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
		t3_mac_enable(mac, MAC_DIRECTION_RX);

		status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
		if (status & F_LINKFAULTCHANGE) {
			mac->stats.link_faults++;
			pi->link_fault = 1;
		}
		t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
	}

	/* Resolve effective flow control from the requested settings. */
	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok == lc->link_ok && speed == lc->speed &&
	    duplex == lc->duplex && fc == lc->fc)
		return;	/* nothing changed */

	/* On link state change, rev > 0 XAUI parts need a PCS reset and
	 * their XAUI Tx/Rx lanes toggled to match the new state. */
	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
	    uses_xaui(adapter)) {
		if (link_ok)
			t3b_pcs_reset(mac);
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
	}
	lc->link_ok = link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;

	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
		/* Set MAC speed, duplex, and flow control to match PHY. */
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
		lc->fc = fc;
	}

	/* A latched link fault suppresses the "up" report to the OS. */
	t3_os_link_changed(adapter, port_id, link_ok && !pi->link_fault,
			   speed, duplex, fc);
}
1268
/*
 * Re-check a previously latched link fault on a port: bounce the Rx path
 * with traffic gated, re-sample the fault status, and report the outcome
 * to the OS layer.
 */
void t3_link_fault(struct adapter *adapter, int port_id)
{
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cmac *mac = &pi->mac;
	struct cphy *phy = &pi->phy;
	struct link_config *lc = &pi->link_config;
	int link_ok, speed, duplex, fc, link_fault;
	u32 rx_cfg, rx_hash_high, rx_hash_low;

	t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);

	/* Disable XAUI lanes on rev > 0 XAUI parts while resetting Rx. */
	if (adapter->params.rev > 0 && uses_xaui(adapter))
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0);

	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
	t3_mac_enable(mac, MAC_DIRECTION_RX);

	t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);

	/* Sample the fault indication after the Rx path has been cycled. */
	link_fault = t3_read_reg(adapter,
				 A_XGM_INT_STATUS + mac->offset);
	link_fault &= F_LINKFAULTCHANGE;

	/* Seed with last-known values; the PHY callback may update them. */
	link_ok = lc->link_ok;
	speed = lc->speed;
	duplex = lc->duplex;
	fc = lc->fc;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (link_fault) {
		/* Fault still present: mark the link down. */
		lc->link_ok = 0;
		lc->speed = SPEED_INVALID;
		lc->duplex = DUPLEX_INVALID;

		t3_os_link_fault(adapter, port_id, 0);

		/* Account link faults only when the phy reports a link up */
		if (link_ok)
			mac->stats.link_faults++;
	} else {
		/* Fault cleared: re-enable XAUI lanes if the link is up and
		 * propagate the recovered link state. */
		if (link_ok)
			t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
				     F_TXACTENABLE | F_RXEN);

		pi->link_fault = 0;
		lc->link_ok = (unsigned char)link_ok;
		lc->speed = speed < 0 ? SPEED_INVALID : speed;
		lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
		t3_os_link_fault(adapter, port_id, link_ok);
	}
}
1321
4d22de3e
DLR
/**
 *	t3_link_start - apply link configuration to MAC/PHY
 *	@phy: the PHY to setup
 *	@mac: the MAC to setup
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		/* Translate the requested pause flags into the standard
		 * Pause/Asym_Pause advertisement bits. */
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}
		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			/* Forced mode: program MAC and PHY immediately. */
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		/* Non-autoneg PHY: fix up the MAC and just reset the PHY. */
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}
1366
1367/**
1368 * t3_set_vlan_accel - control HW VLAN extraction
1369 * @adapter: the adapter
1370 * @ports: bitmap of adapter ports to operate on
1371 * @on: enable (1) or disable (0) HW VLAN extraction
1372 *
1373 * Enables or disables HW extraction of VLAN tags for the given port.
1374 */
1375void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
1376{
1377 t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
1378 ports << S_VLANEXTRACTIONENABLE,
1379 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
1380}
1381
/* One entry of a table-driven interrupt handler; tables are terminated by
 * an entry with mask 0 (see t3_handle_intr_status()). */
struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
};
1388
1389/**
1390 * t3_handle_intr_status - table driven interrupt handler
1391 * @adapter: the adapter that generated the interrupt
1392 * @reg: the interrupt status register to process
1393 * @mask: a mask to apply to the interrupt status
1394 * @acts: table of interrupt actions
1395 * @stats: statistics counters tracking interrupt occurences
1396 *
1397 * A table driven interrupt handler that applies a set of masks to an
1398 * interrupt status word and performs the corresponding actions if the
1399 * interrupts described by the mask have occured. The actions include
1400 * optionally printing a warning or alert message, and optionally
1401 * incrementing a stat counter. The table is terminated by an entry
1402 * specifying mask 0. Returns the number of fatal interrupt conditions.
1403 */
1404static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
1405 unsigned int mask,
1406 const struct intr_info *acts,
1407 unsigned long *stats)
1408{
1409 int fatal = 0;
1410 unsigned int status = t3_read_reg(adapter, reg) & mask;
1411
1412 for (; acts->mask; ++acts) {
1413 if (!(status & acts->mask))
1414 continue;
1415 if (acts->fatal) {
1416 fatal++;
1417 CH_ALERT(adapter, "%s (0x%x)\n",
1418 acts->msg, status & acts->mask);
1419 } else if (acts->msg)
1420 CH_WARN(adapter, "%s (0x%x)\n",
1421 acts->msg, status & acts->mask);
1422 if (acts->stat_idx >= 0)
1423 stats[acts->stat_idx]++;
1424 }
1425 if (status) /* clear processed interrupts */
1426 t3_write_reg(adapter, reg, status);
1427 return fatal;
1428}
1429
/*
 * Per-module interrupt enable/cause masks used by the handlers below and
 * by interrupt setup.  Commented-out terms are deliberately excluded from
 * the mask (masked off rather than handled).
 */
#define SGE_INTR_MASK (F_RSPQDISABLED | \
		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		       F_HIRCQPARITYERROR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR |\
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
			F_TXPARERR | V_BISTERR(M_BISTERR))
#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
#define ULPTX_INTR_MASK 0xfc
#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)
4d22de3e
DLR
/*
 * Interrupt handler for the PCIX1 module.  All listed conditions except
 * correctable ECC errors are treated as fatal.
 */
static void pci_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcix1_intr_info[] = {
		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
		{F_RCVTARABT, "PCI received target abort", -1, 1},
		{F_RCVMSTABT, "PCI received master abort", -1, 1},
		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
		{F_DETPARERR, "PCI detected parity error", -1, 1},
		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
		{F_RCVSPLCMPERR, "PCI received split completion error", -1,
		 1},
		{F_DETCORECCERR, "PCI correctable ECC error",
		 STAT_PCI_CORR_ECC, 0},
		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1,
		 1},
		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1,
		 1},
		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error", -1,
		 1},
		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
		 "error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
				  pcix1_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
1520
/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcie_intr_info[] = {
		{F_PEXERR, "PCI PEX error", -1, 1},
		{F_UNXSPLCPLERRR,
		 "PCI unexpected split completion DMA read error", -1, 1},
		{F_UNXSPLCPLERRC,
		 "PCI unexpected split completion DMA command error", -1, 1},
		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
		 "PCI MSI-X table/PBA parity error", -1, 1},
		{F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
		{F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
		{F_RXPARERR, "PCI Rx parity error", -1, 1},
		{F_TXPARERR, "PCI Tx parity error", -1, 1},
		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
		{0}
	};

	/* Log the detailed PEX error code before the table-driven pass. */
	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
		CH_ALERT(adapter, "PEX error code 0x%x\n",
			 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
				  pcie_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
1554
1555/*
1556 * TP interrupt handler.
1557 */
1558static void tp_intr_handler(struct adapter *adapter)
1559{
1560 static const struct intr_info tp_intr_info[] = {
1561 {0xffffff, "TP parity error", -1, 1},
1562 {0x1000000, "TP out of Rx pages", -1, 1},
1563 {0x2000000, "TP out of Tx pages", -1, 1},
1564 {0}
1565 };
1566
a2604be5 1567 static struct intr_info tp_intr_info_t3c[] = {
b881955b
DLR
1568 {0x1fffffff, "TP parity error", -1, 1},
1569 {F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
1570 {F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
1571 {0}
a2604be5
DLR
1572 };
1573
4d22de3e 1574 if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
a2604be5 1575 adapter->params.rev < T3_REV_C ?
b881955b 1576 tp_intr_info : tp_intr_info_t3c, NULL))
4d22de3e
DLR
1577 t3_fatal_err(adapter);
1578}
1579
/*
 * CIM interrupt handler.  Every listed condition is fatal.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
		{F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
		{F_ICACHEPARERR, "CIM icache parity error", -1, 1},
		{F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
		{F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
		{F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
		{F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
		{F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
		{F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
		{F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
		{F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
		{F_ITAGPARERR, "CIM itag parity error", -1, 1},
		{F_DTAGPARERR, "CIM dtag parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
				  cim_intr_info, NULL))
		t3_fatal_err(adapter);
}
1617
/*
 * ULP RX interrupt handler.  Every listed condition is fatal.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{F_PARERRDATA, "ULP RX data parity error", -1, 1},
		{F_PARERRPCMD, "ULP RX command parity error", -1, 1},
		{F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
		{F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
		{F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
		{F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
		{F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
		{F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
				  ulprx_intr_info, NULL))
		t3_fatal_err(adapter);
}
1639
/*
 * ULP TX interrupt handler.  PBL out-of-bounds conditions are counted but
 * non-fatal; parity errors are fatal.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
		 STAT_ULP_CH0_PBL_OOB, 0},
		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
		 STAT_ULP_CH1_PBL_OOB, 0},
		{0xfc, "ULP TX parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
				  ulptx_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}
1658
/* Aggregate framing-error bits for the PM TX ispi/ospi interfaces. */
#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM TX interrupt handler.  Every listed condition is fatal.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
		 "PMTX ispi parity error", -1, 1},
		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
		 "PMTX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
				  pmtx_intr_info, NULL))
		t3_fatal_err(adapter);
}
1688
/* Aggregate framing-error bits for the PM RX ispi/ospi interfaces. */
#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM RX interrupt handler.  Every listed condition is fatal.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
		 "PMRX ispi parity error", -1, 1},
		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
		 "PMRX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
				  pmrx_intr_info, NULL))
		t3_fatal_err(adapter);
}
1718
/*
 * CPL switch interrupt handler.  Every listed condition is fatal.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cplsw_intr_info[] = {
		{F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
		{F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
		{F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
		{F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
		{F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
		{F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
				  cplsw_intr_info, NULL))
		t3_fatal_err(adapter);
}
1738
/*
 * MPS interrupt handler.  Any parity error in the low 9 bits is fatal.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static const struct intr_info mps_intr_info[] = {
		{0x1ff, "MPS parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
				  mps_intr_info, NULL))
		t3_fatal_err(adapter);
}
1753
/* Uncorrectable, parity, and address errors are fatal; correctable are not. */
#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)

/*
 * MC7 interrupt handler.
 */
static void mc7_intr_handler(struct mc7 *mc7)
{
	struct adapter *adapter = mc7->adapter;
	u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);

	if (cause & F_CE) {
		/* correctable ECC error: count and warn, non-fatal */
		mc7->stats.corr_err++;
		CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
			"data 0x%x 0x%x 0x%x\n", mc7->name,
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
			t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
	}

	if (cause & F_UE) {
		mc7->stats.uncorr_err++;
		CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
			 "data 0x%x 0x%x 0x%x\n", mc7->name,
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
			 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
	}

	if (G_PE(cause)) {
		mc7->stats.parity_err++;
		CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
			 mc7->name, G_PE(cause));
	}

	if (cause & F_AE) {
		u32 addr = 0;

		/* the error-address register is only read on rev > 0 parts */
		if (adapter->params.rev > 0)
			addr = t3_read_reg(adapter,
					   mc7->offset + A_MC7_ERR_ADDR);
		mc7->stats.addr_err++;
		CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
			 mc7->name, addr);
	}

	if (cause & MC7_INTR_FATAL)
		t3_fatal_err(adapter);

	t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
}
1806
#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
			V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
/*
 * XGMAC interrupt handler.  Returns non-zero if any cause bit was set.
 */
static int mac_intr_handler(struct adapter *adap, unsigned int idx)
{
	struct cmac *mac = &adap2pinfo(adap, idx)->mac;
	/*
	 * We mask out interrupt causes for which we're not taking interrupts.
	 * This allows us to use polling logic to monitor some of the other
	 * conditions when taking interrupts would impose too much load on the
	 * system.
	 */
	u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) &
		    ~F_RXFIFO_OVERFLOW;

	if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
		mac->stats.tx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
	}
	if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
		mac->stats.rx_fifo_parity_err++;
		CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
	}
	if (cause & F_TXFIFO_UNDERRUN)
		mac->stats.tx_fifo_urun++;
	if (cause & F_RXFIFO_OVERFLOW)
		mac->stats.rx_fifo_ovfl++;
	if (cause & V_SERDES_LOS(M_SERDES_LOS))
		mac->stats.serdes_signal_loss++;
	if (cause & F_XAUIPCSCTCERR)
		mac->stats.xaui_pcs_ctc_err++;
	if (cause & F_XAUIPCSALIGNCHANGE)
		mac->stats.xaui_pcs_align_change++;
	if (cause & F_XGM_INT) {
		/* mask further XGM interrupts and hand the link fault off
		 * to the OS-level handler */
		t3_set_reg_field(adap,
				 A_XGM_INT_ENABLE + mac->offset,
				 F_XGM_INT, 0);
		mac->stats.link_faults++;

		t3_os_link_fault_handler(adap, idx);
	}

	/* clear the cause bits we processed */
	t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);

	if (cause & XGM_INTR_FATAL)
		t3_fatal_err(adap);

	return cause != 0;
}
1858
1859/*
1860 * Interrupt handler for PHY events.
1861 */
1862int t3_phy_intr_handler(struct adapter *adapter)
1863{
4d22de3e
DLR
1864 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1865
1866 for_each_port(adapter, i) {
1ca03cbc
DLR
1867 struct port_info *p = adap2pinfo(adapter, i);
1868
04497982 1869 if (!(p->phy.caps & SUPPORTED_IRQ))
1ca03cbc
DLR
1870 continue;
1871
f231e0a5 1872 if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
1ca03cbc 1873 int phy_cause = p->phy.ops->intr_handler(&p->phy);
4d22de3e
DLR
1874
1875 if (phy_cause & cphy_cause_link_change)
1876 t3_link_changed(adapter, i);
1877 if (phy_cause & cphy_cause_fifo_error)
1ca03cbc 1878 p->phy.fifo_errors++;
1e882025
DLR
1879 if (phy_cause & cphy_cause_module_change)
1880 t3_os_phymod_changed(adapter, i);
4d22de3e
DLR
1881 }
1882 }
1883
1884 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1885 return 0;
1886}
1887
1888/*
1889 * T3 slow path (non-data) interrupt handler.
1890 */
1891int t3_slow_intr_handler(struct adapter *adapter)
1892{
1893 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1894
1895 cause &= adapter->slow_intr_mask;
1896 if (!cause)
1897 return 0;
1898 if (cause & F_PCIM0) {
1899 if (is_pcie(adapter))
1900 pcie_intr_handler(adapter);
1901 else
1902 pci_intr_handler(adapter);
1903 }
1904 if (cause & F_SGE3)
1905 t3_sge_err_intr_handler(adapter);
1906 if (cause & F_MC7_PMRX)
1907 mc7_intr_handler(&adapter->pmrx);
1908 if (cause & F_MC7_PMTX)
1909 mc7_intr_handler(&adapter->pmtx);
1910 if (cause & F_MC7_CM)
1911 mc7_intr_handler(&adapter->cm);
1912 if (cause & F_CIM)
1913 cim_intr_handler(adapter);
1914 if (cause & F_TP1)
1915 tp_intr_handler(adapter);
1916 if (cause & F_ULP2_RX)
1917 ulprx_intr_handler(adapter);
1918 if (cause & F_ULP2_TX)
1919 ulptx_intr_handler(adapter);
1920 if (cause & F_PM1_RX)
1921 pmrx_intr_handler(adapter);
1922 if (cause & F_PM1_TX)
1923 pmtx_intr_handler(adapter);
1924 if (cause & F_CPL_SWITCH)
1925 cplsw_intr_handler(adapter);
1926 if (cause & F_MPS0)
1927 mps_intr_handler(adapter);
1928 if (cause & F_MC5A)
1929 t3_mc5_intr_handler(&adapter->mc5);
1930 if (cause & F_XGMAC0_0)
1931 mac_intr_handler(adapter, 0);
1932 if (cause & F_XGMAC0_1)
1933 mac_intr_handler(adapter, 1);
1934 if (cause & F_T3DBG)
1935 t3_os_ext_intr_handler(adapter);
1936
1937 /* Clear the interrupts just processed. */
1938 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1939 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1940 return 1;
1941}
1942
f231e0a5
DLR
1943static unsigned int calc_gpio_intr(struct adapter *adap)
1944{
1945 unsigned int i, gpi_intr = 0;
1946
1947 for_each_port(adap, i)
1948 if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
1949 adapter_info(adap)->gpio_intr[i])
1950 gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
1951 return gpi_intr;
1952}
1953
4d22de3e
DLR
1954/**
1955 * t3_intr_enable - enable interrupts
1956 * @adapter: the adapter whose interrupts should be enabled
1957 *
1958 * Enable interrupts by setting the interrupt enable registers of the
1959 * various HW modules and then enabling the top-level interrupt
1960 * concentrator.
1961 */
1962void t3_intr_enable(struct adapter *adapter)
1963{
1964 static const struct addr_val_pair intr_en_avp[] = {
1965 {A_SG_INT_ENABLE, SGE_INTR_MASK},
1966 {A_MC7_INT_ENABLE, MC7_INTR_MASK},
1967 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
1968 MC7_INTR_MASK},
1969 {A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
1970 MC7_INTR_MASK},
1971 {A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
1972 {A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
4d22de3e
DLR
1973 {A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
1974 {A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
1975 {A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
1976 {A_MPS_INT_ENABLE, MPS_INTR_MASK},
1977 };
1978
1979 adapter->slow_intr_mask = PL_INTR_MASK;
1980
1981 t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
a2604be5
DLR
1982 t3_write_reg(adapter, A_TP_INT_ENABLE,
1983 adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);
4d22de3e
DLR
1984
1985 if (adapter->params.rev > 0) {
1986 t3_write_reg(adapter, A_CPL_INTR_ENABLE,
1987 CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
1988 t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
1989 ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
1990 F_PBL_BOUND_ERR_CH1);
1991 } else {
1992 t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
1993 t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
1994 }
1995
f231e0a5
DLR
1996 t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));
1997
4d22de3e
DLR
1998 if (is_pcie(adapter))
1999 t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
2000 else
2001 t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
2002 t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
2003 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
2004}
2005
2006/**
2007 * t3_intr_disable - disable a card's interrupts
2008 * @adapter: the adapter whose interrupts should be disabled
2009 *
2010 * Disable interrupts. We only disable the top-level interrupt
2011 * concentrator and the SGE data interrupts.
2012 */
2013void t3_intr_disable(struct adapter *adapter)
2014{
2015 t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
2016 t3_read_reg(adapter, A_PL_INT_ENABLE0); /* flush */
2017 adapter->slow_intr_mask = 0;
2018}
2019
2020/**
2021 * t3_intr_clear - clear all interrupts
2022 * @adapter: the adapter whose interrupts should be cleared
2023 *
2024 * Clears all interrupts.
2025 */
2026void t3_intr_clear(struct adapter *adapter)
2027{
2028 static const unsigned int cause_reg_addr[] = {
2029 A_SG_INT_CAUSE,
2030 A_SG_RSPQ_FL_STATUS,
2031 A_PCIX_INT_CAUSE,
2032 A_MC7_INT_CAUSE,
2033 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
2034 A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
2035 A_CIM_HOST_INT_CAUSE,
2036 A_TP_INT_CAUSE,
2037 A_MC5_DB_INT_CAUSE,
2038 A_ULPRX_INT_CAUSE,
2039 A_ULPTX_INT_CAUSE,
2040 A_CPL_INTR_CAUSE,
2041 A_PM1_TX_INT_CAUSE,
2042 A_PM1_RX_INT_CAUSE,
2043 A_MPS_INT_CAUSE,
2044 A_T3DBG_INT_CAUSE,
2045 };
2046 unsigned int i;
2047
2048 /* Clear PHY and MAC interrupts for each port. */
2049 for_each_port(adapter, i)
2050 t3_port_intr_clear(adapter, i);
2051
2052 for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
2053 t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);
2054
3eea3337
DLR
2055 if (is_pcie(adapter))
2056 t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
4d22de3e
DLR
2057 t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
2058 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
2059}
2060
bf792094
DLR
2061void t3_xgm_intr_enable(struct adapter *adapter, int idx)
2062{
2063 struct port_info *pi = adap2pinfo(adapter, idx);
2064
2065 t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
2066 XGM_EXTRA_INTR_MASK);
2067}
2068
2069void t3_xgm_intr_disable(struct adapter *adapter, int idx)
2070{
2071 struct port_info *pi = adap2pinfo(adapter, idx);
2072
2073 t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
2074 0x7ff);
2075}
2076
4d22de3e
DLR
2077/**
2078 * t3_port_intr_enable - enable port-specific interrupts
2079 * @adapter: associated adapter
2080 * @idx: index of port whose interrupts should be enabled
2081 *
2082 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
2083 * adapter port.
2084 */
2085void t3_port_intr_enable(struct adapter *adapter, int idx)
2086{
2087 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2088
2089 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
2090 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
2091 phy->ops->intr_enable(phy);
2092}
2093
2094/**
2095 * t3_port_intr_disable - disable port-specific interrupts
2096 * @adapter: associated adapter
2097 * @idx: index of port whose interrupts should be disabled
2098 *
2099 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
2100 * adapter port.
2101 */
2102void t3_port_intr_disable(struct adapter *adapter, int idx)
2103{
2104 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2105
2106 t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
2107 t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
2108 phy->ops->intr_disable(phy);
2109}
2110
2111/**
2112 * t3_port_intr_clear - clear port-specific interrupts
2113 * @adapter: associated adapter
2114 * @idx: index of port whose interrupts to clear
2115 *
2116 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
2117 * adapter port.
2118 */
2119void t3_port_intr_clear(struct adapter *adapter, int idx)
2120{
2121 struct cphy *phy = &adap2pinfo(adapter, idx)->phy;
2122
2123 t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
2124 t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
2125 phy->ops->intr_clear(phy);
2126}
2127
bb9366af
DLR
2128#define SG_CONTEXT_CMD_ATTEMPTS 100
2129
4d22de3e
DLR
2130/**
2131 * t3_sge_write_context - write an SGE context
2132 * @adapter: the adapter
2133 * @id: the context id
2134 * @type: the context type
2135 *
2136 * Program an SGE context with the values already loaded in the
2137 * CONTEXT_DATA? registers.
2138 */
2139static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
2140 unsigned int type)
2141{
3fa58c88
DLR
2142 if (type == F_RESPONSEQ) {
2143 /*
2144 * Can't write the Response Queue Context bits for
2145 * Interrupt Armed or the Reserve bits after the chip
2146 * has been initialized out of reset. Writing to these
2147 * bits can confuse the hardware.
2148 */
2149 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2150 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2151 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
2152 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2153 } else {
2154 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
2155 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
2156 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
2157 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
2158 }
4d22de3e
DLR
2159 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2160 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2161 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2162 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
4d22de3e
DLR
2163}
2164
3fa58c88
DLR
2165/**
2166 * clear_sge_ctxt - completely clear an SGE context
2167 * @adapter: the adapter
2168 * @id: the context id
2169 * @type: the context type
2170 *
2171 * Completely clear an SGE context. Used predominantly at post-reset
2172 * initialization. Note in particular that we don't skip writing to any
2173 * "sensitive bits" in the contexts the way that t3_sge_write_context()
2174 * does ...
2175 */
b881955b
DLR
2176static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
2177 unsigned int type)
2178{
2179 t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
2180 t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
2181 t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
2182 t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
3fa58c88
DLR
2183 t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
2184 t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
2185 t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
2186 t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
2187 t3_write_reg(adap, A_SG_CONTEXT_CMD,
2188 V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
2189 return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
2190 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
b881955b
DLR
2191}
2192
4d22de3e
DLR
2193/**
2194 * t3_sge_init_ecntxt - initialize an SGE egress context
2195 * @adapter: the adapter to configure
2196 * @id: the context id
2197 * @gts_enable: whether to enable GTS for the context
2198 * @type: the egress context type
2199 * @respq: associated response queue
2200 * @base_addr: base address of queue
2201 * @size: number of queue entries
2202 * @token: uP token
2203 * @gen: initial generation value for the context
2204 * @cidx: consumer pointer
2205 *
2206 * Initialize an SGE egress context and make it ready for use. If the
2207 * platform allows concurrent context operations, the caller is
2208 * responsible for appropriate locking.
2209 */
2210int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
2211 enum sge_context_type type, int respq, u64 base_addr,
2212 unsigned int size, unsigned int token, int gen,
2213 unsigned int cidx)
2214{
2215 unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;
2216
2217 if (base_addr & 0xfff) /* must be 4K aligned */
2218 return -EINVAL;
2219 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2220 return -EBUSY;
2221
2222 base_addr >>= 12;
2223 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
2224 V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
2225 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
2226 V_EC_BASE_LO(base_addr & 0xffff));
2227 base_addr >>= 16;
2228 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
2229 base_addr >>= 32;
2230 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2231 V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
2232 V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
2233 F_EC_VALID);
2234 return t3_sge_write_context(adapter, id, F_EGRESS);
2235}
2236
2237/**
2238 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
2239 * @adapter: the adapter to configure
2240 * @id: the context id
2241 * @gts_enable: whether to enable GTS for the context
2242 * @base_addr: base address of queue
2243 * @size: number of queue entries
2244 * @bsize: size of each buffer for this queue
2245 * @cong_thres: threshold to signal congestion to upstream producers
2246 * @gen: initial generation value for the context
2247 * @cidx: consumer pointer
2248 *
2249 * Initialize an SGE free list context and make it ready for use. The
2250 * caller is responsible for ensuring only one context operation occurs
2251 * at a time.
2252 */
2253int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
2254 int gts_enable, u64 base_addr, unsigned int size,
2255 unsigned int bsize, unsigned int cong_thres, int gen,
2256 unsigned int cidx)
2257{
2258 if (base_addr & 0xfff) /* must be 4K aligned */
2259 return -EINVAL;
2260 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2261 return -EBUSY;
2262
2263 base_addr >>= 12;
2264 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
2265 base_addr >>= 32;
2266 t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
2267 V_FL_BASE_HI((u32) base_addr) |
2268 V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
2269 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
2270 V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
2271 V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
2272 t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
2273 V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
2274 V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
2275 return t3_sge_write_context(adapter, id, F_FREELIST);
2276}
2277
2278/**
2279 * t3_sge_init_rspcntxt - initialize an SGE response queue context
2280 * @adapter: the adapter to configure
2281 * @id: the context id
2282 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
2283 * @base_addr: base address of queue
2284 * @size: number of queue entries
2285 * @fl_thres: threshold for selecting the normal or jumbo free list
2286 * @gen: initial generation value for the context
2287 * @cidx: consumer pointer
2288 *
2289 * Initialize an SGE response queue context and make it ready for use.
2290 * The caller is responsible for ensuring only one context operation
2291 * occurs at a time.
2292 */
2293int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
2294 int irq_vec_idx, u64 base_addr, unsigned int size,
2295 unsigned int fl_thres, int gen, unsigned int cidx)
2296{
2297 unsigned int intr = 0;
2298
2299 if (base_addr & 0xfff) /* must be 4K aligned */
2300 return -EINVAL;
2301 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2302 return -EBUSY;
2303
2304 base_addr >>= 12;
2305 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
2306 V_CQ_INDEX(cidx));
2307 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2308 base_addr >>= 32;
2309 if (irq_vec_idx >= 0)
2310 intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
2311 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2312 V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
2313 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
2314 return t3_sge_write_context(adapter, id, F_RESPONSEQ);
2315}
2316
2317/**
2318 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
2319 * @adapter: the adapter to configure
2320 * @id: the context id
2321 * @base_addr: base address of queue
2322 * @size: number of queue entries
2323 * @rspq: response queue for async notifications
2324 * @ovfl_mode: CQ overflow mode
2325 * @credits: completion queue credits
2326 * @credit_thres: the credit threshold
2327 *
2328 * Initialize an SGE completion queue context and make it ready for use.
2329 * The caller is responsible for ensuring only one context operation
2330 * occurs at a time.
2331 */
2332int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
2333 unsigned int size, int rspq, int ovfl_mode,
2334 unsigned int credits, unsigned int credit_thres)
2335{
2336 if (base_addr & 0xfff) /* must be 4K aligned */
2337 return -EINVAL;
2338 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2339 return -EBUSY;
2340
2341 base_addr >>= 12;
2342 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
2343 t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
2344 base_addr >>= 32;
2345 t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
2346 V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
1c17ae8a
DLR
2347 V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
2348 V_CQ_ERR(ovfl_mode));
4d22de3e
DLR
2349 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
2350 V_CQ_CREDIT_THRES(credit_thres));
2351 return t3_sge_write_context(adapter, id, F_CQ);
2352}
2353
2354/**
2355 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
2356 * @adapter: the adapter
2357 * @id: the egress context id
2358 * @enable: enable (1) or disable (0) the context
2359 *
2360 * Enable or disable an SGE egress context. The caller is responsible for
2361 * ensuring only one context operation occurs at a time.
2362 */
2363int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
2364{
2365 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2366 return -EBUSY;
2367
2368 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2369 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2370 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2371 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
2372 t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
2373 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2374 V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
2375 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2376 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
4d22de3e
DLR
2377}
2378
2379/**
2380 * t3_sge_disable_fl - disable an SGE free-buffer list
2381 * @adapter: the adapter
2382 * @id: the free list context id
2383 *
2384 * Disable an SGE free-buffer list. The caller is responsible for
2385 * ensuring only one context operation occurs at a time.
2386 */
2387int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
2388{
2389 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2390 return -EBUSY;
2391
2392 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
2393 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2394 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
2395 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2396 t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
2397 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2398 V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
2399 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2400 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
4d22de3e
DLR
2401}
2402
2403/**
2404 * t3_sge_disable_rspcntxt - disable an SGE response queue
2405 * @adapter: the adapter
2406 * @id: the response queue context id
2407 *
2408 * Disable an SGE response queue. The caller is responsible for
2409 * ensuring only one context operation occurs at a time.
2410 */
2411int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
2412{
2413 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2414 return -EBUSY;
2415
2416 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2417 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2418 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2419 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2420 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2421 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2422 V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
2423 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2424 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
4d22de3e
DLR
2425}
2426
2427/**
2428 * t3_sge_disable_cqcntxt - disable an SGE completion queue
2429 * @adapter: the adapter
2430 * @id: the completion queue context id
2431 *
2432 * Disable an SGE completion queue. The caller is responsible for
2433 * ensuring only one context operation occurs at a time.
2434 */
2435int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
2436{
2437 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2438 return -EBUSY;
2439
2440 t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
2441 t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
2442 t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
2443 t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
2444 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
2445 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2446 V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
2447 return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2448 0, SG_CONTEXT_CMD_ATTEMPTS, 1);
4d22de3e
DLR
2449}
2450
2451/**
2452 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
2453 * @adapter: the adapter
2454 * @id: the context id
2455 * @op: the operation to perform
2456 *
2457 * Perform the selected operation on an SGE completion queue context.
2458 * The caller is responsible for ensuring only one context operation
2459 * occurs at a time.
2460 */
2461int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
2462 unsigned int credits)
2463{
2464 u32 val;
2465
2466 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2467 return -EBUSY;
2468
2469 t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
2470 t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
2471 V_CONTEXT(id) | F_CQ);
2472 if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
bb9366af 2473 0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
4d22de3e
DLR
2474 return -EIO;
2475
2476 if (op >= 2 && op < 7) {
2477 if (adapter->params.rev > 0)
2478 return G_CQ_INDEX(val);
2479
2480 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2481 V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
2482 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
bb9366af
DLR
2483 F_CONTEXT_CMD_BUSY, 0,
2484 SG_CONTEXT_CMD_ATTEMPTS, 1))
4d22de3e
DLR
2485 return -EIO;
2486 return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
2487 }
2488 return 0;
2489}
2490
2491/**
2492 * t3_sge_read_context - read an SGE context
2493 * @type: the context type
2494 * @adapter: the adapter
2495 * @id: the context id
2496 * @data: holds the retrieved context
2497 *
2498 * Read an SGE egress context. The caller is responsible for ensuring
2499 * only one context operation occurs at a time.
2500 */
2501static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
2502 unsigned int id, u32 data[4])
2503{
2504 if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
2505 return -EBUSY;
2506
2507 t3_write_reg(adapter, A_SG_CONTEXT_CMD,
2508 V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
2509 if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
bb9366af 2510 SG_CONTEXT_CMD_ATTEMPTS, 1))
4d22de3e
DLR
2511 return -EIO;
2512 data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
2513 data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
2514 data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
2515 data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
2516 return 0;
2517}
2518
2519/**
2520 * t3_sge_read_ecntxt - read an SGE egress context
2521 * @adapter: the adapter
2522 * @id: the context id
2523 * @data: holds the retrieved context
2524 *
2525 * Read an SGE egress context. The caller is responsible for ensuring
2526 * only one context operation occurs at a time.
2527 */
2528int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
2529{
2530 if (id >= 65536)
2531 return -EINVAL;
2532 return t3_sge_read_context(F_EGRESS, adapter, id, data);
2533}
2534
2535/**
2536 * t3_sge_read_cq - read an SGE CQ context
2537 * @adapter: the adapter
2538 * @id: the context id
2539 * @data: holds the retrieved context
2540 *
2541 * Read an SGE CQ context. The caller is responsible for ensuring
2542 * only one context operation occurs at a time.
2543 */
2544int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
2545{
2546 if (id >= 65536)
2547 return -EINVAL;
2548 return t3_sge_read_context(F_CQ, adapter, id, data);
2549}
2550
2551/**
2552 * t3_sge_read_fl - read an SGE free-list context
2553 * @adapter: the adapter
2554 * @id: the context id
2555 * @data: holds the retrieved context
2556 *
2557 * Read an SGE free-list context. The caller is responsible for ensuring
2558 * only one context operation occurs at a time.
2559 */
2560int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
2561{
2562 if (id >= SGE_QSETS * 2)
2563 return -EINVAL;
2564 return t3_sge_read_context(F_FREELIST, adapter, id, data);
2565}
2566
2567/**
2568 * t3_sge_read_rspq - read an SGE response queue context
2569 * @adapter: the adapter
2570 * @id: the context id
2571 * @data: holds the retrieved context
2572 *
2573 * Read an SGE response queue context. The caller is responsible for
2574 * ensuring only one context operation occurs at a time.
2575 */
2576int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
2577{
2578 if (id >= SGE_QSETS)
2579 return -EINVAL;
2580 return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
2581}
2582
2583/**
2584 * t3_config_rss - configure Rx packet steering
2585 * @adapter: the adapter
2586 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
2587 * @cpus: values for the CPU lookup table (0xff terminated)
2588 * @rspq: values for the response queue lookup table (0xffff terminated)
2589 *
2590 * Programs the receive packet steering logic. @cpus and @rspq provide
2591 * the values for the CPU and response queue lookup tables. If they
2592 * provide fewer values than the size of the tables the supplied values
2593 * are used repeatedly until the tables are fully populated.
2594 */
2595void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
2596 const u8 * cpus, const u16 *rspq)
2597{
2598 int i, j, cpu_idx = 0, q_idx = 0;
2599
2600 if (cpus)
2601 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2602 u32 val = i << 16;
2603
2604 for (j = 0; j < 2; ++j) {
2605 val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
2606 if (cpus[cpu_idx] == 0xff)
2607 cpu_idx = 0;
2608 }
2609 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
2610 }
2611
2612 if (rspq)
2613 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2614 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2615 (i << 16) | rspq[q_idx++]);
2616 if (rspq[q_idx] == 0xffff)
2617 q_idx = 0;
2618 }
2619
2620 t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
2621}
2622
2623/**
2624 * t3_read_rss - read the contents of the RSS tables
2625 * @adapter: the adapter
2626 * @lkup: holds the contents of the RSS lookup table
2627 * @map: holds the contents of the RSS map table
2628 *
2629 * Reads the contents of the receive packet steering tables.
2630 */
2631int t3_read_rss(struct adapter *adapter, u8 * lkup, u16 *map)
2632{
2633 int i;
2634 u32 val;
2635
2636 if (lkup)
2637 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2638 t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
2639 0xffff0000 | i);
2640 val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
2641 if (!(val & 0x80000000))
2642 return -EAGAIN;
2643 *lkup++ = val;
2644 *lkup++ = (val >> 8);
2645 }
2646
2647 if (map)
2648 for (i = 0; i < RSS_TABLE_SIZE; ++i) {
2649 t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
2650 0xffff0000 | i);
2651 val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
2652 if (!(val & 0x80000000))
2653 return -EAGAIN;
2654 *map++ = val;
2655 }
2656 return 0;
2657}
2658
2659/**
2660 * t3_tp_set_offload_mode - put TP in NIC/offload mode
2661 * @adap: the adapter
2662 * @enable: 1 to select offload mode, 0 for regular NIC
2663 *
2664 * Switches TP to NIC/offload mode.
2665 */
2666void t3_tp_set_offload_mode(struct adapter *adap, int enable)
2667{
2668 if (is_offload(adap) || !enable)
2669 t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
2670 V_NICMODE(!enable));
2671}
2672
/**
 *	pm_num_pages - calculate the number of pages of the payload memory
 *	@mem_size: the size of the payload memory
 *	@pg_size: the size of each payload memory page
 *
 *	Calculate the number of pages, each of the given size, that fit in a
 *	memory of the specified size, respecting the HW requirement that the
 *	number of pages must be a multiple of 24.
 */
static inline unsigned int pm_num_pages(unsigned int mem_size,
					unsigned int pg_size)
{
	/* round the raw page count down to a multiple of 24 */
	return ((mem_size / pg_size) / 24) * 24;
}
2689
/*
 * Program a TP memory-region base register and advance the running
 * offset @start by @size.  Wrapped in do { } while (0) so the macro
 * expands to exactly one statement and stays safe inside unbraced
 * if/else bodies; arguments are parenthesized against precedence
 * surprises.  Note @start is evaluated/modified (lvalue required).
 */
#define mem_region(adap, start, size, reg) \
	do { \
		t3_write_reg((adap), A_ ## reg, (start)); \
		(start) += (size); \
	} while (0)
2693
/**
 *	partition_mem - partition memory and configure TP memory settings
 *	@adap: the adapter
 *	@p: the TP parameters
 *
 *	Partitions context and payload memory and configures TP's memory
 *	registers.
 */
static void partition_mem(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
	unsigned int timers = 0, timers_shift = 22;

	/*
	 * Scale the TP timer region with the number of TIDs on rev > 0
	 * parts; rev 0 keeps the defaults (timers = 0, shift = 22).
	 */
	if (adap->params.rev > 0) {
		if (tids <= 16 * 1024) {
			timers = 1;
			timers_shift = 16;
		} else if (tids <= 64 * 1024) {
			timers = 2;
			timers_shift = 18;
		} else if (tids <= 256 * 1024) {
			timers = 3;
			timers_shift = 20;
		}
	}

	/* Per-channel Rx/Tx payload-memory sizes (Tx in the upper half) */
	t3_write_reg(adap, A_TP_PMM_SIZE,
		     p->chan_rx_size | (p->chan_tx_size >> 16));

	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));

	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);

	pstructs = p->rx_num_pgs + p->tx_num_pgs;
	/* Add a bit of headroom and make multiple of 24 */
	pstructs += 48;
	pstructs -= pstructs % 24;
	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);

	/*
	 * Lay out CM memory: connection TCBs first, then the SGE egress
	 * and CQ context regions (64K entries of 64 bytes each), the TP
	 * timer region, and the pstruct/free-list regions.  mem_region()
	 * advances m as each region is placed.
	 */
	m = tids * TCB_SIZE;
	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
	/* reserve space for the timer queues (sized by timers_shift) */
	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);

	/* the CIM SDRAM region gets whatever is left, 4KB-aligned */
	m = (m + 4095) & ~0xfff;
	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);

	/*
	 * Estimate how many connections the remaining CM memory supports
	 * and grow the server region if the MC5 partitioning allows more
	 * TIDs than that.  NOTE(review): the 3MB/3072-byte constants are
	 * presumably per-connection CM overhead — confirm against TP docs.
	 */
	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
	if (tids < m)
		adap->params.mc5.nservers += m - tids;
}
2759
/* Write a TP indirect register: latch the address, then write the data. */
static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
				  u32 val)
{
	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
	t3_write_reg(adap, A_TP_PIO_DATA, val);
}
2766
/*
 * Program TP's static configuration: offload options, TCP options,
 * delayed-ACK behavior, congestion-control knobs and Tx pacing.  The
 * values are fixed policy; @p is currently unused by this function.
 */
static void tp_config(struct adapter *adap, const struct tp_params *p)
{
	/* checksum offload, Tx pacing, path MTU; default IP TTL 64 */
	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
	/* TCP options: 576-byte default MTU, window scaling, timestamps, SACK */
	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
		     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
	/* delayed-ACK thresholds and automatic state machine */
	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
		     V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
			 F_IPV6ENABLE | F_NICMODE);
	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
			 adap->params.rev > 0 ? F_ENABLEESND :
			 F_T3A_ENABLEESND);

	t3_set_reg_field(adap, A_TP_PC_CONFIG,
			 F_ENABLEEPCMDAFULL,
			 F_ENABLEOCSPIFULL |F_TXDEFERENABLE | F_HEARBEATDACK |
			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
			 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
			 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
	/*
	 * NOTE(review): the proxy flow-control register is written twice
	 * with different values — presumably intentional (settling the
	 * watermark); confirm against the TP programming guide.
	 */
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);

	if (adap->params.rev > 0) {
		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
		/* rev > 0 parts support automatic Tx pacing */
		t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
				 F_TXPACEAUTO);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
	} else
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);

	if (adap->params.rev == T3_REV_C)
		t3_set_reg_field(adap, A_TP_PC_CONFIG,
				 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
				 V_TABLELATENCYDELTA(4));

	/* no Tx modulation-queue weighting; fixed modulation rate limit */
	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
}
2816
2817/* Desired TP timer resolution in usec */
2818#define TP_TMR_RES 50
2819
2820/* TCP timer values in ms */
2821#define TP_DACK_TIMER 50
2822#define TP_RTO_MIN 250
2823
/**
 *	tp_set_timers - set TP timing parameters
 *	@adap: the adapter to set
 *	@core_clk: the core clock frequency in Hz
 *
 *	Set TP's timing parameters, such as the various timer resolutions and
 *	the TCP timer values.
 */
static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
{
	/* log2 of core-clock ticks per timer tick, per desired resolution */
	unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
	unsigned int dack_re = fls(core_clk / 5000) - 1;	/* 200us */
	unsigned int tstamp_re = fls(core_clk / 1000);	/* 1ms, at least */
	unsigned int tps = core_clk >> tre;	/* timer ticks per second */

	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
		     V_DELAYEDACKRESOLUTION(dack_re) |
		     V_TIMESTAMPRESOLUTION(tstamp_re));
	t3_write_reg(adap, A_TP_DACK_TIMER,
		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
	/* retransmit backoff multipliers 0..15, four per register */
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
		     V_KEEPALIVEMAX(9));

/* convert seconds to hardware timer ticks for the writes below */
#define SECONDS * tps

	t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);

#undef SECONDS
}
2867
/**
 *	t3_tp_set_coalescing_size - set receive coalescing size
 *	@adap: the adapter
 *	@size: the receive coalescing size, 0 disables coalescing
 *	@psh: whether a set PSH bit should deliver coalesced data
 *
 *	Set the receive coalescing size and PSH bit handling.
 *
 *	Returns 0 on success, -EINVAL if @size exceeds the hardware limit.
 */
int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
{
	u32 val;

	if (size > MAX_RX_COALESCING_LEN)
		return -EINVAL;

	val = t3_read_reg(adap, A_TP_PARA_REG3);
	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);

	if (size) {
		val |= F_RXCOALESCEENABLE;
		if (psh)
			val |= F_RXCOALESCEPSHEN;
		/*
		 * NOTE(review): this clamp is redundant — size was already
		 * rejected above if it exceeded MAX_RX_COALESCING_LEN.
		 * Harmless, kept for safety.
		 */
		size = min(MAX_RX_COALESCING_LEN, size);
		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
	}
	t3_write_reg(adap, A_TP_PARA_REG3, val);
	return 0;
}
2897
/**
 *	t3_tp_set_max_rxsize - set the max receive size
 *	@adap: the adapter
 *	@size: the max receive size
 *
 *	Set TP's max receive size.  This is the limit that applies when
 *	receive coalescing is disabled.  The same limit is programmed for
 *	both payload-memory transfer-length channels.
 */
void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
{
	t3_write_reg(adap, A_TP_PARA_REG7,
		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
}
2911
/*
 * init_mtus - populate the default 16-entry MTU table
 *
 * See draft-mathis-plpmtud-00.txt for the values.  The minimum is 88 so
 * that max-size TCP/IP headers with SACK and timestamps enabled still
 * leave at least 8 bytes of payload.
 */
static void init_mtus(unsigned short mtus[])
{
	static const unsigned short default_mtus[] = {
		88, 88, 256, 512, 576, 1024, 1280, 1492,
		1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
	};
	unsigned int idx;

	for (idx = 0; idx < sizeof(default_mtus) / sizeof(default_mtus[0]);
	     idx++)
		mtus[idx] = default_mtus[idx];
}
2936
/*
 * Initial congestion control parameters: @a receives the additive
 * increment for each of the 32 congestion windows, @b the corresponding
 * shift (beta) value.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	static const unsigned short incr_tbl[32] = {
		1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 3, 4, 5, 6, 7, 8,
		9, 10, 14, 17, 21, 25, 30, 35, 45, 60, 80, 100,
		200, 300, 400, 500
	};
	static const unsigned short beta_tbl[32] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3,
		3, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 7, 7
	};
	unsigned int w;

	for (w = 0; w < 32; w++) {
		a[w] = incr_tbl[w];
		b[w] = beta_tbl[w];
	}
}
2976
2977/* The minimum additive increment value for the congestion control table */
2978#define CC_MIN_INCR 2U
2979
/**
 *	t3_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the unrestricted values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *	@mtu_cap: the maximum permitted effective MTU
 *
 *	Write the MTU table with the supplied MTUs capping each at &mtu_cap.
 *	Update the high-speed congestion control table with the supplied alpha,
 *	beta, and MTUs.
 */
void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
		  unsigned short alpha[NCCTRL_WIN],
		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
{
	/* average packet count per congestion window */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = min(mtus[i], mtu_cap);
		unsigned int log2 = fls(mtu);

		if (!(mtu & ((1 << log2) >> 2)))	/* round */
			log2--;
		/* entry index in bits 31:24, log2(MTU) in 23:16, MTU below */
		t3_write_reg(adap, A_TP_MTU_TABLE,
			     (i << 24) | (log2 << 16) | mtu);

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/* additive increment, scaled by alpha and floored */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
3024
3025/**
3026 * t3_read_hw_mtus - returns the values in the HW MTU table
3027 * @adap: the adapter
3028 * @mtus: where to store the HW MTU values
3029 *
3030 * Reads the HW MTU table.
3031 */
3032void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
3033{
3034 int i;
3035
3036 for (i = 0; i < NMTUS; ++i) {
3037 unsigned int val;
3038
3039 t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
3040 val = t3_read_reg(adap, A_TP_MTU_TABLE);
3041 mtus[i] = val & 0x3fff;
3042 }
3043}
3044
/**
 *	t3_get_cong_cntl_tab - reads the congestion control table
 *	@adap: the adapter
 *	@incr: where to store the additive-increment values
 *
 *	Reads the additive increments programmed into the HW congestion
 *	control table.
 */
void t3_get_cong_cntl_tab(struct adapter *adap,
			  unsigned short incr[NMTUS][NCCTRL_WIN])
{
	unsigned int mtu, w;

	for (mtu = 0; mtu < NMTUS; ++mtu)
		for (w = 0; w < NCCTRL_WIN; ++w) {
			/* select the (mtu, window) entry, then read it */
			t3_write_reg(adap, A_TP_CCTRL_TABLE,
				     0xffff0000 | (mtu << 5) | w);
			incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
				       0x1fff;
		}
}
3066
/**
 *	t3_tp_get_mib_stats - read TP's MIB counters
 *	@adap: the adapter
 *	@tps: holds the returned counter values
 *
 *	Returns the values of TP's MIB counters.  The whole structure is
 *	filled with one indirect read burst of sizeof(*tps)/4 words.
 */
void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
{
	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
			 sizeof(*tps) / sizeof(u32), 0);
}
3079
/*
 * Program a ULP RX region's low/high limit registers and advance the
 * running base @start by @len.  Wrapped in do { } while (0) so the
 * multi-statement macro is safe under un-braced conditionals; @start
 * must be an lvalue.
 */
#define ulp_region(adap, name, start, len) \
	do { \
		t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
		t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
			     (start) + (len) - 1); \
		(start) += (len); \
	} while (0)

/* As above for ULP TX regions; this variant does NOT advance @start. */
#define ulptx_region(adap, name, start, len) \
	do { \
		t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
		t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
			     (start) + (len) - 1); \
	} while (0)
3090
/*
 * Partition the second half of payload memory (starting at chan_rx_size)
 * among the ULP regions.  Note that ulptx_region() does not advance m,
 * so each ULPTX region shares its address range with the ULPRX region
 * programmed right after it (TPT/STAG, PBL/PBL) — presumably intentional
 * RX/TX sharing; confirm against the ULP documentation.
 */
static void ulp_config(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m = p->chan_rx_size;

	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
}
3104
480fe1a3
DLR
3105/**
3106 * t3_set_proto_sram - set the contents of the protocol sram
3107 * @adapter: the adapter
3108 * @data: the protocol image
3109 *
3110 * Write the contents of the protocol SRAM.
3111 */
2c733a16 3112int t3_set_proto_sram(struct adapter *adap, const u8 *data)
480fe1a3
DLR
3113{
3114 int i;
2c733a16 3115 const __be32 *buf = (const __be32 *)data;
480fe1a3
DLR
3116
3117 for (i = 0; i < PROTO_SRAM_LINES; i++) {
05e5c116
AV
3118 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
3119 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
3120 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
3121 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
3122 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));
2eab17ab 3123
480fe1a3
DLR
3124 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
3125 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
3126 return -EIO;
3127 }
3128 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
3129
3130 return 0;
3131}
3132
/*
 * Program one of the two TP trace filters (Tx for index 0, Rx otherwise).
 * The 5-tuple plus VLAN/interface is packed into four key words with a
 * parallel set of mask words; bit 28 of the last key word enables the
 * filter and bit 29 inverts the match sense.
 */
void t3_config_trace_filter(struct adapter *adapter,
			    const struct trace_params *tp, int filter_index,
			    int invert, int enable)
{
	u32 addr, key[4], mask[4];

	/* pack sport/sip/dport/dip/proto/vlan/intf into 4 x 32-bit words */
	key[0] = tp->sport | (tp->sip << 16);
	key[1] = (tp->sip >> 16) | (tp->dport << 16);
	key[2] = tp->dip;
	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);

	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
	mask[2] = tp->dip_mask;
	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);

	if (invert)
		key[3] |= (1 << 29);
	if (enable)
		key[3] |= (1 << 28);

	/* key and mask words alternate at consecutive indirect addresses */
	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
	tp_wr_indirect(adapter, addr++, key[0]);
	tp_wr_indirect(adapter, addr++, mask[0]);
	tp_wr_indirect(adapter, addr++, key[1]);
	tp_wr_indirect(adapter, addr++, mask[1]);
	tp_wr_indirect(adapter, addr++, key[2]);
	tp_wr_indirect(adapter, addr++, mask[2]);
	tp_wr_indirect(adapter, addr++, key[3]);
	tp_wr_indirect(adapter, addr, mask[3]);
	t3_read_reg(adapter, A_TP_PIO_DATA);	/* flush posted writes */
}
3165
/**
 *	t3_config_sched - configure a HW traffic scheduler
 *	@adap: the adapter
 *	@kbps: target rate in Kbps, 0 leaves the rate parameters at 0
 *	@sched: the scheduler index
 *
 *	Configure a HW scheduler for the target rate.  The rate is expressed
 *	as bytes-per-tick (bpt) every cycles-per-tick (cpt) core clocks; the
 *	(cpt, bpt) pair minimizing the rate error is searched exhaustively.
 *
 *	Returns 0 on success, -EINVAL if no (cpt, bpt) pair can represent a
 *	non-zero requested rate.
 */
int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
{
	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
	unsigned int clk = adap->params.vpd.cclk * 1000;
	unsigned int selected_cpt = 0, selected_bpt = 0;

	if (kbps > 0) {
		kbps *= 125;	/* Kbits/s -> bytes/s (1000 / 8) */
		for (cpt = 1; cpt <= 255; cpt++) {
			tps = clk / cpt;	/* ticks per second */
			bpt = (kbps + tps / 2) / tps;	/* rounded bytes/tick */
			if (bpt > 0 && bpt <= 255) {
				v = bpt * tps;
				delta = v >= kbps ? v - kbps : kbps - v;
				/* <= prefers larger cpt at equal error */
				if (delta <= mindelta) {
					mindelta = delta;
					selected_cpt = cpt;
					selected_bpt = bpt;
				}
			} else if (selected_cpt)
				break;
		}
		if (!selected_cpt)
			return -EINVAL;
	}
	/* two schedulers per PIO word; odd index occupies the upper half */
	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
	else
		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
	return 0;
}
3209
/*
 * Initialize TP: static configuration, VLAN acceleration, and — for
 * offload-capable adapters — timers and free-list initialization.
 * Returns 0 on success, non-zero if free-list initialization times out.
 */
static int tp_init(struct adapter *adap, const struct tp_params *p)
{
	int busy = 0;

	tp_config(adap, p);
	t3_set_vlan_accel(adap, 3, 0);

	if (is_offload(adap)) {
		tp_set_timers(adap, adap->params.vpd.cclk * 1000);
		/* kick off free-list init and wait for it to complete */
		t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
		busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
				       0, 1000, 5);
		if (busy)
			CH_ERR(adap, "TP initialization timed out\n");
	}

	/* only take TP out of reset if initialization succeeded */
	if (!busy)
		t3_write_reg(adap, A_TP_RESET, F_TPRESET);
	return busy;
}
3230
/*
 * Enable/disable the MPS active-port bits according to @port_mask.
 * Returns -EINVAL if the mask names a port the adapter does not have.
 */
int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
{
	if (port_mask & ~((1 << adap->params.nports) - 1))
		return -EINVAL;
	t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
			 port_mask << S_PORT0ACTIVE);
	return 0;
}
3239
/*
 * Perform the bits of HW initialization that are dependent on the Tx
 * channels being used.
 */
static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
{
	int i;

	if (chan_map != 3) {				/* one channel */
		/* no arbitration needed with a single channel */
		t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
		t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
			     (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
			      F_TPTXPORT1EN | F_PORT1ACTIVE));
		t3_write_reg(adap, A_PM1_TX_CFG,
			     chan_map == 1 ? 0xffffffff : 0);
	} else {					/* two channels */
		/* round-robin arbitration, equal DMA weights */
		t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
		t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
		t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
			     V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
		t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
			     F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
			     F_ENFORCEPKT);
		/* split PM1 Tx memory evenly between the two channels */
		t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
		t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
			     V_TX_MOD_QUEUE_REQ_MAP(0xaa));
		for (i = 0; i < 16; i++)
			t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
				     (i << 16) | 0x1010);
	}
}
3273
/*
 * Calibrate the XGMAC I/O impedance.  On XAUI adapters, retry the
 * hardware calibration up to 5 times and latch the result; on RGMII
 * adapters, program fixed pull-up/pull-down values.  Returns 0 on
 * success, -1 if XAUI calibration never completes cleanly.
 */
static int calibrate_xgm(struct adapter *adapter)
{
	if (uses_xaui(adapter)) {
		unsigned int v, i;

		for (i = 0; i < 5; ++i) {
			/* restart calibration, give it 1 ms, check result */
			t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
			t3_read_reg(adapter, A_XGM_XAUI_IMP);
			msleep(1);
			v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
			if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
				t3_write_reg(adapter, A_XGM_XAUI_IMP,
					     V_XAUIIMP(G_CALIMP(v) >> 2));
				return 0;
			}
		}
		CH_ERR(adapter, "MAC calibration failed\n");
		return -1;
	} else {
		t3_write_reg(adapter, A_XGM_RGMII_IMP,
			     V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 F_XGM_IMPSETUPDATE);
	}
	return 0;
}
3300
/*
 * T3B variant of the RGMII impedance calibration: reset the calibration
 * logic, pulse the impedance-set-update bit, then pulse the cal-update
 * bit to latch the result.  XAUI adapters need no action here.
 */
static void calibrate_xgm_t3b(struct adapter *adapter)
{
	if (!uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
			     F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
				 F_XGM_IMPSETUPDATE);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
				 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
		t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
	}
}
3315
/*
 * MC7 DRAM timing parameters, programmed into A_MC7_PARM by mc7_init().
 * Field names mirror the V_* register fields; values are presumably in
 * MC7 clock cycles — confirm against the MC7 documentation.
 */
struct mc7_timing_params {
	unsigned char ActToPreDly;	/* activate-to-precharge delay */
	unsigned char ActToRdWrDly;	/* activate-to-read/write delay */
	unsigned char PreCyc;		/* precharge cycles */
	unsigned char RefCyc[5];	/* refresh cycles, indexed by density */
	unsigned char BkCyc;		/* bank cycles */
	unsigned char WrToRdDly;	/* write-to-read delay */
	unsigned char RdToWrDly;	/* read-to-write delay */
};
3325
/*
 * Write a value to a register and check that the write completed.  These
 * writes normally complete in a cycle or two, so one read should suffice.
 * The very first read exists to flush the posted write to the device.
 * Returns 0 when the BUSY bit has cleared, -EIO otherwise.
 */
static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
{
	t3_write_reg(adapter, addr, val);
	t3_read_reg(adapter, addr);	/* flush */
	if (!(t3_read_reg(adapter, addr) & F_BUSY))
		return 0;
	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
	return -EIO;
}
3340
/*
 * Initialize an MC7 memory controller and the memory attached to it:
 * impedance calibration, timing programming, the DRAM initialization
 * sequence (precharge / mode-register writes / refreshes), periodic
 * refresh setup, ECC enable, and a full-range BIST pass.  Returns 0 on
 * success (including the no-memory case), -1 on any failure.
 */
static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
{
	/* mode-register values written to A_MC7_MODE, indexed by mem_type */
	static const unsigned int mc7_mode[] = {
		0x632, 0x642, 0x652, 0x432, 0x442
	};
	/* DRAM timing parameter sets, indexed by mem_type */
	static const struct mc7_timing_params mc7_timings[] = {
		{12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
		{12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
		{12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
		{9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
		{9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
	};

	u32 val;
	unsigned int width, density, slow, attempts;
	struct adapter *adapter = mc7->adapter;
	const struct mc7_timing_params *p = &mc7_timings[mem_type];

	/* nothing to do if this MC7 has no memory attached */
	if (!mc7->size)
		return 0;

	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	slow = val & F_SLOW;
	width = G_WIDTH(val);
	density = G_DEN(val);

	/* enable the memory interface before touching anything else */
	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
	msleep(1);

	if (!slow) {
		/* run impedance calibration; 1 ms should be plenty */
		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
		t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
		msleep(1);
		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
			CH_ERR(adapter, "%s MC7 calibration timed out\n",
			       mc7->name);
			goto out_fail;
		}
	}

	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
		     V_ACTTOPREDLY(p->ActToPreDly) |
		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
		     val | F_CLKEN | F_TERM150);
	t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */

	if (!slow)
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
				 F_DLLENB);
	udelay(1);

	/* DRAM init sequence: precharge and extended mode registers */
	val = slow ? 3 : 6;
	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	if (!slow) {
		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
		udelay(5);
	}

	/* precharge, two refreshes, then program the mode registers */
	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
		       mc7_mode[mem_type]) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	/* clock value is in KHz */
	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;	/* ns */
	mc7_clock /= 1000000;	/* KHz->MHz, ns->us */

	/* enable periodic refresh at the derived interval */
	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
		     F_PERREFEN | V_PREREFDIV(mc7_clock));
	t3_read_reg(adapter, mc7->offset + A_MC7_REF);	/* flush */

	/* enable ECC and BIST the entire memory range */
	t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
		     (mc7->size << width) - 1);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
	t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);	/* flush */

	/* poll for BIST completion, up to 50 x 250 ms */
	attempts = 50;
	do {
		msleep(250);
		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
	} while ((val & F_BUSY) && --attempts);
	if (val & F_BUSY) {
		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
		goto out_fail;
	}

	/* Enable normal memory accesses. */
	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
	return 0;

out_fail:
	return -1;
}
3453
/*
 * Configure PCIe-specific parameters: ACK latency and replay-timer
 * limits derived from the negotiated link width, max payload size and
 * the fast-training sequence counts, plus link-down reset behavior.
 */
static void config_pcie(struct adapter *adap)
{
	/* ACK latency values, indexed by [log2(link width)][payload size] */
	static const u16 ack_lat[4][6] = {
		{237, 416, 559, 1071, 2095, 4143},
		{128, 217, 289, 545, 1057, 2081},
		{73, 118, 154, 282, 538, 1050},
		{67, 107, 86, 150, 278, 534}
	};
	/* replay-timer values, same indexing as ack_lat */
	static const u16 rpl_tmr[4][6] = {
		{711, 1248, 1677, 3213, 6285, 12429},
		{384, 651, 867, 1635, 3171, 6243},
		{219, 354, 462, 846, 1614, 3150},
		{201, 321, 258, 450, 834, 1602}
	};

	u16 val, devid;
	unsigned int log2_width, pldsize;
	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;

	pci_read_config_word(adap->pdev,
			     adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
			     &val);
	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;

	/*
	 * NOTE(review): device ID 0x37 forces minimum payload and read
	 * request sizes — presumably a device-specific workaround;
	 * confirm against Chelsio errata.
	 */
	pci_read_config_word(adap->pdev, 0x2, &devid);
	if (devid == 0x37) {
		pci_write_config_word(adap->pdev,
				      adap->params.pci.pcie_cap_addr +
				      PCI_EXP_DEVCTL,
				      val & ~PCI_EXP_DEVCTL_READRQ &
				      ~PCI_EXP_DEVCTL_PAYLOAD);
		pldsize = 0;
	}

	pci_read_config_word(adap->pdev,
			     adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
			     &val);

	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
	    G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
	log2_width = fls(adap->params.pci.width) - 1;
	acklat = ack_lat[log2_width][pldsize];
	if (val & 1)		/* check LOsEnable */
		acklat += fst_trn_tx * 4;
	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;

	/* rev 0 parts use a differently-located ACK latency field */
	if (adap->params.rev == 0)
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
				 V_T3A_ACKLAT(M_T3A_ACKLAT),
				 V_T3A_ACKLAT(acklat));
	else
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
				 V_ACKLAT(acklat));

	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
			 V_REPLAYLMT(rpllmt));

	/* clear any stale PCIe errors, enable link-down reset handling */
	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
	t3_set_reg_field(adap, A_PCIE_CFG, 0,
			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
}
3517
/*
 * Initialize and configure T3 HW modules.  This performs the
 * initialization steps that need to be done once after a card is reset.
 * MAC and PHY initialization is handled separately whenever a port is
 * enabled.
 *
 * fw_params are passed to FW and their value is platform dependent.  Only
 * the top 8 bits are available for use, the rest must be 0.
 *
 * Returns 0 on success, -EIO on any initialization failure.
 */
int t3_init_hw(struct adapter *adapter, u32 fw_params)
{
	int err = -EIO, attempts, i;
	const struct vpd_params *vpd = &adapter->params.vpd;

	/* MAC impedance calibration; rev > 0 uses the T3B sequence */
	if (adapter->params.rev > 0)
		calibrate_xgm_t3b(adapter);
	else if (calibrate_xgm(adapter))
		goto out_err;

	/* memory controllers only exist when a memory clock is specified */
	if (vpd->mclk) {
		partition_mem(adapter, &adapter->params.tp);

		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
				adapter->params.mc5.nfilters,
				adapter->params.mc5.nroutes))
			goto out_err;

		/* clear the 32 CQ SGE contexts */
		for (i = 0; i < 32; i++)
			if (clear_sge_ctxt(adapter, i, F_CQ))
				goto out_err;
	}

	if (tp_init(adapter, &adapter->params.tp))
		goto out_err;

	t3_tp_set_coalescing_size(adapter,
				  min(adapter->params.sge.max_pkt_size,
				      MAX_RX_COALESCING_LEN), 1);
	t3_tp_set_max_rxsize(adapter,
			     min(adapter->params.sge.max_pkt_size, 16384U));
	ulp_config(adapter, &adapter->params.tp);

	if (is_pcie(adapter))
		config_pcie(adapter);
	else
		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
				 F_DMASTOPEN | F_CLIDECEN);

	if (adapter->params.rev == T3_REV_C)
		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
				 F_CFG_CQE_SOP_MASK);

	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
	chan_init_hw(adapter, adapter->params.chan_map);
	t3_sge_init(adapter, &adapter->params.sge);

	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));

	/* start the firmware boot from flash and wait for the uP */
	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
	t3_write_reg(adapter, A_CIM_BOOT_CFG,
		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */

	attempts = 100;
	do {			/* wait for uP to initialize */
		msleep(20);
	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
	if (!attempts) {
		CH_ERR(adapter, "uP initialization timed out\n");
		goto out_err;
	}

	err = 0;
out_err:
	return err;
}
3598
/**
 *	get_pci_mode - determine a card's PCI mode
 *	@adapter: the adapter
 *	@p: where to store the PCI settings
 *
 *	Determines a card's PCI mode and associated parameters, such as speed
 *	and width.  For PCIe only the variant, capability offset and link
 *	width are filled in; @p->speed is left untouched in that case.
 */
static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
	static unsigned short speed_map[] = { 33, 66, 100, 133 };
	u32 pci_mode, pcie_cap;

	pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (pcie_cap) {
		u16 val;

		p->variant = PCI_VARIANT_PCIE;
		p->pcie_cap_addr = pcie_cap;
		/* negotiated link width lives in LNKSTA bits 9:4 */
		pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
				     &val);
		p->width = (val >> 4) & 0x3f;
		return;
	}

	/* PCI/PCI-X: decode speed, bus width and variant from PCIX_MODE */
	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
	p->width = (pci_mode & F_64BIT) ? 64 : 32;
	pci_mode = G_PCIXINITPAT(pci_mode);
	if (pci_mode == 0)
		p->variant = PCI_VARIANT_PCI;
	else if (pci_mode < 4)
		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
	else if (pci_mode < 8)
		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
	else
		p->variant = PCI_VARIANT_PCIX_266_MODE2;
}
3637
/**
 *	init_link_config - initialize a link's SW state
 *	@lc: structure holding the link state
 *	@caps: bit mask of the link's capabilities (ethtool SUPPORTED_* bits)
 *
 *	Initializes the SW state maintained for each link, including the link's
 *	capabilities and default speed/duplex/flow-control/autonegotiation
 *	settings.
 */
static void init_link_config(struct link_config *lc, unsigned int caps)
{
	lc->supported = caps;
	lc->requested_speed = lc->speed = SPEED_INVALID;
	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
	if (lc->supported & SUPPORTED_Autoneg) {
		/* advertise everything supported, autonegotiate pause too */
		lc->advertising = lc->supported;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}
3662
/**
 *	mc7_calc_size - calculate MC7 memory size
 *	@cfg: the MC7 configuration
 *
 *	Calculates the size of an MC7 memory in bytes from the value of its
 *	configuration register.
 */
static unsigned int mc7_calc_size(u32 cfg)
{
	unsigned int width = G_WIDTH(cfg);
	unsigned int banks = !!(cfg & F_BKS) + 1;	/* 1 or 2 banks */
	unsigned int org = !!(cfg & F_ORG) + 1;		/* organization */
	unsigned int density = G_DEN(cfg);
	/* base 256 MB scaled by density and banks, divided by org/width */
	unsigned int MBs = ((256 << density) * banks) / (org << width);

	return MBs << 20;	/* MB -> bytes */
}
3680
7b9b0943
RD
3681static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
3682 unsigned int base_addr, const char *name)
4d22de3e
DLR
3683{
3684 u32 cfg;
3685
3686 mc7->adapter = adapter;
3687 mc7->name = name;
3688 mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
3689 cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
8ac3ba68 3690 mc7->size = mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
4d22de3e
DLR
3691 mc7->width = G_WIDTH(cfg);
3692}
3693
/*
 * Initialize the SW state for one MAC (cmac) instance and apply the rev-0
 * XAUI serdes setup.  @index selects which XGMAC register block this cmac
 * maps onto.
 */
void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
{
	u16 devid;

	mac->adapter = adapter;
	/* Read the PCI device ID straight from config space (offset 0x2). */
	pci_read_config_word(adapter->pdev, 0x2, &devid);

	/*
	 * NOTE(review): device ID 0x37 with no second XAUI config in the
	 * VPD appears to be a single-MAC part, hence force index 0 --
	 * confirm against the T3 hardware documentation.
	 */
	if (devid == 0x37 && !adapter->params.vpd.xauicfg[1])
		index = 0;
	/* Offset of this MAC's registers from the XGMAC0 block. */
	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
	mac->nucast = 1;	/* start out with a single unicast address */

	/* Rev-0 parts using XAUI need the serdes programmed and RGMII
	 * disabled; the serdes control value differs for 10G vs 1G. */
	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
				 F_ENRGMII, 0);
	}
}
3713
/*
 * Early hardware initialization performed before the ports are brought up:
 * MDIO/I2C clocking, GPIO outputs, and the XGMAC port configuration.
 * The register write/read-back pairs below are order-sensitive; the reads
 * flush the writes before the next step.
 */
void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
{
	/* Port speed field: 3 for 10G parts, 2 otherwise. */
	u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);

	mi1_init(adapter, ai);
	t3_write_reg(adapter, A_I2C_CFG,	/* set for 80KHz */
		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
	/* Enable the board-specific GPIO outputs plus GPIO0 driven high. */
	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
	t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));

	/* RGMII is used on rev-0 parts and on boards without XAUI. */
	if (adapter->params.rev == 0 || !uses_xaui(adapter))
		val |= F_ENRGMII;

	/* Enable MAC clocks so we can access the registers */
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);

	/* Then pulse the clock-divider reset on both XGMACs. */
	val |= F_CLKDIVRESET_;
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
}
3739
/*
 * Reset the adapter.
 * Older PCIe cards lose their config space during reset, PCI-X
 * ones don't.
 */
int t3_reset_adapter(struct adapter *adapter)
{
	/* Pre-B2 PCIe parts need their config space preserved across reset. */
	int i, save_and_restore_pcie =
	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
	uint16_t devid = 0;

	if (save_and_restore_pcie)
		pci_save_state(adapter->pdev);
	/* Trigger the warm reset. */
	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);

	/*
	 * Delay. Give Some time to device to reset fully.
	 * XXX The delay time should be modified.
	 */
	for (i = 0; i < 10; i++) {
		msleep(50);
		/*
		 * Poll the vendor ID at config offset 0; it reads back as
		 * Chelsio's 0x1425 once the device is out of reset.
		 */
		pci_read_config_word(adapter->pdev, 0x00, &devid);
		if (devid == 0x1425)
			break;
	}

	/* Device never came back within ~500ms: report failure. */
	if (devid != 0x1425)
		return -1;

	if (save_and_restore_pcie)
		pci_restore_state(adapter->pdev);
	return 0;
}
3773
7b9b0943 3774static int init_parity(struct adapter *adap)
b881955b
DLR
3775{
3776 int i, err, addr;
3777
3778 if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
3779 return -EBUSY;
3780
3781 for (err = i = 0; !err && i < 16; i++)
3782 err = clear_sge_ctxt(adap, i, F_EGRESS);
3783 for (i = 0xfff0; !err && i <= 0xffff; i++)
3784 err = clear_sge_ctxt(adap, i, F_EGRESS);
3785 for (i = 0; !err && i < SGE_QSETS; i++)
3786 err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
3787 if (err)
3788 return err;
3789
3790 t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
3791 for (i = 0; i < 4; i++)
3792 for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
3793 t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
3794 F_IBQDBGWR | V_IBQDBGQID(i) |
3795 V_IBQDBGADDR(addr));
3796 err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
3797 F_IBQDBGBUSY, 0, 2, 1);
3798 if (err)
3799 return err;
3800 }
3801 return 0;
3802}
3803
/*
 * Initialize adapter SW state for the various HW modules, set initial values
 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
 * interface.
 */
int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
		    int reset)
{
	int ret;
	/* j walks the VPD port_type[] table; -1 so the pre-increment
	 * scan below starts at index 0. */
	unsigned int i, j = -1;

	get_pci_mode(adapter, &adapter->params.pci);

	adapter->params.info = ai;
	adapter->params.nports = ai->nports0 + ai->nports1;
	/* Bitmap of populated channels: bit 0 = channel 0, bit 1 = channel 1. */
	adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
	/*
	 * We used to only run the "adapter check task" once a second if
	 * we had PHYs which didn't support interrupts (we would check
	 * their link status once a second). Now we check other conditions
	 * in that routine which could potentially impose a very high
	 * interrupt load on the system. As such, we now always scan the
	 * adapter state once a second ...
	 */
	adapter->params.linkpoll_period = 10;
	/* 10G parts accumulate MAC stats more often. */
	adapter->params.stats_update_period = is_10G(adapter) ?
	    MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
	adapter->params.pci.vpd_cap_addr =
	    pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
	ret = get_vpd_params(adapter, &adapter->params.vpd);
	if (ret < 0)
		return ret;

	if (reset && t3_reset_adapter(adapter))
		return -1;

	t3_sge_prep(adapter, &adapter->params.sge);

	/* A nonzero memory clock in the VPD means external MC7 memories
	 * are present; size them and derive the TP memory layout. */
	if (adapter->params.vpd.mclk) {
		struct tp_params *p = &adapter->params.tp;

		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");

		/* Two channels only when both channel-map bits are set. */
		p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
		p->cm_size = t3_mc7_size(&adapter->cm);
		p->chan_rx_size = p->pmrx_size / 2;	/* only 1 Rx channel */
		p->chan_tx_size = p->pmtx_size / p->nchan;
		p->rx_pg_size = 64 * 1024;
		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
		/* More timer queues for big CM memories or newer revs. */
		p->ntimer_qs = p->cm_size >= (128 << 20) ||
		    adapter->params.rev > 0 ? 12 : 6;
	}

	/* Offload requires all three MC7 memories to be present. */
	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
				  t3_mc7_size(&adapter->pmtx) &&
				  t3_mc7_size(&adapter->cm);

	if (is_offload(adapter)) {
		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
		/* Rev 0 parts get no filters. */
		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
		    DEFAULT_NFILTERS : 0;
		adapter->params.mc5.nroutes = 0;
		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);

		init_mtus(adapter->params.mtus);
		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
	}

	early_hw_init(adapter, ai);
	ret = init_parity(adapter);
	if (ret)
		return ret;

	for_each_port(adapter, i) {
		u8 hw_addr[6];
		const struct port_type_info *pti;
		struct port_info *p = adap2pinfo(adapter, i);

		/* Skip unpopulated entries in the VPD port-type table. */
		while (!adapter->params.vpd.port_type[++j])
			;

		pti = &port_types[adapter->params.vpd.port_type[j]];
		if (!pti->phy_prep) {
			CH_ALERT(adapter, "Invalid port type index %d\n",
				 adapter->params.vpd.port_type[j]);
			return -EINVAL;
		}

		p->phy.mdio.dev = adapter->port[i];
		ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
				    ai->mdio_ops);
		if (ret)
			return ret;
		mac_prep(&p->mac, adapter, j);

		/*
		 * The VPD EEPROM stores the base Ethernet address for the
		 * card. A port's address is derived from the base by adding
		 * the port's index to the base's low octet.
		 */
		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;

		memcpy(adapter->port[i]->dev_addr, hw_addr,
		       ETH_ALEN);
		memcpy(adapter->port[i]->perm_addr, hw_addr,
		       ETH_ALEN);
		init_link_config(&p->link_config, p->phy.caps);
		/* Leave the PHY powered down until the port is opened. */
		p->phy.ops->power_down(&p->phy, 1);

		/*
		 * If the PHY doesn't support interrupts for link status
		 * changes, schedule a scan of the adapter links at least
		 * once a second.
		 */
		if (!(p->phy.caps & SUPPORTED_IRQ) &&
		    adapter->params.linkpoll_period > 10)
			adapter->params.linkpoll_period = 10;
	}

	return 0;
}
3933
/*
 * Drive GPIO0 output high.  NOTE(review): per the function name this
 * presumably lights the adapter's "ready" LED -- confirm against the
 * board GPIO wiring.
 */
void t3_led_ready(struct adapter *adapter)
{
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
}
204e2f98
DLR
3939
3940int t3_replay_prep_adapter(struct adapter *adapter)
3941{
3942 const struct adapter_info *ai = adapter->params.info;
04497982 3943 unsigned int i, j = -1;
204e2f98
DLR
3944 int ret;
3945
3946 early_hw_init(adapter, ai);
3947 ret = init_parity(adapter);
3948 if (ret)
3949 return ret;
3950
3951 for_each_port(adapter, i) {
04497982 3952 const struct port_type_info *pti;
204e2f98 3953 struct port_info *p = adap2pinfo(adapter, i);
204e2f98 3954
04497982
DLR
3955 while (!adapter->params.vpd.port_type[++j])
3956 ;
3957
3958 pti = &port_types[adapter->params.vpd.port_type[j]];
0f07c4ee 3959 ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL);
78e4689e
DLR
3960 if (ret)
3961 return ret;
204e2f98 3962 p->phy.ops->power_down(&p->phy, 1);
204e2f98
DLR
3963 }
3964
3965return 0;
3966}
3967