/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/delay.h>
#include "cxgb4.h"
#include "t4_regs.h"
#include "t4fw_api.h"

/**
 *	t4_wait_op_done_val - wait until an operation is completed
 *	@adapter: the adapter performing the operation
 *	@reg: the register to check for completion
 *	@mask: a single-bit field within @reg that indicates completion
 *	@polarity: the value of the field when the operation is completed
 *	@attempts: number of check iterations
 *	@delay: delay in usecs between iterations
 *	@valp: where to store the value of the register at completion time
 *
 *	Wait until an operation is completed by checking a bit in a register
 *	up to @attempts times.  If @valp is not NULL the value of the register
 *	at the time it indicated completion is stored there.  Returns 0 if the
 *	operation completes and -EAGAIN otherwise.
 */
static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			       int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t4_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}

static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
				  int polarity, int attempts, int delay)
{
	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
				   delay, NULL);
}
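
/*
 * Usage sketch (illustrative only, not called anywhere): the flash and BIST
 * paths below poll a BUSY-style bit with these helpers, e.g. waiting up to
 * SF_ATTEMPTS polls, 5 usecs apart, for the SF_OP BUSY bit to clear:
 *
 *	ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
 *	if (ret)
 *		return ret;	(still busy after all attempts: -EAGAIN)
 */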

/**
 *	t4_set_reg_field - set a register field to a value
 *	@adapter: the adapter to program
 *	@addr: the register address
 *	@mask: specifies the portion of the register to modify
 *	@val: the new value for the register field
 *
 *	Sets a register field specified by the supplied mask to the
 *	given value.
 */
void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t4_read_reg(adapter, addr) & ~mask;

	t4_write_reg(adapter, addr, v | val);
	(void) t4_read_reg(adapter, addr); /* flush */
}
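
/*
 * Usage sketch (illustrative only): a read-modify-write of a single field,
 * e.g. setting this function's bit in the PL interrupt map the way
 * t4_intr_enable() below does:
 *
 *	t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
 */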

/**
 *	t4_read_indirect - read indirectly addressed registers
 *	@adap: the adapter
 *	@addr_reg: register holding the indirect address
 *	@data_reg: register holding the value of the indirect register
 *	@vals: where the read register values are stored
 *	@nregs: how many indirect registers to read
 *	@start_idx: index of first indirect register to read
 *
 *	Reads registers that are accessed indirectly through an address/data
 *	register pair.
 */
static void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
			     unsigned int data_reg, u32 *vals,
			     unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t4_write_reg(adap, addr_reg, start_idx);
		*vals++ = t4_read_reg(adap, data_reg);
		start_idx++;
	}
}
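
/*
 * Usage sketch (illustrative only): the TP MIB counters are read this way
 * in t4_tp_get_tcp_stats() below, via the TP_MIB_INDEX/TP_MIB_DATA pair:
 *
 *	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
 *			 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
 */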

/*
 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
 */
static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
			 u32 mbox_addr)
{
	for ( ; nflit; nflit--, mbox_addr += 8)
		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
}

/*
 * Handle a FW assertion reported in a mailbox.
 */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	dev_alert(adap->pdev_dev,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
		  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
}

static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
{
	dev_err(adap->pdev_dev,
		"mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
		(unsigned long long)t4_read_reg64(adap, data_reg),
		(unsigned long long)t4_read_reg64(adap, data_reg + 8),
		(unsigned long long)t4_read_reg64(adap, data_reg + 16),
		(unsigned long long)t4_read_reg64(adap, data_reg + 24),
		(unsigned long long)t4_read_reg64(adap, data_reg + 32),
		(unsigned long long)t4_read_reg64(adap, data_reg + 40),
		(unsigned long long)t4_read_reg64(adap, data_reg + 48),
		(unsigned long long)t4_read_reg64(adap, data_reg + 56));
}

/**
 *	t4_wr_mbox_meat - send a command to FW through the given mailbox
 *	@adap: the adapter
 *	@mbox: index of the mailbox to use
 *	@cmd: the command to write
 *	@size: command length in bytes
 *	@rpl: where to optionally store the reply
 *	@sleep_ok: if true we may sleep while awaiting command completion
 *
 *	Sends the given command to FW through the selected mailbox and waits
 *	for the FW to execute the command.  If @rpl is not %NULL it is used to
 *	store the FW's reply to the command.  The command and its optional
 *	reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
 *	to respond.  @sleep_ok determines whether we may sleep while awaiting
 *	the response.  If sleeping is allowed we use progressive backoff
 *	otherwise we spin.
 *
 *	The return value is 0 on success or a negative errno on failure.  A
 *	failure can happen either because we are not able to execute the
 *	command or FW executes it but signals an error.  In the latter case
 *	the return value is the error code indicated by FW (negated).
 */
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
		    void *rpl, bool sleep_ok)
{
	static int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	u32 v;
	u64 res;
	int i, ms, delay_idx;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);

	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If the device is off-line, as in EEH, commands will time out.
	 * Fail them early so we don't waste time waiting.
	 */
	if (adap->pdev->error_state != pci_channel_io_normal)
		return -EIO;

	v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));

	if (v != MBOX_OWNER_DRV)
		return v ? -EBUSY : -ETIMEDOUT;

	for (i = 0; i < size; i += 8)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

	t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
	t4_read_reg(adap, ctl_reg); /* flush write */

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx]; /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
			if (!(v & MBMSGVALID)) {
				t4_write_reg(adap, ctl_reg, 0);
				continue;
			}

			res = t4_read_reg64(adap, data_reg);
			if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = FW_CMD_RETVAL(EIO);
			} else if (rpl)
				get_mbox_rpl(adap, rpl, size / 8, data_reg);

			if (FW_CMD_RETVAL_GET((int)res))
				dump_mbox(adap, mbox, data_reg);
			t4_write_reg(adap, ctl_reg, 0);
			return -FW_CMD_RETVAL_GET((int)res);
		}
	}

	dump_mbox(adap, mbox, data_reg);
	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	return -ETIMEDOUT;
}
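
/*
 * Usage sketch (illustrative only): FW commands are fixed-size, big-endian
 * structures sent through this path, typically via the t4_wr_mbox() wrapper
 * used below.  t4_restart_aneg() further down is a minimal example:
 *
 *	struct fw_port_cmd c;
 *
 *	memset(&c, 0, sizeof(c));
 *	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
 *			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
 *	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 */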

/**
 *	t4_mc_read - read from MC through backdoor accesses
 *	@adap: the adapter
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
{
	int i;

	if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
	t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
	t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
	t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
		     BIST_CMD_GAP(1));
	i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, MC_DATA(16));
#undef MC_DATA
	return 0;
}
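
/*
 * Usage sketch (illustrative only): fetch the 64-byte MC block covering an
 * arbitrary address, e.g. for a memory dump; "buf" is hypothetical caller
 * storage:
 *
 *	__be32 buf[16];
 *	u64 ecc;
 *	int err = t4_mc_read(adap, addr, buf, &ecc);
 *
 * On success buf holds the 64-byte-aligned block containing addr,
 * big-endian, and ecc the matching ECC word.
 */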

/**
 *	t4_edc_read - read from EDC through backdoor accesses
 *	@adap: the adapter
 *	@idx: which EDC to access
 *	@addr: address of first byte requested
 *	@data: 64 bytes of data containing the requested address
 *	@ecc: where to store the corresponding 64-bit ECC word
 *
 *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
 *	that covers the requested address @addr.  If @ecc is not %NULL it
 *	is assigned the 64-bit ECC word for the read data.
 */
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
{
	int i;

	idx *= EDC_STRIDE;
	if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
		return -EBUSY;
	t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
	t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
	t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
	t4_write_reg(adap, EDC_BIST_CMD + idx,
		     BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
	i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
	if (i)
		return i;

#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)

	for (i = 15; i >= 0; i--)
		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
	if (ecc)
		*ecc = t4_read_reg64(adap, EDC_DATA(16));
#undef EDC_DATA
	return 0;
}

/*
 * Partial EEPROM Vital Product Data structure.  Includes only the ID and
 * VPD-R header.
 */
struct t4_vpd_hdr {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[ID_LEN];
	u8 vpdr_tag;
	u8 vpdr_len[2];
};

#define EEPROM_STAT_ADDR 0x7bfc
#define VPD_BASE 0
#define VPD_LEN 512

/**
 *	t4_seeprom_wp - enable/disable EEPROM write protection
 *	@adapter: the adapter
 *	@enable: whether to enable or disable write protection
 *
 *	Enables or disables write protection on the serial EEPROM.
 */
int t4_seeprom_wp(struct adapter *adapter, bool enable)
{
	unsigned int v = enable ? 0xc : 0;
	int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
	return ret < 0 ? ret : 0;
}

/**
 *	get_vpd_params - read VPD parameters from VPD EEPROM
 *	@adapter: adapter to read
 *	@p: where to store the parameters
 *
 *	Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, ret;
	int ec, sn, v2;
	u8 vpd[VPD_LEN], csum;
	unsigned int vpdr_len;
	const struct t4_vpd_hdr *v;

	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
	if (ret < 0)
		return ret;

	v = (const struct t4_vpd_hdr *)vpd;
	vpdr_len = pci_vpd_lrdt_size(&v->vpdr_tag);
	if (vpdr_len + sizeof(struct t4_vpd_hdr) > VPD_LEN) {
		dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
		return -EINVAL;
	}

#define FIND_VPD_KW(var, name) do { \
	var = pci_vpd_find_info_keyword(&v->id_tag, sizeof(struct t4_vpd_hdr), \
					vpdr_len, name); \
	if (var < 0) { \
		dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
		return -EINVAL; \
	} \
	var += PCI_VPD_INFO_FLD_HDR_SIZE; \
} while (0)

	FIND_VPD_KW(i, "RV");
	for (csum = 0; i >= 0; i--)
		csum += vpd[i];

	if (csum) {
		dev_err(adapter->pdev_dev,
			"corrupted VPD EEPROM, actual csum %u\n", csum);
		return -EINVAL;
	}

	FIND_VPD_KW(ec, "EC");
	FIND_VPD_KW(sn, "SN");
	FIND_VPD_KW(v2, "V2");
#undef FIND_VPD_KW

	p->cclk = simple_strtoul(vpd + v2, NULL, 10);
	memcpy(p->id, v->id_data, ID_LEN);
	strim(p->id);
	memcpy(p->ec, vpd + ec, EC_LEN);
	strim(p->ec);
	i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
	memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
	strim(p->sn);
	return 0;
}

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */

	FW_MAX_SIZE = 512 * 1024,
};

/**
 *	sf1_read - read data from the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to read
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@valp: where to store the read data
 *
 *	Reads up to 4 bytes of data from the serial flash.  The location of
 *	the read needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    int lock, u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP) & BUSY)
		return -EBUSY;
	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;
	t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
	ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
	if (!ret)
		*valp = t4_read_reg(adapter, SF_DATA);
	return ret;
}

/**
 *	sf1_write - write data to the serial flash
 *	@adapter: the adapter
 *	@byte_cnt: number of bytes to write
 *	@cont: whether another operation will be chained
 *	@lock: whether to lock SF for PL access only
 *	@val: value to write
 *
 *	Writes up to 4 bytes of data to the serial flash.  The location of
 *	the write needs to be specified prior to calling this by issuing the
 *	appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     int lock, u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t4_read_reg(adapter, SF_OP) & BUSY)
		return -EBUSY;
	cont = cont ? SF_CONT : 0;
	lock = lock ? SF_LOCK : 0;
	t4_write_reg(adapter, SF_DATA, val);
	t4_write_reg(adapter, SF_OP, lock |
		     cont | BYTECNT(byte_cnt - 1) | OP_WR);
	return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
}
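
/*
 * Usage sketch (illustrative only): sf1_write()/sf1_read() are chained via
 * @cont to form multi-byte flash transactions.  flash_wait_op() below is
 * the simplest case: issue the 1-byte RDSR opcode with @cont set, then read
 * the 1-byte status with @cont clear to end the transaction:
 *
 *	ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS);
 *	if (!ret)
 *		ret = sf1_read(adapter, 1, 0, 1, &status);
 */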

/**
 *	flash_wait_op - wait for a flash operation to complete
 *	@adapter: the adapter
 *	@attempts: max number of polls of the status register
 *	@delay: delay between polls in ms
 *
 *	Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 *	t4_read_flash - read words from serial flash
 *	@adapter: the adapter
 *	@addr: the start address for the read
 *	@nwords: how many 32-bit words to read
 *	@data: where to store the read data
 *	@byte_oriented: whether to store data as bytes or as words
 *
 *	Read the specified number of 32-bit words from the serial flash.
 *	If @byte_oriented is set the read data is stored as a byte array
 *	(i.e., big-endian), otherwise as 32-bit words in the platform's
 *	natural endianness.
 */
static int t4_read_flash(struct adapter *adapter, unsigned int addr,
			 unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
		return ret;

	for ( ; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
		if (nwords == 1)
			t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}
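
/*
 * Usage sketch (illustrative only): get_fw_version() below reads a single
 * word of the FW header out of flash with this helper:
 *
 *	ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
 *			    offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
 */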

/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, SF_OP, 0); /* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		dev_err(adapter->pdev_dev,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
	return ret;
}

/**
 *	get_fw_version - read the firmware version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the FW version from flash.
 */
static int get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, adapter->params.sf_fw_start +
			     offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
}

/**
 *	get_tp_version - read the TP microcode version
 *	@adapter: the adapter
 *	@vers: where to place the version
 *
 *	Reads the TP microcode version from flash.
 */
static int get_tp_version(struct adapter *adapter, u32 *vers)
{
	return t4_read_flash(adapter, adapter->params.sf_fw_start +
			     offsetof(struct fw_hdr, tp_microcode_ver),
			     1, vers, 0);
}

/**
 *	t4_check_fw_version - check if the FW is compatible with this driver
 *	@adapter: the adapter
 *
 *	Checks if an adapter's FW is compatible with the driver.  Returns 0
 *	if there's an exact match, a negative error if the version could not
 *	be read or there's a major version mismatch, and a positive value if
 *	the expected major version is found but there's a minor version
 *	mismatch.
 */
int t4_check_fw_version(struct adapter *adapter)
{
	u32 api_vers[2];
	int ret, major, minor, micro;

	ret = get_fw_version(adapter, &adapter->params.fw_vers);
	if (!ret)
		ret = get_tp_version(adapter, &adapter->params.tp_vers);
	if (!ret)
		ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
				    offsetof(struct fw_hdr, intfver_nic),
				    2, api_vers, 1);
	if (ret)
		return ret;

	major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
	minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
	micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
	memcpy(adapter->params.api_vers, api_vers,
	       sizeof(adapter->params.api_vers));

	if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
		dev_err(adapter->pdev_dev,
			"card FW has major version %u, driver wants %u\n",
			major, FW_VERSION_MAJOR);
		return -EINVAL;
	}

	if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
		return 0; /* perfect match */

	/* Minor/micro version mismatch.  Report it but often it's OK. */
	return 1;
}
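
/*
 * Usage sketch (illustrative only): a caller distinguishes the three
 * outcomes documented above roughly like this:
 *
 *	ret = t4_check_fw_version(adap);
 *	if (ret < 0)
 *		goto bail;	(unreadable version or major mismatch)
 *	if (ret > 0)
 *		dev_warn(adap->pdev_dev, "minor FW version mismatch\n");
 */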

/**
 *	t4_flash_erase_sectors - erase a range of flash sectors
 *	@adapter: the adapter
 *	@start: the first sector to erase
 *	@end: the last sector to erase
 *
 *	Erases the sectors in the given inclusive range.
 */
static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	int ret = 0;

	while (start <= end) {
		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0, 1,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 14, 500)) != 0) {
			dev_err(adapter->pdev_dev,
				"erase of flash sector %d failed, error %d\n",
				start, ret);
			break;
		}
		start++;
	}
	t4_write_reg(adapter, SF_OP, 0); /* unlock SF */
	return ret;
}

/**
 *	t4_load_fw - download firmware
 *	@adap: the adapter
 *	@fw_data: the firmware image to write
 *	@size: image size
 *
 *	Write the supplied firmware image to the card's serial flash.
 */
int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	int ret, addr;
	unsigned int i;
	u8 first_page[SF_PAGE_SIZE];
	const u32 *p = (const u32 *)fw_data;
	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
	unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
	unsigned int fw_img_start = adap->params.sf_fw_start;
	unsigned int fw_start_sec = fw_img_start / sf_sec_size;

	if (!size) {
		dev_err(adap->pdev_dev, "FW image has no data\n");
		return -EINVAL;
	}
	if (size & 511) {
		dev_err(adap->pdev_dev,
			"FW image size not multiple of 512 bytes\n");
		return -EINVAL;
	}
	if (ntohs(hdr->len512) * 512 != size) {
		dev_err(adap->pdev_dev,
			"FW image size differs from size in FW header\n");
		return -EINVAL;
	}
	if (size > FW_MAX_SIZE) {
		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
			FW_MAX_SIZE);
		return -EFBIG;
	}

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);

	if (csum != 0xffffffff) {
		dev_err(adap->pdev_dev,
			"corrupted firmware image, checksum %#x\n", csum);
		return -EINVAL;
	}

	i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
	ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
	if (ret)
		goto out;

	/*
	 * We write the correct version at the end so the driver can see a bad
	 * version if the FW write fails.  Start by writing a copy of the
	 * first page with a bad version.
	 */
	memcpy(first_page, fw_data, SF_PAGE_SIZE);
	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
	ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
	if (ret)
		goto out;

	addr = fw_img_start;
	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
		addr += SF_PAGE_SIZE;
		fw_data += SF_PAGE_SIZE;
		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
		if (ret)
			goto out;
	}

	ret = t4_write_flash(adap,
			     fw_img_start + offsetof(struct fw_hdr, fw_ver),
			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
out:
	if (ret)
		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
			ret);
	return ret;
}

#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)

/**
 *	t4_link_start - apply link configuration to MAC/PHY
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *	@lc: the requested link configuration
 *
 *	Set up a port's MAC and PHY according to a desired link configuration.
 *	- If the PHY can auto-negotiate first decide what to advertise, then
 *	  enable/disable auto-negotiation as desired, and reset.
 *	- If the PHY does not auto-negotiate just reset it.
 *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *	  otherwise do it later based on the outcome of auto-negotiation.
 */
int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
		  struct link_config *lc)
{
	struct fw_port_cmd c;
	unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);

	lc->link_ok = 0;
	if (lc->requested_fc & PAUSE_RX)
		fc |= FW_PORT_CAP_FC_RX;
	if (lc->requested_fc & PAUSE_TX)
		fc |= FW_PORT_CAP_FC_TX;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));

	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else if (lc->autoneg == AUTONEG_DISABLE) {
		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
	} else
		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);

	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_restart_aneg - restart autonegotiation
 *	@adap: the adapter
 *	@mbox: mbox to use for the FW command
 *	@port: the port id
 *
 *	Restarts autonegotiation for the selected port.
 */
int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
{
	struct fw_port_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
				  FW_LEN16(c));
	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}

struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
};

/**
 *	t4_handle_intr_status - table driven interrupt handler
 *	@adapter: the adapter that generated the interrupt
 *	@reg: the interrupt status register to process
 *	@acts: table of interrupt actions
 *
 *	A table driven interrupt handler that applies a set of masks to an
 *	interrupt status word and performs the corresponding actions if the
 *	interrupts described by the mask have occurred.  The actions include
 *	optionally emitting a warning or alert message.  The table is
 *	terminated by an entry specifying mask 0.  Returns the number of fatal
 *	interrupt conditions.
 */
static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 const struct intr_info *acts)
{
	int fatal = 0;
	unsigned int mask = 0;
	unsigned int status = t4_read_reg(adapter, reg);

	for ( ; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				  status & acts->mask);
		} else if (acts->msg && printk_ratelimit())
			dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
				 status & acts->mask);
		mask |= acts->mask;
	}
	status &= mask;
	if (status) /* clear processed interrupts */
		t4_write_reg(adapter, reg, status);
	return fatal;
}
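
/*
 * Usage sketch (illustrative only): each module handler below pairs a
 * zero-terminated table of { mask, message, stat_idx, fatal } entries with
 * that module's cause register, as tp_intr_handler() does next:
 *
 *	if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
 *		t4_fatal_err(adapter);
 */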

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static struct intr_info sysbus_intr_info[] = {
		{ RNPP, "RXNP array parity error", -1, 1 },
		{ RPCP, "RXPC array parity error", -1, 1 },
		{ RCIP, "RXCIF array parity error", -1, 1 },
		{ RCCP, "Rx completions control array parity error", -1, 1 },
		{ RFTP, "RXFT array parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info pcie_port_intr_info[] = {
		{ TPCP, "TXPC array parity error", -1, 1 },
		{ TNPP, "TXNP array parity error", -1, 1 },
		{ TFTP, "TXFT array parity error", -1, 1 },
		{ TCAP, "TXCA array parity error", -1, 1 },
		{ TCIP, "TXCIF array parity error", -1, 1 },
		{ RCAP, "RXCA array parity error", -1, 1 },
		{ OTDD, "outbound request TLP discarded", -1, 1 },
		{ RDPE, "Rx data parity error", -1, 1 },
		{ TDUE, "Tx uncorrectable data error", -1, 1 },
		{ 0 }
	};
	static struct intr_info pcie_intr_info[] = {
		{ MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
		{ MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
		{ MSIDATAPERR, "MSI data parity error", -1, 1 },
		{ MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
		{ MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
		{ MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
		{ MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
		{ PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
		{ PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
		{ TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
		{ CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
		{ CREQPERR, "PCI CMD channel request parity error", -1, 1 },
		{ CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
		{ DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
		{ DREQPERR, "PCI DMA channel request parity error", -1, 1 },
		{ DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
		{ HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
		{ HREQPERR, "PCI HMA channel request parity error", -1, 1 },
		{ HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
		{ CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
		{ FIDPERR, "PCI FID parity error", -1, 1 },
		{ INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
		{ MATAGPERR, "PCI MA tag parity error", -1, 1 },
		{ PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
		{ RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
		{ RXWRPERR, "PCI Rx write parity error", -1, 1 },
		{ RPLPERR, "PCI replay buffer parity error", -1, 1 },
		{ PCIESINT, "PCI core secondary fault", -1, 1 },
		{ PCIEPINT, "PCI core primary fault", -1, 1 },
		{ UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter,
				    PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
				    sysbus_intr_info) +
	      t4_handle_intr_status(adapter,
				    PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
				    pcie_port_intr_info) +
	      t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
	if (fat)
		t4_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static struct intr_info tp_intr_info[] = {
		{ 0x3fffffff, "TP parity error", -1, 1 },
		{ FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
		t4_fatal_err(adapter);
}

/*
 * SGE interrupt handler.
 */
static void sge_intr_handler(struct adapter *adapter)
{
	u64 v;

	static struct intr_info sge_intr_info[] = {
		{ ERR_CPL_EXCEED_IQE_SIZE,
		  "SGE received CPL exceeding IQE size", -1, 1 },
		{ ERR_INVALID_CIDX_INC,
		  "SGE GTS CIDX increment too large", -1, 0 },
		{ ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
		{ ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
		{ ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
		{ ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
		  0 },
		{ ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
		  0 },
		{ ERR_ING_CTXT_PRIO,
		  "SGE too many priority ingress contexts", -1, 0 },
		{ ERR_EGR_CTXT_PRIO,
		  "SGE too many priority egress contexts", -1, 0 },
		{ INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
		{ EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
		{ 0 }
	};

	v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
	    ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
	if (v) {
		dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
			  (unsigned long long)v);
		t4_write_reg(adapter, SGE_INT_CAUSE1, v);
		t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
	}

	if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
	    v != 0)
		t4_fatal_err(adapter);
}

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static struct intr_info cim_intr_info[] = {
		{ PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
		{ OBQPARERR, "CIM OBQ parity error", -1, 1 },
		{ IBQPARERR, "CIM IBQ parity error", -1, 1 },
		{ MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
		{ MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
		{ TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
		{ TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info cim_upintr_info[] = {
		{ RSVDSPACEINT, "CIM reserved space access", -1, 1 },
		{ ILLTRANSINT, "CIM illegal transaction", -1, 1 },
		{ ILLWRINT, "CIM illegal write", -1, 1 },
		{ ILLRDINT, "CIM illegal read", -1, 1 },
		{ ILLRDBEINT, "CIM illegal read BE", -1, 1 },
		{ ILLWRBEINT, "CIM illegal write BE", -1, 1 },
		{ SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
		{ SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
		{ BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
		{ SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
		{ SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
		{ BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
		{ SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
		{ SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
		{ BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
		{ BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
		{ SGLRDCTLINT, "CIM single read from CTL space", -1, 1 },
		{ SGLWRCTLINT, "CIM single write to CTL space", -1, 1 },
		{ BLKRDCTLINT, "CIM block read from CTL space", -1, 1 },
		{ BLKWRCTLINT, "CIM block write to CTL space", -1, 1 },
		{ SGLRDPLINT, "CIM single read from PL space", -1, 1 },
		{ SGLWRPLINT, "CIM single write to PL space", -1, 1 },
		{ BLKRDPLINT, "CIM block read from PL space", -1, 1 },
		{ BLKWRPLINT, "CIM block write to PL space", -1, 1 },
		{ REQOVRLOOKUPINT, "CIM request FIFO overwrite", -1, 1 },
		{ RSPOVRLOOKUPINT, "CIM response FIFO overwrite", -1, 1 },
		{ TIMEOUTINT, "CIM PIF timeout", -1, 1 },
		{ TIMEOUTMAINT, "CIM PIF MA timeout", -1, 1 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
				    cim_intr_info) +
	      t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
				    cim_upintr_info);
	if (fat)
		t4_fatal_err(adapter);
}

/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static struct intr_info ulprx_intr_info[] = {
		{ 0x1800000, "ULPRX context error", -1, 1 },
		{ 0x7fffff, "ULPRX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static struct intr_info ulptx_intr_info[] = {
		{ PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
		  0 },
		{ PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
		  0 },
		{ 0xfffffff, "ULPTX parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static struct intr_info pmtx_intr_info[] = {
		{ PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
		{ PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
		{ ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
		{ PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
		{ OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
		{ ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
		{ C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static struct intr_info pmrx_intr_info[] = {
		{ ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
		{ PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
		{ OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
		{ DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
		{ IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
		{ E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
		t4_fatal_err(adapter);
}

/*
 * CPL switch interrupt handler.
 */
static void cplsw_intr_handler(struct adapter *adapter)
{
	static struct intr_info cplsw_intr_info[] = {
		{ CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
		{ CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
		{ TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
		{ SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
		{ CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
		{ ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
		t4_fatal_err(adapter);
}

/*
 * LE interrupt handler.
 */
static void le_intr_handler(struct adapter *adap)
{
	static struct intr_info le_intr_info[] = {
		{ LIPMISS, "LE LIP miss", -1, 0 },
		{ LIP0, "LE 0 LIP error", -1, 0 },
		{ PARITYERR, "LE parity error", -1, 1 },
		{ UNKNOWNCMD, "LE unknown command", -1, 1 },
		{ REQQPARERR, "LE request queue parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
		t4_fatal_err(adap);
}

/*
 * MPS interrupt handler.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	static struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info mps_tx_intr_info[] = {
		{ TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
		{ NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
		{ TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
		{ BUBBLE, "MPS Tx underflow", -1, 1 },
		{ SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
		{ FRMERR, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	static struct intr_info mps_trc_intr_info[] = {
		{ FILTMEM, "MPS TRC filter parity error", -1, 1 },
		{ PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
		{ MISCPERR, "MPS TRC misc parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static struct intr_info mps_cls_intr_info[] = {
		{ MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
		{ MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
		{ HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
		{ 0 }
	};

	int fat;

	fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
				    mps_rx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
				    mps_tx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
				    mps_trc_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
				    mps_stat_sram_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
				    mps_stat_tx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
				    mps_stat_rx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
				    mps_cls_intr_info);

	t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
		     RXINT | TXINT | STATINT);
	t4_read_reg(adapter, MPS_INT_CAUSE); /* flush */
	if (fat)
		t4_fatal_err(adapter);
}

#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)

/*
 * EDC/MC interrupt handler.
 */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	static const char name[3][5] = { "EDC0", "EDC1", "MC" };

	unsigned int addr, cnt_addr, v;

	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
	} else {
		addr = MC_INT_CAUSE;
		cnt_addr = MC_ECC_STATUS;
	}

	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE)
		dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
			  name[idx]);
	if (v & ECC_CE_INT_CAUSE) {
		u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));

		t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
		if (printk_ratelimit())
			dev_warn(adapter->pdev_dev,
				 "%u %s correctable ECC data error%s\n",
				 cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE)
		dev_alert(adapter->pdev_dev,
			  "%s uncorrectable ECC data error\n", name[idx]);

	t4_write_reg(adapter, addr, v);
	if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
		t4_fatal_err(adapter);
}

/*
 * MA interrupt handler.
 */
static void ma_intr_handler(struct adapter *adap)
{
	u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);

	if (status & MEM_PERR_INT_CAUSE)
		dev_alert(adap->pdev_dev,
			  "MA parity error, parity status %#x\n",
			  t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
	if (status & MEM_WRAP_INT_CAUSE) {
		v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
		dev_alert(adap->pdev_dev, "MA address wrap-around error by "
			  "client %u to address %#x\n",
			  MEM_WRAP_CLIENT_NUM_GET(v),
			  MEM_WRAP_ADDRESS_GET(v) << 4);
	}
	t4_write_reg(adap, MA_INT_CAUSE, status);
	t4_fatal_err(adap);
}

/*
 * SMB interrupt handler.
 */
static void smb_intr_handler(struct adapter *adap)
{
	static struct intr_info smb_intr_info[] = {
		{ MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
		{ MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
		{ SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
		t4_fatal_err(adap);
}

/*
 * NC-SI interrupt handler.
 */
static void ncsi_intr_handler(struct adapter *adap)
{
	static struct intr_info ncsi_intr_info[] = {
		{ CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
		{ MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
		{ TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
		{ RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
		t4_fatal_err(adap);
}

/*
 * XGMAC interrupt handler.
 */
static void xgmac_intr_handler(struct adapter *adap, int port)
{
	u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));

	v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
	if (!v)
		return;

	if (v & TXFIFO_PRTY_ERR)
		dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
			  port);
	if (v & RXFIFO_PRTY_ERR)
		dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
			  port);
	t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
	t4_fatal_err(adap);
}

/*
 * PL interrupt handler.
 */
static void pl_intr_handler(struct adapter *adap)
{
	static struct intr_info pl_intr_info[] = {
		{ FATALPERR, "T4 fatal parity error", -1, 1 },
		{ PERRVFID, "PL VFID_MAP parity error", -1, 1 },
		{ 0 }
	};

	if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
		t4_fatal_err(adap);
}

#define PF_INTR_MASK (PFSW)
#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
		EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
		CPL_SWITCH | SGE | ULP_TX)

/**
 *	t4_slow_intr_handler - control path interrupt handler
 *	@adapter: the adapter
 *
 *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
 *	The designation 'slow' is because it involves register reads, while
 *	data interrupts typically don't involve any MMIOs.
 */
int t4_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);

	if (!(cause & GLBL_INTR_MASK))
		return 0;
	if (cause & CIM)
		cim_intr_handler(adapter);
	if (cause & MPS)
		mps_intr_handler(adapter);
	if (cause & NCSI)
		ncsi_intr_handler(adapter);
	if (cause & PL)
		pl_intr_handler(adapter);
	if (cause & SMB)
		smb_intr_handler(adapter);
	if (cause & XGMAC0)
		xgmac_intr_handler(adapter, 0);
	if (cause & XGMAC1)
		xgmac_intr_handler(adapter, 1);
	if (cause & XGMAC_KR0)
		xgmac_intr_handler(adapter, 2);
	if (cause & XGMAC_KR1)
		xgmac_intr_handler(adapter, 3);
	if (cause & PCIE)
		pcie_intr_handler(adapter);
	if (cause & MC)
		mem_intr_handler(adapter, MEM_MC);
	if (cause & EDC0)
		mem_intr_handler(adapter, MEM_EDC0);
	if (cause & EDC1)
		mem_intr_handler(adapter, MEM_EDC1);
	if (cause & LE)
		le_intr_handler(adapter);
	if (cause & TP)
		tp_intr_handler(adapter);
	if (cause & MA)
		ma_intr_handler(adapter);
	if (cause & PM_TX)
		pmtx_intr_handler(adapter);
	if (cause & PM_RX)
		pmrx_intr_handler(adapter);
	if (cause & ULP_RX)
		ulprx_intr_handler(adapter);
	if (cause & CPL_SWITCH)
		cplsw_intr_handler(adapter);
	if (cause & SGE)
		sge_intr_handler(adapter);
	if (cause & ULP_TX)
		ulptx_intr_handler(adapter);

	/* Clear the interrupts just processed for which we are the master. */
	t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
	(void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
	return 1;
}

/**
 *	t4_intr_enable - enable interrupts
 *	@adapter: the adapter whose interrupts should be enabled
 *
 *	Enable PF-specific interrupts for the calling function and the top-level
 *	interrupt concentrator for global interrupts.  Interrupts are already
 *	enabled at each module, here we just enable the roots of the interrupt
 *	hierarchies.
 *
 *	Note: this function should be called only when the driver manages
 *	non PF-specific interrupts from the various HW modules.  Only one PCI
 *	function at a time should be doing this.
 */
void t4_intr_enable(struct adapter *adapter)
{
	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));

	t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
		     ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
		     ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
		     ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
		     ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
		     ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
		     ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
		     EGRESS_SIZE_ERR);
	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
	t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
}

/**
 *	t4_intr_disable - disable interrupts
 *	@adapter: the adapter whose interrupts should be disabled
 *
 *	Disable interrupts.  We only disable the top-level interrupt
 *	concentrators.  The caller must be a PCI function managing global
 *	interrupts.
 */
void t4_intr_disable(struct adapter *adapter)
{
	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));

	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
	t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
}

/**
 *	hash_mac_addr - return the hash value of a MAC address
 *	@addr: the 48-bit Ethernet MAC address
 *
 *	Hashes a MAC address according to the hash function used by HW inexact
 *	(hash) address matching.
 */
static int hash_mac_addr(const u8 *addr)
{
	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
	a ^= b;
	a ^= (a >> 12);
	a ^= (a >> 6);
	return a & 0x3f;
}
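
/*
 * Worked example (illustrative only): for 00:11:22:33:44:55 the two 24-bit
 * halves are a = 0x001122 and b = 0x334455, so a ^= b gives 0x335577; the
 * fold 0x335577 ^ (0x335577 >> 12) = 0x335642, then
 * 0x335642 ^ (0x335642 >> 6) = 0x339b1b, and 0x339b1b & 0x3f = 0x1b, i.e.
 * bucket 27 of the 64-entry inexact-match hash filter.
 */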

/**
 *	t4_config_rss_range - configure a portion of the RSS mapping table
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@viid: virtual interface whose RSS subtable is to be written
 *	@start: start entry in the table to write
 *	@n: how many table entries to write
 *	@rspq: values for the response queue lookup table
 *	@nrspq: number of values in @rspq
 *
 *	Programs the selected part of the VI's RSS mapping table with the
 *	provided values.  If @nrspq < @n the supplied values are used repeatedly
 *	until the full table range is populated.
 *
 *	The caller must ensure the values in @rspq are in the range allowed for
 *	@viid.
 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
			       FW_CMD_REQUEST | FW_CMD_WRITE |
			       FW_RSS_IND_TBL_CMD_VIID(viid));
	cmd.retval_len16 = htonl(FW_LEN16(cmd));

	/* each fw_rss_ind_tbl_cmd takes up to 32 entries */
	while (n > 0) {
		int nq = min(n, 32);
		__be32 *qp = &cmd.iq0_to_iq2;

		cmd.niqid = htons(nq);
		cmd.startidx = htons(start);

		start += nq;
		n -= nq;

		while (nq > 0) {
			unsigned int v;

			v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;
			v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;
			v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;

			*qp++ = htonl(v);
			nq -= 3;
		}

		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}
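
/*
 * Usage sketch (illustrative only; "rss", "pi", "iq0".."iq3" are
 * hypothetical caller state): spread a VI's 128-entry indirection table
 * across four response queues by letting the supplied values repeat:
 *
 *	u16 rss[4] = { iq0, iq1, iq2, iq3 };
 *
 *	err = t4_config_rss_range(adap, mbox, pi->viid, 0, 128,
 *				  rss, ARRAY_SIZE(rss));
 */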

/**
 *	t4_config_glbl_rss - configure the global RSS mode
 *	@adapter: the adapter
 *	@mbox: mbox to use for the FW command
 *	@mode: global RSS mode
 *	@flags: mode-specific flags
 *
 *	Sets the global RSS mode.
 */
int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
		       unsigned int flags)
{
	struct fw_rss_glb_config_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
			      FW_CMD_REQUEST | FW_CMD_WRITE);
	c.retval_len16 = htonl(FW_LEN16(c));
	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
		c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
		c.u.basicvirtual.mode_pkd =
			htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
	} else
		return -EINVAL;
	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
}

/**
 *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
 *	@adap: the adapter
 *	@v4: holds the TCP/IP counter values
 *	@v6: holds the TCP/IPv6 counter values
 *
 *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
 *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
 */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];

#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
#define STAT(x) val[STAT_IDX(x)]
#define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

	if (v4) {
		t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
				 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
		v4->tcpOutRsts = STAT(OUT_RST);
		v4->tcpInSegs = STAT64(IN_SEG);
		v4->tcpOutSegs = STAT64(OUT_SEG);
		v4->tcpRetransSegs = STAT64(RXT_SEG);
	}
	if (v6) {
		t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
				 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
		v6->tcpOutRsts = STAT(OUT_RST);
		v6->tcpInSegs = STAT64(IN_SEG);
		v6->tcpOutSegs = STAT64(OUT_SEG);
		v6->tcpRetransSegs = STAT64(RXT_SEG);
	}
#undef STAT64
#undef STAT
#undef STAT_IDX
}
1688
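/*
 * Example (sketch): either pointer may be %NULL, so a caller interested
 * only in the combined retransmission count might do:
 *
 *	struct tp_tcp_stats v4, v6;
 *
 *	t4_tp_get_tcp_stats(adap, &v4, &v6);
 *	retrans = v4.tcpRetransSegs + v6.tcpRetransSegs;
 */
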
1689/**
1690 * t4_read_mtu_tbl - returns the values in the HW path MTU table
1691 * @adap: the adapter
1692 * @mtus: where to store the MTU values
1693 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
1694 *
1695 * Reads the HW path MTU table.
1696 */
1697void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
1698{
1699 u32 v;
1700 int i;
1701
1702 for (i = 0; i < NMTUS; ++i) {
1703 t4_write_reg(adap, TP_MTU_TABLE,
1704 MTUINDEX(0xff) | MTUVALUE(i));
1705 v = t4_read_reg(adap, TP_MTU_TABLE);
1706 mtus[i] = MTUVALUE_GET(v);
1707 if (mtu_log)
1708 mtu_log[i] = MTUWIDTH_GET(v);
1709 }
1710}
1711
1712/**
1713 * init_cong_ctrl - initialize congestion control parameters
1714 * @a: the alpha values for congestion control
1715 * @b: the beta values for congestion control
1716 *
1717 * Initialize the congestion control parameters.
1718 */
1719static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
1720{
1721 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
1722 a[9] = 2;
1723 a[10] = 3;
1724 a[11] = 4;
1725 a[12] = 5;
1726 a[13] = 6;
1727 a[14] = 7;
1728 a[15] = 8;
1729 a[16] = 9;
1730 a[17] = 10;
1731 a[18] = 14;
1732 a[19] = 17;
1733 a[20] = 21;
1734 a[21] = 25;
1735 a[22] = 30;
1736 a[23] = 35;
1737 a[24] = 45;
1738 a[25] = 60;
1739 a[26] = 80;
1740 a[27] = 100;
1741 a[28] = 200;
1742 a[29] = 300;
1743 a[30] = 400;
1744 a[31] = 500;
1745
1746 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
1747 b[9] = b[10] = 1;
1748 b[11] = b[12] = 2;
1749 b[13] = b[14] = b[15] = b[16] = 3;
1750 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
1751 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
1752 b[28] = b[29] = 6;
1753 b[30] = b[31] = 7;
1754}
1755
1756/* The minimum additive increment value for the congestion control table */
1757#define CC_MIN_INCR 2U
1758
1759/**
1760 * t4_load_mtus - write the MTU and congestion control HW tables
1761 * @adap: the adapter
1762 * @mtus: the values for the MTU table
1763 * @alpha: the values for the congestion control alpha parameter
1764 * @beta: the values for the congestion control beta parameter
1765 *
1766 * Write the HW MTU table with the supplied MTUs and the high-speed
1767 * congestion control table with the supplied alpha, beta, and MTUs.
1768 * We write the two tables together because the additive increments
1769 * depend on the MTUs.
1770 */
1771void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
1772 const unsigned short *alpha, const unsigned short *beta)
1773{
1774 static const unsigned int avg_pkts[NCCTRL_WIN] = {
1775 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
1776 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
1777 28672, 40960, 57344, 81920, 114688, 163840, 229376
1778 };
1779
1780 unsigned int i, w;
1781
1782 for (i = 0; i < NMTUS; ++i) {
1783 unsigned int mtu = mtus[i];
1784 unsigned int log2 = fls(mtu);
1785
1786 if (!(mtu & ((1 << log2) >> 2))) /* round */
1787 log2--;
1788 t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
1789 MTUWIDTH(log2) | MTUVALUE(mtu));
1790
1791 for (w = 0; w < NCCTRL_WIN; ++w) {
1792 unsigned int inc;
1793
1794 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
1795 CC_MIN_INCR);
1796
1797 t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
1798 (w << 16) | (beta[w] << 13) | inc);
1799 }
1800 }
1801}
1802
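/*
 * Worked example for the increment computed above, with illustrative
 * numbers: mtu = 1500, alpha[w] = 2 and avg_pkts[w] = 20 give
 * inc = max((1500 - 40) * 2 / 20, CC_MIN_INCR) = max(146, 2) = 146.
 * The clamp matters for small alpha values and large windows, where the
 * quotient can fall below CC_MIN_INCR.
 */
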
1803/**
1804 * get_mps_bg_map - return the buffer groups associated with a port
1805 * @adap: the adapter
1806 * @idx: the port index
1807 *
1808 * Returns a bitmap indicating which MPS buffer groups are associated
1809 * with the given port. Bit i is set if buffer group i is used by the
1810 * port.
1811 */
1812static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
1813{
1814 u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
1815
1816 if (n == 0)
1817 return idx == 0 ? 0xf : 0;
1818 if (n == 1)
1819 return idx < 2 ? (3 << (2 * idx)) : 0;
1820 return 1 << idx;
1821}
1822
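/*
 * Decoded, the mapping above is: a single-port configuration (NUMPORTS
 * field 0) gives port 0 all four buffer groups (0xf); a two-port
 * configuration gives ports 0 and 1 a pair each (0x3 and 0xc); with more
 * ports each owns the single buffer group matching its index.
 */
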
1823/**
1824 * t4_get_port_stats - collect port statistics
1825 * @adap: the adapter
1826 * @idx: the port index
1827 * @p: the stats structure to fill
1828 *
1829 * Collect statistics related to the given port from HW.
1830 */
1831void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
1832{
1833 u32 bgmap = get_mps_bg_map(adap, idx);
1834
1835#define GET_STAT(name) \
1836 t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
1837#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
1838
1839 p->tx_octets = GET_STAT(TX_PORT_BYTES);
1840 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
1841 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
1842 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
1843 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
1844 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
1845 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
1846 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
1847 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
1848 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
1849 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
1850 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
1851 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
1852 p->tx_drop = GET_STAT(TX_PORT_DROP);
1853 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
1854 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
1855 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
1856 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
1857 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
1858 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
1859 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
1860 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
1861 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
1862
1863 p->rx_octets = GET_STAT(RX_PORT_BYTES);
1864 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
1865 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
1866 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
1867 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
1868 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
1869 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
1870 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
1871 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
1872 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
1873 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
1874 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
1875 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
1876 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
1877 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
1878 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
1879 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
1880 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
1881 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
1882 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
1883 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
1884 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
1885 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
1886 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
1887 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
1888 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
1889 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
1890
1891 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
1892 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
1893 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
1894 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
1895 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
1896 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
1897 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
1898 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
1899
1900#undef GET_STAT
1901#undef GET_STAT_COM
1902}
1903
1904/**
1905 * t4_wol_magic_enable - enable/disable magic packet WoL
1906 * @adap: the adapter
1907 * @port: the physical port index
1908 * @addr: MAC address expected in magic packets, %NULL to disable
1909 *
1910 * Enables/disables magic packet wake-on-LAN for the selected port.
1911 */
1912void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
1913 const u8 *addr)
1914{
1915 if (addr) {
1916 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
1917 (addr[2] << 24) | (addr[3] << 16) |
1918 (addr[4] << 8) | addr[5]);
1919 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
1920 (addr[0] << 8) | addr[1]);
1921 }
1922 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
1923 addr ? MAGICEN : 0);
1924}
1925
1926/**
1927 * t4_wol_pat_enable - enable/disable pattern-based WoL
1928 * @adap: the adapter
1929 * @port: the physical port index
1930 * @map: bitmap of which HW pattern filters to set
1931 * @mask0: byte mask for bytes 0-63 of a packet
1932 * @mask1: byte mask for bytes 64-127 of a packet
1933 * @crc: Ethernet CRC for selected bytes
1934 * @enable: enable/disable switch
1935 *
1936 * Sets the pattern filters indicated in @map to mask out the bytes
1937 * specified in @mask0/@mask1 in received packets and compare the CRC of
1938 * the resulting packet against @crc. If @enable is %true pattern-based
1939 * WoL is enabled, otherwise disabled.
1940 */
1941int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
1942 u64 mask0, u64 mask1, unsigned int crc, bool enable)
1943{
1944 int i;
1945
1946 if (!enable) {
1947 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
1948 PATEN, 0);
1949 return 0;
1950 }
1951 if (map > 0xff)
1952 return -EINVAL;
1953
1954#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)
1955
1956 t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
1957 t4_write_reg(adap, EPIO_REG(DATA2), mask1);
1958 t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
1959
1960 for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
1961 if (!(map & 1))
1962 continue;
1963
1964 /* write byte masks */
1965 t4_write_reg(adap, EPIO_REG(DATA0), mask0);
1966 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
1967 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
1968 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
1969 return -ETIMEDOUT;
1970
1971 /* write CRC */
1972 t4_write_reg(adap, EPIO_REG(DATA0), crc);
1973 t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
1974 t4_read_reg(adap, EPIO_REG(OP)); /* flush */
1975 if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
1976 return -ETIMEDOUT;
1977 }
1978#undef EPIO_REG
1979
1980 t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
1981 return 0;
1982}
1983
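/*
 * Illustrative call (placeholder values): arm pattern filter 0 to match
 * on the first 16 bytes of a packet. Per the byte-mask parameters above,
 * @mask0/@mask1 carry one bit per packet byte, so 0xffff selects bytes
 * 0-15; @crc is assumed to be the Ethernet CRC the caller computed over
 * those selected bytes.
 *
 *	err = t4_wol_pat_enable(adap, port, 1, 0xffffULL, 0, crc, true);
 */
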
1984#define INIT_CMD(var, cmd, rd_wr) do { \
1985 (var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
1986 FW_CMD_REQUEST | FW_CMD_##rd_wr); \
1987 (var).retval_len16 = htonl(FW_LEN16(var)); \
1988} while (0)
1989
1990/**
1991 * t4_mdio_rd - read a PHY register through MDIO
1992 * @adap: the adapter
1993 * @mbox: mailbox to use for the FW command
1994 * @phy_addr: the PHY address
1995 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
1996 * @reg: the register to read
1997 * @valp: where to store the value
1998 *
1999 * Issues a FW command through the given mailbox to read a PHY register.
2000 */
2001int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2002 unsigned int mmd, unsigned int reg, u16 *valp)
2003{
2004 int ret;
2005 struct fw_ldst_cmd c;
2006
2007 memset(&c, 0, sizeof(c));
2008 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2009 FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2010 c.cycles_to_len16 = htonl(FW_LEN16(c));
2011 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2012 FW_LDST_CMD_MMD(mmd));
2013 c.u.mdio.raddr = htons(reg);
2014
2015 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2016 if (ret == 0)
2017 *valp = ntohs(c.u.mdio.rval);
2018 return ret;
2019}
2020
2021/**
2022 * t4_mdio_wr - write a PHY register through MDIO
2023 * @adap: the adapter
2024 * @mbox: mailbox to use for the FW command
2025 * @phy_addr: the PHY address
2026 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
2027 * @reg: the register to write
2028 * @valp: value to write
2029 *
2030 * Issues a FW command through the given mailbox to write a PHY register.
2031 */
2032int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
2033 unsigned int mmd, unsigned int reg, u16 val)
2034{
2035 struct fw_ldst_cmd c;
2036
2037 memset(&c, 0, sizeof(c));
2038 c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
2039 FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
2040 c.cycles_to_len16 = htonl(FW_LEN16(c));
2041 c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
2042 FW_LDST_CMD_MMD(mmd));
2043 c.u.mdio.raddr = htons(reg);
2044 c.u.mdio.rval = htons(val);
2045
2046 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2047}
2048
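/*
 * Example (sketch): reading the PMA/PMD status register of a clause-45
 * PHY, using the generic MDIO constants from linux/mdio.h.
 *
 *	u16 stat;
 *
 *	err = t4_mdio_rd(adap, mbox, phy_addr, MDIO_MMD_PMAPMD, MDIO_STAT1,
 *			 &stat);
 */
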
2049/**
2050 * t4_fw_hello - establish communication with FW
2051 * @adap: the adapter
2052 * @mbox: mailbox to use for the FW command
2053 * @evt_mbox: mailbox to receive async FW events
2054 * @master: specifies the caller's willingness to be the device master
2055 * @state: returns the current device state
2056 *
2057 * Issues a command to establish communication with FW.
2058 */
2059int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
2060 enum dev_master master, enum dev_state *state)
2061{
2062 int ret;
2063 struct fw_hello_cmd c;
2064
2065 INIT_CMD(c, HELLO, WRITE);
2066 c.err_to_mbasyncnot = htonl(
2067 FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
2068 FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
2069 FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) |
2070 FW_HELLO_CMD_MBASYNCNOT(evt_mbox));
2071
2072 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2073 if (ret == 0 && state) {
2074 u32 v = ntohl(c.err_to_mbasyncnot);
2075 if (v & FW_HELLO_CMD_INIT)
2076 *state = DEV_STATE_INIT;
2077 else if (v & FW_HELLO_CMD_ERR)
2078 *state = DEV_STATE_ERR;
2079 else
2080 *state = DEV_STATE_UNINIT;
2081 }
2082 return ret;
2083}
2084
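/*
 * Typical negotiation (sketch): offer to become master and inspect the
 * device state FW reports back before deciding what initialization is
 * still needed. Using the same mailbox for async events is a caller
 * choice, not a requirement.
 *
 *	enum dev_state state;
 *
 *	err = t4_fw_hello(adap, mbox, mbox, MASTER_MAY, &state);
 *	if (!err && state == DEV_STATE_UNINIT)
 *		err = t4_early_init(adap, mbox);
 */
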
2085/**
2086 * t4_fw_bye - end communication with FW
2087 * @adap: the adapter
2088 * @mbox: mailbox to use for the FW command
2089 *
2090 * Issues a command to terminate communication with FW.
2091 */
2092int t4_fw_bye(struct adapter *adap, unsigned int mbox)
2093{
2094 struct fw_bye_cmd c;
2095
2096 INIT_CMD(c, BYE, WRITE);
2097 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2098}
2099
2100/**
 2101 * t4_early_init - ask FW to initialize the device
2102 * @adap: the adapter
2103 * @mbox: mailbox to use for the FW command
2104 *
2105 * Issues a command to FW to partially initialize the device. This
2106 * performs initialization that generally doesn't depend on user input.
2107 */
2108int t4_early_init(struct adapter *adap, unsigned int mbox)
2109{
2110 struct fw_initialize_cmd c;
2111
2112 INIT_CMD(c, INITIALIZE, WRITE);
2113 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2114}
2115
2116/**
2117 * t4_fw_reset - issue a reset to FW
2118 * @adap: the adapter
2119 * @mbox: mailbox to use for the FW command
2120 * @reset: specifies the type of reset to perform
2121 *
2122 * Issues a reset command of the specified type to FW.
2123 */
2124int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
2125{
2126 struct fw_reset_cmd c;
2127
2128 INIT_CMD(c, RESET, WRITE);
2129 c.val = htonl(reset);
2130 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2131}
2132
2133/**
2134 * t4_query_params - query FW or device parameters
2135 * @adap: the adapter
2136 * @mbox: mailbox to use for the FW command
2137 * @pf: the PF
2138 * @vf: the VF
2139 * @nparams: the number of parameters
2140 * @params: the parameter names
2141 * @val: the parameter values
2142 *
2143 * Reads the value of FW or device parameters. Up to 7 parameters can be
2144 * queried at once.
2145 */
2146int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2147 unsigned int vf, unsigned int nparams, const u32 *params,
2148 u32 *val)
2149{
2150 int i, ret;
2151 struct fw_params_cmd c;
2152 __be32 *p = &c.param[0].mnem;
2153
2154 if (nparams > 7)
2155 return -EINVAL;
2156
2157 memset(&c, 0, sizeof(c));
2158 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2159 FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
2160 FW_PARAMS_CMD_VFN(vf));
2161 c.retval_len16 = htonl(FW_LEN16(c));
2162 for (i = 0; i < nparams; i++, p += 2)
2163 *p = htonl(*params++);
2164
2165 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2166 if (ret == 0)
2167 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
2168 *val++ = ntohl(*p);
2169 return ret;
2170}
2171
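/*
 * Example (sketch): querying a single device parameter. The mnemonic
 * encoding follows the FW_PARAMS_* macros in t4fw_api.h; the core-clock
 * parameter (DEV_CCLK) is shown here for illustration.
 *
 *	u32 param, val;
 *
 *	param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *		FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CCLK);
 *	err = t4_query_params(adap, mbox, pf, 0, 1, &param, &val);
 */
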
2172/**
2173 * t4_set_params - sets FW or device parameters
2174 * @adap: the adapter
2175 * @mbox: mailbox to use for the FW command
2176 * @pf: the PF
2177 * @vf: the VF
2178 * @nparams: the number of parameters
2179 * @params: the parameter names
2180 * @val: the parameter values
2181 *
2182 * Sets the value of FW or device parameters. Up to 7 parameters can be
2183 * specified at once.
2184 */
2185int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
2186 unsigned int vf, unsigned int nparams, const u32 *params,
2187 const u32 *val)
2188{
2189 struct fw_params_cmd c;
2190 __be32 *p = &c.param[0].mnem;
2191
2192 if (nparams > 7)
2193 return -EINVAL;
2194
2195 memset(&c, 0, sizeof(c));
2196 c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
2197 FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
2198 FW_PARAMS_CMD_VFN(vf));
2199 c.retval_len16 = htonl(FW_LEN16(c));
2200 while (nparams--) {
2201 *p++ = htonl(*params++);
2202 *p++ = htonl(*val++);
2203 }
2204
2205 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2206}
2207
2208/**
2209 * t4_cfg_pfvf - configure PF/VF resource limits
2210 * @adap: the adapter
2211 * @mbox: mailbox to use for the FW command
2212 * @pf: the PF being configured
2213 * @vf: the VF being configured
2214 * @txq: the max number of egress queues
2215 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
2216 * @rxqi: the max number of interrupt-capable ingress queues
2217 * @rxq: the max number of interruptless ingress queues
2218 * @tc: the PCI traffic class
2219 * @vi: the max number of virtual interfaces
2220 * @cmask: the channel access rights mask for the PF/VF
2221 * @pmask: the port access rights mask for the PF/VF
2222 * @nexact: the maximum number of exact MPS filters
2223 * @rcaps: read capabilities
2224 * @wxcaps: write/execute capabilities
2225 *
2226 * Configures resource limits and capabilities for a physical or virtual
2227 * function.
2228 */
2229int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
2230 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
2231 unsigned int rxqi, unsigned int rxq, unsigned int tc,
2232 unsigned int vi, unsigned int cmask, unsigned int pmask,
2233 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
2234{
2235 struct fw_pfvf_cmd c;
2236
2237 memset(&c, 0, sizeof(c));
2238 c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
2239 FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
2240 FW_PFVF_CMD_VFN(vf));
2241 c.retval_len16 = htonl(FW_LEN16(c));
2242 c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
2243 FW_PFVF_CMD_NIQ(rxq));
 2244 c.type_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
2245 FW_PFVF_CMD_PMASK(pmask) |
2246 FW_PFVF_CMD_NEQ(txq));
2247 c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
2248 FW_PFVF_CMD_NEXACTF(nexact));
2249 c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
2250 FW_PFVF_CMD_WX_CAPS(wxcaps) |
2251 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
2252 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2253}
2254
2255/**
2256 * t4_alloc_vi - allocate a virtual interface
2257 * @adap: the adapter
2258 * @mbox: mailbox to use for the FW command
2259 * @port: physical port associated with the VI
2260 * @pf: the PF owning the VI
2261 * @vf: the VF owning the VI
2262 * @nmac: number of MAC addresses needed (1 to 5)
2263 * @mac: the MAC addresses of the VI
2264 * @rss_size: size of RSS table slice associated with this VI
2265 *
2266 * Allocates a virtual interface for the given physical port. If @mac is
2267 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
 2268 * @mac should be large enough to hold @nmac Ethernet addresses; they are
 2269 * stored consecutively, so the space needed is @nmac * 6 bytes.
2270 * Returns a negative error number or the non-negative VI id.
2271 */
2272int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
2273 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
2274 unsigned int *rss_size)
2275{
2276 int ret;
2277 struct fw_vi_cmd c;
2278
2279 memset(&c, 0, sizeof(c));
2280 c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
2281 FW_CMD_WRITE | FW_CMD_EXEC |
2282 FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
2283 c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
2284 c.portid_pkd = FW_VI_CMD_PORTID(port);
2285 c.nmac = nmac - 1;
2286
2287 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2288 if (ret)
2289 return ret;
2290
2291 if (mac) {
2292 memcpy(mac, c.mac, sizeof(c.mac));
2293 switch (nmac) {
2294 case 5:
 2295 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3)); /* fall through */
 2296 case 4:
 2297 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2)); /* fall through */
 2298 case 3:
 2299 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1)); /* fall through */
 2300 case 2:
 2301 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
2302 }
2303 }
2304 if (rss_size)
2305 *rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
 2306 return FW_VI_CMD_VIID_GET(ntohs(c.type_viid));
2307}
2308
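/*
 * Caller-side sketch: @mac must provide @nmac * 6 bytes, and the return
 * value doubles as the VI id on success.
 *
 *	u8 mac[2 * 6];
 *	unsigned int rss_size;
 *	int viid;
 *
 *	viid = t4_alloc_vi(adap, mbox, port, pf, vf, 2, mac, &rss_size);
 *	if (viid < 0)
 *		return viid;
 */
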
2309/**
2310 * t4_set_rxmode - set Rx properties of a virtual interface
2311 * @adap: the adapter
2312 * @mbox: mailbox to use for the FW command
2313 * @viid: the VI id
2314 * @mtu: the new MTU or -1
2315 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
2316 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
2317 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
 2318 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
2319 * @sleep_ok: if true we may sleep while awaiting command completion
2320 *
2321 * Sets Rx properties of a virtual interface.
2322 */
2323int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
2324 int mtu, int promisc, int all_multi, int bcast, int vlanex,
2325 bool sleep_ok)
2326{
2327 struct fw_vi_rxmode_cmd c;
2328
2329 /* convert to FW values */
2330 if (mtu < 0)
2331 mtu = FW_RXMODE_MTU_NO_CHG;
2332 if (promisc < 0)
2333 promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
2334 if (all_multi < 0)
2335 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
2336 if (bcast < 0)
2337 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
2338 if (vlanex < 0)
2339 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_MASK;
2340
2341 memset(&c, 0, sizeof(c));
2342 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
2343 FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
2344 c.retval_len16 = htonl(FW_LEN16(c));
2345 c.mtu_to_vlanexen = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
2346 FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
2347 FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
2348 FW_VI_RXMODE_CMD_BROADCASTEN(bcast) |
2349 FW_VI_RXMODE_CMD_VLANEXEN(vlanex));
2350 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2351}
2352
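/*
 * Since every property accepts -1 for "no change", a single setting can
 * be toggled without re-specifying the others, e.g. enabling promiscuous
 * mode only (sketch):
 *
 *	err = t4_set_rxmode(adap, mbox, viid, -1, 1, -1, -1, -1, true);
 */
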
2353/**
2354 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
2355 * @adap: the adapter
2356 * @mbox: mailbox to use for the FW command
2357 * @viid: the VI id
2358 * @free: if true any existing filters for this VI id are first removed
2359 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
2360 * @addr: the MAC address(es)
2361 * @idx: where to store the index of each allocated filter
2362 * @hash: pointer to hash address filter bitmap
2363 * @sleep_ok: call is allowed to sleep
2364 *
2365 * Allocates an exact-match filter for each of the supplied addresses and
2366 * sets it to the corresponding address. If @idx is not %NULL it should
2367 * have at least @naddr entries, each of which will be set to the index of
2368 * the filter allocated for the corresponding MAC address. If a filter
 2369 * could not be allocated for an address, its index is set to 0xffff.
 2370 * If @hash is not %NULL, addresses that fail to allocate an exact filter
 2371 * are hashed to update the hash filter bitmap pointed at by @hash.
2372 *
2373 * Returns a negative error number or the number of filters allocated.
2374 */
2375int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
2376 unsigned int viid, bool free, unsigned int naddr,
2377 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
2378{
2379 int i, ret;
2380 struct fw_vi_mac_cmd c;
2381 struct fw_vi_mac_exact *p;
2382
2383 if (naddr > 7)
2384 return -EINVAL;
2385
2386 memset(&c, 0, sizeof(c));
2387 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2388 FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
2389 FW_VI_MAC_CMD_VIID(viid));
2390 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
2391 FW_CMD_LEN16((naddr + 2) / 2));
2392
2393 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2394 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2395 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
2396 memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
2397 }
2398
2399 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
2400 if (ret)
2401 return ret;
2402
2403 for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
2404 u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2405
2406 if (idx)
2407 idx[i] = index >= NEXACT_MAC ? 0xffff : index;
2408 if (index < NEXACT_MAC)
2409 ret++;
2410 else if (hash)
 2411 *hash |= (1ULL << hash_mac_addr(addr[i]));
2412 }
2413 return ret;
2414}
2415
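/*
 * Sketch of the exact-then-hash pattern this helper supports: request
 * exact-match filters, let overflowing addresses accumulate in the hash
 * bitmap, then program it with t4_set_addr_hash() below. mac_a/mac_b are
 * hypothetical address buffers.
 *
 *	u64 hash = 0;
 *	u16 idx[2];
 *	const u8 *addrs[2] = { mac_a, mac_b };
 *
 *	ret = t4_alloc_mac_filt(adap, mbox, viid, false, 2, addrs, idx,
 *				&hash, true);
 *	if (ret >= 0 && hash)
 *		ret = t4_set_addr_hash(adap, mbox, viid, false, hash, true);
 */
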
2416/**
2417 * t4_change_mac - modifies the exact-match filter for a MAC address
2418 * @adap: the adapter
2419 * @mbox: mailbox to use for the FW command
2420 * @viid: the VI id
2421 * @idx: index of existing filter for old value of MAC address, or -1
2422 * @addr: the new MAC address value
2423 * @persist: whether a new MAC allocation should be persistent
2424 * @add_smt: if true also add the address to the HW SMT
2425 *
2426 * Modifies an exact-match filter and sets it to the new MAC address.
2427 * Note that in general it is not possible to modify the value of a given
 2428 * filter, so the generic way to modify an address filter is to free the
 2429 * one being used by the old address value and allocate a new filter for the
2430 * new address value. @idx can be -1 if the address is a new addition.
2431 *
2432 * Returns a negative error number or the index of the filter with the new
2433 * MAC value.
2434 */
2435int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
2436 int idx, const u8 *addr, bool persist, bool add_smt)
2437{
2438 int ret, mode;
2439 struct fw_vi_mac_cmd c;
2440 struct fw_vi_mac_exact *p = c.u.exact;
2441
2442 if (idx < 0) /* new allocation */
2443 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
2444 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
2445
2446 memset(&c, 0, sizeof(c));
2447 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
2448 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
2449 c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
2450 p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
2451 FW_VI_MAC_CMD_SMAC_RESULT(mode) |
2452 FW_VI_MAC_CMD_IDX(idx));
2453 memcpy(p->macaddr, addr, sizeof(p->macaddr));
2454
2455 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2456 if (ret == 0) {
2457 ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
2458 if (ret >= NEXACT_MAC)
2459 ret = -ENOMEM;
2460 }
2461 return ret;
2462}
2463
2464/**
2465 * t4_set_addr_hash - program the MAC inexact-match hash filter
2466 * @adap: the adapter
2467 * @mbox: mailbox to use for the FW command
2468 * @viid: the VI id
2469 * @ucast: whether the hash filter should also match unicast addresses
2470 * @vec: the value to be written to the hash filter
2471 * @sleep_ok: call is allowed to sleep
2472 *
2473 * Sets the 64-bit inexact-match hash filter for a virtual interface.
2474 */
2475int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
2476 bool ucast, u64 vec, bool sleep_ok)
2477{
2478 struct fw_vi_mac_cmd c;
2479
2480 memset(&c, 0, sizeof(c));
2481 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
 2482 FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
2483 c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
2484 FW_VI_MAC_CMD_HASHUNIEN(ucast) |
2485 FW_CMD_LEN16(1));
2486 c.u.hash.hashvec = cpu_to_be64(vec);
2487 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
2488}
2489
2490/**
2491 * t4_enable_vi - enable/disable a virtual interface
2492 * @adap: the adapter
2493 * @mbox: mailbox to use for the FW command
2494 * @viid: the VI id
2495 * @rx_en: 1=enable Rx, 0=disable Rx
2496 * @tx_en: 1=enable Tx, 0=disable Tx
2497 *
2498 * Enables/disables a virtual interface.
2499 */
2500int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
2501 bool rx_en, bool tx_en)
2502{
2503 struct fw_vi_enable_cmd c;
2504
2505 memset(&c, 0, sizeof(c));
2506 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2507 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2508 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
2509 FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
2510 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2511}
2512
2513/**
2514 * t4_identify_port - identify a VI's port by blinking its LED
2515 * @adap: the adapter
2516 * @mbox: mailbox to use for the FW command
2517 * @viid: the VI id
2518 * @nblinks: how many times to blink LED at 2.5 Hz
2519 *
2520 * Identifies a VI's port by blinking its LED.
2521 */
2522int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
2523 unsigned int nblinks)
2524{
2525 struct fw_vi_enable_cmd c;
 2526
 memset(&c, 0, sizeof(c)); /* zero the command first, as the other FW commands in this file do */
2527 c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
2528 FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
2529 c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
2530 c.blinkdur = htons(nblinks);
2531 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2532}
2533
2534/**
2535 * t4_iq_free - free an ingress queue and its FLs
2536 * @adap: the adapter
2537 * @mbox: mailbox to use for the FW command
2538 * @pf: the PF owning the queues
2539 * @vf: the VF owning the queues
2540 * @iqtype: the ingress queue type
2541 * @iqid: ingress queue id
2542 * @fl0id: FL0 queue id or 0xffff if no attached FL0
2543 * @fl1id: FL1 queue id or 0xffff if no attached FL1
2544 *
2545 * Frees an ingress queue and its associated FLs, if any.
2546 */
2547int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2548 unsigned int vf, unsigned int iqtype, unsigned int iqid,
2549 unsigned int fl0id, unsigned int fl1id)
2550{
2551 struct fw_iq_cmd c;
2552
2553 memset(&c, 0, sizeof(c));
2554 c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
2555 FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
2556 FW_IQ_CMD_VFN(vf));
2557 c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
2558 c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
2559 c.iqid = htons(iqid);
2560 c.fl0id = htons(fl0id);
2561 c.fl1id = htons(fl1id);
2562 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2563}
2564
2565/**
2566 * t4_eth_eq_free - free an Ethernet egress queue
2567 * @adap: the adapter
2568 * @mbox: mailbox to use for the FW command
2569 * @pf: the PF owning the queue
2570 * @vf: the VF owning the queue
2571 * @eqid: egress queue id
2572 *
2573 * Frees an Ethernet egress queue.
2574 */
2575int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2576 unsigned int vf, unsigned int eqid)
2577{
2578 struct fw_eq_eth_cmd c;
2579
2580 memset(&c, 0, sizeof(c));
2581 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
2582 FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
2583 FW_EQ_ETH_CMD_VFN(vf));
2584 c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
2585 c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
2586 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2587}
2588
2589/**
2590 * t4_ctrl_eq_free - free a control egress queue
2591 * @adap: the adapter
2592 * @mbox: mailbox to use for the FW command
2593 * @pf: the PF owning the queue
2594 * @vf: the VF owning the queue
2595 * @eqid: egress queue id
2596 *
2597 * Frees a control egress queue.
2598 */
2599int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2600 unsigned int vf, unsigned int eqid)
2601{
2602 struct fw_eq_ctrl_cmd c;
2603
2604 memset(&c, 0, sizeof(c));
2605 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
2606 FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
2607 FW_EQ_CTRL_CMD_VFN(vf));
2608 c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
2609 c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
2610 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2611}
2612
2613/**
2614 * t4_ofld_eq_free - free an offload egress queue
2615 * @adap: the adapter
2616 * @mbox: mailbox to use for the FW command
2617 * @pf: the PF owning the queue
2618 * @vf: the VF owning the queue
2619 * @eqid: egress queue id
2620 *
2621 * Frees a control egress queue.
2622 */
2623int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
2624 unsigned int vf, unsigned int eqid)
2625{
2626 struct fw_eq_ofld_cmd c;
2627
2628 memset(&c, 0, sizeof(c));
2629 c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
2630 FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
2631 FW_EQ_OFLD_CMD_VFN(vf));
2632 c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
2633 c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
2634 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
2635}
2636
2637/**
2638 * t4_handle_fw_rpl - process a FW reply message
2639 * @adap: the adapter
2640 * @rpl: start of the FW message
2641 *
2642 * Processes a FW message, such as link state change messages.
2643 */
2644int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
2645{
2646 u8 opcode = *(const u8 *)rpl;
2647
2648 if (opcode == FW_PORT_CMD) { /* link/module state change message */
2649 int speed = 0, fc = 0;
2650 const struct fw_port_cmd *p = (void *)rpl;
2651 int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
2652 int port = adap->chan_map[chan];
2653 struct port_info *pi = adap2pinfo(adap, port);
2654 struct link_config *lc = &pi->link_cfg;
2655 u32 stat = ntohl(p->u.info.lstatus_to_modtype);
2656 int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
2657 u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
2658
2659 if (stat & FW_PORT_CMD_RXPAUSE)
2660 fc |= PAUSE_RX;
2661 if (stat & FW_PORT_CMD_TXPAUSE)
2662 fc |= PAUSE_TX;
2663 if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
2664 speed = SPEED_100;
2665 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
2666 speed = SPEED_1000;
2667 else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
2668 speed = SPEED_10000;
2669
2670 if (link_ok != lc->link_ok || speed != lc->speed ||
2671 fc != lc->fc) { /* something changed */
2672 lc->link_ok = link_ok;
2673 lc->speed = speed;
2674 lc->fc = fc;
2675 t4_os_link_changed(adap, port, link_ok);
2676 }
2677 if (mod != pi->mod_type) {
2678 pi->mod_type = mod;
2679 t4_os_portmod_changed(adap, port);
2680 }
2681 }
2682 return 0;
2683}
2684
2685static void __devinit get_pci_mode(struct adapter *adapter,
2686 struct pci_params *p)
2687{
2688 u16 val;
2689 u32 pcie_cap = pci_pcie_cap(adapter->pdev);
2690
2691 if (pcie_cap) {
2692 pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
2693 &val);
2694 p->speed = val & PCI_EXP_LNKSTA_CLS;
2695 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
2696 }
2697}
2698
2699/**
2700 * init_link_config - initialize a link's SW state
2701 * @lc: structure holding the link state
2702 * @caps: link capabilities
2703 *
2704 * Initializes the SW state maintained for each link, including the link's
2705 * capabilities and default speed/flow-control/autonegotiation settings.
2706 */
2707static void __devinit init_link_config(struct link_config *lc,
2708 unsigned int caps)
2709{
2710 lc->supported = caps;
2711 lc->requested_speed = 0;
2712 lc->speed = 0;
2713 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
2714 if (lc->supported & FW_PORT_CAP_ANEG) {
2715 lc->advertising = lc->supported & ADVERT_MASK;
2716 lc->autoneg = AUTONEG_ENABLE;
2717 lc->requested_fc |= PAUSE_AUTONEG;
2718 } else {
2719 lc->advertising = 0;
2720 lc->autoneg = AUTONEG_DISABLE;
2721 }
2722}
2723
/**
 * t4_wait_dev_ready - wait for the device to become ready
 * @adap: the adapter
 *
 * Checks whether the device responds by reading PL_WHOAMI, retrying
 * once after a 500 ms pause. Returns 0 if the device is ready and
 * -EIO otherwise.
 */
 2724 int t4_wait_dev_ready(struct adapter *adap)
2725{
2726 if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
2727 return 0;
2728 msleep(500);
2729 return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
2730}
2731
2732static int __devinit get_flash_params(struct adapter *adap)
2733{
2734 int ret;
2735 u32 info;
2736
2737 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
2738 if (!ret)
2739 ret = sf1_read(adap, 3, 0, 1, &info);
2740 t4_write_reg(adap, SF_OP, 0); /* unlock SF */
2741 if (ret)
2742 return ret;
2743
2744 if ((info & 0xff) != 0x20) /* not a Numonix flash */
2745 return -EINVAL;
2746 info >>= 16; /* log2 of size */
2747 if (info >= 0x14 && info < 0x18)
2748 adap->params.sf_nsec = 1 << (info - 16);
2749 else if (info == 0x18)
2750 adap->params.sf_nsec = 64;
2751 else
2752 return -EINVAL;
2753 adap->params.sf_size = 1 << info;
2754 adap->params.sf_fw_start =
2755 t4_read_reg(adap, CIM_BOOT_CFG) & BOOTADDR_MASK;
2756 return 0;
2757}
2758
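/*
 * Decode example for get_flash_params() above (illustrative): a part
 * reporting a log2-size field of 0x17 yields sf_size = 1 << 0x17 = 8 MB
 * and sf_nsec = 1 << (0x17 - 16) = 128 sectors, i.e. 64 KB per sector.
 */
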
2759/**
2760 * t4_prep_adapter - prepare SW and HW for operation
2761 * @adapter: the adapter
2763 *
2764 * Initialize adapter SW state for the various HW modules, set initial
 2765 * values for some adapter tunables, and read the flash and VPD
 2766 * parameters needed later.
2767 */
2768int __devinit t4_prep_adapter(struct adapter *adapter)
2769{
2770 int ret;
2771
 2772 ret = t4_wait_dev_ready(adapter);
2773 if (ret < 0)
2774 return ret;
2775
2776 get_pci_mode(adapter, &adapter->params.pci);
2777 adapter->params.rev = t4_read_reg(adapter, PL_REV);
2778
2779 ret = get_flash_params(adapter);
2780 if (ret < 0) {
2781 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
2782 return ret;
2783 }
2784
2785 ret = get_vpd_params(adapter, &adapter->params.vpd);
2786 if (ret < 0)
2787 return ret;
2788
2789 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
2790
2791 /*
2792 * Default port for debugging in case we can't reach FW.
2793 */
2794 adapter->params.nports = 1;
2795 adapter->params.portvec = 1;
2796 return 0;
2797}
2798
2799int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
2800{
2801 u8 addr[6];
2802 int ret, i, j = 0;
2803 struct fw_port_cmd c;
 2804 struct fw_rss_vi_config_cmd rvc;
2805
2806 memset(&c, 0, sizeof(c));
 2807 memset(&rvc, 0, sizeof(rvc));
2808
2809 for_each_port(adap, i) {
2810 unsigned int rss_size;
2811 struct port_info *p = adap2pinfo(adap, i);
2812
2813 while ((adap->params.portvec & (1 << j)) == 0)
2814 j++;
2815
2816 c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
2817 FW_CMD_REQUEST | FW_CMD_READ |
2818 FW_PORT_CMD_PORTID(j));
2819 c.action_to_len16 = htonl(
2820 FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
2821 FW_LEN16(c));
2822 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
2823 if (ret)
2824 return ret;
2825
2826 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
2827 if (ret < 0)
2828 return ret;
2829
2830 p->viid = ret;
2831 p->tx_chan = j;
2832 p->lport = j;
2833 p->rss_size = rss_size;
2834 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
2835 memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
 2836 adap->port[i]->dev_id = j;
2837
2838 ret = ntohl(c.u.info.lstatus_to_modtype);
2839 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
2840 FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
2841 p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
 2842 p->mod_type = FW_PORT_MOD_TYPE_NA;
 2843
2844 rvc.op_to_viid = htonl(FW_CMD_OP(FW_RSS_VI_CONFIG_CMD) |
2845 FW_CMD_REQUEST | FW_CMD_READ |
2846 FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
2847 rvc.retval_len16 = htonl(FW_LEN16(rvc));
2848 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
2849 if (ret)
2850 return ret;
2851 p->rss_mode = ntohl(rvc.u.basicvirtual.defaultq_to_udpen);
2852
2853 init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
2854 j++;
2855 }
2856 return 0;
2857}