1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include <linux/netdevice.h>
26#include <linux/delay.h>
27#include <linux/slab.h>
28#include <linux/if_vlan.h>
29#include "qlcnic.h"
30
31struct crb_addr_pair {
32 u32 addr;
33 u32 data;
34};
35
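/* crb_addr_xform[] maps each CRB agent index (QLCNIC_HW_PX_MAP_CRB_*) to its
 * hub agent address placed in the upper bits of a CRB offset; the table is
 * filled by crb_addr_transform_setup() and searched by qlcnic_decode_crb_addr()
 * to turn flash-supplied CRB addresses into PCI CRB offsets. */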
36#define QLCNIC_MAX_CRB_XFORM 60
37static unsigned int crb_addr_xform[QLCNIC_MAX_CRB_XFORM];
38
39#define crb_addr_transform(name) \
40 (crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_##name] = \
41 QLCNIC_HW_CRB_HUB_AGT_ADR_##name << 20)
42
43#define QLCNIC_ADDR_ERROR (0xffffffff)
44
45static void
46qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
47 struct qlcnic_host_rds_ring *rds_ring);
48
49static int
50qlcnic_check_fw_heartbeat(struct qlcnic_adapter *adapter);
51
52static void crb_addr_transform_setup(void)
53{
54 crb_addr_transform(XDMA);
55 crb_addr_transform(TIMR);
56 crb_addr_transform(SRE);
57 crb_addr_transform(SQN3);
58 crb_addr_transform(SQN2);
59 crb_addr_transform(SQN1);
60 crb_addr_transform(SQN0);
61 crb_addr_transform(SQS3);
62 crb_addr_transform(SQS2);
63 crb_addr_transform(SQS1);
64 crb_addr_transform(SQS0);
65 crb_addr_transform(RPMX7);
66 crb_addr_transform(RPMX6);
67 crb_addr_transform(RPMX5);
68 crb_addr_transform(RPMX4);
69 crb_addr_transform(RPMX3);
70 crb_addr_transform(RPMX2);
71 crb_addr_transform(RPMX1);
72 crb_addr_transform(RPMX0);
73 crb_addr_transform(ROMUSB);
74 crb_addr_transform(SN);
75 crb_addr_transform(QMN);
76 crb_addr_transform(QMS);
77 crb_addr_transform(PGNI);
78 crb_addr_transform(PGND);
79 crb_addr_transform(PGN3);
80 crb_addr_transform(PGN2);
81 crb_addr_transform(PGN1);
82 crb_addr_transform(PGN0);
83 crb_addr_transform(PGSI);
84 crb_addr_transform(PGSD);
85 crb_addr_transform(PGS3);
86 crb_addr_transform(PGS2);
87 crb_addr_transform(PGS1);
88 crb_addr_transform(PGS0);
89 crb_addr_transform(PS);
90 crb_addr_transform(PH);
91 crb_addr_transform(NIU);
92 crb_addr_transform(I2Q);
93 crb_addr_transform(EG);
94 crb_addr_transform(MN);
95 crb_addr_transform(MS);
96 crb_addr_transform(CAS2);
97 crb_addr_transform(CAS1);
98 crb_addr_transform(CAS0);
99 crb_addr_transform(CAM);
100 crb_addr_transform(C2C1);
101 crb_addr_transform(C2C0);
102 crb_addr_transform(SMB);
103 crb_addr_transform(OCM0);
104 crb_addr_transform(I2C0);
105}
106
107void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter)
108{
109 struct qlcnic_recv_context *recv_ctx;
110 struct qlcnic_host_rds_ring *rds_ring;
111 struct qlcnic_rx_buffer *rx_buf;
112 int i, ring;
113
114 recv_ctx = &adapter->recv_ctx;
115 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
116 rds_ring = &recv_ctx->rds_rings[ring];
117 for (i = 0; i < rds_ring->num_desc; ++i) {
118 rx_buf = &(rds_ring->rx_buf_arr[i]);
119 if (rx_buf->skb == NULL)
120 continue;
121
122 pci_unmap_single(adapter->pdev,
123 rx_buf->dma,
124 rds_ring->dma_size,
125 PCI_DMA_FROMDEVICE);
126
127 dev_kfree_skb_any(rx_buf->skb);
128 }
129 }
130}
131
132void qlcnic_reset_rx_buffers_list(struct qlcnic_adapter *adapter)
133{
134 struct qlcnic_recv_context *recv_ctx;
135 struct qlcnic_host_rds_ring *rds_ring;
136 struct qlcnic_rx_buffer *rx_buf;
137 int i, ring;
138
139 recv_ctx = &adapter->recv_ctx;
140 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
141 rds_ring = &recv_ctx->rds_rings[ring];
142
143 INIT_LIST_HEAD(&rds_ring->free_list);
144
145 rx_buf = rds_ring->rx_buf_arr;
146 for (i = 0; i < rds_ring->num_desc; i++) {
147 list_add_tail(&rx_buf->list,
148 &rds_ring->free_list);
149 rx_buf++;
150 }
151 }
152}
153
154void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
155{
156 struct qlcnic_cmd_buffer *cmd_buf;
157 struct qlcnic_skb_frag *buffrag;
158 int i, j;
159 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
160
161 cmd_buf = tx_ring->cmd_buf_arr;
162 for (i = 0; i < tx_ring->num_desc; i++) {
163 buffrag = cmd_buf->frag_array;
164 if (buffrag->dma) {
165 pci_unmap_single(adapter->pdev, buffrag->dma,
166 buffrag->length, PCI_DMA_TODEVICE);
167 buffrag->dma = 0ULL;
168 }
169 for (j = 0; j < cmd_buf->frag_count; j++) {
170 buffrag++;
171 if (buffrag->dma) {
172 pci_unmap_page(adapter->pdev, buffrag->dma,
173 buffrag->length,
174 PCI_DMA_TODEVICE);
175 buffrag->dma = 0ULL;
176 }
177 }
178 if (cmd_buf->skb) {
179 dev_kfree_skb_any(cmd_buf->skb);
180 cmd_buf->skb = NULL;
181 }
182 cmd_buf++;
183 }
184}
185
186void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
187{
188 struct qlcnic_recv_context *recv_ctx;
189 struct qlcnic_host_rds_ring *rds_ring;
190 struct qlcnic_host_tx_ring *tx_ring;
191 int ring;
192
193 recv_ctx = &adapter->recv_ctx;
194
195 if (recv_ctx->rds_rings == NULL)
196 goto skip_rds;
197
198 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
199 rds_ring = &recv_ctx->rds_rings[ring];
200 vfree(rds_ring->rx_buf_arr);
201 rds_ring->rx_buf_arr = NULL;
202 }
203 kfree(recv_ctx->rds_rings);
204
205skip_rds:
206 if (adapter->tx_ring == NULL)
207 return;
208
209 tx_ring = adapter->tx_ring;
210 vfree(tx_ring->cmd_buf_arr);
211 tx_ring->cmd_buf_arr = NULL;
212 kfree(adapter->tx_ring);
213 adapter->tx_ring = NULL;
214}
215
216int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
217{
218 struct qlcnic_recv_context *recv_ctx;
219 struct qlcnic_host_rds_ring *rds_ring;
220 struct qlcnic_host_sds_ring *sds_ring;
221 struct qlcnic_host_tx_ring *tx_ring;
222 struct qlcnic_rx_buffer *rx_buf;
223 int ring, i, size;
224
225 struct qlcnic_cmd_buffer *cmd_buf_arr;
226 struct net_device *netdev = adapter->netdev;
227
228 size = sizeof(struct qlcnic_host_tx_ring);
229 tx_ring = kzalloc(size, GFP_KERNEL);
230 if (tx_ring == NULL) {
231 dev_err(&netdev->dev, "failed to allocate tx ring struct\n");
232 return -ENOMEM;
233 }
234 adapter->tx_ring = tx_ring;
235
236 tx_ring->num_desc = adapter->num_txd;
237 tx_ring->txq = netdev_get_tx_queue(netdev, 0);
238
239 cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring));
240 if (cmd_buf_arr == NULL) {
241 dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
242 goto err_out;
243 }
244 memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
245 tx_ring->cmd_buf_arr = cmd_buf_arr;
246
247 recv_ctx = &adapter->recv_ctx;
248
249 size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring);
250 rds_ring = kzalloc(size, GFP_KERNEL);
251 if (rds_ring == NULL) {
252 dev_err(&netdev->dev, "failed to allocate rds ring struct\n");
253 goto err_out;
254 }
255 recv_ctx->rds_rings = rds_ring;
256
257 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
258 rds_ring = &recv_ctx->rds_rings[ring];
259 switch (ring) {
260 case RCV_RING_NORMAL:
261 rds_ring->num_desc = adapter->num_rxd;
262 rds_ring->dma_size = QLCNIC_P3P_RX_BUF_MAX_LEN;
263 rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
264 break;
265
266 case RCV_RING_JUMBO:
267 rds_ring->num_desc = adapter->num_jumbo_rxd;
268 rds_ring->dma_size =
269 QLCNIC_P3P_RX_JUMBO_BUF_MAX_LEN;
270
271 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
272 rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA;
273
274 rds_ring->skb_size =
275 rds_ring->dma_size + NET_IP_ALIGN;
276 break;
277 }
278 rds_ring->rx_buf_arr = (struct qlcnic_rx_buffer *)
279 vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
280 if (rds_ring->rx_buf_arr == NULL) {
281 dev_err(&netdev->dev, "Failed to allocate "
282 "rx buffer ring %d\n", ring);
283 goto err_out;
284 }
285 memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
286 INIT_LIST_HEAD(&rds_ring->free_list);
287 /*
288 * Now go through all of them, set reference handles
289 * and put them in the queues.
290 */
291 rx_buf = rds_ring->rx_buf_arr;
292 for (i = 0; i < rds_ring->num_desc; i++) {
293 list_add_tail(&rx_buf->list,
294 &rds_ring->free_list);
295 rx_buf->ref_handle = i;
296 rx_buf++;
297 }
298 spin_lock_init(&rds_ring->lock);
299 }
300
301 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
302 sds_ring = &recv_ctx->sds_rings[ring];
303 sds_ring->irq = adapter->msix_entries[ring].vector;
304 sds_ring->adapter = adapter;
305 sds_ring->num_desc = adapter->num_rxd;
306
307 for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
308 INIT_LIST_HEAD(&sds_ring->free_list[i]);
309 }
310
311 return 0;
312
313err_out:
314 qlcnic_free_sw_resources(adapter);
315 return -ENOMEM;
316}
317
318/*
319 * Utility to translate from internal Phantom CRB address
320 * to external PCI CRB address.
321 */
322static u32 qlcnic_decode_crb_addr(u32 addr)
323{
324 int i;
325 u32 base_addr, offset, pci_base;
326
327 crb_addr_transform_setup();
328
329 pci_base = QLCNIC_ADDR_ERROR;
330 base_addr = addr & 0xfff00000;
331 offset = addr & 0x000fffff;
332
333 for (i = 0; i < QLCNIC_MAX_CRB_XFORM; i++) {
334 if (crb_addr_xform[i] == base_addr) {
335 pci_base = i << 20;
336 break;
337 }
338 }
339 if (pci_base == QLCNIC_ADDR_ERROR)
340 return pci_base;
341 else
342 return pci_base + offset;
343}
344
345#define QLCNIC_MAX_ROM_WAIT_USEC 100
346
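/* Poll the ROM glue status register until the done bit (bit 1) is set,
 * waiting 1 usec per iteration and giving up after
 * QLCNIC_MAX_ROM_WAIT_USEC attempts. */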
347static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
348{
349 long timeout = 0;
350 long done = 0;
351
352 cond_resched();
353
354 while (done == 0) {
355 done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS);
356 done &= 2;
357 if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) {
358 dev_err(&adapter->pdev->dev,
359 "Timeout reached waiting for rom done");
360 return -EIO;
361 }
362 udelay(1);
363 }
364 return 0;
365}
366
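/* Read one 32-bit word from flash: program the address, select a 3-byte
 * address phase with no dummy bytes, issue instruction 0xb and wait for
 * completion before latching the data from the RDATA register. */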
367static int do_rom_fast_read(struct qlcnic_adapter *adapter,
368 int addr, int *valp)
369{
370 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr);
371 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
372 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3);
373 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_INSTR_OPCODE, 0xb);
374 if (qlcnic_wait_rom_done(adapter)) {
375 dev_err(&adapter->pdev->dev, "Error waiting for rom done\n");
376 return -EIO;
377 }
378 /* reset abyte_cnt and dummy_byte_cnt */
379 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 0);
380 udelay(10);
381 QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
382
383 *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA);
384 return 0;
385}
386
387static int do_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
388 u8 *bytes, size_t size)
389{
390 int addridx;
391 int ret = 0;
392
393 for (addridx = addr; addridx < (addr + size); addridx += 4) {
394 int v;
395 ret = do_rom_fast_read(adapter, addridx, &v);
396 if (ret != 0)
397 break;
398 *(__le32 *)bytes = cpu_to_le32(v);
399 bytes += 4;
400 }
401
402 return ret;
403}
404
405int
406qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
407 u8 *bytes, size_t size)
408{
409 int ret;
410
411 ret = qlcnic_rom_lock(adapter);
412 if (ret < 0)
413 return ret;
414
415 ret = do_rom_fast_read_words(adapter, addr, bytes, size);
416
417 qlcnic_rom_unlock(adapter);
418 return ret;
419}
420
421int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp)
422{
423 int ret;
424
425 if (qlcnic_rom_lock(adapter) != 0)
426 return -EIO;
427
428 ret = do_rom_fast_read(adapter, addr, valp);
429 qlcnic_rom_unlock(adapter);
430 return ret;
431}
432
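/* Replay the CRB init sequence stored in flash: after a soft reset, read
 * the (address, value) pair table, translate each address with
 * qlcnic_decode_crb_addr() and write it, skipping registers the driver
 * must not touch (clock setup, PCI function enables, cold-reboot magic). */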
433int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
434{
435 int addr, val;
436 int i, n, init_delay;
437 struct crb_addr_pair *buf;
438 unsigned offset;
439 u32 off;
440 struct pci_dev *pdev = adapter->pdev;
441
442 QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
443 QLCWR32(adapter, CRB_RCVPEG_STATE, 0);
444
445 qlcnic_rom_lock(adapter);
446 QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xfeffffff);
447 qlcnic_rom_unlock(adapter);
448
449 /* Init HW CRB block */
450 if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) ||
451 qlcnic_rom_fast_read(adapter, 4, &n) != 0) {
452 dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n);
453 return -EIO;
454 }
455 offset = n & 0xffffU;
456 n = (n >> 16) & 0xffffU;
457
458 if (n >= 1024) {
459 dev_err(&pdev->dev, "QLOGIC card flash not initialized.\n");
460 return -EIO;
461 }
462
463 buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
464 if (buf == NULL) {
465 dev_err(&pdev->dev, "Unable to calloc memory for rom read.\n");
466 return -ENOMEM;
467 }
468
469 for (i = 0; i < n; i++) {
470 if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
471 qlcnic_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
472 kfree(buf);
473 return -EIO;
474 }
475
476 buf[i].addr = addr;
477 buf[i].data = val;
478 }
479
480 for (i = 0; i < n; i++) {
481
482 off = qlcnic_decode_crb_addr(buf[i].addr);
483 if (off == QLCNIC_ADDR_ERROR) {
484 dev_err(&pdev->dev, "CRB init value out of range %x\n",
485 buf[i].addr);
486 continue;
487 }
488 off += QLCNIC_PCI_CRBSPACE;
489
490 if (off & 1)
491 continue;
492
493 /* skipping cold reboot MAGIC */
494 if (off == QLCNIC_CAM_RAM(0x1fc))
495 continue;
496 if (off == (QLCNIC_CRB_I2C0 + 0x1c))
497 continue;
498 if (off == (ROMUSB_GLB + 0xbc)) /* do not reset PCI */
499 continue;
500 if (off == (ROMUSB_GLB + 0xa8))
501 continue;
502 if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
503 continue;
504 if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
505 continue;
506 if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
507 continue;
508 if ((off & 0x0ff00000) == QLCNIC_CRB_DDR_NET)
509 continue;
510 /* skip the function enable register */
511 if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION))
512 continue;
513 if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION2))
514 continue;
515 if ((off & 0x0ff00000) == QLCNIC_CRB_SMB)
516 continue;
517
518 init_delay = 1;
519 /* After writing this register, HW needs time for CRB */
520 /* to quiet down (else crb_window returns 0xffffffff) */
521 if (off == QLCNIC_ROMUSB_GLB_SW_RESET)
522 init_delay = 1000;
523
524 QLCWR32(adapter, off, buf[i].data);
525
526 msleep(init_delay);
527 }
528 kfree(buf);
529
530 /* Initialize protocol process engine */
531 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e);
532 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8);
533 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8);
534 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0);
535 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0);
536 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0);
537 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0xc, 0);
538 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x8, 0);
539 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0);
540 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0);
541 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0);
542 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0x8, 0);
543 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_4 + 0xc, 0);
544 msleep(1);
545 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
546 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
547 return 0;
548}
549
550static int qlcnic_cmd_peg_ready(struct qlcnic_adapter *adapter)
551{
552 u32 val;
553 int retries = QLCNIC_CMDPEG_CHECK_RETRY_COUNT;
554
555 do {
556 val = QLCRD32(adapter, CRB_CMDPEG_STATE);
557
558 switch (val) {
559 case PHAN_INITIALIZE_COMPLETE:
560 case PHAN_INITIALIZE_ACK:
561 return 0;
562 case PHAN_INITIALIZE_FAILED:
563 goto out_err;
564 default:
565 break;
566 }
567
568 msleep(QLCNIC_CMDPEG_CHECK_DELAY);
569
570 } while (--retries);
571
572 QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
573
574out_err:
575 dev_err(&adapter->pdev->dev, "Command Peg initialization not "
576 "complete, state: 0x%x.\n", val);
577 return -EIO;
578}
579
580static int
581qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
582{
583 u32 val;
584 int retries = QLCNIC_RCVPEG_CHECK_RETRY_COUNT;
585
586 do {
587 val = QLCRD32(adapter, CRB_RCVPEG_STATE);
588
589 if (val == PHAN_PEG_RCV_INITIALIZED)
590 return 0;
591
592 msleep(QLCNIC_RCVPEG_CHECK_DELAY);
593
594 } while (--retries);
595
596 if (!retries) {
597 dev_err(&adapter->pdev->dev, "Receive Peg initialization not "
598 "complete, state: 0x%x.\n", val);
599 return -EIO;
600 }
601
602 return 0;
603}
604
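/* Firmware is considered up once both the command and receive peg state
 * registers report initialization; the host then acks by writing
 * PHAN_INITIALIZE_ACK to CRB_CMDPEG_STATE. */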
605int
606qlcnic_check_fw_status(struct qlcnic_adapter *adapter)
607{
608 int err;
609
610 err = qlcnic_cmd_peg_ready(adapter);
611 if (err)
612 return err;
613
614 err = qlcnic_receive_peg_ready(adapter);
615 if (err)
616 return err;
617
618 QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
619
620 return err;
621}
622
623int
624qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
625
626 int timeo;
627 u32 val;
628
629 val = QLCRD32(adapter, QLCNIC_CRB_DEV_PARTITION_INFO);
630 val = QLC_DEV_GET_DRV(val, adapter->portnum);
631 if ((val & 0x3) != QLCNIC_TYPE_NIC) {
632 dev_err(&adapter->pdev->dev,
633 "Not an Ethernet NIC func=%u\n", val);
634 return -EIO;
635 }
636 adapter->physical_port = (val >> 2);
637 if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo))
638 timeo = QLCNIC_INIT_TIMEOUT_SECS;
639
640 adapter->dev_init_timeo = timeo;
641
642 if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo))
643 timeo = QLCNIC_RESET_TIMEOUT_SECS;
644
645 adapter->reset_ack_timeo = timeo;
646
647 return 0;
648}
649
650int
651qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
652{
653 u32 ver = -1, min_ver;
654
655 qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, (int *)&ver);
656
657 ver = QLCNIC_DECODE_VERSION(ver);
658 min_ver = QLCNIC_MIN_FW_VERSION;
659
660 if (ver < min_ver) {
661 dev_err(&adapter->pdev->dev,
662 "firmware version %d.%d.%d unsupported."
663 "Min supported version %d.%d.%d\n",
664 _major(ver), _minor(ver), _build(ver),
665 _major(min_ver), _minor(min_ver), _build(min_ver));
666 return -EINVAL;
667 }
668
669 return 0;
670}
671
672static int
673qlcnic_has_mn(struct qlcnic_adapter *adapter)
674{
675 u32 capability;
676 capability = 0;
677
678 capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
679 if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
680 return 1;
681
682 return 0;
683}
684
685static
686struct uni_table_desc *qlcnic_get_table_desc(const u8 *unirom, int section)
687{
688 u32 i;
689 struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
690 __le32 entries = cpu_to_le32(directory->num_entries);
691
692 for (i = 0; i < entries; i++) {
693
694 __le32 offs = cpu_to_le32(directory->findex) +
695 (i * cpu_to_le32(directory->entry_size));
696 __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));
697
698 if (tab_type == section)
699 return (struct uni_table_desc *) &unirom[offs];
700 }
701
702 return NULL;
703}
704
705#define FILEHEADER_SIZE (14 * 4)
706
707static int
708qlcnic_validate_header(struct qlcnic_adapter *adapter)
709{
710 const u8 *unirom = adapter->fw->data;
711 struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
712 __le32 fw_file_size = adapter->fw->size;
713 __le32 entries;
714 __le32 entry_size;
715 __le32 tab_size;
716
717 if (fw_file_size < FILEHEADER_SIZE)
718 return -EINVAL;
719
720 entries = cpu_to_le32(directory->num_entries);
721 entry_size = cpu_to_le32(directory->entry_size);
722 tab_size = cpu_to_le32(directory->findex) + (entries * entry_size);
723
724 if (fw_file_size < tab_size)
725 return -EINVAL;
726
727 return 0;
728}
729
730static int
731qlcnic_validate_bootld(struct qlcnic_adapter *adapter)
732{
733 struct uni_table_desc *tab_desc;
734 struct uni_data_desc *descr;
735 const u8 *unirom = adapter->fw->data;
736 int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
737 QLCNIC_UNI_BOOTLD_IDX_OFF));
738 __le32 offs;
739 __le32 tab_size;
740 __le32 data_size;
741
742 tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_BOOTLD);
743
744 if (!tab_desc)
745 return -EINVAL;
746
747 tab_size = cpu_to_le32(tab_desc->findex) +
748 (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
749
750 if (adapter->fw->size < tab_size)
751 return -EINVAL;
752
753 offs = cpu_to_le32(tab_desc->findex) +
754 (cpu_to_le32(tab_desc->entry_size) * (idx));
755 descr = (struct uni_data_desc *)&unirom[offs];
756
757 data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
758
759 if (adapter->fw->size < data_size)
760 return -EINVAL;
761
762 return 0;
763}
764
765static int
766qlcnic_validate_fw(struct qlcnic_adapter *adapter)
767{
768 struct uni_table_desc *tab_desc;
769 struct uni_data_desc *descr;
770 const u8 *unirom = adapter->fw->data;
771 int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
772 QLCNIC_UNI_FIRMWARE_IDX_OFF));
773 __le32 offs;
774 __le32 tab_size;
775 __le32 data_size;
776
777 tab_desc = qlcnic_get_table_desc(unirom, QLCNIC_UNI_DIR_SECT_FW);
778
779 if (!tab_desc)
780 return -EINVAL;
781
782 tab_size = cpu_to_le32(tab_desc->findex) +
783 (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
784
785 if (adapter->fw->size < tab_size)
786 return -EINVAL;
787
788 offs = cpu_to_le32(tab_desc->findex) +
789 (cpu_to_le32(tab_desc->entry_size) * (idx));
790 descr = (struct uni_data_desc *)&unirom[offs];
791 data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
792
793 if (adapter->fw->size < data_size)
794 return -EINVAL;
795
796 return 0;
797}
798
799static int
800qlcnic_validate_product_offs(struct qlcnic_adapter *adapter)
801{
802 struct uni_table_desc *ptab_descr;
803 const u8 *unirom = adapter->fw->data;
804 int mn_present = qlcnic_has_mn(adapter);
805 __le32 entries;
806 __le32 entry_size;
807 __le32 tab_size;
808 u32 i;
809
810 ptab_descr = qlcnic_get_table_desc(unirom,
811 QLCNIC_UNI_DIR_SECT_PRODUCT_TBL);
812 if (!ptab_descr)
813 return -EINVAL;
814
815 entries = cpu_to_le32(ptab_descr->num_entries);
816 entry_size = cpu_to_le32(ptab_descr->entry_size);
817 tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size);
818
819 if (adapter->fw->size < tab_size)
820 return -EINVAL;
821
822nomn:
823 for (i = 0; i < entries; i++) {
824
825 __le32 flags, file_chiprev, offs;
826 u8 chiprev = adapter->ahw.revision_id;
827 u32 flagbit;
828
829 offs = cpu_to_le32(ptab_descr->findex) +
830 (i * cpu_to_le32(ptab_descr->entry_size));
831 flags = cpu_to_le32(*((int *)&unirom[offs] +
832 QLCNIC_UNI_FLAGS_OFF));
833 file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
834 QLCNIC_UNI_CHIP_REV_OFF));
835
836 flagbit = mn_present ? 1 : 2;
837
838 if ((chiprev == file_chiprev) &&
839 ((1ULL << flagbit) & flags)) {
840 adapter->file_prd_off = offs;
841 return 0;
842 }
843 }
844 if (mn_present) {
845 mn_present = 0;
846 goto nomn;
847 }
848 return -EINVAL;
849}
850
851static int
852qlcnic_validate_unified_romimage(struct qlcnic_adapter *adapter)
853{
854 if (qlcnic_validate_header(adapter)) {
855 dev_err(&adapter->pdev->dev,
856 "unified image: header validation failed\n");
857 return -EINVAL;
858 }
859
860 if (qlcnic_validate_product_offs(adapter)) {
861 dev_err(&adapter->pdev->dev,
862 "unified image: product validation failed\n");
863 return -EINVAL;
864 }
865
866 if (qlcnic_validate_bootld(adapter)) {
867 dev_err(&adapter->pdev->dev,
868 "unified image: bootld validation failed\n");
869 return -EINVAL;
870 }
871
872 if (qlcnic_validate_fw(adapter)) {
873 dev_err(&adapter->pdev->dev,
874 "unified image: firmware validation failed\n");
875 return -EINVAL;
876 }
877
878 return 0;
879}
880
881static
882struct uni_data_desc *qlcnic_get_data_desc(struct qlcnic_adapter *adapter,
883 u32 section, u32 idx_offset)
884{
885 const u8 *unirom = adapter->fw->data;
886 int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
887 idx_offset));
888 struct uni_table_desc *tab_desc;
889 __le32 offs;
890
891 tab_desc = qlcnic_get_table_desc(unirom, section);
892
893 if (tab_desc == NULL)
894 return NULL;
895
896 offs = cpu_to_le32(tab_desc->findex) +
897 (cpu_to_le32(tab_desc->entry_size) * idx);
898
899 return (struct uni_data_desc *)&unirom[offs];
900}
901
902static u8 *
903qlcnic_get_bootld_offs(struct qlcnic_adapter *adapter)
904{
905 u32 offs = QLCNIC_BOOTLD_START;
906
907 if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
908 offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
909 QLCNIC_UNI_DIR_SECT_BOOTLD,
910 QLCNIC_UNI_BOOTLD_IDX_OFF))->findex);
911
912 return (u8 *)&adapter->fw->data[offs];
913}
914
915static u8 *
916qlcnic_get_fw_offs(struct qlcnic_adapter *adapter)
917{
918 u32 offs = QLCNIC_IMAGE_START;
919
920 if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
921 offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
922 QLCNIC_UNI_DIR_SECT_FW,
923 QLCNIC_UNI_FIRMWARE_IDX_OFF))->findex);
924
925 return (u8 *)&adapter->fw->data[offs];
926}
927
928static __le32
929qlcnic_get_fw_size(struct qlcnic_adapter *adapter)
930{
931 if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
932 return cpu_to_le32((qlcnic_get_data_desc(adapter,
933 QLCNIC_UNI_DIR_SECT_FW,
934 QLCNIC_UNI_FIRMWARE_IDX_OFF))->size);
935 else
936 return cpu_to_le32(
937 *(u32 *)&adapter->fw->data[QLCNIC_FW_SIZE_OFFSET]);
938}
939
940static __le32
941qlcnic_get_fw_version(struct qlcnic_adapter *adapter)
942{
943 struct uni_data_desc *fw_data_desc;
944 const struct firmware *fw = adapter->fw;
945 __le32 major, minor, sub;
946 const u8 *ver_str;
947 int i, ret;
948
949 if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE)
950 return cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET]);
951
952 fw_data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
953 QLCNIC_UNI_FIRMWARE_IDX_OFF);
954 ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
955 cpu_to_le32(fw_data_desc->size) - 17;
956
957 for (i = 0; i < 12; i++) {
958 if (!strncmp(&ver_str[i], "REV=", 4)) {
959 ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
960 &major, &minor, &sub);
961 if (ret != 3)
962 return 0;
963 else
964 return major + (minor << 8) + (sub << 16);
965 }
966 }
967
968 return 0;
969}
970
971static __le32
972qlcnic_get_bios_version(struct qlcnic_adapter *adapter)
973{
974 const struct firmware *fw = adapter->fw;
975 __le32 bios_ver, prd_off = adapter->file_prd_off;
976
977 if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE)
978 return cpu_to_le32(
979 *(u32 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET]);
980
981 bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
982 + QLCNIC_UNI_BIOS_VERSION_OFF));
983
984 return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24);
985}
986
987static void qlcnic_rom_lock_recovery(struct qlcnic_adapter *adapter)
988{
989 if (qlcnic_pcie_sem_lock(adapter, 2, QLCNIC_ROM_LOCK_ID))
990 dev_info(&adapter->pdev->dev, "Resetting rom_lock\n");
991
992 qlcnic_pcie_sem_unlock(adapter, 2);
993}
994
995static int
996qlcnic_check_fw_heartbeat(struct qlcnic_adapter *adapter)
997{
998 u32 heartbeat, ret = -EIO;
999 int retries = QLCNIC_HEARTBEAT_CHECK_RETRY_COUNT;
1000
1001 adapter->heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
1002
1003 do {
1004 msleep(QLCNIC_HEARTBEAT_PERIOD_MSECS);
1005 heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
1006 if (heartbeat != adapter->heartbeat) {
1007 ret = QLCNIC_RCODE_SUCCESS;
1008 break;
1009 }
1010 } while (--retries);
1011
1012 return ret;
1013}
1014
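/* A firmware (re)load is required when the alive counter stops ticking,
 * when a reset has been requested, or when a firmware image is already
 * cached in adapter->fw. */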
1015int
1016qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
1017{
1018 if (qlcnic_check_fw_heartbeat(adapter)) {
1019 qlcnic_rom_lock_recovery(adapter);
1020 return 1;
1021 }
1022
1023 if (adapter->need_fw_reset)
1024 return 1;
1025
1026 if (adapter->fw)
1027 return 1;
1028
1029 return 0;
1030}
1031
1032static const char *fw_name[] = {
1033 QLCNIC_UNIFIED_ROMIMAGE_NAME,
1034 QLCNIC_FLASH_ROMIMAGE_NAME,
1035};
1036
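/* Copy the boot loader and firmware image into adapter memory with
 * qlcnic_pci_mem_write_2M(), either from the cached request_firmware()
 * image or, when adapter->fw is NULL, word by word from the on-board
 * flash. */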
1037int
1038qlcnic_load_firmware(struct qlcnic_adapter *adapter)
1039{
1040 u64 *ptr64;
1041 u32 i, flashaddr, size;
1042 const struct firmware *fw = adapter->fw;
1043 struct pci_dev *pdev = adapter->pdev;
1044
1045 dev_info(&pdev->dev, "loading firmware from %s\n",
1046 fw_name[adapter->fw_type]);
1047
1048 if (fw) {
1049 __le64 data;
1050
1051 size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
1052
1053 ptr64 = (u64 *)qlcnic_get_bootld_offs(adapter);
1054 flashaddr = QLCNIC_BOOTLD_START;
1055
1056 for (i = 0; i < size; i++) {
1057 data = cpu_to_le64(ptr64[i]);
1058
1059 if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data))
1060 return -EIO;
1061
1062 flashaddr += 8;
1063 }
1064
1065 size = (__force u32)qlcnic_get_fw_size(adapter) / 8;
1066
1067 ptr64 = (u64 *)qlcnic_get_fw_offs(adapter);
1068 flashaddr = QLCNIC_IMAGE_START;
1069
1070 for (i = 0; i < size; i++) {
1071 data = cpu_to_le64(ptr64[i]);
1072
1073 if (qlcnic_pci_mem_write_2M(adapter,
1074 flashaddr, data))
1075 return -EIO;
1076
1077 flashaddr += 8;
1078 }
1079
1080 size = (__force u32)qlcnic_get_fw_size(adapter) % 8;
1081 if (size) {
1082 data = cpu_to_le64(ptr64[i]);
1083
1084 if (qlcnic_pci_mem_write_2M(adapter,
1085 flashaddr, data))
1086 return -EIO;
1087 }
1088
1089 } else {
1090 u64 data;
1091 u32 hi, lo;
1092
1093 size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
1094 flashaddr = QLCNIC_BOOTLD_START;
1095
1096 for (i = 0; i < size; i++) {
1097 if (qlcnic_rom_fast_read(adapter,
1098 flashaddr, (int *)&lo) != 0)
1099 return -EIO;
1100 if (qlcnic_rom_fast_read(adapter,
1101 flashaddr + 4, (int *)&hi) != 0)
1102 return -EIO;
1103
1104 data = (((u64)hi << 32) | lo);
1105
1106 if (qlcnic_pci_mem_write_2M(adapter,
1107 flashaddr, data))
1108 return -EIO;
1109
1110 flashaddr += 8;
1111 }
1112 }
1113 msleep(1);
1114
1115 QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020);
1116 QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e);
1117 return 0;
1118}
1119
1120static int
1121qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
1122{
1123 __le32 val;
1124 u32 ver, bios, min_size;
1125 struct pci_dev *pdev = adapter->pdev;
1126 const struct firmware *fw = adapter->fw;
1127 u8 fw_type = adapter->fw_type;
1128
1129 if (fw_type == QLCNIC_UNIFIED_ROMIMAGE) {
1130 if (qlcnic_validate_unified_romimage(adapter))
1131 return -EINVAL;
1132
1133 min_size = QLCNIC_UNI_FW_MIN_SIZE;
1134 } else {
1135 val = cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]);
1136 if ((__force u32)val != QLCNIC_BDINFO_MAGIC)
1137 return -EINVAL;
1138
1139 min_size = QLCNIC_FW_MIN_SIZE;
1140 }
1141
1142 if (fw->size < min_size)
1143 return -EINVAL;
1144
1145 val = qlcnic_get_fw_version(adapter);
1146 ver = QLCNIC_DECODE_VERSION(val);
1147
1148 if (ver < QLCNIC_MIN_FW_VERSION) {
1149 dev_err(&pdev->dev,
1150 "%s: firmware version %d.%d.%d unsupported\n",
1151 fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
1152 return -EINVAL;
1153 }
1154
1155 val = qlcnic_get_bios_version(adapter);
1156 qlcnic_rom_fast_read(adapter, QLCNIC_BIOS_VERSION_OFFSET, (int *)&bios);
1157 if ((__force u32)val != bios) {
1158 dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
1159 fw_name[fw_type]);
1160 return -EINVAL;
1161 }
1162
1163 QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
1164 return 0;
1165}
1166
1167static void
1168qlcnic_get_next_fwtype(struct qlcnic_adapter *adapter)
1169{
1170 u8 fw_type;
1171
1172 switch (adapter->fw_type) {
1173 case QLCNIC_UNKNOWN_ROMIMAGE:
1174 fw_type = QLCNIC_UNIFIED_ROMIMAGE;
1175 break;
1176
1177 case QLCNIC_UNIFIED_ROMIMAGE:
1178 default:
1179 fw_type = QLCNIC_FLASH_ROMIMAGE;
1180 break;
1181 }
1182
1183 adapter->fw_type = fw_type;
1184}
1185
1186
1187
1188void qlcnic_request_firmware(struct qlcnic_adapter *adapter)
1189{
1190 struct pci_dev *pdev = adapter->pdev;
1191 int rc;
1192
1193 adapter->fw_type = QLCNIC_UNKNOWN_ROMIMAGE;
1194
1195next:
1196 qlcnic_get_next_fwtype(adapter);
1197
1198 if (adapter->fw_type == QLCNIC_FLASH_ROMIMAGE) {
1199 adapter->fw = NULL;
1200 } else {
1201 rc = request_firmware(&adapter->fw,
1202 fw_name[adapter->fw_type], &pdev->dev);
1203 if (rc != 0)
1204 goto next;
1205
1206 rc = qlcnic_validate_firmware(adapter);
1207 if (rc != 0) {
1208 release_firmware(adapter->fw);
1209 msleep(1);
1210 goto next;
1211 }
1212 }
1213}
1214
1215
1216void
1217qlcnic_release_firmware(struct qlcnic_adapter *adapter)
1218{
1219 if (adapter->fw)
1220 release_firmware(adapter->fw);
1221 adapter->fw = NULL;
1222}
1223
1224static void
1225qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
1226 struct qlcnic_fw_msg *msg)
1227{
1228 u32 cable_OUI;
1229 u16 cable_len;
1230 u16 link_speed;
1231 u8 link_status, module, duplex, autoneg;
1232 struct net_device *netdev = adapter->netdev;
1233
1234 adapter->has_link_events = 1;
1235
1236 cable_OUI = msg->body[1] & 0xffffffff;
1237 cable_len = (msg->body[1] >> 32) & 0xffff;
1238 link_speed = (msg->body[1] >> 48) & 0xffff;
1239
1240 link_status = msg->body[2] & 0xff;
1241 duplex = (msg->body[2] >> 16) & 0xff;
1242 autoneg = (msg->body[2] >> 24) & 0xff;
1243
1244 module = (msg->body[2] >> 8) & 0xff;
1245 if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
1246 dev_info(&netdev->dev, "unsupported cable: OUI 0x%x, "
1247 "length %d\n", cable_OUI, cable_len);
1248 else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
1249 dev_info(&netdev->dev, "unsupported cable length %d\n",
1250 cable_len);
1251
1252 qlcnic_advert_link_change(adapter, link_status);
1253
1254 if (duplex == LINKEVENT_FULL_DUPLEX)
1255 adapter->link_duplex = DUPLEX_FULL;
1256 else
1257 adapter->link_duplex = DUPLEX_HALF;
1258
1259 adapter->module_type = module;
1260 adapter->link_autoneg = autoneg;
1261 adapter->link_speed = link_speed;
1262}
1263
1264static void
1265qlcnic_handle_fw_message(int desc_cnt, int index,
1266 struct qlcnic_host_sds_ring *sds_ring)
1267{
1268 struct qlcnic_fw_msg msg;
1269 struct status_desc *desc;
1270 int i = 0, opcode;
1271
1272 while (desc_cnt > 0 && i < 8) {
1273 desc = &sds_ring->desc_head[index];
1274 msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
1275 msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
1276
1277 index = get_next_index(index, sds_ring->num_desc);
1278 desc_cnt--;
1279 }
1280
1281 opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);
1282 switch (opcode) {
1283 case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
1284 qlcnic_handle_linkevent(sds_ring->adapter, &msg);
1285 break;
1286 default:
1287 break;
1288 }
1289}
1290
1291static int
1292qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
1293 struct qlcnic_host_rds_ring *rds_ring,
1294 struct qlcnic_rx_buffer *buffer)
1295{
1296 struct sk_buff *skb;
1297 dma_addr_t dma;
1298 struct pci_dev *pdev = adapter->pdev;
1299
1300 skb = dev_alloc_skb(rds_ring->skb_size);
1301 if (!skb) {
1302 adapter->stats.skb_alloc_failure++;
1303 return -ENOMEM;
1304 }
1305
1306 skb_reserve(skb, NET_IP_ALIGN);
1307
1308 dma = pci_map_single(pdev, skb->data,
1309 rds_ring->dma_size, PCI_DMA_FROMDEVICE);
1310
1311 if (pci_dma_mapping_error(pdev, dma)) {
1312 adapter->stats.rx_dma_map_error++;
1313 dev_kfree_skb_any(skb);
1314 return -ENOMEM;
1315 }
1316
1317 buffer->skb = skb;
1318 buffer->dma = dma;
1319
1320 return 0;
1321}
1322
1323static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
1324 struct qlcnic_host_rds_ring *rds_ring, u16 index, u16 cksum)
1325{
1326 struct qlcnic_rx_buffer *buffer;
1327 struct sk_buff *skb;
1328
1329 buffer = &rds_ring->rx_buf_arr[index];
1330
1331 if (unlikely(buffer->skb == NULL)) {
1332 WARN_ON(1);
1333 return NULL;
1334 }
1335
1336 pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
1337 PCI_DMA_FROMDEVICE);
1338
1339 skb = buffer->skb;
1340
1341 if (likely(adapter->rx_csum && (cksum == STATUS_CKSUM_OK ||
1342 cksum == STATUS_CKSUM_LOOP))) {
1343 adapter->stats.csummed++;
1344 skb->ip_summed = CHECKSUM_UNNECESSARY;
1345 } else {
1346 skb_checksum_none_assert(skb);
1347 }
1348
1349 skb->dev = adapter->netdev;
1350
1351 buffer->skb = NULL;
1352
1353 return skb;
1354}
1355
1356static int
1357qlcnic_check_rx_tagging(struct qlcnic_adapter *adapter, struct sk_buff *skb,
1358 u16 *vlan_tag)
1359{
1360 struct ethhdr *eth_hdr;
1361
1362 if (!__vlan_get_tag(skb, vlan_tag)) {
1363 eth_hdr = (struct ethhdr *) skb->data;
1364 memmove(skb->data + VLAN_HLEN, eth_hdr, ETH_ALEN * 2);
1365 skb_pull(skb, VLAN_HLEN);
1366 }
1367 if (!adapter->pvid)
1368 return 0;
1369
1370 if (*vlan_tag == adapter->pvid) {
1371 /* Outer vlan tag. Packet should follow non-vlan path */
1372 *vlan_tag = 0xffff;
1373 return 0;
1374 }
1375 if (adapter->flags & QLCNIC_TAGGING_ENABLED)
1376 return 0;
1377
1378 return -EINVAL;
1379}
1380
1381static struct qlcnic_rx_buffer *
1382qlcnic_process_rcv(struct qlcnic_adapter *adapter,
1383 struct qlcnic_host_sds_ring *sds_ring,
1384 int ring, u64 sts_data0)
1385{
1386 struct net_device *netdev = adapter->netdev;
1387 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1388 struct qlcnic_rx_buffer *buffer;
1389 struct sk_buff *skb;
1390 struct qlcnic_host_rds_ring *rds_ring;
1391 int index, length, cksum, pkt_offset;
1392 u16 vid = 0xffff;
1393
1394 if (unlikely(ring >= adapter->max_rds_rings))
1395 return NULL;
1396
1397 rds_ring = &recv_ctx->rds_rings[ring];
1398
1399 index = qlcnic_get_sts_refhandle(sts_data0);
1400 if (unlikely(index >= rds_ring->num_desc))
1401 return NULL;
1402
1403 buffer = &rds_ring->rx_buf_arr[index];
1404
1405 length = qlcnic_get_sts_totallength(sts_data0);
1406 cksum = qlcnic_get_sts_status(sts_data0);
1407 pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
1408
1409 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
1410 if (!skb)
1411 return buffer;
1412
1413 if (length > rds_ring->skb_size)
1414 skb_put(skb, rds_ring->skb_size);
1415 else
1416 skb_put(skb, length);
1417
1418 if (pkt_offset)
1419 skb_pull(skb, pkt_offset);
1420
1421 if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
1422 adapter->stats.rxdropped++;
1423 dev_kfree_skb(skb);
1424 return buffer;
1425 }
1426
1427 skb->protocol = eth_type_trans(skb, netdev);
1428
1429 if ((vid != 0xffff) && adapter->vlgrp)
1430 vlan_gro_receive(&sds_ring->napi, adapter->vlgrp, vid, skb);
1431 else
1432 napi_gro_receive(&sds_ring->napi, skb);
1433
1434 adapter->stats.rx_pkts++;
1435 adapter->stats.rxbytes += length;
1436
1437 return buffer;
1438}
1439
1440#define QLC_TCP_HDR_SIZE 20
1441#define QLC_TCP_TS_OPTION_SIZE 12
1442#define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
1443
1444static struct qlcnic_rx_buffer *
1445qlcnic_process_lro(struct qlcnic_adapter *adapter,
1446 struct qlcnic_host_sds_ring *sds_ring,
1447 int ring, u64 sts_data0, u64 sts_data1)
1448{
1449 struct net_device *netdev = adapter->netdev;
1450 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1451 struct qlcnic_rx_buffer *buffer;
1452 struct sk_buff *skb;
1453 struct qlcnic_host_rds_ring *rds_ring;
1454 struct iphdr *iph;
1455 struct tcphdr *th;
1456 bool push, timestamp;
1457 int l2_hdr_offset, l4_hdr_offset;
1458 int index;
1459 u16 lro_length, length, data_offset;
1460 u32 seq_number;
1461 u16 vid = 0xffff;
1462
1463 if (unlikely(ring > adapter->max_rds_rings))
1464 return NULL;
1465
1466 rds_ring = &recv_ctx->rds_rings[ring];
1467
1468 index = qlcnic_get_lro_sts_refhandle(sts_data0);
1469 if (unlikely(index > rds_ring->num_desc))
1470 return NULL;
1471
1472 buffer = &rds_ring->rx_buf_arr[index];
1473
1474 timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
1475 lro_length = qlcnic_get_lro_sts_length(sts_data0);
1476 l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
1477 l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
1478 push = qlcnic_get_lro_sts_push_flag(sts_data0);
1479 seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);
1480
1481 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
1482 if (!skb)
1483 return buffer;
1484
1485 if (timestamp)
1486 data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
1487 else
1488 data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;
1489
1490 skb_put(skb, lro_length + data_offset);
1491
1492 skb_pull(skb, l2_hdr_offset);
1493
1494 if (unlikely(qlcnic_check_rx_tagging(adapter, skb, &vid))) {
1495 adapter->stats.rxdropped++;
1496 dev_kfree_skb(skb);
1497 return buffer;
1498 }
1499
1500 skb->protocol = eth_type_trans(skb, netdev);
1501
1502 iph = (struct iphdr *)skb->data;
1503 th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
1504
1505 length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
1506 iph->tot_len = htons(length);
1507 iph->check = 0;
1508 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
1509 th->psh = push;
1510 th->seq = htonl(seq_number);
1511
1512 length = skb->len;
1513
1514 if ((vid != 0xffff) && adapter->vlgrp)
1515 vlan_hwaccel_receive_skb(skb, adapter->vlgrp, vid);
1516 else
1517 netif_receive_skb(skb);
1518
1519 adapter->stats.lro_pkts++;
1520 adapter->stats.lrobytes += length;
1521
1522 return buffer;
1523}
1524
1525int
1526qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
1527{
1528 struct qlcnic_adapter *adapter = sds_ring->adapter;
1529 struct list_head *cur;
1530 struct status_desc *desc;
1531 struct qlcnic_rx_buffer *rxbuf;
1532 u64 sts_data0, sts_data1;
1533
1534 int count = 0;
1535 int opcode, ring, desc_cnt;
1536 u32 consumer = sds_ring->consumer;
1537
1538 while (count < max) {
1539 desc = &sds_ring->desc_head[consumer];
1540 sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
1541
1542 if (!(sts_data0 & STATUS_OWNER_HOST))
1543 break;
1544
1545 desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
1546 opcode = qlcnic_get_sts_opcode(sts_data0);
1547
1548 switch (opcode) {
1549 case QLCNIC_RXPKT_DESC:
1550 case QLCNIC_OLD_RXPKT_DESC:
1551 case QLCNIC_SYN_OFFLOAD:
1552 ring = qlcnic_get_sts_type(sts_data0);
1553 rxbuf = qlcnic_process_rcv(adapter, sds_ring,
1554 ring, sts_data0);
1555 break;
1556 case QLCNIC_LRO_DESC:
1557 ring = qlcnic_get_lro_sts_type(sts_data0);
1558 sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
1559 rxbuf = qlcnic_process_lro(adapter, sds_ring,
1560 ring, sts_data0, sts_data1);
1561 break;
1562 case QLCNIC_RESPONSE_DESC:
1563 qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
1564 default:
1565 goto skip;
1566 }
1567
1568 WARN_ON(desc_cnt > 1);
1569
1570 if (likely(rxbuf))
1571 list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
1572 else
1573 adapter->stats.null_rxbuf++;
1574
1575skip:
1576 for (; desc_cnt > 0; desc_cnt--) {
1577 desc = &sds_ring->desc_head[consumer];
1578 desc->status_desc_data[0] =
1579 cpu_to_le64(STATUS_OWNER_PHANTOM);
1580 consumer = get_next_index(consumer, sds_ring->num_desc);
1581 }
1582 count++;
1583 }
1584
1585 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1586 struct qlcnic_host_rds_ring *rds_ring =
1587 &adapter->recv_ctx.rds_rings[ring];
1588
1589 if (!list_empty(&sds_ring->free_list[ring])) {
1590 list_for_each(cur, &sds_ring->free_list[ring]) {
1591 rxbuf = list_entry(cur,
1592 struct qlcnic_rx_buffer, list);
1593 qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
1594 }
1595 spin_lock(&rds_ring->lock);
1596 list_splice_tail_init(&sds_ring->free_list[ring],
1597 &rds_ring->free_list);
1598 spin_unlock(&rds_ring->lock);
1599 }
1600
1601 qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
1602 }
1603
1604 if (count) {
1605 sds_ring->consumer = consumer;
1606 writel(consumer, sds_ring->crb_sts_consumer);
1607 }
1608
1609 return count;
1610}
1611
1612void
1613qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
1614 struct qlcnic_host_rds_ring *rds_ring)
1615{
1616 struct rcv_desc *pdesc;
1617 struct qlcnic_rx_buffer *buffer;
1618 int producer, count = 0;
1619 struct list_head *head;
1620
1621 producer = rds_ring->producer;
1622
1623 head = &rds_ring->free_list;
1624 while (!list_empty(head)) {
1625
1626 buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
1627
1628 if (!buffer->skb) {
1629 if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
1630 break;
1631 }
1632
1633 count++;
1634 list_del(&buffer->list);
1635
1636 /* make a rcv descriptor */
1637 pdesc = &rds_ring->desc_head[producer];
1638 pdesc->addr_buffer = cpu_to_le64(buffer->dma);
1639 pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
1640 pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
1641
1642 producer = get_next_index(producer, rds_ring->num_desc);
1643 }
1644
1645 if (count) {
1646 rds_ring->producer = producer;
1647 writel((producer-1) & (rds_ring->num_desc-1),
1648 rds_ring->crb_rcv_producer);
1649 }
1650}
1651
1652static void
1653qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
1654 struct qlcnic_host_rds_ring *rds_ring)
1655{
1656 struct rcv_desc *pdesc;
1657 struct qlcnic_rx_buffer *buffer;
1658 int producer, count = 0;
1659 struct list_head *head;
1660
1661 if (!spin_trylock(&rds_ring->lock))
1662 return;
1663
1664 producer = rds_ring->producer;
1665
1666 head = &rds_ring->free_list;
1667 while (!list_empty(head)) {
1668
1669 buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
1670
1671 if (!buffer->skb) {
1672 if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
1673 break;
1674 }
1675
1676 count++;
1677 list_del(&buffer->list);
1678
1679 /* make a rcv descriptor */
1680 pdesc = &rds_ring->desc_head[producer];
1681 pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
1682 pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
1683 pdesc->addr_buffer = cpu_to_le64(buffer->dma);
1684
1685 producer = get_next_index(producer, rds_ring->num_desc);
1686 }
1687
1688 if (count) {
1689 rds_ring->producer = producer;
1690 writel((producer - 1) & (rds_ring->num_desc - 1),
1691 rds_ring->crb_rcv_producer);
1692 }
1693 spin_unlock(&rds_ring->lock);
1694}
1695
1696static void dump_skb(struct sk_buff *skb)
1697{
1698 int i;
1699 unsigned char *data = skb->data;
1700
1701 for (i = 0; i < skb->len; i++) {
1702 printk("%02x ", data[i]);
1703 if ((i & 0x0f) == 8)
1704 printk("\n");
1705 }
1706}
1707
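/* Diagnostic receive path used by the loopback self-test: the payload is
 * checked with qlcnic_check_loopback_buff() and only counters are updated;
 * the skb is always freed. */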
1708static struct qlcnic_rx_buffer *
1709qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
1710 struct qlcnic_host_sds_ring *sds_ring,
1711 int ring, u64 sts_data0)
1712{
1713 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1714 struct qlcnic_rx_buffer *buffer;
1715 struct sk_buff *skb;
1716 struct qlcnic_host_rds_ring *rds_ring;
1717 int index, length, cksum, pkt_offset;
1718
1719 if (unlikely(ring >= adapter->max_rds_rings))
1720 return NULL;
1721
1722 rds_ring = &recv_ctx->rds_rings[ring];
1723
1724 index = qlcnic_get_sts_refhandle(sts_data0);
1725 if (unlikely(index >= rds_ring->num_desc))
1726 return NULL;
1727
1728 buffer = &rds_ring->rx_buf_arr[index];
1729
1730 length = qlcnic_get_sts_totallength(sts_data0);
1731 cksum = qlcnic_get_sts_status(sts_data0);
1732 pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
1733
1734 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
1735 if (!skb)
1736 return buffer;
1737
1738 if (length > rds_ring->skb_size)
1739 skb_put(skb, rds_ring->skb_size);
1740 else
1741 skb_put(skb, length);
1742
1743 if (pkt_offset)
1744 skb_pull(skb, pkt_offset);
1745
1746 if (!qlcnic_check_loopback_buff(skb->data))
1747 adapter->diag_cnt++;
1748 else
1749 dump_skb(skb);
1750
1751 dev_kfree_skb_any(skb);
1752 adapter->stats.rx_pkts++;
1753 adapter->stats.rxbytes += length;
1754
1755 return buffer;
1756}
1757
1758void
1759qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
1760{
1761 struct qlcnic_adapter *adapter = sds_ring->adapter;
1762 struct status_desc *desc;
1763 struct qlcnic_rx_buffer *rxbuf;
1764 u64 sts_data0;
1765
1766 int opcode, ring, desc_cnt;
1767 u32 consumer = sds_ring->consumer;
1768
1769 desc = &sds_ring->desc_head[consumer];
1770 sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
1771
1772 if (!(sts_data0 & STATUS_OWNER_HOST))
1773 return;
1774
1775 desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
1776 opcode = qlcnic_get_sts_opcode(sts_data0);
1777
1778 ring = qlcnic_get_sts_type(sts_data0);
1779 rxbuf = qlcnic_process_rcv_diag(adapter, sds_ring,
1780 ring, sts_data0);
1781
1782 desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
1783 consumer = get_next_index(consumer, sds_ring->num_desc);
1784
1785 sds_ring->consumer = consumer;
1786 writel(consumer, sds_ring->crb_sts_consumer);
1787}
1788
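/* Assemble a MAC address from two 32-bit CRB registers, realigning the
 * words first for the alternate-MAC layout, and store it into mac[]
 * most significant byte first. */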
1789void
1790qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2,
1791 u8 alt_mac, u8 *mac)
1792{
1793 u32 mac_low, mac_high;
1794 int i;
1795
1796 mac_low = QLCRD32(adapter, off1);
1797 mac_high = QLCRD32(adapter, off2);
1798
1799 if (alt_mac) {
1800 mac_low |= (mac_low >> 16) | (mac_high << 16);
1801 mac_high >>= 16;
1802 }
1803
1804 for (i = 0; i < 2; i++)
1805 mac[i] = (u8)(mac_high >> ((1 - i) * 8));
1806 for (i = 2; i < 6; i++)
1807 mac[i] = (u8)(mac_low >> ((5 - i) * 8));
1808}