/* drivers/net/qlcnic/qlcnic_main.c (net-next-2.6: "qlcnic: add interrupt diagnostic test") */
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include <linux/vmalloc.h>
26#include <linux/interrupt.h>
27
28#include "qlcnic.h"
29
30#include <linux/dma-mapping.h>
31#include <linux/if_vlan.h>
32#include <net/ip.h>
33#include <linux/ipv6.h>
34#include <linux/inetdevice.h>
35#include <linux/sysfs.h>
36
37MODULE_DESCRIPTION("QLogic 10 GbE Converged Ethernet Driver");
38MODULE_LICENSE("GPL");
39MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
40MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
41
42char qlcnic_driver_name[] = "qlcnic";
43static const char qlcnic_driver_string[] = "QLogic Converged Ethernet Driver v"
44 QLCNIC_LINUX_VERSIONID;
45
46static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
47
48/* Default to restricted 1G auto-neg mode */
49static int wol_port_mode = 5;
50
51static int use_msi = 1;
52module_param(use_msi, int, 0644);
 53MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
54
55static int use_msi_x = 1;
56module_param(use_msi_x, int, 0644);
 57MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
58
59static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
60module_param(auto_fw_reset, int, 0644);
 61MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
62
63static int __devinit qlcnic_probe(struct pci_dev *pdev,
64 const struct pci_device_id *ent);
65static void __devexit qlcnic_remove(struct pci_dev *pdev);
66static int qlcnic_open(struct net_device *netdev);
67static int qlcnic_close(struct net_device *netdev);
68static netdev_tx_t qlcnic_xmit_frame(struct sk_buff *,
69 struct net_device *);
70static void qlcnic_tx_timeout(struct net_device *netdev);
71static void qlcnic_tx_timeout_task(struct work_struct *work);
72static void qlcnic_attach_work(struct work_struct *work);
73static void qlcnic_fwinit_work(struct work_struct *work);
74static void qlcnic_fw_poll_work(struct work_struct *work);
75static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
76 work_func_t func, int delay);
77static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
78static int qlcnic_poll(struct napi_struct *napi, int budget);
79#ifdef CONFIG_NET_POLL_CONTROLLER
80static void qlcnic_poll_controller(struct net_device *netdev);
81#endif
82
83static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
84static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
85static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
86static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
87
88static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter);
89static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
90
 91static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
92static irqreturn_t qlcnic_intr(int irq, void *data);
93static irqreturn_t qlcnic_msi_intr(int irq, void *data);
94static irqreturn_t qlcnic_msix_intr(int irq, void *data);
95
96static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
97static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
98
99/* PCI Device ID Table */
100#define ENTRY(device) \
101 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
102 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
103
104#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
105
 106static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
107 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
108 {0,}
109};
110
111MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
112
113
114void
115qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
116 struct qlcnic_host_tx_ring *tx_ring)
117{
118 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
119
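	/*
	 * Stop the queue when free descriptors fall to the threshold; the
	 * barrier orders the stop against the Tx completion path, which may
	 * wake the queue again.
	 */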
120 if (qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH) {
121 netif_stop_queue(adapter->netdev);
122 smp_mb();
123 }
124}
125
126static const u32 msi_tgt_status[8] = {
127 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
128 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
129 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
130 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
131};
132
133static const
134struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
135
136static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
137{
138 writel(0, sds_ring->crb_intr_mask);
139}
140
141static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
142{
143 struct qlcnic_adapter *adapter = sds_ring->adapter;
144
145 writel(0x1, sds_ring->crb_intr_mask);
146
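	/* For legacy INTx, also unmask the interrupt in the target mask register. */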
147 if (!QLCNIC_IS_MSI_FAMILY(adapter))
148 writel(0xfbff, adapter->tgt_mask_reg);
149}
150
151static int
152qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
153{
154 int size = sizeof(struct qlcnic_host_sds_ring) * count;
155
156 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
157
158 return (recv_ctx->sds_rings == NULL);
159}
160
161static void
162qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
163{
164 if (recv_ctx->sds_rings != NULL)
165 kfree(recv_ctx->sds_rings);
166
167 recv_ctx->sds_rings = NULL;
168}
169
170static int
171qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
172{
173 int ring;
174 struct qlcnic_host_sds_ring *sds_ring;
175 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
176
177 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
178 return -ENOMEM;
179
180 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
181 sds_ring = &recv_ctx->sds_rings[ring];
182 netif_napi_add(netdev, &sds_ring->napi,
183 qlcnic_poll, QLCNIC_NETDEV_WEIGHT);
184 }
185
186 return 0;
187}
188
189static void
190qlcnic_napi_del(struct qlcnic_adapter *adapter)
191{
192 int ring;
193 struct qlcnic_host_sds_ring *sds_ring;
194 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
195
196 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
197 sds_ring = &recv_ctx->sds_rings[ring];
198 netif_napi_del(&sds_ring->napi);
199 }
200
201 qlcnic_free_sds_rings(&adapter->recv_ctx);
202}
203
204static void
205qlcnic_napi_enable(struct qlcnic_adapter *adapter)
206{
207 int ring;
208 struct qlcnic_host_sds_ring *sds_ring;
209 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
210
211 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
212 sds_ring = &recv_ctx->sds_rings[ring];
213 napi_enable(&sds_ring->napi);
214 qlcnic_enable_int(sds_ring);
215 }
216}
217
218static void
219qlcnic_napi_disable(struct qlcnic_adapter *adapter)
220{
221 int ring;
222 struct qlcnic_host_sds_ring *sds_ring;
223 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
224
225 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
226 sds_ring = &recv_ctx->sds_rings[ring];
227 qlcnic_disable_int(sds_ring);
228 napi_synchronize(&sds_ring->napi);
229 napi_disable(&sds_ring->napi);
230 }
231}
232
233static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
234{
235 memset(&adapter->stats, 0, sizeof(adapter->stats));
236 return;
237}
238
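/*
 * Start with a 39-bit DMA mask; qlcnic_update_dma_mask() may widen it later
 * if the firmware reports a larger addressable range via CRB_DMA_SHIFT.
 */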
239static int qlcnic_set_dma_mask(struct qlcnic_adapter *adapter)
240{
241 struct pci_dev *pdev = adapter->pdev;
242 u64 mask, cmask;
243
244 adapter->pci_using_dac = 0;
245
246 mask = DMA_BIT_MASK(39);
247 cmask = mask;
248
249 if (pci_set_dma_mask(pdev, mask) == 0 &&
250 pci_set_consistent_dma_mask(pdev, cmask) == 0) {
251 adapter->pci_using_dac = 1;
252 return 0;
253 }
254
255 return -EIO;
256}
257
258/* Update addressable range if firmware supports it */
259static int
260qlcnic_update_dma_mask(struct qlcnic_adapter *adapter)
261{
262 int change, shift, err;
263 u64 mask, old_mask, old_cmask;
264 struct pci_dev *pdev = adapter->pdev;
265
266 change = 0;
267
268 shift = QLCRD32(adapter, CRB_DMA_SHIFT);
269 if (shift > 32)
270 return 0;
271
272 if (shift > 9)
273 change = 1;
274
275 if (change) {
276 old_mask = pdev->dma_mask;
277 old_cmask = pdev->dev.coherent_dma_mask;
278
279 mask = DMA_BIT_MASK(32+shift);
280
281 err = pci_set_dma_mask(pdev, mask);
282 if (err)
283 goto err_out;
284
285 err = pci_set_consistent_dma_mask(pdev, mask);
286 if (err)
287 goto err_out;
288 dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift);
289 }
290
291 return 0;
292
293err_out:
294 pci_set_dma_mask(pdev, old_mask);
295 pci_set_consistent_dma_mask(pdev, old_cmask);
296 return err;
297}
298
299static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
300{
301 u32 val, data;
302
303 val = adapter->ahw.board_type;
304 if ((val == QLCNIC_BRDTYPE_P3_HMEZ) ||
305 (val == QLCNIC_BRDTYPE_P3_XG_LOM)) {
306 if (port_mode == QLCNIC_PORT_MODE_802_3_AP) {
307 data = QLCNIC_PORT_MODE_802_3_AP;
308 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
309 } else if (port_mode == QLCNIC_PORT_MODE_XG) {
310 data = QLCNIC_PORT_MODE_XG;
311 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
312 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_1G) {
313 data = QLCNIC_PORT_MODE_AUTO_NEG_1G;
314 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
315 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_XG) {
316 data = QLCNIC_PORT_MODE_AUTO_NEG_XG;
317 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
318 } else {
319 data = QLCNIC_PORT_MODE_AUTO_NEG;
320 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
321 }
322
323 if ((wol_port_mode != QLCNIC_PORT_MODE_802_3_AP) &&
324 (wol_port_mode != QLCNIC_PORT_MODE_XG) &&
325 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_1G) &&
326 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_XG)) {
327 wol_port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
328 }
329 QLCWR32(adapter, QLCNIC_WOL_PORT_MODE, wol_port_mode);
330 }
331}
332
333static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
334{
335 u32 control;
336 int pos;
337
338 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
339 if (pos) {
340 pci_read_config_dword(pdev, pos, &control);
341 if (enable)
342 control |= PCI_MSIX_FLAGS_ENABLE;
343 else
344 control = 0;
345 pci_write_config_dword(pdev, pos, control);
346 }
347}
348
349static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
350{
351 int i;
352
353 for (i = 0; i < count; i++)
354 adapter->msix_entries[i].entry = i;
355}
356
357static int
358qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
359{
360 int i;
361 unsigned char *p;
362 u64 mac_addr;
363 struct net_device *netdev = adapter->netdev;
364 struct pci_dev *pdev = adapter->pdev;
365
366 if (qlcnic_get_mac_addr(adapter, &mac_addr) != 0)
367 return -EIO;
368
369 p = (unsigned char *)&mac_addr;
370 for (i = 0; i < 6; i++)
371 netdev->dev_addr[i] = *(p + 5 - i);
372
373 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
374 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
375
376 /* set station address */
377
378 if (!is_valid_ether_addr(netdev->perm_addr))
379 dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
380 netdev->dev_addr);
381
382 return 0;
383}
384
385static int qlcnic_set_mac(struct net_device *netdev, void *p)
386{
387 struct qlcnic_adapter *adapter = netdev_priv(netdev);
388 struct sockaddr *addr = p;
389
390 if (!is_valid_ether_addr(addr->sa_data))
391 return -EINVAL;
392
393 if (netif_running(netdev)) {
394 netif_device_detach(netdev);
395 qlcnic_napi_disable(adapter);
396 }
397
398 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
399 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
400 qlcnic_set_multi(adapter->netdev);
401
402 if (netif_running(netdev)) {
403 netif_device_attach(netdev);
404 qlcnic_napi_enable(adapter);
405 }
406 return 0;
407}
408
409static const struct net_device_ops qlcnic_netdev_ops = {
410 .ndo_open = qlcnic_open,
411 .ndo_stop = qlcnic_close,
412 .ndo_start_xmit = qlcnic_xmit_frame,
413 .ndo_get_stats = qlcnic_get_stats,
414 .ndo_validate_addr = eth_validate_addr,
415 .ndo_set_multicast_list = qlcnic_set_multi,
416 .ndo_set_mac_address = qlcnic_set_mac,
417 .ndo_change_mtu = qlcnic_change_mtu,
418 .ndo_tx_timeout = qlcnic_tx_timeout,
419#ifdef CONFIG_NET_POLL_CONTROLLER
420 .ndo_poll_controller = qlcnic_poll_controller,
421#endif
422};
423
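/*
 * Interrupt setup: try MSI-X first (multiple SDS rings when RSS is
 * supported), then fall back to MSI, and finally to legacy INTx.
 */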
424static void
425qlcnic_setup_intr(struct qlcnic_adapter *adapter)
426{
427 const struct qlcnic_legacy_intr_set *legacy_intrp;
428 struct pci_dev *pdev = adapter->pdev;
429 int err, num_msix;
430
431 if (adapter->rss_supported) {
432 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
433 MSIX_ENTRIES_PER_ADAPTER : 2;
434 } else
435 num_msix = 1;
436
437 adapter->max_sds_rings = 1;
438
439 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
440
441 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
442
443 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
444 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
445 legacy_intrp->tgt_status_reg);
446 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
447 legacy_intrp->tgt_mask_reg);
448 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
449
450 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
451 ISR_INT_STATE_REG);
452
453 qlcnic_set_msix_bit(pdev, 0);
454
455 if (adapter->msix_supported) {
456
457 qlcnic_init_msix_entries(adapter, num_msix);
458 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
459 if (err == 0) {
460 adapter->flags |= QLCNIC_MSIX_ENABLED;
461 qlcnic_set_msix_bit(pdev, 1);
462
463 if (adapter->rss_supported)
464 adapter->max_sds_rings = num_msix;
465
466 dev_info(&pdev->dev, "using msi-x interrupts\n");
467 return;
468 }
469
470 if (err > 0)
471 pci_disable_msix(pdev);
472
473 /* fall through for msi */
474 }
475
476 if (use_msi && !pci_enable_msi(pdev)) {
477 adapter->flags |= QLCNIC_MSI_ENABLED;
478 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
479 msi_tgt_status[adapter->ahw.pci_func]);
480 dev_info(&pdev->dev, "using msi interrupts\n");
481 adapter->msix_entries[0].vector = pdev->irq;
482 return;
483 }
484
485 dev_info(&pdev->dev, "using legacy interrupts\n");
486 adapter->msix_entries[0].vector = pdev->irq;
487}
488
489static void
490qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
491{
492 if (adapter->flags & QLCNIC_MSIX_ENABLED)
493 pci_disable_msix(adapter->pdev);
494 if (adapter->flags & QLCNIC_MSI_ENABLED)
495 pci_disable_msi(adapter->pdev);
496}
497
498static void
499qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
500{
501 if (adapter->ahw.pci_base0 != NULL)
502 iounmap(adapter->ahw.pci_base0);
503}
504
505static int
506qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
507{
508 void __iomem *mem_ptr0 = NULL;
509 resource_size_t mem_base;
510 unsigned long mem_len, pci_len0 = 0;
511
512 struct pci_dev *pdev = adapter->pdev;
513 int pci_func = adapter->ahw.pci_func;
514
515 /*
516 * Set the CRB window to invalid. If any register in window 0 is
517 * accessed it should set the window to 0 and then reset it to 1.
518 */
519 adapter->ahw.crb_win = -1;
520 adapter->ahw.ocm_win = -1;
521
522 /* remap phys address */
523 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
524 mem_len = pci_resource_len(pdev, 0);
525
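	/* Only the 2MB memory BAR layout is supported; any other size is rejected. */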
526 if (mem_len == QLCNIC_PCI_2MB_SIZE) {
527
528 mem_ptr0 = pci_ioremap_bar(pdev, 0);
529 if (mem_ptr0 == NULL) {
530 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
531 return -EIO;
532 }
533 pci_len0 = mem_len;
534 } else {
535 return -EIO;
536 }
537
538 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
539
540 adapter->ahw.pci_base0 = mem_ptr0;
541 adapter->ahw.pci_len0 = pci_len0;
542
543 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
544 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
545
546 return 0;
547}
548
549static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
550{
551 struct pci_dev *pdev = adapter->pdev;
552 int i, found = 0;
553
554 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
555 if (qlcnic_boards[i].vendor == pdev->vendor &&
556 qlcnic_boards[i].device == pdev->device &&
557 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
558 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
559 strcpy(name, qlcnic_boards[i].short_name);
560 found = 1;
561 break;
562 }
563
564 }
565
566 if (!found)
 567		strcpy(name, "Unknown");
568}
569
570static void
571qlcnic_check_options(struct qlcnic_adapter *adapter)
572{
573 u32 fw_major, fw_minor, fw_build;
574 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
575 char serial_num[32];
576 int i, offset, val;
577 int *ptr32;
578 struct pci_dev *pdev = adapter->pdev;
579
580 adapter->driver_mismatch = 0;
581
582 ptr32 = (int *)&serial_num;
583 offset = QLCNIC_FW_SERIAL_NUM_OFFSET;
584 for (i = 0; i < 8; i++) {
585 if (qlcnic_rom_fast_read(adapter, offset, &val) == -1) {
586 dev_err(&pdev->dev, "error reading board info\n");
587 adapter->driver_mismatch = 1;
588 return;
589 }
590 ptr32[i] = cpu_to_le32(val);
591 offset += sizeof(u32);
592 }
593
594 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
595 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
596 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
597
598 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
599
600 if (adapter->portnum == 0) {
601 get_brd_name(adapter, brd_name);
602
603 pr_info("%s: %s Board Chip rev 0x%x\n",
604 module_name(THIS_MODULE),
605 brd_name, adapter->ahw.revision_id);
606 }
607
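	/* Firmware older than 3.4.216 is unsupported; flag a driver mismatch. */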
608 if (adapter->fw_version < QLCNIC_VERSION_CODE(3, 4, 216)) {
609 adapter->driver_mismatch = 1;
610 dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n",
611 fw_major, fw_minor, fw_build);
612 return;
613 }
614
615 i = QLCRD32(adapter, QLCNIC_SRE_MISC);
616 adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;
617
618 dev_info(&pdev->dev, "firmware v%d.%d.%d [%s]\n",
619 fw_major, fw_minor, fw_build,
620 adapter->ahw.cut_through ? "cut-through" : "legacy");
621
622 if (adapter->fw_version >= QLCNIC_VERSION_CODE(4, 0, 222))
623 adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
624
625 adapter->flags &= ~QLCNIC_LRO_ENABLED;
626
627 if (adapter->ahw.port_type == QLCNIC_XGBE) {
628 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
629 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
630 } else if (adapter->ahw.port_type == QLCNIC_GBE) {
631 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
632 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
633 }
634
635 adapter->msix_supported = !!use_msi_x;
636 adapter->rss_supported = !!use_msi_x;
637
638 adapter->num_txd = MAX_CMD_DESCRIPTORS;
639
640 adapter->num_lro_rxd = 0;
641 adapter->max_rds_rings = 2;
642}
643
644static int
645qlcnic_start_firmware(struct qlcnic_adapter *adapter)
646{
647 int val, err, first_boot;
648
649 err = qlcnic_set_dma_mask(adapter);
650 if (err)
651 return err;
652
653 if (!qlcnic_can_start_firmware(adapter))
654 goto wait_init;
655
656 first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
657 if (first_boot == 0x55555555)
658 /* This is the first boot after power up */
659 QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
660
661 qlcnic_request_firmware(adapter);
662
663 err = qlcnic_need_fw_reset(adapter);
664 if (err < 0)
665 goto err_out;
666 if (err == 0)
667 goto wait_init;
668
669 if (first_boot != 0x55555555) {
670 QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
671 qlcnic_pinit_from_rom(adapter);
672 msleep(1);
673 }
674
675 QLCWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
676 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
677 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
678
679 qlcnic_set_port_mode(adapter);
680
681 err = qlcnic_load_firmware(adapter);
682 if (err)
683 goto err_out;
684
685 qlcnic_release_firmware(adapter);
686
687 val = (_QLCNIC_LINUX_MAJOR << 16)
688 | ((_QLCNIC_LINUX_MINOR << 8))
689 | (_QLCNIC_LINUX_SUBVERSION);
690 QLCWR32(adapter, CRB_DRIVER_VERSION, val);
691
692wait_init:
693 /* Handshake with the card before we register the devices. */
694 err = qlcnic_phantom_init(adapter);
695 if (err)
696 goto err_out;
697
698 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
699
700 qlcnic_update_dma_mask(adapter);
701
702 qlcnic_check_options(adapter);
703
704 adapter->need_fw_reset = 0;
705
706 /* fall through and release firmware */
707
708err_out:
709 qlcnic_release_firmware(adapter);
710 return err;
711}
712
713static int
714qlcnic_request_irq(struct qlcnic_adapter *adapter)
715{
716 irq_handler_t handler;
717 struct qlcnic_host_sds_ring *sds_ring;
718 int err, ring;
719
720 unsigned long flags = 0;
721 struct net_device *netdev = adapter->netdev;
722 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
723
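	/*
	 * The interrupt diagnostic test installs a temporary handler that only
	 * bumps diag_cnt instead of scheduling NAPI processing.
	 */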
724 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
725 handler = qlcnic_tmp_intr;
726 if (!QLCNIC_IS_MSI_FAMILY(adapter))
727 flags |= IRQF_SHARED;
728
729 } else {
730 if (adapter->flags & QLCNIC_MSIX_ENABLED)
731 handler = qlcnic_msix_intr;
732 else if (adapter->flags & QLCNIC_MSI_ENABLED)
733 handler = qlcnic_msi_intr;
734 else {
735 flags |= IRQF_SHARED;
736 handler = qlcnic_intr;
737 }
738 }
739 adapter->irq = netdev->irq;
740
741 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
742 sds_ring = &recv_ctx->sds_rings[ring];
743 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
744 err = request_irq(sds_ring->irq, handler,
745 flags, sds_ring->name, sds_ring);
746 if (err)
747 return err;
748 }
749
750 return 0;
751}
752
753static void
754qlcnic_free_irq(struct qlcnic_adapter *adapter)
755{
756 int ring;
757 struct qlcnic_host_sds_ring *sds_ring;
758
759 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
760
761 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
762 sds_ring = &recv_ctx->sds_rings[ring];
763 free_irq(sds_ring->irq, sds_ring);
764 }
765}
766
767static void
768qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
769{
770 adapter->coal.flags = QLCNIC_INTR_DEFAULT;
771 adapter->coal.normal.data.rx_time_us =
772 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
773 adapter->coal.normal.data.rx_packets =
774 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
775 adapter->coal.normal.data.tx_time_us =
776 QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
777 adapter->coal.normal.data.tx_packets =
778 QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
779}
780
781static int
782__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
783{
784 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
785 return -EIO;
786
787 qlcnic_set_multi(netdev);
788 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
789
790 adapter->ahw.linkup = 0;
791
792 if (adapter->max_sds_rings > 1)
793 qlcnic_config_rss(adapter, 1);
794
795 qlcnic_config_intr_coalesce(adapter);
796
797 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
798 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
799
800 qlcnic_napi_enable(adapter);
801
802 qlcnic_linkevent_request(adapter, 1);
803
804 set_bit(__QLCNIC_DEV_UP, &adapter->state);
805 return 0;
806}
807
 808/* Used during resume and during firmware recovery. */
809
810static int
811qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
812{
813 int err = 0;
814
815 rtnl_lock();
816 if (netif_running(netdev))
817 err = __qlcnic_up(adapter, netdev);
818 rtnl_unlock();
819
820 return err;
821}
822
823static void
824__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
825{
826 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
827 return;
828
829 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
830 return;
831
832 smp_mb();
833 spin_lock(&adapter->tx_clean_lock);
834 netif_carrier_off(netdev);
835 netif_tx_disable(netdev);
836
837 qlcnic_free_mac_list(adapter);
838
839 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
840
841 qlcnic_napi_disable(adapter);
842
843 qlcnic_release_tx_buffers(adapter);
844 spin_unlock(&adapter->tx_clean_lock);
845}
846
 847/* Used during suspend and during firmware recovery. */
848
849static void
850qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
851{
852 rtnl_lock();
853 if (netif_running(netdev))
854 __qlcnic_down(adapter, netdev);
855 rtnl_unlock();
856
857}
858
859static int
860qlcnic_attach(struct qlcnic_adapter *adapter)
861{
862 struct net_device *netdev = adapter->netdev;
863 struct pci_dev *pdev = adapter->pdev;
864 int err, ring;
865 struct qlcnic_host_rds_ring *rds_ring;
866
867 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
868 return 0;
869
870 err = qlcnic_init_firmware(adapter);
871 if (err)
872 return err;
873
874 err = qlcnic_napi_add(adapter, netdev);
875 if (err)
876 return err;
877
878 err = qlcnic_alloc_sw_resources(adapter);
879 if (err) {
880 dev_err(&pdev->dev, "Error in setting sw resources\n");
881 return err;
882 }
883
884 err = qlcnic_alloc_hw_resources(adapter);
885 if (err) {
886 dev_err(&pdev->dev, "Error in setting hw resources\n");
887 goto err_out_free_sw;
888 }
889
890
891 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
892 rds_ring = &adapter->recv_ctx.rds_rings[ring];
893 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
894 }
895
896 err = qlcnic_request_irq(adapter);
897 if (err) {
898 dev_err(&pdev->dev, "failed to setup interrupt\n");
899 goto err_out_free_rxbuf;
900 }
901
902 qlcnic_init_coalesce_defaults(adapter);
903
904 qlcnic_create_sysfs_entries(adapter);
905
906 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
907 return 0;
908
909err_out_free_rxbuf:
910 qlcnic_release_rx_buffers(adapter);
911 qlcnic_free_hw_resources(adapter);
912err_out_free_sw:
913 qlcnic_free_sw_resources(adapter);
914 return err;
915}
916
917static void
918qlcnic_detach(struct qlcnic_adapter *adapter)
919{
920 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
921 return;
922
923 qlcnic_remove_sysfs_entries(adapter);
924
925 qlcnic_free_hw_resources(adapter);
926 qlcnic_release_rx_buffers(adapter);
927 qlcnic_free_irq(adapter);
928 qlcnic_napi_del(adapter);
929 qlcnic_free_sw_resources(adapter);
930
931 adapter->is_up = 0;
932}
933
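/*
 * Diagnostic helpers: qlcnic_diag_alloc_res() detaches the normal context and
 * re-attaches with a single SDS ring for the test; qlcnic_diag_free_res()
 * restores the original ring count and brings the interface back up.
 */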
934void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
935{
936 struct qlcnic_adapter *adapter = netdev_priv(netdev);
937 struct qlcnic_host_sds_ring *sds_ring;
938 int ring;
939
940 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
941 sds_ring = &adapter->recv_ctx.sds_rings[ring];
942 qlcnic_disable_int(sds_ring);
943 }
944
945 qlcnic_detach(adapter);
946
947 adapter->diag_test = 0;
948 adapter->max_sds_rings = max_sds_rings;
949
950 if (qlcnic_attach(adapter))
951 return;
952
953 if (netif_running(netdev))
954 __qlcnic_up(adapter, netdev);
955
956 netif_device_attach(netdev);
957}
958
959int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
960{
961 struct qlcnic_adapter *adapter = netdev_priv(netdev);
962 struct qlcnic_host_sds_ring *sds_ring;
963 int ring;
964 int ret;
965
966 netif_device_detach(netdev);
967
968 if (netif_running(netdev))
969 __qlcnic_down(adapter, netdev);
970
971 qlcnic_detach(adapter);
972
973 adapter->max_sds_rings = 1;
974 adapter->diag_test = test;
975
976 ret = qlcnic_attach(adapter);
977 if (ret)
978 return ret;
979
980 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
981 sds_ring = &adapter->recv_ctx.sds_rings[ring];
982 qlcnic_enable_int(sds_ring);
983 }
984
985 return 0;
986}
987
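/*
 * Rebuild the hardware context (e.g. after a Tx timeout): detach, re-attach
 * and bring the interface back up while holding __QLCNIC_RESETTING.
 */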
988int
989qlcnic_reset_context(struct qlcnic_adapter *adapter)
990{
991 int err = 0;
992 struct net_device *netdev = adapter->netdev;
993
994 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
995 return -EBUSY;
996
997 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
998
999 netif_device_detach(netdev);
1000
1001 if (netif_running(netdev))
1002 __qlcnic_down(adapter, netdev);
1003
1004 qlcnic_detach(adapter);
1005
1006 if (netif_running(netdev)) {
1007 err = qlcnic_attach(adapter);
1008 if (!err)
1009 err = __qlcnic_up(adapter, netdev);
1010
1011 if (err)
1012 goto done;
1013 }
1014
1015 netif_device_attach(netdev);
1016 }
1017
1018done:
1019 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1020 return err;
1021}
1022
1023static int
1024qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1025 struct net_device *netdev)
1026{
1027 int err;
1028 struct pci_dev *pdev = adapter->pdev;
1029
1030 adapter->rx_csum = 1;
1031 adapter->mc_enabled = 0;
1032 adapter->max_mc_count = 38;
1033
1034 netdev->netdev_ops = &qlcnic_netdev_ops;
1035 netdev->watchdog_timeo = 2*HZ;
1036
1037 qlcnic_change_mtu(netdev, netdev->mtu);
1038
1039 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1040
1041 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
1042 netdev->features |= (NETIF_F_GRO);
1043 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
1044
1045 netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
1046 netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
1047
1048 if (adapter->pci_using_dac) {
1049 netdev->features |= NETIF_F_HIGHDMA;
1050 netdev->vlan_features |= NETIF_F_HIGHDMA;
1051 }
1052
1053 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
1054 netdev->features |= (NETIF_F_HW_VLAN_TX);
1055
1056 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
1057 netdev->features |= NETIF_F_LRO;
1058
1059 netdev->irq = adapter->msix_entries[0].vector;
1060
1061 INIT_WORK(&adapter->tx_timeout_task, qlcnic_tx_timeout_task);
1062
1063 if (qlcnic_read_mac_addr(adapter))
1064 dev_warn(&pdev->dev, "failed to read mac addr\n");
1065
1066 netif_carrier_off(netdev);
1067 netif_stop_queue(netdev);
1068
1069 err = register_netdev(netdev);
1070 if (err) {
1071 dev_err(&pdev->dev, "failed to register net device\n");
1072 return err;
1073 }
1074
1075 return 0;
1076}
1077
1078static int __devinit
1079qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1080{
1081 struct net_device *netdev = NULL;
1082 struct qlcnic_adapter *adapter = NULL;
1083 int err;
1084 int pci_func_id = PCI_FUNC(pdev->devfn);
1085 uint8_t revision_id;
1086
1087 err = pci_enable_device(pdev);
1088 if (err)
1089 return err;
1090
1091 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1092 err = -ENODEV;
1093 goto err_out_disable_pdev;
1094 }
1095
1096 err = pci_request_regions(pdev, qlcnic_driver_name);
1097 if (err)
1098 goto err_out_disable_pdev;
1099
1100 pci_set_master(pdev);
1101
1102 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1103 if (!netdev) {
1104 dev_err(&pdev->dev, "failed to allocate net_device\n");
1105 err = -ENOMEM;
1106 goto err_out_free_res;
1107 }
1108
1109 SET_NETDEV_DEV(netdev, &pdev->dev);
1110
1111 adapter = netdev_priv(netdev);
1112 adapter->netdev = netdev;
1113 adapter->pdev = pdev;
1114 adapter->ahw.pci_func = pci_func_id;
1115
1116 revision_id = pdev->revision;
1117 adapter->ahw.revision_id = revision_id;
1118
1119 rwlock_init(&adapter->ahw.crb_lock);
1120 mutex_init(&adapter->ahw.mem_lock);
1121
1122 spin_lock_init(&adapter->tx_clean_lock);
1123 INIT_LIST_HEAD(&adapter->mac_list);
1124
1125 err = qlcnic_setup_pci_map(adapter);
1126 if (err)
1127 goto err_out_free_netdev;
1128
1129 /* This will be reset for mezz cards */
1130 adapter->portnum = pci_func_id;
1131
1132 err = qlcnic_get_board_info(adapter);
1133 if (err) {
1134 dev_err(&pdev->dev, "Error getting board config info.\n");
1135 goto err_out_iounmap;
1136 }
1137
1138
1139 err = qlcnic_start_firmware(adapter);
1140 if (err)
1141 goto err_out_decr_ref;
1142
1143 /*
1144 * See if the firmware gave us a virtual-physical port mapping.
1145 */
1146 adapter->physical_port = adapter->portnum;
1147
1148 qlcnic_clear_stats(adapter);
1149
1150 qlcnic_setup_intr(adapter);
1151
1152 err = qlcnic_setup_netdev(adapter, netdev);
1153 if (err)
1154 goto err_out_disable_msi;
1155
1156 pci_set_drvdata(pdev, adapter);
1157
1158 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1159
1160 switch (adapter->ahw.port_type) {
1161 case QLCNIC_GBE:
1162 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1163 adapter->netdev->name);
1164 break;
1165 case QLCNIC_XGBE:
1166 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1167 adapter->netdev->name);
1168 break;
1169 }
1170
1171 qlcnic_create_diag_entries(adapter);
1172
1173 return 0;
1174
1175err_out_disable_msi:
1176 qlcnic_teardown_intr(adapter);
1177
1178err_out_decr_ref:
1179 qlcnic_clr_all_drv_state(adapter);
1180
1181err_out_iounmap:
1182 qlcnic_cleanup_pci_map(adapter);
1183
1184err_out_free_netdev:
1185 free_netdev(netdev);
1186
1187err_out_free_res:
1188 pci_release_regions(pdev);
1189
1190err_out_disable_pdev:
1191 pci_set_drvdata(pdev, NULL);
1192 pci_disable_device(pdev);
1193 return err;
1194}
1195
1196static void __devexit qlcnic_remove(struct pci_dev *pdev)
1197{
1198 struct qlcnic_adapter *adapter;
1199 struct net_device *netdev;
1200
1201 adapter = pci_get_drvdata(pdev);
1202 if (adapter == NULL)
1203 return;
1204
1205 netdev = adapter->netdev;
1206
1207 qlcnic_cancel_fw_work(adapter);
1208
1209 unregister_netdev(netdev);
1210
1211 cancel_work_sync(&adapter->tx_timeout_task);
1212
1213 qlcnic_detach(adapter);
1214
1215 qlcnic_clr_all_drv_state(adapter);
1216
1217 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1218
1219 qlcnic_teardown_intr(adapter);
1220
1221 qlcnic_remove_diag_entries(adapter);
1222
1223 qlcnic_cleanup_pci_map(adapter);
1224
1225 qlcnic_release_firmware(adapter);
1226
1227 pci_release_regions(pdev);
1228 pci_disable_device(pdev);
1229 pci_set_drvdata(pdev, NULL);
1230
1231 free_netdev(netdev);
1232}
1233static int __qlcnic_shutdown(struct pci_dev *pdev)
1234{
1235 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1236 struct net_device *netdev = adapter->netdev;
1237 int retval;
1238
1239 netif_device_detach(netdev);
1240
1241 qlcnic_cancel_fw_work(adapter);
1242
1243 if (netif_running(netdev))
1244 qlcnic_down(adapter, netdev);
1245
1246 cancel_work_sync(&adapter->tx_timeout_task);
1247
1248 qlcnic_detach(adapter);
1249
1250 qlcnic_clr_all_drv_state(adapter);
1251
1252 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1253
1254 retval = pci_save_state(pdev);
1255 if (retval)
1256 return retval;
1257
1258 if (qlcnic_wol_supported(adapter)) {
1259 pci_enable_wake(pdev, PCI_D3cold, 1);
1260 pci_enable_wake(pdev, PCI_D3hot, 1);
1261 }
1262
1263 return 0;
1264}
1265
1266static void qlcnic_shutdown(struct pci_dev *pdev)
1267{
1268 if (__qlcnic_shutdown(pdev))
1269 return;
1270
1271 pci_disable_device(pdev);
1272}
1273
1274#ifdef CONFIG_PM
1275static int
1276qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1277{
1278 int retval;
1279
1280 retval = __qlcnic_shutdown(pdev);
1281 if (retval)
1282 return retval;
1283
1284 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1285 return 0;
1286}
1287
1288static int
1289qlcnic_resume(struct pci_dev *pdev)
1290{
1291 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1292 struct net_device *netdev = adapter->netdev;
1293 int err;
1294
1295 err = pci_enable_device(pdev);
1296 if (err)
1297 return err;
1298
1299 pci_set_power_state(pdev, PCI_D0);
1300 pci_set_master(pdev);
1301 pci_restore_state(pdev);
1302
1303 adapter->ahw.crb_win = -1;
1304 adapter->ahw.ocm_win = -1;
1305
1306 err = qlcnic_start_firmware(adapter);
1307 if (err) {
1308 dev_err(&pdev->dev, "failed to start firmware\n");
1309 return err;
1310 }
1311
1312 if (netif_running(netdev)) {
1313 err = qlcnic_attach(adapter);
1314 if (err)
1315 goto err_out;
1316
1317 err = qlcnic_up(adapter, netdev);
1318 if (err)
1319 goto err_out_detach;
1320
1321
1322 qlcnic_config_indev_addr(netdev, NETDEV_UP);
1323 }
1324
1325 netif_device_attach(netdev);
1326 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1327 return 0;
1328
1329err_out_detach:
1330 qlcnic_detach(adapter);
1331err_out:
1332 qlcnic_clr_all_drv_state(adapter);
1333 return err;
1334}
1335#endif
1336
1337static int qlcnic_open(struct net_device *netdev)
1338{
1339 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1340 int err;
1341
1342 if (adapter->driver_mismatch)
1343 return -EIO;
1344
1345 err = qlcnic_attach(adapter);
1346 if (err)
1347 return err;
1348
1349 err = __qlcnic_up(adapter, netdev);
1350 if (err)
1351 goto err_out;
1352
1353 netif_start_queue(netdev);
1354
1355 return 0;
1356
1357err_out:
1358 qlcnic_detach(adapter);
1359 return err;
1360}
1361
1362/*
1363 * qlcnic_close - Disables a network interface entry point
1364 */
1365static int qlcnic_close(struct net_device *netdev)
1366{
1367 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1368
1369 __qlcnic_down(adapter, netdev);
1370 return 0;
1371}
1372
1373static void
1374qlcnic_tso_check(struct net_device *netdev,
1375 struct qlcnic_host_tx_ring *tx_ring,
1376 struct cmd_desc_type0 *first_desc,
1377 struct sk_buff *skb)
1378{
1379 u8 opcode = TX_ETHER_PKT;
1380 __be16 protocol = skb->protocol;
1381 u16 flags = 0, vid = 0;
1382 u32 producer;
1383 int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
1384 struct cmd_desc_type0 *hwdesc;
1385 struct vlan_ethhdr *vh;
1386
1387 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1388
1389 vh = (struct vlan_ethhdr *)skb->data;
1390 protocol = vh->h_vlan_encapsulated_proto;
1391 flags = FLAGS_VLAN_TAGGED;
1392
1393 } else if (vlan_tx_tag_present(skb)) {
1394
1395 flags = FLAGS_VLAN_OOB;
1396 vid = vlan_tx_tag_get(skb);
1397 qlcnic_set_tx_vlan_tci(first_desc, vid);
1398 vlan_oob = 1;
1399 }
1400
1401 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1402 skb_shinfo(skb)->gso_size > 0) {
1403
1404 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1405
1406 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1407 first_desc->total_hdr_length = hdr_len;
1408 if (vlan_oob) {
1409 first_desc->total_hdr_length += VLAN_HLEN;
1410 first_desc->tcp_hdr_offset = VLAN_HLEN;
1411 first_desc->ip_hdr_offset = VLAN_HLEN;
1412 /* Only in case of TSO on vlan device */
1413 flags |= FLAGS_VLAN_TAGGED;
1414 }
1415
1416 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1417 TX_TCP_LSO6 : TX_TCP_LSO;
1418 tso = 1;
1419
1420 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1421 u8 l4proto;
1422
1423 if (protocol == cpu_to_be16(ETH_P_IP)) {
1424 l4proto = ip_hdr(skb)->protocol;
1425
1426 if (l4proto == IPPROTO_TCP)
1427 opcode = TX_TCP_PKT;
1428 else if (l4proto == IPPROTO_UDP)
1429 opcode = TX_UDP_PKT;
1430 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1431 l4proto = ipv6_hdr(skb)->nexthdr;
1432
1433 if (l4proto == IPPROTO_TCP)
1434 opcode = TX_TCPV6_PKT;
1435 else if (l4proto == IPPROTO_UDP)
1436 opcode = TX_UDPV6_PKT;
1437 }
1438 }
1439
1440 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
1441 first_desc->ip_hdr_offset += skb_network_offset(skb);
1442 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1443
1444 if (!tso)
1445 return;
1446
1447 /* For LSO, we need to copy the MAC/IP/TCP headers into
1448 * the descriptor ring
1449 */
1450 producer = tx_ring->producer;
1451 copied = 0;
1452 offset = 2;
1453
1454 if (vlan_oob) {
1455 /* Create a TSO vlan header template for firmware */
1456
1457 hwdesc = &tx_ring->desc_head[producer];
1458 tx_ring->cmd_buf_arr[producer].skb = NULL;
1459
1460 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1461 hdr_len + VLAN_HLEN);
1462
1463 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1464 skb_copy_from_linear_data(skb, vh, 12);
1465 vh->h_vlan_proto = htons(ETH_P_8021Q);
1466 vh->h_vlan_TCI = htons(vid);
1467 skb_copy_from_linear_data_offset(skb, 12,
1468 (char *)vh + 16, copy_len - 16);
1469
1470 copied = copy_len - VLAN_HLEN;
1471 offset = 0;
1472
1473 producer = get_next_index(producer, tx_ring->num_desc);
1474 }
1475
1476 while (copied < hdr_len) {
1477
1478 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1479 (hdr_len - copied));
1480
1481 hwdesc = &tx_ring->desc_head[producer];
1482 tx_ring->cmd_buf_arr[producer].skb = NULL;
1483
1484 skb_copy_from_linear_data_offset(skb, copied,
1485 (char *)hwdesc + offset, copy_len);
1486
1487 copied += copy_len;
1488 offset = 0;
1489
1490 producer = get_next_index(producer, tx_ring->num_desc);
1491 }
1492
1493 tx_ring->producer = producer;
1494 barrier();
1495}
1496
1497static int
1498qlcnic_map_tx_skb(struct pci_dev *pdev,
1499 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
1500{
1501 struct qlcnic_skb_frag *nf;
1502 struct skb_frag_struct *frag;
1503 int i, nr_frags;
1504 dma_addr_t map;
1505
1506 nr_frags = skb_shinfo(skb)->nr_frags;
1507 nf = &pbuf->frag_array[0];
1508
1509 map = pci_map_single(pdev, skb->data,
1510 skb_headlen(skb), PCI_DMA_TODEVICE);
1511 if (pci_dma_mapping_error(pdev, map))
1512 goto out_err;
1513
1514 nf->dma = map;
1515 nf->length = skb_headlen(skb);
1516
1517 for (i = 0; i < nr_frags; i++) {
1518 frag = &skb_shinfo(skb)->frags[i];
1519 nf = &pbuf->frag_array[i+1];
1520
1521 map = pci_map_page(pdev, frag->page, frag->page_offset,
1522 frag->size, PCI_DMA_TODEVICE);
1523 if (pci_dma_mapping_error(pdev, map))
1524 goto unwind;
1525
1526 nf->dma = map;
1527 nf->length = frag->size;
1528 }
1529
1530 return 0;
1531
1532unwind:
1533 while (--i >= 0) {
1534 nf = &pbuf->frag_array[i+1];
1535 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
1536 }
1537
1538 nf = &pbuf->frag_array[0];
1539 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
1540
1541out_err:
1542 return -ENOMEM;
1543}
1544
1545static inline void
1546qlcnic_clear_cmddesc(u64 *desc)
1547{
1548 desc[0] = 0ULL;
1549 desc[2] = 0ULL;
1550}
1551
1552static netdev_tx_t
1553qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1554{
1555 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1556 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1557 struct qlcnic_cmd_buffer *pbuf;
1558 struct qlcnic_skb_frag *buffrag;
1559 struct cmd_desc_type0 *hwdesc, *first_desc;
1560 struct pci_dev *pdev;
1561 int i, k;
1562
1563 u32 producer;
1564 int frag_count, no_of_desc;
1565 u32 num_txd = tx_ring->num_desc;
1566
1567 frag_count = skb_shinfo(skb)->nr_frags + 1;
1568
 1569	/* 4 fragments per cmd desc */
1570 no_of_desc = (frag_count + 3) >> 2;
1571
1572 if (unlikely(no_of_desc + 2 > qlcnic_tx_avail(tx_ring))) {
1573 netif_stop_queue(netdev);
1574 return NETDEV_TX_BUSY;
1575 }
1576
1577 producer = tx_ring->producer;
1578 pbuf = &tx_ring->cmd_buf_arr[producer];
1579
1580 pdev = adapter->pdev;
1581
1582 if (qlcnic_map_tx_skb(pdev, skb, pbuf))
1583 goto drop_packet;
1584
1585 pbuf->skb = skb;
1586 pbuf->frag_count = frag_count;
1587
1588 first_desc = hwdesc = &tx_ring->desc_head[producer];
1589 qlcnic_clear_cmddesc((u64 *)hwdesc);
1590
1591 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
1592 qlcnic_set_tx_port(first_desc, adapter->portnum);
1593
1594 for (i = 0; i < frag_count; i++) {
1595
1596 k = i % 4;
1597
1598 if ((k == 0) && (i > 0)) {
1599 /* move to next desc.*/
1600 producer = get_next_index(producer, num_txd);
1601 hwdesc = &tx_ring->desc_head[producer];
1602 qlcnic_clear_cmddesc((u64 *)hwdesc);
1603 tx_ring->cmd_buf_arr[producer].skb = NULL;
1604 }
1605
1606 buffrag = &pbuf->frag_array[i];
1607
1608 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
1609 switch (k) {
1610 case 0:
1611 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
1612 break;
1613 case 1:
1614 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
1615 break;
1616 case 2:
1617 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
1618 break;
1619 case 3:
1620 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
1621 break;
1622 }
1623 }
1624
1625 tx_ring->producer = get_next_index(producer, num_txd);
1626
1627 qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
1628
1629 qlcnic_update_cmd_producer(adapter, tx_ring);
1630
1631 adapter->stats.txbytes += skb->len;
1632 adapter->stats.xmitcalled++;
1633
1634 return NETDEV_TX_OK;
1635
1636drop_packet:
1637 adapter->stats.txdropped++;
1638 dev_kfree_skb_any(skb);
1639 return NETDEV_TX_OK;
1640}
1641
1642static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
1643{
1644 struct net_device *netdev = adapter->netdev;
1645 u32 temp, temp_state, temp_val;
1646 int rv = 0;
1647
1648 temp = QLCRD32(adapter, CRB_TEMP_STATE);
1649
1650 temp_state = qlcnic_get_temp_state(temp);
1651 temp_val = qlcnic_get_temp_val(temp);
1652
1653 if (temp_state == QLCNIC_TEMP_PANIC) {
1654 dev_err(&netdev->dev,
1655 "Device temperature %d degrees C exceeds"
1656 " maximum allowed. Hardware has been shut down.\n",
1657 temp_val);
1658 rv = 1;
1659 } else if (temp_state == QLCNIC_TEMP_WARN) {
1660 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
1661 dev_err(&netdev->dev,
1662 "Device temperature %d degrees C "
1663 "exceeds operating range."
1664 " Immediate action needed.\n",
1665 temp_val);
1666 }
1667 } else {
1668 if (adapter->temp == QLCNIC_TEMP_WARN) {
1669 dev_info(&netdev->dev,
1670 "Device temperature is now %d degrees C"
1671 " in normal range.\n", temp_val);
1672 }
1673 }
1674 adapter->temp = temp_state;
1675 return rv;
1676}
1677
1678void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
1679{
1680 struct net_device *netdev = adapter->netdev;
1681
1682 if (adapter->ahw.linkup && !linkup) {
1683 dev_info(&netdev->dev, "NIC Link is down\n");
1684 adapter->ahw.linkup = 0;
1685 if (netif_running(netdev)) {
1686 netif_carrier_off(netdev);
1687 netif_stop_queue(netdev);
1688 }
1689 } else if (!adapter->ahw.linkup && linkup) {
1690 dev_info(&netdev->dev, "NIC Link is up\n");
1691 adapter->ahw.linkup = 1;
1692 if (netif_running(netdev)) {
1693 netif_carrier_on(netdev);
1694 netif_wake_queue(netdev);
1695 }
1696 }
1697}
1698
1699static void qlcnic_tx_timeout(struct net_device *netdev)
1700{
1701 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1702
1703 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
1704 return;
1705
1706 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
1707 schedule_work(&adapter->tx_timeout_task);
1708}
1709
1710static void qlcnic_tx_timeout_task(struct work_struct *work)
1711{
1712 struct qlcnic_adapter *adapter =
1713 container_of(work, struct qlcnic_adapter, tx_timeout_task);
1714
1715 if (!netif_running(adapter->netdev))
1716 return;
1717
1718 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1719 return;
1720
1721 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
1722 goto request_reset;
1723
1724 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1725 if (!qlcnic_reset_context(adapter)) {
1726 adapter->netdev->trans_start = jiffies;
 1727		return;
 1728	}
 1729
 1730	/* context reset failed, fall through for fw reset */
1731
1732request_reset:
1733 adapter->need_fw_reset = 1;
1734 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1735}
1736
1737static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
1738{
1739 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1740 struct net_device_stats *stats = &netdev->stats;
1741
1742 memset(stats, 0, sizeof(*stats));
1743
1744 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
1745 stats->tx_packets = adapter->stats.xmitfinished;
1746 stats->rx_bytes = adapter->stats.rxbytes;
1747 stats->tx_bytes = adapter->stats.txbytes;
1748 stats->rx_dropped = adapter->stats.rxdropped;
1749 stats->tx_dropped = adapter->stats.txdropped;
1750
1751 return stats;
1752}
1753
 1754static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
 1755{
1756 u32 status;
1757
1758 status = readl(adapter->isr_int_vec);
1759
1760 if (!(status & adapter->int_vec_bit))
1761 return IRQ_NONE;
1762
1763 /* check interrupt state machine, to be sure */
1764 status = readl(adapter->crb_int_state_reg);
1765 if (!ISR_LEGACY_INT_TRIGGERED(status))
1766 return IRQ_NONE;
1767
1768 writel(0xffffffff, adapter->tgt_status_reg);
1769 /* read twice to ensure write is flushed */
1770 readl(adapter->isr_int_vec);
1771 readl(adapter->isr_int_vec);
1772
1773 return IRQ_HANDLED;
1774}
1775
1776static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
1777{
1778 struct qlcnic_host_sds_ring *sds_ring = data;
1779 struct qlcnic_adapter *adapter = sds_ring->adapter;
1780
1781 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1782 goto done;
1783 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
1784 writel(0xffffffff, adapter->tgt_status_reg);
1785 goto done;
1786 }
1787
1788 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
1789 return IRQ_NONE;
1790
1791done:
1792 adapter->diag_cnt++;
1793 qlcnic_enable_int(sds_ring);
1794 return IRQ_HANDLED;
1795}
1796
1797static irqreturn_t qlcnic_intr(int irq, void *data)
1798{
1799 struct qlcnic_host_sds_ring *sds_ring = data;
1800 struct qlcnic_adapter *adapter = sds_ring->adapter;
1801
1802 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
1803 return IRQ_NONE;
1804
1805 napi_schedule(&sds_ring->napi);
1806
1807 return IRQ_HANDLED;
1808}
1809
1810static irqreturn_t qlcnic_msi_intr(int irq, void *data)
1811{
1812 struct qlcnic_host_sds_ring *sds_ring = data;
1813 struct qlcnic_adapter *adapter = sds_ring->adapter;
1814
1815 /* clear interrupt */
1816 writel(0xffffffff, adapter->tgt_status_reg);
1817
1818 napi_schedule(&sds_ring->napi);
1819 return IRQ_HANDLED;
1820}
1821
1822static irqreturn_t qlcnic_msix_intr(int irq, void *data)
1823{
1824 struct qlcnic_host_sds_ring *sds_ring = data;
1825
1826 napi_schedule(&sds_ring->napi);
1827 return IRQ_HANDLED;
1828}
1829
1830static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
1831{
1832 u32 sw_consumer, hw_consumer;
1833 int count = 0, i;
1834 struct qlcnic_cmd_buffer *buffer;
1835 struct pci_dev *pdev = adapter->pdev;
1836 struct net_device *netdev = adapter->netdev;
1837 struct qlcnic_skb_frag *frag;
1838 int done;
1839 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1840
1841 if (!spin_trylock(&adapter->tx_clean_lock))
1842 return 1;
1843
1844 sw_consumer = tx_ring->sw_consumer;
1845 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
1846
1847 while (sw_consumer != hw_consumer) {
1848 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
1849 if (buffer->skb) {
1850 frag = &buffer->frag_array[0];
1851 pci_unmap_single(pdev, frag->dma, frag->length,
1852 PCI_DMA_TODEVICE);
1853 frag->dma = 0ULL;
1854 for (i = 1; i < buffer->frag_count; i++) {
1855 frag++;
1856 pci_unmap_page(pdev, frag->dma, frag->length,
1857 PCI_DMA_TODEVICE);
1858 frag->dma = 0ULL;
1859 }
1860
1861 adapter->stats.xmitfinished++;
1862 dev_kfree_skb_any(buffer->skb);
1863 buffer->skb = NULL;
1864 }
1865
1866 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
1867 if (++count >= MAX_STATUS_HANDLE)
1868 break;
1869 }
1870
1871 if (count && netif_running(netdev)) {
1872 tx_ring->sw_consumer = sw_consumer;
1873
1874 smp_mb();
1875
1876 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
1877 __netif_tx_lock(tx_ring->txq, smp_processor_id());
1878 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
1879 netif_wake_queue(netdev);
1880 adapter->tx_timeo_cnt = 0;
1881 }
1882 __netif_tx_unlock(tx_ring->txq);
1883 }
1884 }
 1885	/*
 1886	 * If everything is freed up to the consumer then check if the ring is
 1887	 * full. If the ring is full then check if more needs to be freed and
 1888	 * schedule the callback again.
1889 *
1890 * This happens when there are 2 CPUs. One could be freeing and the
1891 * other filling it. If the ring is full when we get out of here and
1892 * the card has already interrupted the host then the host can miss the
1893 * interrupt.
1894 *
1895 * There is still a possible race condition and the host could miss an
1896 * interrupt. The card has to take care of this.
1897 */
1898 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
1899 done = (sw_consumer == hw_consumer);
1900 spin_unlock(&adapter->tx_clean_lock);
1901
1902 return done;
1903}
1904
1905static int qlcnic_poll(struct napi_struct *napi, int budget)
1906{
1907 struct qlcnic_host_sds_ring *sds_ring =
1908 container_of(napi, struct qlcnic_host_sds_ring, napi);
1909
1910 struct qlcnic_adapter *adapter = sds_ring->adapter;
1911
1912 int tx_complete;
1913 int work_done;
1914
1915 tx_complete = qlcnic_process_cmd_ring(adapter);
1916
1917 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
1918
1919 if ((work_done < budget) && tx_complete) {
1920 napi_complete(&sds_ring->napi);
1921 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1922 qlcnic_enable_int(sds_ring);
1923 }
1924
1925 return work_done;
1926}
1927
1928#ifdef CONFIG_NET_POLL_CONTROLLER
1929static void qlcnic_poll_controller(struct net_device *netdev)
1930{
1931 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1932 disable_irq(adapter->irq);
1933 qlcnic_intr(adapter->irq, adapter);
1934 enable_irq(adapter->irq);
1935}
1936#endif
1937
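/*
 * Each PCI function owns a 4-bit nibble in the shared DRV_STATE register:
 * bit 0 acknowledges a reset request, bit 1 a quiescent request. The
 * DEV_REF_COUNT register uses bit 0 of each nibble as an "active driver"
 * flag.
 */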
1938static void
1939qlcnic_set_drv_state(struct qlcnic_adapter *adapter, int state)
1940{
1941 u32 val;
1942
1943 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
1944 state != QLCNIC_DEV_NEED_QUISCENT);
1945
1946 if (qlcnic_api_lock(adapter))
 1947		return;
1948
1949 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1950
1951 if (state == QLCNIC_DEV_NEED_RESET)
1952 val |= ((u32)0x1 << (adapter->portnum * 4));
1953 else if (state == QLCNIC_DEV_NEED_QUISCENT)
1954 val |= ((u32)0x1 << ((adapter->portnum * 4) + 1));
1955
1956 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1957
1958 qlcnic_api_unlock(adapter);
1959}
1960
1961static int
1962qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
1963{
1964 u32 val;
1965
1966 if (qlcnic_api_lock(adapter))
1967 return -EBUSY;
1968
1969 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1970 val &= ~((u32)0x3 << (adapter->portnum * 4));
1971 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1972
1973 qlcnic_api_unlock(adapter);
1974
1975 return 0;
1976}
1977
1978static void
1979qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter)
1980{
1981 u32 val;
1982
1983 if (qlcnic_api_lock(adapter))
1984 goto err;
1985
1986 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
1987 val &= ~((u32)0x1 << (adapter->portnum * 4));
1988 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
1989
1990 if (!(val & 0x11111111))
1991 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
1992
1993 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1994 val &= ~((u32)0x3 << (adapter->portnum * 4));
1995 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1996
1997 qlcnic_api_unlock(adapter);
1998err:
1999 adapter->fw_fail_cnt = 0;
2000 clear_bit(__QLCNIC_START_FW, &adapter->state);
2001 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2002}
2003
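/*
 * Returns 0 once every active function has acknowledged the pending
 * reset/quiescent request, non-zero while acknowledgements are outstanding.
 */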
2004static int
2005qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2006{
2007 int act, state;
2008
2009 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2010 act = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
2011
2012 if (((state & 0x11111111) == (act & 0x11111111)) ||
2013 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2014 return 0;
2015 else
2016 return 1;
2017}
2018
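/*
 * Decide whether this function should (re)start the firmware: if the device
 * is COLD (or this function owns __QLCNIC_START_FW) it moves the state to
 * INITIALIZING and returns 1; otherwise it acknowledges any pending
 * reset/quiescent request and waits up to ~20 seconds for READY.
 */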
2019static int
2020qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2021{
2022 u32 val, prev_state;
2023 int cnt = 0;
2024 int portnum = adapter->portnum;
2025
2026 if (qlcnic_api_lock(adapter))
2027 return -1;
2028
2029 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
2030 if (!(val & ((int)0x1 << (portnum * 4)))) {
2031 val |= ((u32)0x1 << (portnum * 4));
2032 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
2033 } else if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state)) {
2034 goto start_fw;
2035 }
2036
2037 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2038
2039 switch (prev_state) {
2040 case QLCNIC_DEV_COLD:
2041start_fw:
2042 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITALIZING);
2043 qlcnic_api_unlock(adapter);
2044 return 1;
2045
2046 case QLCNIC_DEV_READY:
2047 qlcnic_api_unlock(adapter);
2048 return 0;
2049
2050 case QLCNIC_DEV_NEED_RESET:
2051 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2052 val |= ((u32)0x1 << (portnum * 4));
2053 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2054 break;
2055
2056 case QLCNIC_DEV_NEED_QUISCENT:
2057 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2058 val |= ((u32)0x1 << ((portnum * 4) + 1));
2059 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2060 break;
2061
2062 case QLCNIC_DEV_FAILED:
2063 qlcnic_api_unlock(adapter);
2064 return -1;
2065 }
2066
2067 qlcnic_api_unlock(adapter);
2068 msleep(1000);
2069 while ((QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) != QLCNIC_DEV_READY) &&
2070 ++cnt < 20)
2071 msleep(1000);
2072
2073 if (cnt >= 20)
2074 return -1;
2075
2076 if (qlcnic_api_lock(adapter))
2077 return -1;
2078
2079 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2080 val &= ~((u32)0x3 << (portnum * 4));
2081 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2082
2083 qlcnic_api_unlock(adapter);
2084
2085 return 0;
2086}
2087
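/*
 * qlcnic_can_start_firmware() above implements the multi-function boot
 * handshake: the caller first marks itself active in CRB_DEV_REF_COUNT and
 * then acts on CRB_DEV_STATE.  A COLD device (or a function that already
 * holds __QLCNIC_START_FW) moves the state to INITALIZING and returns 1,
 * telling the caller to load the firmware itself; READY returns 0 so the
 * running firmware is reused; NEED_RESET and NEED_QUISCENT are acknowledged
 * in CRB_DRV_STATE, after which the function polls for READY for roughly
 * 20 seconds, clears its acknowledgement bits and returns 0, or returns -1
 * (as it does immediately for FAILED) if the device never gets there.
 */
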
2088static void
2089qlcnic_fwinit_work(struct work_struct *work)
2090{
2091 struct qlcnic_adapter *adapter = container_of(work,
2092 struct qlcnic_adapter, fw_work.work);
2093 int dev_state;
2094
2095 if (++adapter->fw_wait_cnt > FW_POLL_THRESH)
2096 goto err_ret;
2097
2098 if (test_bit(__QLCNIC_START_FW, &adapter->state)) {
2099
2100 if (qlcnic_check_drv_state(adapter)) {
2101 qlcnic_schedule_work(adapter,
2102 qlcnic_fwinit_work, FW_POLL_DELAY);
2103 return;
2104 }
2105
2106 if (!qlcnic_start_firmware(adapter)) {
2107 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2108 return;
2109 }
2110
2111 goto err_ret;
2112 }
2113
2114 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2115 switch (dev_state) {
2116 case QLCNIC_DEV_READY:
2117 if (!qlcnic_start_firmware(adapter)) {
2118 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2119 return;
2120 		}	/* qlcnic_start_firmware() failed: fall through */
2121 case QLCNIC_DEV_FAILED:
2122 break;
2123
2124 default:
2125 qlcnic_schedule_work(adapter,
2126 qlcnic_fwinit_work, 2 * FW_POLL_DELAY);
2127 return;
2128 }
2129
2130err_ret:
2131 qlcnic_clr_all_drv_state(adapter);
2132}
2133
2134static void
2135qlcnic_detach_work(struct work_struct *work)
2136{
2137 struct qlcnic_adapter *adapter = container_of(work,
2138 struct qlcnic_adapter, fw_work.work);
2139 struct net_device *netdev = adapter->netdev;
2140 u32 status;
2141
2142 netif_device_detach(netdev);
2143
2144 qlcnic_down(adapter, netdev);
2145
2146 	rtnl_lock();
2147 	qlcnic_detach(adapter);
2148 	rtnl_unlock();
2149
2150 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2151
2152 if (status & QLCNIC_RCODE_FATAL_ERROR)
2153 goto err_ret;
2154
2155 if (adapter->temp == QLCNIC_TEMP_PANIC)
2156 goto err_ret;
2157
2158 qlcnic_set_drv_state(adapter, adapter->dev_state);
2159
2160 adapter->fw_wait_cnt = 0;
2161
2162 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2163
2164 return;
2165
2166err_ret:
2167 qlcnic_clr_all_drv_state(adapter);
2168
2169}
2170
2171static void
2172qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2173{
2174 u32 state;
2175
2176 if (qlcnic_api_lock(adapter))
2177 return;
2178
2179 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2180
2181 if (state != QLCNIC_DEV_INITALIZING && state != QLCNIC_DEV_NEED_RESET) {
2182 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
2183 set_bit(__QLCNIC_START_FW, &adapter->state);
2184 }
2185
2186 qlcnic_api_unlock(adapter);
2187}
2188
2189static void
2190qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2191 work_func_t func, int delay)
2192{
2193 INIT_DELAYED_WORK(&adapter->fw_work, func);
2194 schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay));
2195}
2196
2197static void
2198qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
2199{
2200 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2201 msleep(10);
2202
2203 cancel_delayed_work_sync(&adapter->fw_work);
2204}
2205
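/*
 * Every step of the recovery path runs on the single adapter->fw_work
 * delayed work: qlcnic_schedule_work() (re)initializes it with the handler
 * for the next step (fwinit, attach, detach or poll) before queueing it.
 * qlcnic_cancel_fw_work() first claims __QLCNIC_RESETTING, which stops
 * qlcnic_check_health() from scheduling a new detach, and then cancels
 * whatever is still pending.
 */
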
2206static void
2207qlcnic_attach_work(struct work_struct *work)
2208{
2209 struct qlcnic_adapter *adapter = container_of(work,
2210 struct qlcnic_adapter, fw_work.work);
2211 struct net_device *netdev = adapter->netdev;
2212 int err;
2213
2214 if (netif_running(netdev)) {
2215 err = qlcnic_attach(adapter);
2216 if (err)
2217 goto done;
2218
2219 err = qlcnic_up(adapter, netdev);
2220 if (err) {
2221 qlcnic_detach(adapter);
2222 goto done;
2223 }
2224
2225 qlcnic_config_indev_addr(netdev, NETDEV_UP);
2226 }
2227
2228 netif_device_attach(netdev);
2229
2230done:
2231 adapter->fw_fail_cnt = 0;
2232 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2233
2234 if (!qlcnic_clr_drv_state(adapter))
2235 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2236 FW_POLL_DELAY);
2237}
2238
2239static int
2240qlcnic_check_health(struct qlcnic_adapter *adapter)
2241{
2242 u32 state = 0, heartbit;
2243 struct net_device *netdev = adapter->netdev;
2244
2245 if (qlcnic_check_temp(adapter))
2246 goto detach;
2247
2248 if (adapter->need_fw_reset) {
2249 qlcnic_dev_request_reset(adapter);
2250 goto detach;
2251 }
2252
2253 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2254 if (state == QLCNIC_DEV_NEED_RESET || state == QLCNIC_DEV_NEED_QUISCENT)
2255 adapter->need_fw_reset = 1;
2256
2257 heartbit = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
2258 if (heartbit != adapter->heartbit) {
2259 adapter->heartbit = heartbit;
2260 adapter->fw_fail_cnt = 0;
2261 if (adapter->need_fw_reset)
2262 goto detach;
2263 return 0;
2264 }
2265
2266 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
2267 return 0;
2268
2269 qlcnic_dev_request_reset(adapter);
2270
2271 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
2272
2273 dev_info(&netdev->dev, "firmware hang detected\n");
2274
2275detach:
2276 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
2277 QLCNIC_DEV_NEED_RESET;
2278
2279 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
2280 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2281 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
2282
2283 return 1;
2284}
2285
2286static void
2287qlcnic_fw_poll_work(struct work_struct *work)
2288{
2289 struct qlcnic_adapter *adapter = container_of(work,
2290 struct qlcnic_adapter, fw_work.work);
2291
2292 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2293 goto reschedule;
2294
2295
2296 if (qlcnic_check_health(adapter))
2297 return;
2298
2299reschedule:
2300 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
2301}
2302
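/*
 * Recovery flow implemented by the work handlers above: qlcnic_fw_poll_work()
 * runs qlcnic_check_health() every FW_POLL_DELAY.  A failed temperature
 * check, an explicit need_fw_reset or FW_FAIL_THRESH missed heartbeats lead
 * to qlcnic_dev_request_reset() and qlcnic_detach_work(), which tears the
 * interface down, records this function's acknowledgement in CRB_DRV_STATE
 * and hands off to qlcnic_fwinit_work().  That handler either waits for the
 * other functions' acknowledgements and reloads the firmware itself (when
 * it owns __QLCNIC_START_FW) or polls until CRB_DEV_STATE reports READY,
 * then queues qlcnic_attach_work() to bring the netdev back up and resume
 * polling; exceeding FW_POLL_THRESH attempts or a FAILED state abandons the
 * recovery through qlcnic_clr_all_drv_state().
 */
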
2303static ssize_t
2304qlcnic_store_bridged_mode(struct device *dev,
2305 struct device_attribute *attr, const char *buf, size_t len)
2306{
2307 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2308 unsigned long new;
2309 int ret = -EINVAL;
2310
2311 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
2312 goto err_out;
2313
2314 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
2315 goto err_out;
2316
2317 if (strict_strtoul(buf, 2, &new))
2318 goto err_out;
2319
2320 if (!qlcnic_config_bridged_mode(adapter, !!new))
2321 ret = len;
2322
2323err_out:
2324 return ret;
2325}
2326
2327static ssize_t
2328qlcnic_show_bridged_mode(struct device *dev,
2329 struct device_attribute *attr, char *buf)
2330{
2331 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2332 int bridged_mode = 0;
2333
2334 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2335 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
2336
2337 return sprintf(buf, "%d\n", bridged_mode);
2338}
2339
2340static struct device_attribute dev_attr_bridged_mode = {
2341 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
2342 .show = qlcnic_show_bridged_mode,
2343 .store = qlcnic_store_bridged_mode,
2344};
2345
2346static ssize_t
2347qlcnic_store_diag_mode(struct device *dev,
2348 struct device_attribute *attr, const char *buf, size_t len)
2349{
2350 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2351 unsigned long new;
2352
2353 if (strict_strtoul(buf, 2, &new))
2354 return -EINVAL;
2355
2356 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
2357 adapter->flags ^= QLCNIC_DIAG_ENABLED;
2358
2359 return len;
2360}
2361
2362static ssize_t
2363qlcnic_show_diag_mode(struct device *dev,
2364 struct device_attribute *attr, char *buf)
2365{
2366 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2367
2368 return sprintf(buf, "%d\n",
2369 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
2370}
2371
2372static struct device_attribute dev_attr_diag_mode = {
2373 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
2374 .show = qlcnic_show_diag_mode,
2375 .store = qlcnic_store_diag_mode,
2376};
2377
2378static int
2379qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
2380 loff_t offset, size_t size)
2381{
2382 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
2383 return -EIO;
2384
2385 if ((size != 4) || (offset & 0x3))
2386 return -EINVAL;
2387
2388 if (offset < QLCNIC_PCI_CRBSPACE)
2389 return -EINVAL;
2390
2391 return 0;
2392}
2393
2394static ssize_t
2395qlcnic_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
2396 char *buf, loff_t offset, size_t size)
2397{
2398 struct device *dev = container_of(kobj, struct device, kobj);
2399 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2400 u32 data;
2401 int ret;
2402
2403 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2404 if (ret != 0)
2405 return ret;
2406
2407 data = QLCRD32(adapter, offset);
2408 memcpy(buf, &data, size);
2409 return size;
2410}
2411
2412static ssize_t
2413qlcnic_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr,
2414 char *buf, loff_t offset, size_t size)
2415{
2416 struct device *dev = container_of(kobj, struct device, kobj);
2417 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2418 u32 data;
2419 int ret;
2420
2421 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2422 if (ret != 0)
2423 return ret;
2424
2425 memcpy(&data, buf, size);
2426 QLCWR32(adapter, offset, data);
2427 return size;
2428}
2429
2430static int
2431qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
2432 loff_t offset, size_t size)
2433{
2434 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
2435 return -EIO;
2436
2437 if ((size != 8) || (offset & 0x7))
2438 return -EIO;
2439
2440 return 0;
2441}
2442
2443static ssize_t
2444qlcnic_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
2445 char *buf, loff_t offset, size_t size)
2446{
2447 struct device *dev = container_of(kobj, struct device, kobj);
2448 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2449 u64 data;
2450 int ret;
2451
2452 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
2453 if (ret != 0)
2454 return ret;
2455
2456 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
2457 return -EIO;
2458
2459 memcpy(buf, &data, size);
2460
2461 return size;
2462}
2463
2464static ssize_t
2465qlcnic_sysfs_write_mem(struct kobject *kobj, struct bin_attribute *attr,
2466 char *buf, loff_t offset, size_t size)
2467{
2468 struct device *dev = container_of(kobj, struct device, kobj);
2469 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2470 u64 data;
2471 int ret;
2472
2473 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
2474 if (ret != 0)
2475 return ret;
2476
2477 memcpy(&data, buf, size);
2478
2479 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
2480 return -EIO;
2481
2482 return size;
2483}
2484
2485
2486static struct bin_attribute bin_attr_crb = {
2487 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
2488 .size = 0,
2489 .read = qlcnic_sysfs_read_crb,
2490 .write = qlcnic_sysfs_write_crb,
2491};
2492
2493static struct bin_attribute bin_attr_mem = {
2494 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
2495 .size = 0,
2496 .read = qlcnic_sysfs_read_mem,
2497 .write = qlcnic_sysfs_write_mem,
2498};
2499
2500static void
2501qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
2502{
2503 struct device *dev = &adapter->pdev->dev;
2504
2505 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2506 if (device_create_file(dev, &dev_attr_bridged_mode))
2507 dev_warn(dev,
2508 "failed to create bridged_mode sysfs entry\n");
2509}
2510
2511static void
2512qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
2513{
2514 struct device *dev = &adapter->pdev->dev;
2515
2516 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2517 device_remove_file(dev, &dev_attr_bridged_mode);
2518}
2519
2520static void
2521qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
2522{
2523 struct device *dev = &adapter->pdev->dev;
2524
2525 if (device_create_file(dev, &dev_attr_diag_mode))
2526 dev_info(dev, "failed to create diag_mode sysfs entry\n");
2527 if (device_create_bin_file(dev, &bin_attr_crb))
2528 dev_info(dev, "failed to create crb sysfs entry\n");
2529 if (device_create_bin_file(dev, &bin_attr_mem))
2530 dev_info(dev, "failed to create mem sysfs entry\n");
2531}
2532
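/*
 * The diag entries created above live on the PCI device: "diag_mode" gates
 * the "crb" and "mem" binary attributes, whose handlers only accept 4-byte
 * aligned 4-byte accesses at CRB offsets and 8-byte aligned 8-byte accesses
 * respectively.  The user-space sketch below shows one way to read a CRB
 * register once diag_mode has been set to 1; the sysfs path, the PCI
 * address and the example offset are illustrative assumptions, not values
 * defined by this driver.
 */
#if 0
#define _XOPEN_SOURCE 500
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Substitute the adapter's real PCI address. */
	const char *path = "/sys/bus/pci/devices/0000:01:00.0/crb";
	uint32_t val;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return 1;
	/* Assumed offset inside CRB space; must be 4-byte aligned. */
	if (pread(fd, &val, sizeof(val), 0x06000000) == (ssize_t)sizeof(val))
		printf("crb: 0x%08x\n", (unsigned int)val);
	close(fd);
	return 0;
}
#endif
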
2533
2534static void
2535qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
2536{
2537 struct device *dev = &adapter->pdev->dev;
2538
2539 device_remove_file(dev, &dev_attr_diag_mode);
2540 device_remove_bin_file(dev, &bin_attr_crb);
2541 device_remove_bin_file(dev, &bin_attr_mem);
2542}
2543
2544#ifdef CONFIG_INET
2545
2546#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
2547
2548static int
2549qlcnic_destip_supported(struct qlcnic_adapter *adapter)
2550{
2551 if (adapter->ahw.cut_through)
2552 return 0;
2553
2554 return 1;
2555}
2556
2557static void
2558qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
2559{
2560 struct in_device *indev;
2561 struct qlcnic_adapter *adapter = netdev_priv(dev);
2562
2563 if (!qlcnic_destip_supported(adapter))
2564 return;
2565
2566 indev = in_dev_get(dev);
2567 if (!indev)
2568 return;
2569
2570 for_ifa(indev) {
2571 switch (event) {
2572 case NETDEV_UP:
2573 qlcnic_config_ipaddr(adapter,
2574 ifa->ifa_address, QLCNIC_IP_UP);
2575 break;
2576 case NETDEV_DOWN:
2577 qlcnic_config_ipaddr(adapter,
2578 ifa->ifa_address, QLCNIC_IP_DOWN);
2579 break;
2580 default:
2581 break;
2582 }
2583 } endfor_ifa(indev);
2584
2585 in_dev_put(indev);
2586 return;
2587}
2588
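/*
 * The notifier handlers below funnel IPv4 address changes into
 * qlcnic_config_ipaddr(): VLAN devices are first resolved to the underlying
 * qlcnic netdev, events on other drivers' devices are ignored, and nothing
 * is programmed while the adapter is not fully up or when
 * qlcnic_destip_supported() reports a cut-through adapter.
 */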
2589static int qlcnic_netdev_event(struct notifier_block *this,
2590 unsigned long event, void *ptr)
2591{
2592 struct qlcnic_adapter *adapter;
2593 struct net_device *dev = (struct net_device *)ptr;
2594
2595recheck:
2596 if (dev == NULL)
2597 goto done;
2598
2599 if (dev->priv_flags & IFF_802_1Q_VLAN) {
2600 dev = vlan_dev_real_dev(dev);
2601 goto recheck;
2602 }
2603
2604 if (!is_qlcnic_netdev(dev))
2605 goto done;
2606
2607 adapter = netdev_priv(dev);
2608
2609 if (!adapter)
2610 goto done;
2611
2612 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
2613 goto done;
2614
2615 qlcnic_config_indev_addr(dev, event);
2616done:
2617 return NOTIFY_DONE;
2618}
2619
2620static int
2621qlcnic_inetaddr_event(struct notifier_block *this,
2622 unsigned long event, void *ptr)
2623{
2624 struct qlcnic_adapter *adapter;
2625 struct net_device *dev;
2626
2627 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
2628
2629 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
2630
2631recheck:
2632 if (dev == NULL || !netif_running(dev))
2633 goto done;
2634
2635 if (dev->priv_flags & IFF_802_1Q_VLAN) {
2636 dev = vlan_dev_real_dev(dev);
2637 goto recheck;
2638 }
2639
2640 if (!is_qlcnic_netdev(dev))
2641 goto done;
2642
2643 adapter = netdev_priv(dev);
2644
2645 if (!adapter || !qlcnic_destip_supported(adapter))
2646 goto done;
2647
2648 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
2649 goto done;
2650
2651 switch (event) {
2652 case NETDEV_UP:
2653 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
2654 break;
2655 case NETDEV_DOWN:
2656 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
2657 break;
2658 default:
2659 break;
2660 }
2661
2662done:
2663 return NOTIFY_DONE;
2664}
2665
2666static struct notifier_block qlcnic_netdev_cb = {
2667 .notifier_call = qlcnic_netdev_event,
2668};
2669
2670static struct notifier_block qlcnic_inetaddr_cb = {
2671 .notifier_call = qlcnic_inetaddr_event,
2672};
2673#else
2674static void
2675qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
2676{ }
2677#endif
2678
2679static struct pci_driver qlcnic_driver = {
2680 .name = qlcnic_driver_name,
2681 .id_table = qlcnic_pci_tbl,
2682 .probe = qlcnic_probe,
2683 .remove = __devexit_p(qlcnic_remove),
2684#ifdef CONFIG_PM
2685 .suspend = qlcnic_suspend,
2686 .resume = qlcnic_resume,
2687#endif
2688 .shutdown = qlcnic_shutdown
2689};
2690
2691static int __init qlcnic_init_module(void)
2692{
2693
2694 printk(KERN_INFO "%s\n", qlcnic_driver_string);
2695
2696#ifdef CONFIG_INET
2697 register_netdevice_notifier(&qlcnic_netdev_cb);
2698 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
2699#endif
2700
2701
2702 return pci_register_driver(&qlcnic_driver);
2703}
2704
2705module_init(qlcnic_init_module);
2706
2707static void __exit qlcnic_exit_module(void)
2708{
2709
2710 pci_unregister_driver(&qlcnic_driver);
2711
2712#ifdef CONFIG_INET
2713 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
2714 unregister_netdevice_notifier(&qlcnic_netdev_cb);
2715#endif
2716}
2717
2718module_exit(qlcnic_exit_module);