2 * Copyright (C) 2009 - QLogic Corporation.
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
31 #include <linux/dma-mapping.h>
32 #include <linux/if_vlan.h>
34 #include <linux/ipv6.h>
35 #include <linux/inetdevice.h>
36 #include <linux/sysfs.h>
37 #include <linux/aer.h>
/* Module metadata: description, license, driver version, firmware blob name. */
39 MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
40 MODULE_LICENSE("GPL");
41 MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
42 MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
/* Driver identification strings and default port / WoL-port mode settings. */
44 char qlcnic_driver_name[] = "qlcnic";
45 static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
46 "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
48 static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
50 /* Default to restricted 1G auto-neg mode */
/* NOTE(review): 5 is presumably QLCNIC_PORT_MODE_AUTO_NEG_1G — confirm and
 * prefer the symbolic constant over the magic number. */
51 static int wol_port_mode = 5;
53 static int use_msi = 1;
54 module_param(use_msi, int, 0644);
55 MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled");
57 static int use_msi_x = 1;
58 module_param(use_msi_x, int, 0644);
59 MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled");
61 static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
62 module_param(auto_fw_reset, int, 0644);
63 MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled");
65 static int load_fw_file;
66 module_param(load_fw_file, int, 0644);
67 MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file");
69 static int qlcnic_config_npars;
70 module_param(qlcnic_config_npars, int, 0644);
71 MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled");
/*
 * Forward declarations: PCI entry points, netdev open/close/timeout,
 * IDC work functions, NAPI pollers, sysfs helpers, interrupt handlers,
 * and the VF (non-privileged) variants of the nic_ops callbacks.
 */
73 static int __devinit qlcnic_probe(struct pci_dev *pdev,
74 const struct pci_device_id *ent);
75 static void __devexit qlcnic_remove(struct pci_dev *pdev);
76 static int qlcnic_open(struct net_device *netdev);
77 static int qlcnic_close(struct net_device *netdev);
78 static void qlcnic_tx_timeout(struct net_device *netdev);
79 static void qlcnic_attach_work(struct work_struct *work);
80 static void qlcnic_fwinit_work(struct work_struct *work);
81 static void qlcnic_fw_poll_work(struct work_struct *work);
82 static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
83 work_func_t func, int delay);
84 static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
85 static int qlcnic_poll(struct napi_struct *napi, int budget);
86 static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
87 #ifdef CONFIG_NET_POLL_CONTROLLER
88 static void qlcnic_poll_controller(struct net_device *netdev);
/* NOTE(review): the matching #endif is not visible in this dump — confirm
 * it is present in the full file. */
91 static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
92 static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
93 static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
94 static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
96 static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
97 static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter);
98 static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
100 static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
101 static irqreturn_t qlcnic_intr(int irq, void *data);
102 static irqreturn_t qlcnic_msi_intr(int irq, void *data);
103 static irqreturn_t qlcnic_msix_intr(int irq, void *data);
105 static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
106 static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
107 static int qlcnic_start_firmware(struct qlcnic_adapter *);
109 static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
110 static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
111 static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
112 static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
113 /* PCI Device ID Table */
/* ENTRY() matches QLogic vendor + the given device id, restricted to
 * Ethernet-class PCI functions via class/class_mask. */
114 #define ENTRY(device) \
115 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
116 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
118 #define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
120 static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
121 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
/* Exported so userspace tooling (modinfo/depmod) can auto-load the driver. */
125 MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
/* Ring the Tx doorbell: publish the new producer index to the chip's
 * command-ring CRB register. */
129 qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
130 struct qlcnic_host_tx_ring *tx_ring)
132 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
/* MSI target-status register for each of the 8 PCI functions, indexed by
 * function number. */
135 static const u32 msi_tgt_status[8] = {
136 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
137 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
138 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
139 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
/* Per-function legacy INTx register set, supplied by the hardware config. */
143 struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
/* Mask this status ring's interrupt by writing 0 to its CRB mask register. */
145 static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
147 writel(0, sds_ring->crb_intr_mask);
/* Unmask this status ring's interrupt; for legacy INTx (non-MSI family)
 * also re-arm the global target mask register. */
150 static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
152 struct qlcnic_adapter *adapter = sds_ring->adapter;
154 writel(0x1, sds_ring->crb_intr_mask);
156 if (!QLCNIC_IS_MSI_FAMILY(adapter))
157 writel(0xfbff, adapter->tgt_mask_reg);
/*
 * Allocate a zeroed array of 'count' status (sds) rings.
 * Returns nonzero on allocation failure, 0 on success.
 */
161 qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
163 int size = sizeof(struct qlcnic_host_sds_ring) * count;
165 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
167 return (recv_ctx->sds_rings == NULL);
171 qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
173 if (recv_ctx->sds_rings != NULL)
174 kfree(recv_ctx->sds_rings);
176 recv_ctx->sds_rings = NULL;
/*
 * Register NAPI contexts for every status ring.  The last ring's poller
 * (qlcnic_poll) also services Tx completions; the remaining rings are
 * Rx-only (qlcnic_rx_poll) and get a larger weight.
 * Fails if the sds ring array cannot be allocated.
 */
180 qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
183 struct qlcnic_host_sds_ring *sds_ring;
184 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
186 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
189 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
190 sds_ring = &recv_ctx->sds_rings[ring];
192 if (ring == adapter->max_sds_rings - 1)
193 netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
194 QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings)
196 netif_napi_add(netdev, &sds_ring->napi,
197 qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
/* Unregister every ring's NAPI context and free the sds ring array. */
204 qlcnic_napi_del(struct qlcnic_adapter *adapter)
207 struct qlcnic_host_sds_ring *sds_ring;
208 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
210 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
211 sds_ring = &recv_ctx->sds_rings[ring];
212 netif_napi_del(&sds_ring->napi);
215 qlcnic_free_sds_rings(&adapter->recv_ctx);
/* Enable NAPI polling and unmask the interrupt on every status ring.
 * No-op unless the adapter is fully attached (UP magic set). */
219 qlcnic_napi_enable(struct qlcnic_adapter *adapter)
222 struct qlcnic_host_sds_ring *sds_ring;
223 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
225 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
228 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
229 sds_ring = &recv_ctx->sds_rings[ring];
230 napi_enable(&sds_ring->napi);
231 qlcnic_enable_int(sds_ring);
/*
 * Quiesce receive processing: mask each ring's interrupt, wait for any
 * in-flight NAPI poll to finish, then disable polling on the ring.
 * No-op unless the adapter is fully attached.
 */
236 qlcnic_napi_disable(struct qlcnic_adapter *adapter)
239 struct qlcnic_host_sds_ring *sds_ring;
240 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
242 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
245 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
246 sds_ring = &recv_ctx->sds_rings[ring];
247 qlcnic_disable_int(sds_ring);
248 napi_synchronize(&sds_ring->napi);
249 napi_disable(&sds_ring->napi);
253 static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
255 memset(&adapter->stats, 0, sizeof(adapter->stats));
/*
 * Program the port-mode (and WoL port-mode) CRB registers on boards that
 * support it (HMEZ / XG LOM), based on the port_mode and wol_port_mode
 * module settings; unrecognized values fall back to auto-negotiation.
 */
258 static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
262 val = adapter->ahw.board_type;
263 if ((val == QLCNIC_BRDTYPE_P3_HMEZ) ||
264 (val == QLCNIC_BRDTYPE_P3_XG_LOM)) {
265 if (port_mode == QLCNIC_PORT_MODE_802_3_AP) {
266 data = QLCNIC_PORT_MODE_802_3_AP;
267 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
268 } else if (port_mode == QLCNIC_PORT_MODE_XG) {
269 data = QLCNIC_PORT_MODE_XG;
270 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
271 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_1G) {
272 data = QLCNIC_PORT_MODE_AUTO_NEG_1G;
273 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
274 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_XG) {
275 data = QLCNIC_PORT_MODE_AUTO_NEG_XG;
276 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
/* fallback: any other value means plain auto-negotiation */
278 data = QLCNIC_PORT_MODE_AUTO_NEG;
279 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
/* sanitize the WoL port mode the same way before writing it */
282 if ((wol_port_mode != QLCNIC_PORT_MODE_802_3_AP) &&
283 (wol_port_mode != QLCNIC_PORT_MODE_XG) &&
284 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_1G) &&
285 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_XG)) {
286 wol_port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
288 QLCWR32(adapter, QLCNIC_WOL_PORT_MODE, wol_port_mode);
/*
 * Set or clear the MSI-X enable bit directly in the device's MSI-X
 * capability config space.
 * NOTE(review): only the enable path is visible in this dump — confirm
 * the 'enable == 0' path clears PCI_MSIX_FLAGS_ENABLE before the write.
 */
292 static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
297 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
299 pci_read_config_dword(pdev, pos, &control);
301 control |= PCI_MSIX_FLAGS_ENABLE;
304 pci_write_config_dword(pdev, pos, control);
308 static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
312 for (i = 0; i < count; i++)
313 adapter->msix_entries[i].entry = i;
/*
 * Fetch the MAC address from firmware via nic_ops->get_mac_addr and
 * install it as the netdev's current, permanent, and driver-cached
 * address.  Warns (but does not fail) on an invalid address.
 */
317 qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
319 u8 mac_addr[ETH_ALEN];
320 struct net_device *netdev = adapter->netdev;
321 struct pci_dev *pdev = adapter->pdev;
323 if (adapter->nic_ops->get_mac_addr(adapter, mac_addr) != 0)
326 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
327 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
328 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
330 /* set station address */
332 if (!is_valid_ether_addr(netdev->perm_addr))
333 dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
/*
 * ndo_set_mac_address handler: validate the new address, quiesce the
 * device if it is up (detach + NAPI off), install the address in the
 * driver cache and netdev, reprogram the hardware filter via
 * qlcnic_set_multi(), then resume.
 */
339 static int qlcnic_set_mac(struct net_device *netdev, void *p)
341 struct qlcnic_adapter *adapter = netdev_priv(netdev);
342 struct sockaddr *addr = p;
344 if (!is_valid_ether_addr(addr->sa_data))
347 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
348 netif_device_detach(netdev);
349 qlcnic_napi_disable(adapter);
352 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
353 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
354 qlcnic_set_multi(adapter->netdev);
356 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
357 netif_device_attach(netdev);
358 qlcnic_napi_enable(adapter);
/* Netdev callbacks wired into the network stack for this driver. */
363 static const struct net_device_ops qlcnic_netdev_ops = {
364 .ndo_open = qlcnic_open,
365 .ndo_stop = qlcnic_close,
366 .ndo_start_xmit = qlcnic_xmit_frame,
367 .ndo_get_stats = qlcnic_get_stats,
368 .ndo_validate_addr = eth_validate_addr,
369 .ndo_set_multicast_list = qlcnic_set_multi,
370 .ndo_set_mac_address = qlcnic_set_mac,
371 .ndo_change_mtu = qlcnic_change_mtu,
372 .ndo_tx_timeout = qlcnic_tx_timeout,
373 #ifdef CONFIG_NET_POLL_CONTROLLER
374 .ndo_poll_controller = qlcnic_poll_controller,
/* nic_ops for privileged (PF / management) functions: real firmware access. */
378 static struct qlcnic_nic_template qlcnic_ops = {
379 .get_mac_addr = qlcnic_get_mac_address,
380 .config_bridged_mode = qlcnic_config_bridged_mode,
381 .config_led = qlcnic_config_led,
382 .start_firmware = qlcnic_start_firmware
/* nic_ops for non-privileged (VF) functions: restricted qlcnicvf_* stubs. */
385 static struct qlcnic_nic_template qlcnic_vf_ops = {
386 .get_mac_addr = qlcnic_get_mac_address,
387 .config_bridged_mode = qlcnicvf_config_bridged_mode,
388 .config_led = qlcnicvf_config_led,
389 .start_firmware = qlcnicvf_start_firmware
/*
 * Select the interrupt mode in order of preference MSI-X -> MSI ->
 * legacy INTx, program the per-mode status/mask registers, and size
 * max_sds_rings (multiple rings only when RSS and MSI-X are available).
 */
393 qlcnic_setup_intr(struct qlcnic_adapter *adapter)
395 const struct qlcnic_legacy_intr_set *legacy_intrp;
396 struct pci_dev *pdev = adapter->pdev;
/* number of MSI-X vectors: capped by online CPU count, minimum 2 */
399 if (adapter->rss_supported) {
400 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
401 MSIX_ENTRIES_PER_ADAPTER : 2;
405 adapter->max_sds_rings = 1;
407 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
/* pre-compute the legacy INTx register set for this PCI function */
409 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
411 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
412 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
413 legacy_intrp->tgt_status_reg);
414 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
415 legacy_intrp->tgt_mask_reg);
416 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
418 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
421 qlcnic_set_msix_bit(pdev, 0);
423 if (adapter->msix_supported) {
425 qlcnic_init_msix_entries(adapter, num_msix);
426 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
428 adapter->flags |= QLCNIC_MSIX_ENABLED;
429 qlcnic_set_msix_bit(pdev, 1);
431 if (adapter->rss_supported)
432 adapter->max_sds_rings = num_msix;
434 dev_info(&pdev->dev, "using msi-x interrupts\n");
439 pci_disable_msix(pdev);
441 /* fall through for msi */
444 if (use_msi && !pci_enable_msi(pdev)) {
445 adapter->flags |= QLCNIC_MSI_ENABLED;
446 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
447 msi_tgt_status[adapter->ahw.pci_func]);
448 dev_info(&pdev->dev, "using msi interrupts\n");
449 adapter->msix_entries[0].vector = pdev->irq;
/* last resort: shared legacy INTx on the device's line IRQ */
453 dev_info(&pdev->dev, "using legacy interrupts\n");
454 adapter->msix_entries[0].vector = pdev->irq;
458 qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
460 if (adapter->flags & QLCNIC_MSIX_ENABLED)
461 pci_disable_msix(adapter->pdev);
462 if (adapter->flags & QLCNIC_MSI_ENABLED)
463 pci_disable_msi(adapter->pdev);
467 qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
469 if (adapter->ahw.pci_base0 != NULL)
470 iounmap(adapter->ahw.pci_base0);
/*
 * Query per-function PCI info from firmware and populate the npars[]
 * (per-NPAR state) and eswitch[] (per-XG-port) arrays.
 */
474 qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
476 struct qlcnic_pci_info *pci_info;
480 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
484 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
485 QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
486 if (!adapter->npars) {
491 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
492 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
493 if (!adapter->eswitch) {
498 ret = qlcnic_get_pci_info(adapter, pci_info);
502 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
503 pfn = pci_info[i].id;
/*
 * NOTE(review): npars[] has exactly QLCNIC_MAX_PCI_FUNC entries, so this
 * bounds check should be 'pfn >= QLCNIC_MAX_PCI_FUNC' — as written,
 * pfn == QLCNIC_MAX_PCI_FUNC slips through and indexes one past the end.
 * Also, returning directly here bypasses the cleanup path and leaks
 * pci_info (and the npars/eswitch allocations).
 */
504 if (pfn > QLCNIC_MAX_PCI_FUNC)
505 return QL_STATUS_INVALID_PARAM;
506 adapter->npars[pfn].active = pci_info[i].active;
507 adapter->npars[pfn].type = pci_info[i].type;
508 adapter->npars[pfn].phy_port = pci_info[i].default_port;
509 adapter->npars[pfn].mac_learning = DEFAULT_MAC_LEARN;
510 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
511 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
514 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
515 adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
/* error unwind: free in reverse order of allocation */
521 kfree(adapter->eswitch);
522 adapter->eswitch = NULL;
524 kfree(adapter->npars);
525 adapter->npars = NULL;
/*
 * From the management function, write the per-function driver privilege
 * levels into the QLCNIC_DRV_OP_MODE register, under the hardware API
 * lock.  Honours the qlcnic_config_npars module parameter for other NIC
 * functions, and always sets this function to QLCNIC_MGMT_FUNC.
 */
533 qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
538 u32 data = QLCNIC_MGMT_FUNC;
539 void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
541 /* If other drivers are not in use set their privilege level */
542 ref_count = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
543 ret = qlcnic_api_lock(adapter);
547 if (qlcnic_config_npars) {
548 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
550 if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
551 id == adapter->ahw.pci_func)
553 data |= (qlcnic_config_npars &
554 QLC_DEV_SET_DRV(0xf, id));
/* otherwise preserve existing levels, only updating our own nibble */
557 data = readl(priv_op);
558 data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw.pci_func)) |
559 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
560 adapter->ahw.pci_func));
562 writel(data, priv_op);
563 qlcnic_api_unlock(adapter);
/*
 * Determine the firmware HAL version, this PCI function's number
 * (derived from the MSI-X table offset), whether the embedded switch is
 * enabled, and the function's privilege level; select the matching
 * nic_ops (full qlcnic_ops for MGMT/PRIV, qlcnic_vf_ops for
 * non-privileged).  Returns fw_hal_version; 0 is treated as failure by
 * the caller.
 */
569 qlcnic_get_driver_mode(struct qlcnic_adapter *adapter)
571 void __iomem *msix_base_addr;
572 void __iomem *priv_op;
573 struct qlcnic_info nic_info;
576 u32 op_mode, priv_level;
578 /* Determine FW API version */
579 adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API);
581 /* Find PCI function number */
582 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
583 msix_base_addr = adapter->ahw.pci_base0 + QLCNIC_MSIX_BASE;
584 msix_base = readl(msix_base_addr);
585 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
586 adapter->ahw.pci_func = func;
/* BIT_6 of the capabilities word advertises embedded-switch support */
588 if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
589 adapter->capabilities = nic_info.capabilities;
591 if (adapter->capabilities & BIT_6)
592 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
594 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
597 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
598 adapter->nic_ops = &qlcnic_ops;
599 return adapter->fw_hal_version;
602 /* Determine function privilege level */
603 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
604 op_mode = readl(priv_op);
605 if (op_mode == QLC_DEV_DRV_DEFAULT)
606 priv_level = QLCNIC_MGMT_FUNC;
608 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
610 switch (priv_level) {
611 case QLCNIC_MGMT_FUNC:
612 adapter->op_mode = QLCNIC_MGMT_FUNC;
613 adapter->nic_ops = &qlcnic_ops;
614 qlcnic_init_pci_info(adapter);
615 /* Set privilege level for other functions */
616 qlcnic_set_function_modes(adapter);
617 dev_info(&adapter->pdev->dev,
618 "HAL Version: %d, Management function\n",
619 adapter->fw_hal_version);
621 case QLCNIC_PRIV_FUNC:
622 adapter->op_mode = QLCNIC_PRIV_FUNC;
623 dev_info(&adapter->pdev->dev,
624 "HAL Version: %d, Privileged function\n",
625 adapter->fw_hal_version);
626 adapter->nic_ops = &qlcnic_ops;
628 case QLCNIC_NON_PRIV_FUNC:
629 adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
630 dev_info(&adapter->pdev->dev,
631 "HAL Version: %d Non Privileged function\n",
632 adapter->fw_hal_version);
633 adapter->nic_ops = &qlcnic_vf_ops;
636 dev_info(&adapter->pdev->dev, "Unknown function mode: %d\n",
640 return adapter->fw_hal_version;
/*
 * Map PCI BAR0 (expected to be the 2MB register window), determine the
 * driver mode, and resolve the OCM window CRB address for this function.
 * Unmaps and fails if the driver mode cannot be determined.
 */
644 qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
646 void __iomem *mem_ptr0 = NULL;
647 resource_size_t mem_base;
648 unsigned long mem_len, pci_len0 = 0;
650 struct pci_dev *pdev = adapter->pdev;
652 /* remap phys address */
653 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
654 mem_len = pci_resource_len(pdev, 0);
656 if (mem_len == QLCNIC_PCI_2MB_SIZE) {
658 mem_ptr0 = pci_ioremap_bar(pdev, 0);
659 if (mem_ptr0 == NULL) {
660 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
668 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
670 adapter->ahw.pci_base0 = mem_ptr0;
671 adapter->ahw.pci_len0 = pci_len0;
/* a zero HAL version means the firmware mode could not be determined */
673 if (!qlcnic_get_driver_mode(adapter)) {
674 iounmap(adapter->ahw.pci_base0);
678 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
679 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func)));
/*
 * Build a human-readable board name ("<mac>: <short name>") by matching
 * the PCI vendor/device/subsystem IDs against the supported-boards
 * table; falls back to a generic Gigabit Ethernet string.
 */
684 static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
686 struct pci_dev *pdev = adapter->pdev;
689 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
690 if (qlcnic_boards[i].vendor == pdev->vendor &&
691 qlcnic_boards[i].device == pdev->device &&
692 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
693 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
694 sprintf(name, "%pM: %s" ,
696 qlcnic_boards[i].short_name);
/* no table match: generic fallback name */
704 sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
/*
 * Read the running firmware version and set post-load defaults: ring
 * sizes by port type (10G vs 1G), nic_info capabilities and limits, and
 * the MSI-X / RSS support flags derived from the use_msi_x parameter.
 * Port 0 also logs the board name.
 */
708 qlcnic_check_options(struct qlcnic_adapter *adapter)
710 u32 fw_major, fw_minor, fw_build;
711 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
712 struct pci_dev *pdev = adapter->pdev;
713 struct qlcnic_info nic_info;
715 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
716 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
717 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
719 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
721 if (adapter->portnum == 0) {
722 get_brd_name(adapter, brd_name);
724 pr_info("%s: %s Board Chip rev 0x%x\n",
725 module_name(THIS_MODULE),
726 brd_name, adapter->ahw.revision_id);
729 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
730 fw_major, fw_minor, fw_build);
732 adapter->flags &= ~QLCNIC_LRO_ENABLED;
/* receive ring sizing depends on the physical port speed */
734 if (adapter->ahw.port_type == QLCNIC_XGBE) {
735 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
736 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
737 } else if (adapter->ahw.port_type == QLCNIC_GBE) {
738 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
739 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
742 if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
743 adapter->physical_port = nic_info.phys_port;
744 adapter->switch_mode = nic_info.switch_mode;
745 adapter->max_tx_ques = nic_info.max_tx_ques;
746 adapter->max_rx_ques = nic_info.max_rx_ques;
747 adapter->capabilities = nic_info.capabilities;
748 adapter->max_mac_filters = nic_info.max_mac_filters;
749 adapter->max_mtu = nic_info.max_mtu;
752 adapter->msix_supported = !!use_msi_x;
753 adapter->rss_supported = !!use_msi_x;
755 adapter->num_txd = MAX_CMD_DESCRIPTORS;
757 adapter->max_rds_rings = 2;
/*
 * After a firmware reset on an eswitch-enabled adapter, replay the saved
 * per-NPAR configuration (bandwidth limits, port mirroring) to firmware
 * and reset the cached soft state to defaults.  Management function only;
 * no-op unless a firmware reset is pending.
 */
761 qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
764 struct qlcnic_npar_info *npar;
765 struct qlcnic_info nic_info;
767 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
768 !adapter->need_fw_reset)
771 if (adapter->op_mode == QLCNIC_MGMT_FUNC) {
772 /* Set the NPAR config data after FW reset */
773 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
774 npar = &adapter->npars[i];
775 if (npar->type != QLCNIC_TYPE_NIC)
777 err = qlcnic_get_nic_info(adapter, &nic_info, i);
780 nic_info.min_tx_bw = npar->min_bw;
781 nic_info.max_tx_bw = npar->max_bw;
782 err = qlcnic_set_nic_info(adapter, &nic_info);
786 if (npar->enable_pm) {
787 err = qlcnic_config_port_mirroring(adapter,
788 npar->dest_npar, 1, i);
/* restore soft-state defaults for the next session */
793 npar->mac_learning = DEFAULT_MAC_LEARN;
794 npar->host_vlan_tag = 0;
795 npar->promisc_mode = 0;
796 npar->discard_tagged = 0;
/*
 * Bring the firmware up: check whether this function may start it,
 * handle first-power-up initialization, pick flash vs file firmware,
 * load and handshake with it, publish the driver version, mark the
 * device READY and replay NPAR config.  On failure, the device state is
 * set to FAILED and the firmware reference is dropped.
 */
805 qlcnic_start_firmware(struct qlcnic_adapter *adapter)
807 int val, err, first_boot;
809 err = qlcnic_can_start_firmware(adapter);
815 first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
816 if (first_boot == 0x55555555)
817 /* This is the first boot after power up */
818 QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
821 qlcnic_request_firmware(adapter);
823 if (qlcnic_check_flash_fw_ver(adapter))
826 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
829 err = qlcnic_need_fw_reset(adapter);
/* power-up path: clear peg states and initialize from the ROM image */
835 if (first_boot != 0x55555555) {
836 QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
837 QLCWR32(adapter, CRB_RCVPEG_STATE, 0);
838 qlcnic_pinit_from_rom(adapter);
842 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
843 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
845 qlcnic_set_port_mode(adapter);
847 err = qlcnic_load_firmware(adapter);
851 qlcnic_release_firmware(adapter);
/* advertise the driver version to firmware (major.minor.sub packed) */
853 val = (_QLCNIC_LINUX_MAJOR << 16)
854 | ((_QLCNIC_LINUX_MINOR << 8))
855 | (_QLCNIC_LINUX_SUBVERSION);
856 QLCWR32(adapter, CRB_DRIVER_VERSION, val);
859 /* Handshake with the card before we register the devices. */
860 err = qlcnic_init_firmware(adapter);
864 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
865 qlcnic_idc_debug_info(adapter, 1);
867 qlcnic_check_options(adapter);
868 if (qlcnic_reset_npar_config(adapter))
870 qlcnic_dev_set_npar_ready(adapter);
872 adapter->need_fw_reset = 0;
874 qlcnic_release_firmware(adapter);
/* error path: mark the device FAILED so peers do not wait on it */
878 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
879 dev_err(&adapter->pdev->dev, "Device state set to failed\n");
880 qlcnic_release_firmware(adapter);
/*
 * Install the interrupt handler matching the active mode (diagnostic
 * test, MSI-X, MSI, or shared legacy INTx) on every status ring's
 * vector; each ring gets a "<netdev>[<ring>]" IRQ name.
 */
885 qlcnic_request_irq(struct qlcnic_adapter *adapter)
887 irq_handler_t handler;
888 struct qlcnic_host_sds_ring *sds_ring;
891 unsigned long flags = 0;
892 struct net_device *netdev = adapter->netdev;
893 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
895 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
896 handler = qlcnic_tmp_intr;
897 if (!QLCNIC_IS_MSI_FAMILY(adapter))
898 flags |= IRQF_SHARED;
901 if (adapter->flags & QLCNIC_MSIX_ENABLED)
902 handler = qlcnic_msix_intr;
903 else if (adapter->flags & QLCNIC_MSI_ENABLED)
904 handler = qlcnic_msi_intr;
/* legacy INTx may be shared with other devices on the line */
906 flags |= IRQF_SHARED;
907 handler = qlcnic_intr;
910 adapter->irq = netdev->irq;
912 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
913 sds_ring = &recv_ctx->sds_rings[ring];
914 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
915 err = request_irq(sds_ring->irq, handler,
916 flags, sds_ring->name, sds_ring);
925 qlcnic_free_irq(struct qlcnic_adapter *adapter)
928 struct qlcnic_host_sds_ring *sds_ring;
930 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
932 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
933 sds_ring = &recv_ctx->sds_rings[ring];
934 free_irq(sds_ring->irq, sds_ring);
939 qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
941 adapter->coal.flags = QLCNIC_INTR_DEFAULT;
942 adapter->coal.normal.data.rx_time_us =
943 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
944 adapter->coal.normal.data.rx_packets =
945 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
946 adapter->coal.normal.data.tx_time_us =
947 QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
948 adapter->coal.normal.data.tx_packets =
949 QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
/*
 * Core bring-up path: create the firmware context, post Rx buffers,
 * program multicast/MTU/RSS/LRO and interrupt coalescing, enable NAPI,
 * and request link events.  No-op unless the adapter is attached; safe
 * to call when already UP.
 */
953 __qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
956 struct qlcnic_host_rds_ring *rds_ring;
958 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
961 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
964 if (qlcnic_fw_create_ctx(adapter))
967 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
968 rds_ring = &adapter->recv_ctx.rds_rings[ring];
969 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
972 qlcnic_set_multi(netdev);
973 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
975 adapter->ahw.linkup = 0;
/* RSS only makes sense with more than one status ring */
977 if (adapter->max_sds_rings > 1)
978 qlcnic_config_rss(adapter, 1);
980 qlcnic_config_intr_coalesce(adapter);
982 if (netdev->features & NETIF_F_LRO)
983 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
985 qlcnic_napi_enable(adapter);
987 qlcnic_linkevent_request(adapter, 1);
989 adapter->reset_context = 0;
990 set_bit(__QLCNIC_DEV_UP, &adapter->state);
994 /* Usage: During resume and firmware recovery module.*/
/* Wrapper: only brings the device up when the interface is running. */
997 qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1002 if (netif_running(netdev))
1003 err = __qlcnic_up(adapter, netdev);
/*
 * Core shutdown path: stop Tx, drop the MAC filter list and promiscuous
 * state, disable NAPI, destroy the firmware context and reclaim all Rx
 * and Tx buffers.  Serialized against the transmit path by
 * tx_clean_lock; idempotent via the __QLCNIC_DEV_UP bit.
 */
1010 __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1012 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1015 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
1019 spin_lock(&adapter->tx_clean_lock);
1020 netif_carrier_off(netdev);
1021 netif_tx_disable(netdev);
1023 qlcnic_free_mac_list(adapter);
1025 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
1027 qlcnic_napi_disable(adapter);
1029 qlcnic_fw_destroy_ctx(adapter);
1031 qlcnic_reset_rx_buffers_list(adapter);
1032 qlcnic_release_tx_buffers(adapter);
1033 spin_unlock(&adapter->tx_clean_lock);
1036 /* Usage: During suspend and firmware recovery module */
/* Wrapper: only takes the device down when the interface is running. */
1039 qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1042 if (netif_running(netdev))
1043 __qlcnic_down(adapter, netdev)
/*
 * Allocate everything an operational device needs — NAPI contexts,
 * software and hardware ring resources, IRQs, coalescing defaults and
 * sysfs entries — then mark the adapter UP.  Error paths unwind in
 * reverse allocation order via the goto labels.  Idempotent when
 * already attached.
 */
1049 qlcnic_attach(struct qlcnic_adapter *adapter)
1051 struct net_device *netdev = adapter->netdev;
1052 struct pci_dev *pdev = adapter->pdev;
1055 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
1058 err = qlcnic_napi_add(adapter, netdev);
1062 err = qlcnic_alloc_sw_resources(adapter);
1064 dev_err(&pdev->dev, "Error in setting sw resources\n");
1065 goto err_out_napi_del;
1068 err = qlcnic_alloc_hw_resources(adapter);
1070 dev_err(&pdev->dev, "Error in setting hw resources\n");
1071 goto err_out_free_sw;
1074 err = qlcnic_request_irq(adapter);
1076 dev_err(&pdev->dev, "failed to setup interrupt\n");
1077 goto err_out_free_hw;
1080 qlcnic_init_coalesce_defaults(adapter);
1082 qlcnic_create_sysfs_entries(adapter);
1084 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
/* unwind labels: free in reverse order of allocation */
1088 qlcnic_free_hw_resources(adapter);
1090 qlcnic_free_sw_resources(adapter);
1092 qlcnic_napi_del(adapter);
/* Tear down everything qlcnic_attach() built, in reverse order.
 * No-op when the adapter is not attached. */
1097 qlcnic_detach(struct qlcnic_adapter *adapter)
1099 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1102 qlcnic_remove_sysfs_entries(adapter);
1104 qlcnic_free_hw_resources(adapter);
1105 qlcnic_release_rx_buffers(adapter);
1106 qlcnic_free_irq(adapter);
1107 qlcnic_napi_del(adapter);
1108 qlcnic_free_sw_resources(adapter);
/*
 * End a diagnostic session: disable test interrupts, destroy the
 * diagnostic firmware context, then re-attach the device with its
 * original ring count and bring it back up if it was running.
 */
1113 void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
1115 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1116 struct qlcnic_host_sds_ring *sds_ring;
1119 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
1120 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1121 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1122 sds_ring = &adapter->recv_ctx.sds_rings[ring];
1123 qlcnic_disable_int(sds_ring);
1127 qlcnic_fw_destroy_ctx(adapter);
1129 qlcnic_detach(adapter);
1131 adapter->diag_test = 0;
1132 adapter->max_sds_rings = max_sds_rings;
1134 if (qlcnic_attach(adapter))
1137 if (netif_running(netdev))
1138 __qlcnic_up(adapter, netdev);
1140 netif_device_attach(netdev);
/*
 * Enter a diagnostic session: detach the normal configuration,
 * re-attach with a single status ring, recreate the firmware context,
 * repost Rx buffers and (for the interrupt test) enable ring interrupts.
 * The netdev stays detached from the stack for the duration.
 */
1143 int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1145 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1146 struct qlcnic_host_sds_ring *sds_ring;
1147 struct qlcnic_host_rds_ring *rds_ring;
1151 netif_device_detach(netdev);
1153 if (netif_running(netdev))
1154 __qlcnic_down(adapter, netdev);
1156 qlcnic_detach(adapter);
1158 adapter->max_sds_rings = 1;
1159 adapter->diag_test = test;
1161 ret = qlcnic_attach(adapter);
1163 netif_device_attach(netdev);
1167 ret = qlcnic_fw_create_ctx(adapter);
1169 qlcnic_detach(adapter);
1170 netif_device_attach(netdev);
1174 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1175 rds_ring = &adapter->recv_ctx.rds_rings[ring];
1176 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
1179 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1180 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1181 sds_ring = &adapter->recv_ctx.sds_rings[ring];
1182 qlcnic_enable_int(sds_ring);
1185 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1190 /* Reset context in hardware only */
/* Bounce the device down/up without a full detach/attach, guarded by
 * the __QLCNIC_RESETTING bit to serialize with other reset paths. */
1192 qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
1194 struct net_device *netdev = adapter->netdev;
1196 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1199 netif_device_detach(netdev);
1201 qlcnic_down(adapter, netdev);
1203 qlcnic_up(adapter, netdev);
1205 netif_device_attach(netdev);
1207 clear_bit(__QLCNIC_RESETTING, &adapter->state);
/*
 * Full context reset: if the adapter was attached, take it down, detach
 * (freeing all resources), then re-attach and bring it back up when the
 * interface is running.  Guarded by the __QLCNIC_RESETTING bit.
 */
1212 qlcnic_reset_context(struct qlcnic_adapter *adapter)
1215 struct net_device *netdev = adapter->netdev;
1217 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1220 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
1222 netif_device_detach(netdev);
1224 if (netif_running(netdev))
1225 __qlcnic_down(adapter, netdev);
1227 qlcnic_detach(adapter);
1229 if (netif_running(netdev)) {
1230 err = qlcnic_attach(adapter);
1232 __qlcnic_up(adapter, netdev);
1235 netif_device_attach(netdev);
1238 clear_bit(__QLCNIC_RESETTING, &adapter->state);
/*
 * Initialize the net_device: install netdev/ethtool ops, set feature
 * flags (checksum offload, GRO, and TSO/HIGHDMA/VLAN-tx/LRO where the
 * firmware capabilities allow), read the MAC address, then register the
 * device with the network stack in a quiesced state.
 */
1243 qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1244 struct net_device *netdev, u8 pci_using_dac)
1247 struct pci_dev *pdev = adapter->pdev;
1249 adapter->rx_csum = 1;
1250 adapter->mc_enabled = 0;
1251 adapter->max_mc_count = 38;
1253 netdev->netdev_ops = &qlcnic_netdev_ops;
1254 netdev->watchdog_timeo = 5*HZ;
1256 qlcnic_change_mtu(netdev, netdev->mtu);
1258 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1260 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1261 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
1262 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1265 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
1266 netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
1267 netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
1270 if (pci_using_dac) {
1271 netdev->features |= NETIF_F_HIGHDMA;
1272 netdev->vlan_features |= NETIF_F_HIGHDMA;
1275 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
1276 netdev->features |= (NETIF_F_HW_VLAN_TX);
1278 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
1279 netdev->features |= NETIF_F_LRO;
1281 netdev->irq = adapter->msix_entries[0].vector;
1283 if (qlcnic_read_mac_addr(adapter))
1284 dev_warn(&pdev->dev, "failed to read mac addr\n");
/* start quiesced; the open path brings the queue up */
1286 netif_carrier_off(netdev);
1287 netif_stop_queue(netdev);
1289 err = register_netdev(netdev);
1291 dev_err(&pdev->dev, "failed to register net device\n");
/* Negotiate the DMA mask: try 64-bit (reporting via *pci_using_dac),
 * fall back to 32-bit, else fail with an error message. */
1298 static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
1300 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1301 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1303 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
1304 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1307 dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
/* PCI probe entry point: enable the device, set DMA mask, map BARs, allocate
 * the netdev/adapter, read board info and MAC, start firmware, set up
 * interrupts and the netdev, then kick off the firmware poll work.
 * Error paths unwind in reverse order via the err_out_* labels. */
1314 static int __devinit
1315 qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1317 struct net_device *netdev = NULL;
1318 struct qlcnic_adapter *adapter = NULL;
1320 uint8_t revision_id;
1321 uint8_t pci_using_dac;
1323 err = pci_enable_device(pdev);
/* BAR 0 must be a memory resource. */
1327 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1329 goto err_out_disable_pdev;
1332 err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
1334 goto err_out_disable_pdev;
1336 err = pci_request_regions(pdev, qlcnic_driver_name);
1338 goto err_out_disable_pdev;
1340 pci_set_master(pdev);
1341 pci_enable_pcie_error_reporting(pdev);
1343 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1345 dev_err(&pdev->dev, "failed to allocate net_device\n");
1347 goto err_out_free_res;
1350 SET_NETDEV_DEV(netdev, &pdev->dev);
1352 adapter = netdev_priv(netdev);
1353 adapter->netdev = netdev;
1354 adapter->pdev = pdev;
1355 adapter->dev_rst_time = jiffies;
1357 revision_id = pdev->revision;
1358 adapter->ahw.revision_id = revision_id;
1360 rwlock_init(&adapter->ahw.crb_lock);
1361 mutex_init(&adapter->ahw.mem_lock);
1363 spin_lock_init(&adapter->tx_clean_lock);
1364 INIT_LIST_HEAD(&adapter->mac_list);
1366 err = qlcnic_setup_pci_map(adapter);
1368 goto err_out_free_netdev;
1370 /* This will be reset for mezz cards */
1371 adapter->portnum = adapter->ahw.pci_func;
1373 err = qlcnic_get_board_info(adapter);
1375 dev_err(&pdev->dev, "Error getting board config info.\n");
1376 goto err_out_iounmap;
1379 if (qlcnic_read_mac_addr(adapter))
1380 dev_warn(&pdev->dev, "failed to read mac addr\n");
1382 if (qlcnic_setup_idc_param(adapter))
1383 goto err_out_iounmap;
1385 err = adapter->nic_ops->start_firmware(adapter);
1387 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n");
1388 goto err_out_decr_ref;
1391 qlcnic_clear_stats(adapter);
1393 qlcnic_setup_intr(adapter);
1395 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
1397 goto err_out_disable_msi;
1399 pci_set_drvdata(pdev, adapter);
/* Periodic firmware health poll. */
1401 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1403 switch (adapter->ahw.port_type) {
1405 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1406 adapter->netdev->name);
1409 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1410 adapter->netdev->name);
1414 qlcnic_create_diag_entries(adapter);
/* --- error unwind ladder --- */
1418 err_out_disable_msi:
1419 qlcnic_teardown_intr(adapter);
1422 qlcnic_clr_all_drv_state(adapter);
1425 qlcnic_cleanup_pci_map(adapter);
1427 err_out_free_netdev:
1428 free_netdev(netdev);
1431 pci_release_regions(pdev);
1433 err_out_disable_pdev:
1434 pci_set_drvdata(pdev, NULL);
1435 pci_disable_device(pdev);
/* PCI remove: cancel FW work, unregister and detach, drop per-driver IDC
 * state, release interrupts, sysfs/diag entries, BAR mappings, firmware,
 * and finally the PCI device and netdev. */
1439 static void __devexit qlcnic_remove(struct pci_dev *pdev)
1441 struct qlcnic_adapter *adapter;
1442 struct net_device *netdev;
1444 adapter = pci_get_drvdata(pdev);
1445 if (adapter == NULL)
1448 netdev = adapter->netdev;
1450 qlcnic_cancel_fw_work(adapter);
1452 unregister_netdev(netdev);
1454 qlcnic_detach(adapter);
/* NOTE(review): kfree(NULL) is a no-op, so these NULL guards are
 * redundant — left as-is for a doc-only change. */
1456 if (adapter->npars != NULL)
1457 kfree(adapter->npars);
1458 if (adapter->eswitch != NULL)
1459 kfree(adapter->eswitch);
1461 qlcnic_clr_all_drv_state(adapter);
1463 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1465 qlcnic_teardown_intr(adapter);
1467 qlcnic_remove_diag_entries(adapter);
1469 qlcnic_cleanup_pci_map(adapter);
1471 qlcnic_release_firmware(adapter);
1473 pci_disable_pcie_error_reporting(pdev);
1474 pci_release_regions(pdev);
1475 pci_disable_device(pdev);
1476 pci_set_drvdata(pdev, NULL);
1478 free_netdev(netdev);
/* Common shutdown/suspend helper: quiesce the interface, clear driver IDC
 * state, save PCI config space and arm wake-on-LAN if supported. */
1480 static int __qlcnic_shutdown(struct pci_dev *pdev)
1482 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1483 struct net_device *netdev = adapter->netdev;
1486 netif_device_detach(netdev);
1488 qlcnic_cancel_fw_work(adapter);
1490 if (netif_running(netdev))
1491 qlcnic_down(adapter, netdev);
1493 qlcnic_clr_all_drv_state(adapter);
1495 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1497 retval = pci_save_state(pdev);
/* Enable PME from both D3hot and D3cold when WoL is available. */
1501 if (qlcnic_wol_supported(adapter)) {
1502 pci_enable_wake(pdev, PCI_D3cold, 1);
1503 pci_enable_wake(pdev, PCI_D3hot, 1);
/* .shutdown hook: run the common shutdown path, then power off the device. */
1509 static void qlcnic_shutdown(struct pci_dev *pdev)
1511 if (__qlcnic_shutdown(pdev))
1514 pci_disable_device(pdev);
1519 qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1523 retval = __qlcnic_shutdown(pdev);
1527 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1532 qlcnic_resume(struct pci_dev *pdev)
1534 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1535 struct net_device *netdev = adapter->netdev;
1538 err = pci_enable_device(pdev);
1542 pci_set_power_state(pdev, PCI_D0);
1543 pci_set_master(pdev);
1544 pci_restore_state(pdev);
1546 err = adapter->nic_ops->start_firmware(adapter);
1548 dev_err(&pdev->dev, "failed to start firmware\n");
1552 if (netif_running(netdev)) {
1553 err = qlcnic_up(adapter, netdev);
1557 qlcnic_config_indev_addr(netdev, NETDEV_UP);
1560 netif_device_attach(netdev);
1561 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
/* ndo_open: attach resources, bring the interface up, start the TX queue.
 * On __qlcnic_up failure the attach is rolled back (qlcnic_detach). */
1566 static int qlcnic_open(struct net_device *netdev)
1568 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1571 err = qlcnic_attach(adapter);
1575 err = __qlcnic_up(adapter, netdev);
1579 netif_start_queue(netdev);
1584 qlcnic_detach(adapter);
1589 * qlcnic_close - Disables a network interface entry point
/* ndo_stop: quiesce TX/RX via __qlcnic_down; resources stay attached. */
1591 static int qlcnic_close(struct net_device *netdev)
1593 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1595 __qlcnic_down(adapter, netdev);
/* Fill TX offload fields of the first descriptor for a given skb:
 * - detect 802.1Q in-band tag vs out-of-band (vlan_tx_tag_present) tag;
 * - program LSO (TSO/TSO6) or checksum-offload opcodes;
 * - for LSO, copy the MAC/IP/TCP headers into extra ring descriptors as a
 *   header template for firmware, inserting a VLAN header for OOB tags.
 * NOTE(review): several lines are missing from this extraction; ordering of
 * the copy loop is kept verbatim. */
1600 qlcnic_tso_check(struct net_device *netdev,
1601 struct qlcnic_host_tx_ring *tx_ring,
1602 struct cmd_desc_type0 *first_desc,
1603 struct sk_buff *skb)
1605 u8 opcode = TX_ETHER_PKT;
1606 __be16 protocol = skb->protocol;
1607 u16 flags = 0, vid = 0;
1608 int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
1609 struct cmd_desc_type0 *hwdesc;
1610 struct vlan_ethhdr *vh;
1611 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1612 u32 producer = tx_ring->producer;
/* In-band VLAN tag: peel it to find the encapsulated protocol. */
1614 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1616 vh = (struct vlan_ethhdr *)skb->data;
1617 protocol = vh->h_vlan_encapsulated_proto;
1618 flags = FLAGS_VLAN_TAGGED;
1620 } else if (vlan_tx_tag_present(skb)) {
1622 flags = FLAGS_VLAN_OOB;
1623 vid = vlan_tx_tag_get(skb);
1624 qlcnic_set_tx_vlan_tci(first_desc, vid);
/* Multicast/broadcast destination (I/G bit set in first octet). */
1628 if (*(skb->data) & BIT_0) {
1630 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
1633 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1634 skb_shinfo(skb)->gso_size > 0) {
1636 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1638 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1639 first_desc->total_hdr_length = hdr_len;
/* OOB VLAN on TSO: account for the tag firmware will insert. */
1641 first_desc->total_hdr_length += VLAN_HLEN;
1642 first_desc->tcp_hdr_offset = VLAN_HLEN;
1643 first_desc->ip_hdr_offset = VLAN_HLEN;
1644 /* Only in case of TSO on vlan device */
1645 flags |= FLAGS_VLAN_TAGGED;
1648 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1649 TX_TCP_LSO6 : TX_TCP_LSO;
1652 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
/* Plain checksum offload: pick opcode by L3/L4 protocol. */
1655 if (protocol == cpu_to_be16(ETH_P_IP)) {
1656 l4proto = ip_hdr(skb)->protocol;
1658 if (l4proto == IPPROTO_TCP)
1659 opcode = TX_TCP_PKT;
1660 else if (l4proto == IPPROTO_UDP)
1661 opcode = TX_UDP_PKT;
1662 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1663 l4proto = ipv6_hdr(skb)->nexthdr;
1665 if (l4proto == IPPROTO_TCP)
1666 opcode = TX_TCPV6_PKT;
1667 else if (l4proto == IPPROTO_UDP)
1668 opcode = TX_UDPV6_PKT;
1672 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
1673 first_desc->ip_hdr_offset += skb_network_offset(skb);
1674 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1679 /* For LSO, we need to copy the MAC/IP/TCP headers into
1680 * the descriptor ring
1686 /* Create a TSO vlan header template for firmware */
1688 hwdesc = &tx_ring->desc_head[producer];
1689 tx_ring->cmd_buf_arr[producer].skb = NULL;
1691 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1692 hdr_len + VLAN_HLEN);
/* Build an 802.1Q header in-place: 12 bytes of MACs, then tag,
 * then the rest of the original headers shifted by VLAN_HLEN. */
1694 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1695 skb_copy_from_linear_data(skb, vh, 12);
1696 vh->h_vlan_proto = htons(ETH_P_8021Q);
1697 vh->h_vlan_TCI = htons(vid);
1698 skb_copy_from_linear_data_offset(skb, 12,
1699 (char *)vh + 16, copy_len - 16);
1701 copied = copy_len - VLAN_HLEN;
1704 producer = get_next_index(producer, tx_ring->num_desc);
/* Spill remaining header bytes across further descriptors. */
1707 while (copied < hdr_len) {
1709 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1710 (hdr_len - copied));
1712 hwdesc = &tx_ring->desc_head[producer];
1713 tx_ring->cmd_buf_arr[producer].skb = NULL;
1715 skb_copy_from_linear_data_offset(skb, copied,
1716 (char *)hwdesc + offset, copy_len);
1721 producer = get_next_index(producer, tx_ring->num_desc);
1724 tx_ring->producer = producer;
1726 adapter->stats.lso_frames++;
/* DMA-map an skb for TX: head via pci_map_single into frag_array[0], each
 * page fragment via pci_map_page. On a mapping failure, unwind all previous
 * mappings (fragments first, then the head). */
1730 qlcnic_map_tx_skb(struct pci_dev *pdev,
1731 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
1733 struct qlcnic_skb_frag *nf;
1734 struct skb_frag_struct *frag;
1738 nr_frags = skb_shinfo(skb)->nr_frags;
1739 nf = &pbuf->frag_array[0];
1741 map = pci_map_single(pdev, skb->data,
1742 skb_headlen(skb), PCI_DMA_TODEVICE);
1743 if (pci_dma_mapping_error(pdev, map))
1747 nf->length = skb_headlen(skb);
1749 for (i = 0; i < nr_frags; i++) {
1750 frag = &skb_shinfo(skb)->frags[i];
1751 nf = &pbuf->frag_array[i+1];
1753 map = pci_map_page(pdev, frag->page, frag->page_offset,
1754 frag->size, PCI_DMA_TODEVICE);
1755 if (pci_dma_mapping_error(pdev, map))
1759 nf->length = frag->size;
/* Error unwind: unmap already-mapped fragments, then the head. */
1766 nf = &pbuf->frag_array[i+1];
1767 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
1770 nf = &pbuf->frag_array[0];
1771 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
/* Zero a command descriptor in 64-bit words (body not visible in this
 * extraction). */
1778 qlcnic_clear_cmddesc(u64 *desc)
/* ndo_start_xmit: reserve ring space (4 buffers per descriptor), DMA-map the
 * skb, scatter the fragment addresses/lengths across addr_buffer1..4 of
 * successive descriptors, apply TSO/csum offload fields, then ring the
 * doorbell (qlcnic_update_cmd_producer). Returns NETDEV_TX_BUSY when the
 * device is not up or the ring is nearly full. */
1785 qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1787 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1788 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1789 struct qlcnic_cmd_buffer *pbuf;
1790 struct qlcnic_skb_frag *buffrag;
1791 struct cmd_desc_type0 *hwdesc, *first_desc;
1792 struct pci_dev *pdev;
1796 int frag_count, no_of_desc;
1797 u32 num_txd = tx_ring->num_desc;
1799 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
1800 netif_stop_queue(netdev);
1801 return NETDEV_TX_BUSY;
1804 frag_count = skb_shinfo(skb)->nr_frags + 1;
1806 /* 4 fragments per cmd des */
1807 no_of_desc = (frag_count + 3) >> 2;
/* Stop the queue near exhaustion; re-check to close the race with
 * the completion path freeing descriptors concurrently. */
1809 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
1810 netif_stop_queue(netdev);
1812 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
1813 netif_start_queue(netdev);
1815 adapter->stats.xmit_off++;
1816 return NETDEV_TX_BUSY;
1820 producer = tx_ring->producer;
1821 pbuf = &tx_ring->cmd_buf_arr[producer];
1823 pdev = adapter->pdev;
1825 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
1826 adapter->stats.tx_dma_map_error++;
1831 pbuf->frag_count = frag_count;
1833 first_desc = hwdesc = &tx_ring->desc_head[producer];
1834 qlcnic_clear_cmddesc((u64 *)hwdesc);
1836 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
1837 qlcnic_set_tx_port(first_desc, adapter->portnum);
1839 for (i = 0; i < frag_count; i++) {
/* k = i % 4: start a fresh descriptor every 4 fragments. */
1843 if ((k == 0) && (i > 0)) {
1844 /* move to next desc.*/
1845 producer = get_next_index(producer, num_txd);
1846 hwdesc = &tx_ring->desc_head[producer];
1847 qlcnic_clear_cmddesc((u64 *)hwdesc);
1848 tx_ring->cmd_buf_arr[producer].skb = NULL;
1851 buffrag = &pbuf->frag_array[i];
1853 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
1856 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
1859 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
1862 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
1865 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
1870 tx_ring->producer = get_next_index(producer, num_txd);
1872 qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
1874 qlcnic_update_cmd_producer(adapter, tx_ring);
1876 adapter->stats.txbytes += skb->len;
1877 adapter->stats.xmitcalled++;
1879 return NETDEV_TX_OK;
/* Drop path: count and free the skb, still report TX_OK. */
1882 adapter->stats.txdropped++;
1883 dev_kfree_skb_any(skb);
1884 return NETDEV_TX_OK;
/* Read the on-chip temperature register and act on the state: PANIC shuts
 * the device down, WARN logs once on entry, and a return to NORMAL from
 * WARN is logged as recovery. Last state is cached in adapter->temp. */
1887 static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
1889 struct net_device *netdev = adapter->netdev;
1890 u32 temp, temp_state, temp_val;
1893 temp = QLCRD32(adapter, CRB_TEMP_STATE);
1895 temp_state = qlcnic_get_temp_state(temp);
1896 temp_val = qlcnic_get_temp_val(temp);
1898 if (temp_state == QLCNIC_TEMP_PANIC) {
1899 dev_err(&netdev->dev,
1900 "Device temperature %d degrees C exceeds"
1901 " maximum allowed. Hardware has been shut down.\n",
1904 } else if (temp_state == QLCNIC_TEMP_WARN) {
/* Warn only on the NORMAL -> WARN transition. */
1905 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
1906 dev_err(&netdev->dev,
1907 "Device temperature %d degrees C "
1908 "exceeds operating range."
1909 " Immediate action needed.\n",
1913 if (adapter->temp == QLCNIC_TEMP_WARN) {
1914 dev_info(&netdev->dev,
1915 "Device temperature is now %d degrees C"
1916 " in normal range.\n", temp_val);
1919 adapter->temp = temp_state;
/* Propagate a link state change to the network stack: toggle carrier and
 * the TX queue only on actual up<->down transitions. */
1923 void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
1925 struct net_device *netdev = adapter->netdev;
1927 if (adapter->ahw.linkup && !linkup) {
1928 netdev_info(netdev, "NIC Link is down\n");
1929 adapter->ahw.linkup = 0;
1930 if (netif_running(netdev)) {
1931 netif_carrier_off(netdev);
1932 netif_stop_queue(netdev);
1934 } else if (!adapter->ahw.linkup && linkup) {
1935 netdev_info(netdev, "NIC Link is up\n");
1936 adapter->ahw.linkup = 1;
1937 if (netif_running(netdev)) {
1938 netif_carrier_on(netdev);
1939 netif_wake_queue(netdev);
/* ndo_tx_timeout: request a context reset; after QLCNIC_MAX_TX_TIMEOUTS
 * consecutive timeouts escalate to a full firmware reset. No-op while a
 * reset is already in progress. */
1944 static void qlcnic_tx_timeout(struct net_device *netdev)
1946 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1948 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
1951 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
1953 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
1954 adapter->need_fw_reset = 1;
1956 adapter->reset_context = 1;
/* ndo_get_stats: rebuild netdev->stats from the driver's software counters
 * (LRO-coalesced packets/bytes are folded into the RX totals). */
1959 static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
1961 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1962 struct net_device_stats *stats = &netdev->stats;
1964 memset(stats, 0, sizeof(*stats));
1966 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
1967 stats->tx_packets = adapter->stats.xmitfinished;
1968 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
1969 stats->tx_bytes = adapter->stats.txbytes;
1970 stats->rx_dropped = adapter->stats.rxdropped;
1971 stats->tx_dropped = adapter->stats.txdropped;
/* Legacy (INTx) interrupt acknowledge: verify the interrupt belongs to this
 * function (vector bit + state machine), then clear the target status
 * register. Double read flushes the posted write. */
1976 static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
1980 status = readl(adapter->isr_int_vec);
1982 if (!(status & adapter->int_vec_bit))
1985 /* check interrupt state machine, to be sure */
1986 status = readl(adapter->crb_int_state_reg);
1987 if (!ISR_LEGACY_INT_TRIGGERED(status))
1990 writel(0xffffffff, adapter->tgt_status_reg);
1991 /* read twice to ensure write is flushed */
1992 readl(adapter->isr_int_vec);
1993 readl(adapter->isr_int_vec);
/* Diagnostic-mode interrupt handler: acknowledge per interrupt mode
 * (MSI-X needs no ack, MSI clears tgt_status_reg, legacy goes through
 * qlcnic_clear_legacy_intr), bump diag_cnt and re-enable the interrupt. */
1998 static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
2000 struct qlcnic_host_sds_ring *sds_ring = data;
2001 struct qlcnic_adapter *adapter = sds_ring->adapter;
2003 if (adapter->flags & QLCNIC_MSIX_ENABLED)
2005 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
2006 writel(0xffffffff, adapter->tgt_status_reg);
2010 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2014 adapter->diag_cnt++;
2015 qlcnic_enable_int(sds_ring);
/* Legacy INTx handler: ack via qlcnic_clear_legacy_intr, then hand the
 * status ring to NAPI. */
2019 static irqreturn_t qlcnic_intr(int irq, void *data)
2021 struct qlcnic_host_sds_ring *sds_ring = data;
2022 struct qlcnic_adapter *adapter = sds_ring->adapter;
2024 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2027 napi_schedule(&sds_ring->napi);
/* MSI handler: ack by writing the target status register, then schedule
 * NAPI for the status ring. */
2032 static irqreturn_t qlcnic_msi_intr(int irq, void *data)
2034 struct qlcnic_host_sds_ring *sds_ring = data;
2035 struct qlcnic_adapter *adapter = sds_ring->adapter;
2037 /* clear interrupt */
2038 writel(0xffffffff, adapter->tgt_status_reg);
2040 napi_schedule(&sds_ring->napi);
/* MSI-X handler: no acknowledge needed, just schedule NAPI. */
2044 static irqreturn_t qlcnic_msix_intr(int irq, void *data)
2046 struct qlcnic_host_sds_ring *sds_ring = data;
2048 napi_schedule(&sds_ring->napi);
/* Reap completed TX descriptors: walk from sw_consumer to the hardware
 * consumer index, unmapping DMA buffers and freeing skbs (bounded by
 * MAX_STATUS_HANDLE per invocation). Wakes the queue when space frees up.
 * Returns whether the ring was fully drained; trylock keeps this safe to
 * call from both NAPI and other contexts. */
2052 static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
2054 u32 sw_consumer, hw_consumer;
2056 struct qlcnic_cmd_buffer *buffer;
2057 struct pci_dev *pdev = adapter->pdev;
2058 struct net_device *netdev = adapter->netdev;
2059 struct qlcnic_skb_frag *frag;
2061 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2063 if (!spin_trylock(&adapter->tx_clean_lock))
2066 sw_consumer = tx_ring->sw_consumer;
2067 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2069 while (sw_consumer != hw_consumer) {
2070 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
/* frag_array[0] is the skb head (mapped single); the rest pages. */
2072 frag = &buffer->frag_array[0];
2073 pci_unmap_single(pdev, frag->dma, frag->length,
2076 for (i = 1; i < buffer->frag_count; i++) {
2078 pci_unmap_page(pdev, frag->dma, frag->length,
2083 adapter->stats.xmitfinished++;
2084 dev_kfree_skb_any(buffer->skb);
2088 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
2089 if (++count >= MAX_STATUS_HANDLE)
2093 if (count && netif_running(netdev)) {
2094 tx_ring->sw_consumer = sw_consumer;
2098 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
2099 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
2100 netif_wake_queue(netdev);
2101 adapter->stats.xmit_on++;
2104 adapter->tx_timeo_cnt = 0;
2107 * If everything is freed up to consumer then check if the ring is full
2108 * If the ring is full then check if more needs to be freed and
2109 * schedule the call back again.
2111 * This happens when there are 2 CPUs. One could be freeing and the
2112 * other filling it. If the ring is full when we get out of here and
2113 * the card has already interrupted the host then the host can miss the
2116 * There is still a possible race condition and the host could miss an
2117 * interrupt. The card has to take care of this.
2119 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2120 done = (sw_consumer == hw_consumer);
2121 spin_unlock(&adapter->tx_clean_lock);
/* NAPI poll for the combined ring: reap TX completions then process RX up
 * to the budget; complete and re-enable the interrupt only when both RX is
 * under budget and TX is fully drained. */
2126 static int qlcnic_poll(struct napi_struct *napi, int budget)
2128 struct qlcnic_host_sds_ring *sds_ring =
2129 container_of(napi, struct qlcnic_host_sds_ring, napi);
2131 struct qlcnic_adapter *adapter = sds_ring->adapter;
2136 tx_complete = qlcnic_process_cmd_ring(adapter);
2138 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2140 if ((work_done < budget) && tx_complete) {
2141 napi_complete(&sds_ring->napi);
2142 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2143 qlcnic_enable_int(sds_ring);
/* NAPI poll for RX-only rings (MSI-X secondary vectors): process RX and
 * re-enable the interrupt when under budget and the device is still up. */
2149 static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2151 struct qlcnic_host_sds_ring *sds_ring =
2152 container_of(napi, struct qlcnic_host_sds_ring, napi);
2154 struct qlcnic_adapter *adapter = sds_ring->adapter;
2157 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2159 if (work_done < budget) {
2160 napi_complete(&sds_ring->napi);
2161 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2162 qlcnic_enable_int(sds_ring);
2168 #ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run the interrupt handler with the IRQ masked so netconsole
 * etc. can make progress without interrupt delivery. */
2169 static void qlcnic_poll_controller(struct net_device *netdev)
2171 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2172 disable_irq(adapter->irq);
2173 qlcnic_intr(adapter->irq, adapter);
2174 enable_irq(adapter->irq);
/* Record IDC debug info in the scratch register: port number (bits 0-3),
 * encoding (bit 7) and time since the last device reset (bits 8+). */
2179 qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
2183 val = adapter->portnum & 0xf;
2184 val |= encoding << 7;
2185 val |= (jiffies - adapter->dev_rst_time) << 8;
2187 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
2188 adapter->dev_rst_time = jiffies;
/* Advertise this function's readiness for a reset or quiescent transition
 * by setting its per-port bit in the shared DRV_STATE register, under the
 * inter-driver API lock. */
2192 qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
2196 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
2197 state != QLCNIC_DEV_NEED_QUISCENT);
2199 if (qlcnic_api_lock(adapter))
2202 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2204 if (state == QLCNIC_DEV_NEED_RESET)
2205 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
2206 else if (state == QLCNIC_DEV_NEED_QUISCENT)
2207 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
2209 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2211 qlcnic_api_unlock(adapter);
/* Clear this function's reset/quiescent-ready bits from the shared
 * DRV_STATE register, under the API lock. */
2217 qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2221 if (qlcnic_api_lock(adapter))
2224 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2225 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
2226 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2228 qlcnic_api_unlock(adapter);
/* Drop this function's reference from the shared DEV_REF_COUNT; if no
 * function holds a reference (mask 0x11111111 = one bit per port slot),
 * move the device to COLD. Also clears this port's DRV_STATE bits and
 * local reset bookkeeping. */
2234 qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter)
2238 if (qlcnic_api_lock(adapter))
2241 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
2242 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
2243 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
2245 if (!(val & 0x11111111))
2246 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2248 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2249 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
2250 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2252 qlcnic_api_unlock(adapter);
2254 adapter->fw_fail_cnt = 0;
2255 clear_bit(__QLCNIC_START_FW, &adapter->state);
2256 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2259 /* Grab api lock, before checking state */
/* Compare the active-function mask against the reset/quiescent-ack mask;
 * all active functions must have acknowledged before proceeding. */
2261 qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2265 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2266 act = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
2268 if (((state & 0x11111111) == (act & 0x11111111)) ||
2269 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
/* Verify the inter-driver communication (IDC) protocol version stored in
 * the CRB matches this driver's; mismatch is logged. */
2275 static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
2277 u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
2279 if (val != QLCNIC_DRV_IDC_VER) {
2280 dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
2281 " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
/* Decide whether this function should load the firmware: take a reference
 * in DEV_REF_COUNT, then branch on the shared device state (COLD -> we own
 * the init; READY -> just verify IDC version; NEED_RESET/NEED_QUISCENT ->
 * ack and wait; FAILED -> error). Otherwise poll DEV_STATE until READY or
 * dev_init_timeo expires. */
2288 qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2290 u32 val, prev_state;
2291 u8 dev_init_timeo = adapter->dev_init_timeo;
2292 u8 portnum = adapter->portnum;
/* If this function already claimed the FW start, skip arbitration. */
2295 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2298 if (qlcnic_api_lock(adapter))
2301 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
2302 if (!(val & (1 << (portnum * 4)))) {
2303 QLC_DEV_SET_REF_CNT(val, portnum);
2304 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
2307 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2308 QLCDB(adapter, HW, "Device state = %u\n", prev_state);
2310 switch (prev_state) {
2311 case QLCNIC_DEV_COLD:
2312 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
2313 QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
2314 qlcnic_idc_debug_info(adapter, 0);
2315 qlcnic_api_unlock(adapter);
2318 case QLCNIC_DEV_READY:
2319 ret = qlcnic_check_idc_ver(adapter);
2320 qlcnic_api_unlock(adapter);
2323 case QLCNIC_DEV_NEED_RESET:
2324 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2325 QLC_DEV_SET_RST_RDY(val, portnum);
2326 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2329 case QLCNIC_DEV_NEED_QUISCENT:
2330 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2331 QLC_DEV_SET_QSCNT_RDY(val, portnum);
2332 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2335 case QLCNIC_DEV_FAILED:
2336 dev_err(&adapter->pdev->dev, "Device in failed state.\n");
2337 qlcnic_api_unlock(adapter);
2340 case QLCNIC_DEV_INITIALIZING:
2341 case QLCNIC_DEV_QUISCENT:
2345 qlcnic_api_unlock(adapter);
/* Poll until another function finishes initialization. */
2349 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2351 if (prev_state == QLCNIC_DEV_QUISCENT)
2353 } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
2355 if (!dev_init_timeo) {
2356 dev_err(&adapter->pdev->dev,
2357 "Waiting for device to initialize timeout\n");
2361 if (qlcnic_api_lock(adapter))
2364 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2365 QLC_DEV_CLR_RST_QSCNT(val, portnum);
2366 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2368 ret = qlcnic_check_idc_ver(adapter);
2369 qlcnic_api_unlock(adapter);
/* Delayed-work state machine driving firmware re-initialization after a
 * reset request: waits for all functions to ack (qlcnic_check_drv_state,
 * bounded by reset_ack_timeo), transitions DEV_STATE to QUISCENT or
 * INITIALIZING as appropriate, restarts firmware, and reschedules itself
 * while the device is still settling. Non-privileged functions only wait. */
2375 qlcnic_fwinit_work(struct work_struct *work)
2377 struct qlcnic_adapter *adapter = container_of(work,
2378 struct qlcnic_adapter, fw_work.work);
2379 u32 dev_state = 0xf;
2381 if (qlcnic_api_lock(adapter))
2384 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2385 if (dev_state == QLCNIC_DEV_QUISCENT) {
2386 qlcnic_api_unlock(adapter);
2387 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2392 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
2393 qlcnic_api_unlock(adapter);
/* Give up waiting for acks after reset_ack_timeo seconds. */
2397 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
2398 dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
2399 adapter->reset_ack_timeo);
2400 goto skip_ack_check;
2403 if (!qlcnic_check_drv_state(adapter)) {
2405 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2407 if (dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2408 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2409 QLCNIC_DEV_QUISCENT);
2410 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2412 QLCDB(adapter, DRV, "Quiscing the driver\n");
2413 qlcnic_idc_debug_info(adapter, 0);
2415 qlcnic_api_unlock(adapter);
2419 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2420 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2421 QLCNIC_DEV_INITIALIZING);
2422 set_bit(__QLCNIC_START_FW, &adapter->state);
2423 QLCDB(adapter, DRV, "Restarting fw\n");
2424 qlcnic_idc_debug_info(adapter, 0);
2427 qlcnic_api_unlock(adapter);
2429 if (!adapter->nic_ops->start_firmware(adapter)) {
2430 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2436 qlcnic_api_unlock(adapter);
/* Non-owner path: wait for the device to come READY. */
2439 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2440 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
2442 switch (dev_state) {
2443 case QLCNIC_DEV_READY:
2444 if (!adapter->nic_ops->start_firmware(adapter)) {
2445 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2448 case QLCNIC_DEV_FAILED:
2451 qlcnic_schedule_work(adapter,
2452 qlcnic_fwinit_work, FW_POLL_DELAY);
/* Failure: reattach the netdev and clear all driver state. */
2457 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
2458 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
2459 netif_device_attach(adapter->netdev);
2460 qlcnic_clr_all_drv_state(adapter);
/* Delayed work run when firmware recovery starts: bring the interface down,
 * check the peg halt status for fatal errors or thermal panic, advertise
 * this function's reset/quiescent ack, and chain into qlcnic_fwinit_work. */
2464 qlcnic_detach_work(struct work_struct *work)
2466 struct qlcnic_adapter *adapter = container_of(work,
2467 struct qlcnic_adapter, fw_work.work);
2468 struct net_device *netdev = adapter->netdev;
2471 netif_device_detach(netdev);
2473 qlcnic_down(adapter, netdev);
2475 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2477 if (status & QLCNIC_RCODE_FATAL_ERROR)
2480 if (adapter->temp == QLCNIC_TEMP_PANIC)
2483 if (qlcnic_set_drv_state(adapter, adapter->dev_state))
2486 adapter->fw_wait_cnt = 0;
2488 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
/* Unrecoverable: log, reattach the netdev, clear driver state. */
2493 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
2494 status, adapter->temp);
2495 netif_device_attach(netdev);
2496 qlcnic_clr_all_drv_state(adapter);
2500 /*Transit NPAR state to NON Operational */
2502 qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
2506 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
/* Already non-operational: nothing to do. */
2507 if (state == QLCNIC_DEV_NPAR_NON_OPER)
2510 if (qlcnic_api_lock(adapter))
2512 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2513 qlcnic_api_unlock(adapter);
2516 /*Transit to RESET state from READY state only */
2518 qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2522 adapter->need_fw_reset = 1;
2523 if (qlcnic_api_lock(adapter))
2526 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2528 if (state == QLCNIC_DEV_READY) {
2529 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
2530 QLCDB(adapter, DRV, "NEED_RESET state set\n");
2531 qlcnic_idc_debug_info(adapter, 0);
/* NPAR goes non-operational for the duration of the reset. */
2534 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2535 qlcnic_api_unlock(adapter);
2538 /* Transit to NPAR READY state from NPAR NOT READY state */
2540 qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
/* Only the management function of an eswitch-enabled adapter does this. */
2542 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
2543 adapter->op_mode != QLCNIC_MGMT_FUNC)
2545 if (qlcnic_api_lock(adapter))
2548 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
2549 QLCDB(adapter, DRV, "NPAR operational state set\n");
2551 qlcnic_api_unlock(adapter);
/* Queue the adapter's (single) delayed work item with the given handler,
 * unless AER recovery is in progress. */
2555 qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2556 work_func_t func, int delay)
2558 if (test_bit(__QLCNIC_AER, &adapter->state))
2561 INIT_DELAYED_WORK(&adapter->fw_work, func);
2562 schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay));
/* Take ownership of __QLCNIC_RESETTING (spinning until acquired), then
 * cancel any pending firmware work synchronously. */
2566 qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
2568 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2571 cancel_delayed_work_sync(&adapter->fw_work);
/* Delayed work run after firmware restart: bring the interface back up,
 * reattach the netdev, clear reset bookkeeping, and re-arm the periodic
 * firmware poll once this function's DRV_STATE bits are cleared. */
2575 qlcnic_attach_work(struct work_struct *work)
2577 struct qlcnic_adapter *adapter = container_of(work,
2578 struct qlcnic_adapter, fw_work.work);
2579 struct net_device *netdev = adapter->netdev;
2581 if (netif_running(netdev)) {
2582 if (qlcnic_up(adapter, netdev))
2585 qlcnic_config_indev_addr(netdev, NETDEV_UP);
2589 netif_device_attach(netdev);
2590 adapter->fw_fail_cnt = 0;
2591 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2593 if (!qlcnic_clr_drv_state(adapter))
2594 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
/* Periodic firmware health check: temperature, pending reset requests,
 * shared device state, and the firmware heartbeat counter. A stalled
 * heartbeat for FW_FAIL_THRESH polls is treated as a firmware hang and
 * (if auto reset is enabled) schedules qlcnic_detach_work for recovery.
 * Returns nonzero when recovery has been initiated. */
2599 qlcnic_check_health(struct qlcnic_adapter *adapter)
2601 u32 state = 0, heartbit;
2602 struct net_device *netdev = adapter->netdev;
2604 if (qlcnic_check_temp(adapter))
2607 if (adapter->need_fw_reset)
2608 qlcnic_dev_request_reset(adapter);
2610 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2611 if (state == QLCNIC_DEV_NEED_RESET ||
2612 state == QLCNIC_DEV_NEED_QUISCENT) {
2613 qlcnic_set_npar_non_operational(adapter);
2614 adapter->need_fw_reset = 1;
/* Heartbeat advanced: firmware alive, reset failure counter. */
2617 heartbit = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
2618 if (heartbit != adapter->heartbit) {
2619 adapter->heartbit = heartbit;
2620 adapter->fw_fail_cnt = 0;
2621 if (adapter->need_fw_reset)
/* Context reset requested (e.g. by tx_timeout) and auto reset on. */
2624 if (adapter->reset_context &&
2625 auto_fw_reset == AUTO_FW_RESET_ENABLED) {
2626 qlcnic_reset_hw_context(adapter);
2627 adapter->netdev->trans_start = jiffies;
2633 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
2636 qlcnic_dev_request_reset(adapter);
2638 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED))
2639 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
2641 dev_info(&netdev->dev, "firmware hang detected\n");
2644 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
2645 QLCNIC_DEV_NEED_RESET;
2647 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
2648 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
2650 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
2651 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
/* Periodic firmware poll: run the health check and re-arm itself unless a
 * reset is in progress or recovery has been scheduled. */
2658 qlcnic_fw_poll_work(struct work_struct *work)
2660 struct qlcnic_adapter *adapter = container_of(work,
2661 struct qlcnic_adapter, fw_work.work);
2663 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2667 if (qlcnic_check_health(adapter))
2671 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
/* Determine if this is the first function of the device to resume: scan the
 * sibling PCI functions on the same slot and report whether any of them is
 * already out of D3cold. */
2674 static int qlcnic_is_first_func(struct pci_dev *pdev)
2676 struct pci_dev *oth_pdev;
2677 int val = pdev->devfn;
2680 oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
2681 (pdev->bus), pdev->bus->number,
2682 PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
2686 if (oth_pdev->current_state != PCI_D3cold) {
2687 pci_dev_put(oth_pdev);
2690 pci_dev_put(oth_pdev);
/* AER slot-reset recovery path: re-enable the PCI device, restore state,
 * and — when this is the first privileged function up — claim the firmware
 * restart by moving DEV_STATE to INITIALIZING. Then restart firmware,
 * rebuild interrupts and bring the interface back up. */
2695 static int qlcnic_attach_func(struct pci_dev *pdev)
2697 int err, first_func;
2698 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2699 struct net_device *netdev = adapter->netdev;
2701 pdev->error_state = pci_channel_io_normal;
2703 err = pci_enable_device(pdev);
2707 pci_set_power_state(pdev, PCI_D0);
2708 pci_set_master(pdev);
2709 pci_restore_state(pdev);
2711 first_func = qlcnic_is_first_func(pdev);
2713 if (qlcnic_api_lock(adapter))
2716 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
2717 adapter->need_fw_reset = 1;
2718 set_bit(__QLCNIC_START_FW, &adapter->state);
2719 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
2720 QLCDB(adapter, DRV, "Restarting fw\n");
2722 qlcnic_api_unlock(adapter);
2724 err = adapter->nic_ops->start_firmware(adapter);
2728 qlcnic_clr_drv_state(adapter);
2729 qlcnic_setup_intr(adapter);
2731 if (netif_running(netdev)) {
2732 err = qlcnic_attach(adapter);
/* Attach failure: give up recovery, reattach netdev detached. */
2734 qlcnic_clr_all_drv_state(adapter);
2735 clear_bit(__QLCNIC_AER, &adapter->state);
2736 netif_device_attach(netdev);
2740 err = qlcnic_up(adapter, netdev);
2744 qlcnic_config_indev_addr(netdev, NETDEV_UP);
2747 netif_device_attach(netdev);
/* AER .error_detected callback: permanent failure -> DISCONNECT,
 * io_normal -> RECOVERED; otherwise detach the netdev, cancel the fw
 * poll, tear down rings/interrupts, save PCI state and request a slot
 * reset (NEED_RESET). */
2751 static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
2752 pci_channel_state_t state)
2754 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2755 struct net_device *netdev = adapter->netdev;
2757 if (state == pci_channel_io_perm_failure)
2758 return PCI_ERS_RESULT_DISCONNECT;
2760 if (state == pci_channel_io_normal)
2761 return PCI_ERS_RESULT_RECOVERED;
2763 set_bit(__QLCNIC_AER, &adapter->state);
2764 netif_device_detach(netdev);
/* stop the periodic health check before tearing the device down */
2766 cancel_delayed_work_sync(&adapter->fw_work);
2768 if (netif_running(netdev))
2769 qlcnic_down(adapter, netdev);
2771 qlcnic_detach(adapter);
2772 qlcnic_teardown_intr(adapter);
2774 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2776 pci_save_state(pdev);
2777 pci_disable_device(pdev);
2779 return PCI_ERS_RESULT_NEED_RESET;
2782 static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
2784 return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
2785 PCI_ERS_RESULT_RECOVERED;
/* AER .resume callback: clear the uncorrectable error status and, if
 * the device reports READY and we were in AER recovery, restart the
 * firmware poll work (delay argument line missing from excerpt). */
2788 static void qlcnic_io_resume(struct pci_dev *pdev)
2790 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2792 pci_cleanup_aer_uncorrect_error_status(pdev);
2794 if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
2795 test_and_clear_bit(__QLCNIC_AER, &adapter->state))
2796 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
/* VF variant of start_firmware: wait (bounded by npar_opt_timeo
 * iterations) for the NPAR state register to reach OPER, then refresh
 * options and clear the pending-reset flag.
 * NOTE(review): the error string below has a typo ("opertional" ->
 * "operational"); it is runtime output, so it is only flagged here. */
2802 qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
2805 u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
2808 err = qlcnic_can_start_firmware(adapter);
2812 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2813 while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
2815 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2818 if (!npar_opt_timeo) {
2819 dev_err(&adapter->pdev->dev,
2820 "Waiting for NPAR state to opertional timeout\n");
2824 qlcnic_check_options(adapter);
2826 adapter->need_fw_reset = 0;
/* VF stub for bridged-mode config; body missing from this excerpt
 * (presumably returns an unsupported-operation error — confirm). */
2832 qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
/* VF stub for LED config; body missing from this excerpt
 * (presumably returns an unsupported-operation error — confirm). */
2838 qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
/* sysfs store for "bridged_mode": accepts "0"/"1" (base-2 strtoul),
 * requires firmware BDG capability and the device to be UP, then asks
 * the nic_ops hook to apply the setting. Error-return lines are
 * missing from this excerpt. */
2844 qlcnic_store_bridged_mode(struct device *dev,
2845 struct device_attribute *attr, const char *buf, size_t len)
2847 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2851 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
2854 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
2857 if (strict_strtoul(buf, 2, &new))
2860 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
2868 qlcnic_show_bridged_mode(struct device *dev,
2869 struct device_attribute *attr, char *buf)
2871 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2872 int bridged_mode = 0;
2874 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2875 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
2877 return sprintf(buf, "%d\n", bridged_mode);
/* sysfs attribute "bridged_mode" (root read/write). */
2880 static struct device_attribute dev_attr_bridged_mode = {
2881 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
2882 .show = qlcnic_show_bridged_mode,
2883 .store = qlcnic_store_bridged_mode,
/* sysfs store for "diag_mode": toggles QLCNIC_DIAG_ENABLED when the
 * requested boolean differs from the current flag. Return lines are
 * missing from this excerpt. */
2887 qlcnic_store_diag_mode(struct device *dev,
2888 struct device_attribute *attr, const char *buf, size_t len)
2890 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2893 if (strict_strtoul(buf, 2, &new))
2896 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
2897 adapter->flags ^= QLCNIC_DIAG_ENABLED;
2903 qlcnic_show_diag_mode(struct device *dev,
2904 struct device_attribute *attr, char *buf)
2906 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2908 return sprintf(buf, "%d\n",
2909 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
/* sysfs attribute "diag_mode" (root read/write). */
2912 static struct device_attribute dev_attr_diag_mode = {
2913 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
2914 .show = qlcnic_show_diag_mode,
2915 .store = qlcnic_store_diag_mode,
/* Validate a CRB register access from sysfs: diag mode must be on;
 * offsets below PCI CRB space are only allowed in the CAMQM window;
 * access must be exactly crb_size bytes and crb_size-aligned.
 * Return statements are missing from this excerpt. */
2919 qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
2920 loff_t offset, size_t size)
2922 size_t crb_size = 4;
2924 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
2927 if (offset < QLCNIC_PCI_CRBSPACE) {
2928 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
2929 QLCNIC_PCI_CAMQM_END))
/* CAMQM accesses are 8 bytes wide (crb_size presumably raised to 8) */
2935 if ((size != crb_size) || (offset & (crb_size-1)))
/* sysfs binary read of a CRB register: after validation, CAMQM-window
 * offsets use the 64-bit camqm read helper, anything else a 32-bit
 * QLCRD32. The validated size is copied into the user buffer. */
2942 qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
2943 struct bin_attribute *attr,
2944 char *buf, loff_t offset, size_t size)
2946 struct device *dev = container_of(kobj, struct device, kobj);
2947 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2952 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2956 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
2957 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
2958 memcpy(buf, &qmdata, size);
2960 data = QLCRD32(adapter, offset);
2961 memcpy(buf, &data, size);
/* sysfs binary write of a CRB register: mirror of the read path —
 * CAMQM-window offsets go through the 64-bit camqm write helper,
 * everything else through a 32-bit QLCWR32. */
2967 qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
2968 struct bin_attribute *attr,
2969 char *buf, loff_t offset, size_t size)
2971 struct device *dev = container_of(kobj, struct device, kobj);
2972 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2977 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2981 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
2982 memcpy(&qmdata, buf, size);
2983 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
2985 memcpy(&data, buf, size);
2986 QLCWR32(adapter, offset, data);
/* Validate a memory access from sysfs: diag mode must be enabled and
 * the access must be exactly 8 bytes, 8-byte aligned. Return
 * statements are missing from this excerpt. */
2992 qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
2993 loff_t offset, size_t size)
2995 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
2998 if ((size != 8) || (offset & 0x7))
/* sysfs binary read of 8 bytes of adapter memory via the 2M-window
 * helper; validated first, then copied to the user buffer. */
3005 qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
3006 struct bin_attribute *attr,
3007 char *buf, loff_t offset, size_t size)
3009 struct device *dev = container_of(kobj, struct device, kobj);
3010 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3014 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3018 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
3021 memcpy(buf, &data, size);
/* sysfs binary write of 8 bytes of adapter memory via the 2M-window
 * helper; mirror of the read path. */
3027 qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
3028 struct bin_attribute *attr,
3029 char *buf, loff_t offset, size_t size)
3031 struct device *dev = container_of(kobj, struct device, kobj);
3032 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3036 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3040 memcpy(&data, buf, size);
3042 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
/* sysfs binary attribute "crb": raw CRB register access (diag only). */
3049 static struct bin_attribute bin_attr_crb = {
3050 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
3052 .read = qlcnic_sysfs_read_crb,
3053 .write = qlcnic_sysfs_write_crb,
/* sysfs binary attribute "mem": raw adapter memory access (diag only). */
3056 static struct bin_attribute bin_attr_mem = {
3057 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
3059 .read = qlcnic_sysfs_read_mem,
3060 .write = qlcnic_sysfs_write_mem,
/* Validate port-mirroring entries from sysfs: source and destination
 * functions must be in range, both must be NIC-type NPARs, the action
 * must be a valid mode, and both functions must share the same
 * physical eswitch port. Returns 0 or QL_STATUS_INVALID_PARAM. */
3064 validate_pm_config(struct qlcnic_adapter *adapter,
3065 struct qlcnic_pm_func_cfg *pm_cfg, int count)
3068 u8 src_pci_func, s_esw_id, d_esw_id;
3072 for (i = 0; i < count; i++) {
3073 src_pci_func = pm_cfg[i].pci_func;
3074 dest_pci_func = pm_cfg[i].dest_npar;
3075 if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
3076 || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
3077 return QL_STATUS_INVALID_PARAM;
3079 if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
3080 return QL_STATUS_INVALID_PARAM;
3082 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
3083 return QL_STATUS_INVALID_PARAM;
3085 if (!IS_VALID_MODE(pm_cfg[i].action))
3086 return QL_STATUS_INVALID_PARAM;
3088 s_esw_id = adapter->npars[src_pci_func].phy_port;
3089 d_esw_id = adapter->npars[dest_pci_func].phy_port;
/* mirroring is only possible within one eswitch */
3091 if (s_esw_id != d_esw_id)
3092 return QL_STATUS_INVALID_PARAM;
/* sysfs binary write "pm_config": parse an array of pm_func_cfg
 * entries (size must be an exact multiple), validate, program port
 * mirroring per entry, then cache the settings in adapter->npars. */
3100 qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3101 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3103 struct device *dev = container_of(kobj, struct device, kobj);
3104 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3105 struct qlcnic_pm_func_cfg *pm_cfg;
3106 u32 id, action, pci_func;
3107 int count, rem, i, ret;
3109 count = size / sizeof(struct qlcnic_pm_func_cfg);
3110 rem = size % sizeof(struct qlcnic_pm_func_cfg);
3112 return QL_STATUS_INVALID_PARAM;
3114 pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
3116 ret = validate_pm_config(adapter, pm_cfg, count);
3119 for (i = 0; i < count; i++) {
3120 pci_func = pm_cfg[i].pci_func;
3121 action = pm_cfg[i].action;
3122 id = adapter->npars[pci_func].phy_port;
3123 ret = qlcnic_config_port_mirroring(adapter, id,
/* second pass: persist accepted settings in the npar table */
3129 for (i = 0; i < count; i++) {
3130 pci_func = pm_cfg[i].pci_func;
3131 id = adapter->npars[pci_func].phy_port;
3132 adapter->npars[pci_func].enable_pm = pm_cfg[i].action;
3133 adapter->npars[pci_func].dest_npar = id;
/* sysfs binary read "pm_config": requires a full-size buffer; fills
 * one entry per NIC-type NPAR from the cached npar table. Entries for
 * non-NIC functions are skipped (left as-is in the stack buffer). */
3139 qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
3140 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3142 struct device *dev = container_of(kobj, struct device, kobj);
3143 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3144 struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
3147 if (size != sizeof(pm_cfg))
3148 return QL_STATUS_INVALID_PARAM;
3150 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3151 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3153 pm_cfg[i].action = adapter->npars[i].enable_pm;
3154 pm_cfg[i].dest_npar = 0;
3155 pm_cfg[i].pci_func = i;
3157 memcpy(buf, &pm_cfg, size);
3163 validate_esw_config(struct qlcnic_adapter *adapter,
3164 struct qlcnic_esw_func_cfg *esw_cfg, int count)
3169 for (i = 0; i < count; i++) {
3170 pci_func = esw_cfg[i].pci_func;
3171 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3172 return QL_STATUS_INVALID_PARAM;
3174 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3175 return QL_STATUS_INVALID_PARAM;
3177 if (esw_cfg->host_vlan_tag == 1)
3178 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3179 return QL_STATUS_INVALID_PARAM;
3181 if (!IS_VALID_MODE(esw_cfg[i].promisc_mode)
3182 || !IS_VALID_MODE(esw_cfg[i].host_vlan_tag)
3183 || !IS_VALID_MODE(esw_cfg[i].mac_learning)
3184 || !IS_VALID_MODE(esw_cfg[i].discard_tagged))
3185 return QL_STATUS_INVALID_PARAM;
/* sysfs binary write "esw_config": parse an array of esw_func_cfg
 * entries (size must be an exact multiple), validate, program each
 * eswitch port, then cache the accepted values in adapter->npars. */
3192 qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3193 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3195 struct device *dev = container_of(kobj, struct device, kobj);
3196 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3197 struct qlcnic_esw_func_cfg *esw_cfg;
3198 int count, rem, i, ret;
3201 count = size / sizeof(struct qlcnic_esw_func_cfg);
3202 rem = size % sizeof(struct qlcnic_esw_func_cfg);
3204 return QL_STATUS_INVALID_PARAM;
3206 esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
3207 ret = validate_esw_config(adapter, esw_cfg, count);
3211 for (i = 0; i < count; i++) {
3212 pci_func = esw_cfg[i].pci_func;
3213 id = adapter->npars[pci_func].phy_port;
3214 ret = qlcnic_config_switch_port(adapter, id,
3215 esw_cfg[i].host_vlan_tag,
3216 esw_cfg[i].discard_tagged,
3217 esw_cfg[i].promisc_mode,
3218 esw_cfg[i].mac_learning,
3219 esw_cfg[i].pci_func,
3220 esw_cfg[i].vlan_id);
/* second pass: persist accepted settings in the npar table */
3225 for (i = 0; i < count; i++) {
3226 pci_func = esw_cfg[i].pci_func;
3227 adapter->npars[pci_func].promisc_mode = esw_cfg[i].promisc_mode;
3228 adapter->npars[pci_func].mac_learning = esw_cfg[i].mac_learning;
3229 adapter->npars[pci_func].vlan_id = esw_cfg[i].vlan_id;
3230 adapter->npars[pci_func].discard_tagged =
3231 esw_cfg[i].discard_tagged;
3232 adapter->npars[pci_func].host_vlan_tag =
3233 esw_cfg[i].host_vlan_tag;
/* sysfs binary read "esw_config": requires a full-size buffer; fills
 * one entry per NIC-type NPAR from the cached npar table. */
3240 qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
3241 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3243 struct device *dev = container_of(kobj, struct device, kobj);
3244 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3245 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
3248 if (size != sizeof(esw_cfg))
3249 return QL_STATUS_INVALID_PARAM;
3251 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3252 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3255 esw_cfg[i].host_vlan_tag = adapter->npars[i].host_vlan_tag;
3256 esw_cfg[i].promisc_mode = adapter->npars[i].promisc_mode;
3257 esw_cfg[i].discard_tagged = adapter->npars[i].discard_tagged;
3258 esw_cfg[i].vlan_id = adapter->npars[i].vlan_id;
3259 esw_cfg[i].mac_learning = adapter->npars[i].mac_learning;
3261 memcpy(buf, &esw_cfg, size);
/* Validate NPAR config entries: pci_func in range and NIC-type, and
 * bandwidth/queue counts within their valid ranges. Note this function
 * correctly indexes npars[] by pci_func (cf. validate_esw_config). */
3267 validate_npar_config(struct qlcnic_adapter *adapter,
3268 struct qlcnic_npar_func_cfg *np_cfg, int count)
3272 for (i = 0; i < count; i++) {
3273 pci_func = np_cfg[i].pci_func;
3274 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3275 return QL_STATUS_INVALID_PARAM;
3277 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3278 return QL_STATUS_INVALID_PARAM;
3280 if (!IS_VALID_BW(np_cfg[i].min_bw)
3281 || !IS_VALID_BW(np_cfg[i].max_bw)
3282 || !IS_VALID_RX_QUEUES(np_cfg[i].max_rx_queues)
3283 || !IS_VALID_TX_QUEUES(np_cfg[i].max_tx_queues))
3284 return QL_STATUS_INVALID_PARAM;
3290 qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
3291 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3293 struct device *dev = container_of(kobj, struct device, kobj);
3294 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3295 struct qlcnic_info nic_info;
3296 struct qlcnic_npar_func_cfg *np_cfg;
3297 int i, count, rem, ret;
3300 count = size / sizeof(struct qlcnic_npar_func_cfg);
3301 rem = size % sizeof(struct qlcnic_npar_func_cfg);
3303 return QL_STATUS_INVALID_PARAM;
3305 np_cfg = (struct qlcnic_npar_func_cfg *) buf;
3306 ret = validate_npar_config(adapter, np_cfg, count);
3310 for (i = 0; i < count ; i++) {
3311 pci_func = np_cfg[i].pci_func;
3312 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
3315 nic_info.pci_func = pci_func;
3316 nic_info.min_tx_bw = np_cfg[i].min_bw;
3317 nic_info.max_tx_bw = np_cfg[i].max_bw;
3318 ret = qlcnic_set_nic_info(adapter, &nic_info);
3321 adapter->npars[i].min_bw = nic_info.min_tx_bw;
3322 adapter->npars[i].max_bw = nic_info.max_tx_bw;
/* sysfs binary read "npar_config": requires a full-size buffer; for
 * each NIC-type NPAR, queries live nic_info from firmware and fills
 * the corresponding entry. */
3329 qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
3330 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3332 struct device *dev = container_of(kobj, struct device, kobj);
3333 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3334 struct qlcnic_info nic_info;
3335 struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
3338 if (size != sizeof(np_cfg))
3339 return QL_STATUS_INVALID_PARAM;
3341 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3342 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3344 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
3348 np_cfg[i].pci_func = i;
3349 np_cfg[i].op_mode = nic_info.op_mode;
3350 np_cfg[i].port_num = nic_info.phys_port;
3351 np_cfg[i].fw_capab = nic_info.capabilities;
3352 np_cfg[i].min_bw = nic_info.min_tx_bw ;
3353 np_cfg[i].max_bw = nic_info.max_tx_bw;
3354 np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
3355 np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
3357 memcpy(buf, &np_cfg, size);
/* sysfs binary read "port_stats": offset selects the PCI function;
 * fetches RX then TX counters into one esw_statistics record. */
3362 qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
3363 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3365 struct device *dev = container_of(kobj, struct device, kobj);
3366 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3367 struct qlcnic_esw_statistics port_stats;
3370 if (size != sizeof(struct qlcnic_esw_statistics))
3371 return QL_STATUS_INVALID_PARAM;
3373 if (offset >= QLCNIC_MAX_PCI_FUNC)
3374 return QL_STATUS_INVALID_PARAM;
3376 memset(&port_stats, 0, size);
3377 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3382 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3387 memcpy(buf, &port_stats, size);
/* sysfs binary read "esw_stats": offset selects the eswitch (XG port);
 * fetches RX then TX counters into one esw_statistics record. */
3392 qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
3393 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3395 struct device *dev = container_of(kobj, struct device, kobj);
3396 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3397 struct qlcnic_esw_statistics esw_stats;
3400 if (size != sizeof(struct qlcnic_esw_statistics))
3401 return QL_STATUS_INVALID_PARAM;
3403 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3404 return QL_STATUS_INVALID_PARAM;
3406 memset(&esw_stats, 0, size);
3407 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3412 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3417 memcpy(buf, &esw_stats, size);
/* sysfs binary write "esw_stats": clears RX and TX counters for the
 * eswitch selected by offset. */
3422 qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
3423 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3425 struct device *dev = container_of(kobj, struct device, kobj);
3426 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3429 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3430 return QL_STATUS_INVALID_PARAM;
3432 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3433 QLCNIC_QUERY_RX_COUNTER);
3437 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3438 QLCNIC_QUERY_TX_COUNTER);
/* sysfs binary write "port_stats": clears RX and TX counters for the
 * PCI function selected by offset. */
3446 qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
3447 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3450 struct device *dev = container_of(kobj, struct device, kobj);
3451 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3454 if (offset >= QLCNIC_MAX_PCI_FUNC)
3455 return QL_STATUS_INVALID_PARAM;
3457 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3458 QLCNIC_QUERY_RX_COUNTER);
3462 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3463 QLCNIC_QUERY_TX_COUNTER);
/* sysfs binary read "pci_config": kcalloc's a pci_info array, queries
 * it from firmware, and converts it to pci_func_cfg records.
 * NOTE(review): the kfree(pci_info) and error paths are missing from
 * this excerpt — verify pci_info is freed on all paths in the full
 * file. */
3471 qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
3472 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3474 struct device *dev = container_of(kobj, struct device, kobj);
3475 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3476 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
3477 struct qlcnic_pci_info *pci_info;
3480 if (size != sizeof(pci_cfg))
3481 return QL_STATUS_INVALID_PARAM;
3483 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
3487 ret = qlcnic_get_pci_info(adapter, pci_info);
3493 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3494 pci_cfg[i].pci_func = pci_info[i].id;
3495 pci_cfg[i].func_type = pci_info[i].type;
3496 pci_cfg[i].port_num = pci_info[i].default_port;
3497 pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
3498 pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
3499 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
3501 memcpy(buf, &pci_cfg, size);
/* sysfs binary attribute "npar_config" (NPAR bandwidth/queue config). */
3505 static struct bin_attribute bin_attr_npar_config = {
3506 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
3508 .read = qlcnic_sysfs_read_npar_config,
3509 .write = qlcnic_sysfs_write_npar_config,
/* sysfs binary attribute "pci_config" (read-only function table). */
3512 static struct bin_attribute bin_attr_pci_config = {
3513 .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
3515 .read = qlcnic_sysfs_read_pci_config,
/* sysfs binary attribute "port_stats": read fetches, write clears. */
3519 static struct bin_attribute bin_attr_port_stats = {
3520 .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
3522 .read = qlcnic_sysfs_get_port_stats,
3523 .write = qlcnic_sysfs_clear_port_stats,
/* sysfs binary attribute "esw_stats": read fetches, write clears. */
3526 static struct bin_attribute bin_attr_esw_stats = {
3527 .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
3529 .read = qlcnic_sysfs_get_esw_stats,
3530 .write = qlcnic_sysfs_clear_esw_stats,
/* sysfs binary attribute "esw_config" (per-function eswitch config). */
3533 static struct bin_attribute bin_attr_esw_config = {
3534 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
3536 .read = qlcnic_sysfs_read_esw_config,
3537 .write = qlcnic_sysfs_write_esw_config,
/* sysfs binary attribute "pm_config" (port-mirroring config). */
3540 static struct bin_attribute bin_attr_pm_config = {
3541 .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
3543 .read = qlcnic_sysfs_read_pm_config,
3544 .write = qlcnic_sysfs_write_pm_config,
/* Create the plain sysfs attribute(s): bridged_mode only, and only
 * when the firmware advertises the BDG capability. Creation failure is
 * logged, not fatal. */
3548 qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
3550 struct device *dev = &adapter->pdev->dev;
3552 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3553 if (device_create_file(dev, &dev_attr_bridged_mode))
3555 "failed to create bridged_mode sysfs entry\n");
3559 qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
3561 struct device *dev = &adapter->pdev->dev;
3563 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3564 device_remove_file(dev, &dev_attr_bridged_mode);
/* Create diagnostic sysfs entries: port_stats always; diag_mode/crb/
 * mem unless this is a non-privileged function; the eswitch/NPAR
 * config files only for a management function with eswitch enabled.
 * Each failure is logged but not fatal. */
3568 qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
3570 struct device *dev = &adapter->pdev->dev;
3572 if (device_create_bin_file(dev, &bin_attr_port_stats))
3573 dev_info(dev, "failed to create port stats sysfs entry");
/* non-privileged functions get only the stats file */
3575 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3577 if (device_create_file(dev, &dev_attr_diag_mode))
3578 dev_info(dev, "failed to create diag_mode sysfs entry\n");
3579 if (device_create_bin_file(dev, &bin_attr_crb))
3580 dev_info(dev, "failed to create crb sysfs entry\n");
3581 if (device_create_bin_file(dev, &bin_attr_mem))
3582 dev_info(dev, "failed to create mem sysfs entry\n");
/* config files require eswitch support and management privileges */
3583 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
3584 adapter->op_mode != QLCNIC_MGMT_FUNC)
3586 if (device_create_bin_file(dev, &bin_attr_pci_config))
3587 dev_info(dev, "failed to create pci config sysfs entry");
3588 if (device_create_bin_file(dev, &bin_attr_npar_config))
3589 dev_info(dev, "failed to create npar config sysfs entry");
3590 if (device_create_bin_file(dev, &bin_attr_esw_config))
3591 dev_info(dev, "failed to create esw config sysfs entry");
3592 if (device_create_bin_file(dev, &bin_attr_pm_config))
3593 dev_info(dev, "failed to create pm config sysfs entry");
3594 if (device_create_bin_file(dev, &bin_attr_esw_stats))
3595 dev_info(dev, "failed to create eswitch stats sysfs entry");
/* Remove diagnostic sysfs entries; mirrors qlcnic_create_diag_entries'
 * privilege/eswitch gating exactly. */
3599 qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
3601 struct device *dev = &adapter->pdev->dev;
3603 device_remove_bin_file(dev, &bin_attr_port_stats);
3605 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3607 device_remove_file(dev, &dev_attr_diag_mode);
3608 device_remove_bin_file(dev, &bin_attr_crb);
3609 device_remove_bin_file(dev, &bin_attr_mem);
3610 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
3611 adapter->op_mode != QLCNIC_MGMT_FUNC)
3613 device_remove_bin_file(dev, &bin_attr_pci_config);
3614 device_remove_bin_file(dev, &bin_attr_npar_config);
3615 device_remove_bin_file(dev, &bin_attr_esw_config);
3616 device_remove_bin_file(dev, &bin_attr_pm_config);
3617 device_remove_bin_file(dev, &bin_attr_esw_stats);
/* True when the net_device belongs to this driver; used by the
 * netdev/inetaddr notifier callbacks below to ignore foreign devices. */
3622 #define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
/* Walk the device's IPv4 addresses and program each into the adapter
 * as UP or DOWN according to the netdev event. */
3625 qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
3627 struct in_device *indev;
3628 struct qlcnic_adapter *adapter = netdev_priv(dev);
3630 indev = in_dev_get(dev);
3637 qlcnic_config_ipaddr(adapter,
3638 ifa->ifa_address, QLCNIC_IP_UP);
3641 qlcnic_config_ipaddr(adapter,
3642 ifa->ifa_address, QLCNIC_IP_DOWN);
/* release the in_device reference taken by in_dev_get() */
3647 } endfor_ifa(indev);
/* netdevice notifier: resolve VLAN devices to the real device, ignore
 * non-qlcnic devices, and refresh the adapter's IP address config when
 * the device is UP. */
3652 static int qlcnic_netdev_event(struct notifier_block *this,
3653 unsigned long event, void *ptr)
3655 struct qlcnic_adapter *adapter;
3656 struct net_device *dev = (struct net_device *)ptr;
3662 if (dev->priv_flags & IFF_802_1Q_VLAN) {
3663 dev = vlan_dev_real_dev(dev);
3667 if (!is_qlcnic_netdev(dev))
3670 adapter = netdev_priv(dev);
3675 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
3678 qlcnic_config_indev_addr(dev, event);
/* inetaddr notifier: on IPv4 address add/remove, program the address
 * into the adapter (QLCNIC_IP_UP/DOWN) if the underlying device is a
 * running qlcnic device (VLAN devices resolved to the real device). */
3684 qlcnic_inetaddr_event(struct notifier_block *this,
3685 unsigned long event, void *ptr)
3687 struct qlcnic_adapter *adapter;
3688 struct net_device *dev;
3690 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
3692 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
3695 if (dev == NULL || !netif_running(dev))
3698 if (dev->priv_flags & IFF_802_1Q_VLAN) {
3699 dev = vlan_dev_real_dev(dev);
3703 if (!is_qlcnic_netdev(dev))
3706 adapter = netdev_priv(dev);
3711 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
3716 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
3719 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
/* notifier registered in qlcnic_init_module for netdevice events. */
3729 static struct notifier_block qlcnic_netdev_cb = {
3730 .notifier_call = qlcnic_netdev_event,
/* notifier registered in qlcnic_init_module for IPv4 address events. */
3733 static struct notifier_block qlcnic_inetaddr_cb = {
3734 .notifier_call = qlcnic_inetaddr_event,
/* No-op fallback for qlcnic_config_indev_addr — appears to be the
 * notifier-less (#else) build variant; the surrounding preprocessor
 * lines are missing from this excerpt, confirm against the full file. */
3738 qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
/* AER recovery callbacks wired into the pci_driver below. */
3741 static struct pci_error_handlers qlcnic_err_handler = {
3742 .error_detected = qlcnic_io_error_detected,
3743 .slot_reset = qlcnic_io_slot_reset,
3744 .resume = qlcnic_io_resume,
/* Main PCI driver definition: probe/remove, power management hooks,
 * shutdown, and the AER error handler above. */
3747 static struct pci_driver qlcnic_driver = {
3748 .name = qlcnic_driver_name,
3749 .id_table = qlcnic_pci_tbl,
3750 .probe = qlcnic_probe,
3751 .remove = __devexit_p(qlcnic_remove),
3753 .suspend = qlcnic_suspend,
3754 .resume = qlcnic_resume,
3756 .shutdown = qlcnic_shutdown,
3757 .err_handler = &qlcnic_err_handler
/* Module init: register the netdevice/inetaddr notifiers first, then
 * the PCI driver; on PCI registration failure the notifiers are
 * unregistered again (return lines missing from this excerpt). */
3761 static int __init qlcnic_init_module(void)
3765 printk(KERN_INFO "%s\n", qlcnic_driver_string);
3768 register_netdevice_notifier(&qlcnic_netdev_cb);
3769 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
3772 ret = pci_register_driver(&qlcnic_driver);
/* unwind notifier registration on failure */
3775 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
3776 unregister_netdevice_notifier(&qlcnic_netdev_cb);
3783 module_init(qlcnic_init_module);
/* Module exit: unregister the PCI driver, then the notifiers — the
 * reverse of qlcnic_init_module. */
3785 static void __exit qlcnic_exit_module(void)
3788 pci_unregister_driver(&qlcnic_driver);
3791 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
3792 unregister_netdevice_notifier(&qlcnic_netdev_cb);
3796 module_exit(qlcnic_exit_module);