/*
 * Copyright (C) 2009 - QLogic Corporation.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called "COPYING".
 *
 */

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>

#include "qlcnic.h"

#include <linux/swab.h>
#include <linux/dma-mapping.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/sysfs.h>
#include <linux/aer.h>

MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);

char qlcnic_driver_name[] = "qlcnic";
static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
	"Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;

static int qlcnic_mac_learn;
module_param(qlcnic_mac_learn, int, 0644);
MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");

static int use_msi = 1;
module_param(use_msi, int, 0644);
MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");

static int use_msi_x = 1;
module_param(use_msi_x, int, 0644);
MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");

static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
module_param(auto_fw_reset, int, 0644);
MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");

static int load_fw_file;
module_param(load_fw_file, int, 0644);
MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");

static int qlcnic_config_npars;
module_param(qlcnic_config_npars, int, 0644);
MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");

static int __devinit qlcnic_probe(struct pci_dev *pdev,
		const struct pci_device_id *ent);
static void __devexit qlcnic_remove(struct pci_dev *pdev);
static int qlcnic_open(struct net_device *netdev);
static int qlcnic_close(struct net_device *netdev);
static void qlcnic_tx_timeout(struct net_device *netdev);
static void qlcnic_attach_work(struct work_struct *work);
static void qlcnic_fwinit_work(struct work_struct *work);
static void qlcnic_fw_poll_work(struct work_struct *work);
static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
		work_func_t func, int delay);
static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
static int qlcnic_poll(struct napi_struct *napi, int budget);
static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void qlcnic_poll_controller(struct net_device *netdev);
#endif

static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);

static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);

static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
static irqreturn_t qlcnic_intr(int irq, void *data);
static irqreturn_t qlcnic_msi_intr(int irq, void *data);
static irqreturn_t qlcnic_msix_intr(int irq, void *data);

static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long);
static int qlcnic_start_firmware(struct qlcnic_adapter *);

static void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
		struct qlcnic_esw_func_cfg *);
/* PCI Device ID Table */
#define ENTRY(device) \
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
	.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}

#define PCI_DEVICE_ID_QLOGIC_QLE824X	0x8020

static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
	ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);

void
qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
		struct qlcnic_host_tx_ring *tx_ring)
{
	writel(tx_ring->producer, tx_ring->crb_cmd_producer);
}

static const u32 msi_tgt_status[8] = {
	ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
	ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
	ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
	ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
};

static const
struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;

static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
{
	writel(0, sds_ring->crb_intr_mask);
}

static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
{
	struct qlcnic_adapter *adapter = sds_ring->adapter;

	writel(0x1, sds_ring->crb_intr_mask);

	if (!QLCNIC_IS_MSI_FAMILY(adapter))
		writel(0xfbff, adapter->tgt_mask_reg);
}

static int
qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
{
	int size = sizeof(struct qlcnic_host_sds_ring) * count;

	recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);

	return recv_ctx->sds_rings == NULL;
}

static void
qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
{
	if (recv_ctx->sds_rings != NULL)
		kfree(recv_ctx->sds_rings);

	recv_ctx->sds_rings = NULL;
}

static int
qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;

	if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
		return -ENOMEM;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (ring == adapter->max_sds_rings - 1)
			netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
				QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
		else
			netif_napi_add(netdev, &sds_ring->napi,
				qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
	}

	return 0;
}

static void
qlcnic_napi_del(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		netif_napi_del(&sds_ring->napi);
	}

	qlcnic_free_sds_rings(&adapter->recv_ctx);
}

static void
qlcnic_napi_enable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		napi_enable(&sds_ring->napi);
		qlcnic_enable_int(sds_ring);
	}
}

static void
qlcnic_napi_disable(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		qlcnic_disable_int(sds_ring);
		napi_synchronize(&sds_ring->napi);
		napi_disable(&sds_ring->napi);
	}
}

static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
{
	memset(&adapter->stats, 0, sizeof(adapter->stats));
}

static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
{
	u32 control;
	int pos;

	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_dword(pdev, pos, &control);
		if (enable)
			control |= PCI_MSIX_FLAGS_ENABLE;
		else
			control = 0;
		pci_write_config_dword(pdev, pos, control);
	}
}

static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
{
	int i;

	for (i = 0; i < count; i++)
		adapter->msix_entries[i].entry = i;
}

static int
qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
{
	u8 mac_addr[ETH_ALEN];
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
		return -EIO;

	memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
	memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);

	/* set station address */

	if (!is_valid_ether_addr(netdev->perm_addr))
		dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
				netdev->dev_addr);

	return 0;
}

static int qlcnic_set_mac(struct net_device *netdev, void *p)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;

	if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
		return -EOPNOTSUPP;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
		netif_device_detach(netdev);
		qlcnic_napi_disable(adapter);
	}

	memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	qlcnic_set_multi(adapter->netdev);

	if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
		netif_device_attach(netdev);
		qlcnic_napi_enable(adapter);
	}
	return 0;
}

static void qlcnic_vlan_rx_register(struct net_device *netdev,
		struct vlan_group *grp)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	adapter->vlgrp = grp;
}

static const struct net_device_ops qlcnic_netdev_ops = {
	.ndo_open = qlcnic_open,
	.ndo_stop = qlcnic_close,
	.ndo_start_xmit = qlcnic_xmit_frame,
	.ndo_get_stats = qlcnic_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_multicast_list = qlcnic_set_multi,
	.ndo_set_mac_address = qlcnic_set_mac,
	.ndo_change_mtu = qlcnic_change_mtu,
	.ndo_tx_timeout = qlcnic_tx_timeout,
	.ndo_vlan_rx_register = qlcnic_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = qlcnic_poll_controller,
#endif
};

static struct qlcnic_nic_template qlcnic_ops = {
	.config_bridged_mode = qlcnic_config_bridged_mode,
	.config_led = qlcnic_config_led,
	.start_firmware = qlcnic_start_firmware
};

static struct qlcnic_nic_template qlcnic_vf_ops = {
	.config_bridged_mode = qlcnicvf_config_bridged_mode,
	.config_led = qlcnicvf_config_led,
	.start_firmware = qlcnicvf_start_firmware
};

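/*
 * Interrupt setup: try MSI-X first (sizing max_sds_rings by the number of
 * vectors when RSS is supported), fall back to MSI, and finally to legacy
 * INTx if neither can be enabled.
 */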
static void
qlcnic_setup_intr(struct qlcnic_adapter *adapter)
{
	const struct qlcnic_legacy_intr_set *legacy_intrp;
	struct pci_dev *pdev = adapter->pdev;
	int err, num_msix;

	if (adapter->rss_supported) {
		num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
			MSIX_ENTRIES_PER_ADAPTER : 2;
	} else
		num_msix = 1;

	adapter->max_sds_rings = 1;

	adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);

	legacy_intrp = &legacy_intr[adapter->ahw.pci_func];

	adapter->int_vec_bit = legacy_intrp->int_vec_bit;
	adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
			legacy_intrp->tgt_status_reg);
	adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
			legacy_intrp->tgt_mask_reg);
	adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);

	adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
			ISR_INT_STATE_REG);

	qlcnic_set_msix_bit(pdev, 0);

	if (adapter->msix_supported) {

		qlcnic_init_msix_entries(adapter, num_msix);
		err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
		if (err == 0) {
			adapter->flags |= QLCNIC_MSIX_ENABLED;
			qlcnic_set_msix_bit(pdev, 1);

			if (adapter->rss_supported)
				adapter->max_sds_rings = num_msix;

			dev_info(&pdev->dev, "using msi-x interrupts\n");
			return;
		}

		if (err > 0)
			pci_disable_msix(pdev);

		/* fall through for msi */
	}

	if (use_msi && !pci_enable_msi(pdev)) {
		adapter->flags |= QLCNIC_MSI_ENABLED;
		adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
				msi_tgt_status[adapter->ahw.pci_func]);
		dev_info(&pdev->dev, "using msi interrupts\n");
		adapter->msix_entries[0].vector = pdev->irq;
		return;
	}

	dev_info(&pdev->dev, "using legacy interrupts\n");
	adapter->msix_entries[0].vector = pdev->irq;
}

static void
qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
{
	if (adapter->flags & QLCNIC_MSIX_ENABLED)
		pci_disable_msix(adapter->pdev);
	if (adapter->flags & QLCNIC_MSI_ENABLED)
		pci_disable_msi(adapter->pdev);
}

static void
qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
{
	if (adapter->ahw.pci_base0 != NULL)
		iounmap(adapter->ahw.pci_base0);
}

static int
qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
{
	struct qlcnic_pci_info *pci_info;
	int i, ret = 0;
	u8 pfn;

	pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
	if (!pci_info)
		return -ENOMEM;

	adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
				QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
	if (!adapter->npars) {
		ret = -ENOMEM;
		goto err_pci_info;
	}

	adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
				QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
	if (!adapter->eswitch) {
		ret = -ENOMEM;
		goto err_npars;
	}

	ret = qlcnic_get_pci_info(adapter, pci_info);
	if (ret)
		goto err_eswitch;

	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
		pfn = pci_info[i].id;
		if (pfn > QLCNIC_MAX_PCI_FUNC)
			return QL_STATUS_INVALID_PARAM;
		adapter->npars[pfn].active = (u8)pci_info[i].active;
		adapter->npars[pfn].type = (u8)pci_info[i].type;
		adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port;
		adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
		adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
	}

	for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
		adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;

	kfree(pci_info);
	return 0;

err_eswitch:
	kfree(adapter->eswitch);
	adapter->eswitch = NULL;
err_npars:
	kfree(adapter->npars);
	adapter->npars = NULL;
err_pci_info:
	kfree(pci_info);

	return ret;
}

static int
qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
{
	u8 id;
	u32 ref_count;
	int i, ret = 1;
	u32 data = QLCNIC_MGMT_FUNC;
	void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;

	/* If other drivers are not in use set their privilege level */
	ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
	ret = qlcnic_api_lock(adapter);
	if (ret)
		goto err_lock;

	if (qlcnic_config_npars) {
		for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
			id = i;
			if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
				id == adapter->ahw.pci_func)
				continue;
			data |= (qlcnic_config_npars &
					QLC_DEV_SET_DRV(0xf, id));
		}
	} else {
		data = readl(priv_op);
		data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw.pci_func)) |
			(QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
			adapter->ahw.pci_func));
	}
	writel(data, priv_op);
	qlcnic_api_unlock(adapter);
err_lock:
	return ret;
}

static void
qlcnic_check_vf(struct qlcnic_adapter *adapter)
{
	void __iomem *msix_base_addr;
	void __iomem *priv_op;
	u32 func;
	u32 msix_base;
	u32 op_mode, priv_level;

	/* Determine FW API version */
	adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API);

	/* Find PCI function number */
	pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
	msix_base_addr = adapter->ahw.pci_base0 + QLCNIC_MSIX_BASE;
	msix_base = readl(msix_base_addr);
	func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
	adapter->ahw.pci_func = func;

	/* Determine function privilege level */
	priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
	op_mode = readl(priv_op);
	if (op_mode == QLC_DEV_DRV_DEFAULT)
		priv_level = QLCNIC_MGMT_FUNC;
	else
		priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);

	if (priv_level == QLCNIC_NON_PRIV_FUNC) {
		adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
		dev_info(&adapter->pdev->dev,
			"HAL Version: %d Non Privileged function\n",
			adapter->fw_hal_version);
		adapter->nic_ops = &qlcnic_vf_ops;
	} else
		adapter->nic_ops = &qlcnic_ops;
}

static int
qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
{
	void __iomem *mem_ptr0 = NULL;
	resource_size_t mem_base;
	unsigned long mem_len, pci_len0 = 0;

	struct pci_dev *pdev = adapter->pdev;

	/* remap phys address */
	mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
	mem_len = pci_resource_len(pdev, 0);

	if (mem_len == QLCNIC_PCI_2MB_SIZE) {

		mem_ptr0 = pci_ioremap_bar(pdev, 0);
		if (mem_ptr0 == NULL) {
			dev_err(&pdev->dev, "failed to map PCI bar 0\n");
			return -EIO;
		}
		pci_len0 = mem_len;
	} else {
		return -EIO;
	}

	dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));

	adapter->ahw.pci_base0 = mem_ptr0;
	adapter->ahw.pci_len0 = pci_len0;

	qlcnic_check_vf(adapter);

	adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
		QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func)));

	return 0;
}

static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, found = 0;

	for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
		if (qlcnic_boards[i].vendor == pdev->vendor &&
			qlcnic_boards[i].device == pdev->device &&
			qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
			qlcnic_boards[i].sub_device == pdev->subsystem_device) {
			sprintf(name, "%pM: %s",
				adapter->mac_addr,
				qlcnic_boards[i].short_name);
			found = 1;
			break;
		}

	}

	if (!found)
		sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
}

static void
qlcnic_check_options(struct qlcnic_adapter *adapter)
{
	u32 fw_major, fw_minor, fw_build;
	struct pci_dev *pdev = adapter->pdev;

	fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
	fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
	fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);

	adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);

	dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
			fw_major, fw_minor, fw_build);

	adapter->flags &= ~QLCNIC_LRO_ENABLED;

	if (adapter->ahw.port_type == QLCNIC_XGBE) {
		adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
	} else if (adapter->ahw.port_type == QLCNIC_GBE) {
		adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
		adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
	}

	adapter->msix_supported = !!use_msi_x;
	adapter->rss_supported = !!use_msi_x;

	adapter->num_txd = MAX_CMD_DESCRIPTORS;

	adapter->max_rds_rings = MAX_RDS_RINGS;
}

static int
qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
{
	int err;
	struct qlcnic_info nic_info;

	err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func);
	if (err)
		return err;

	adapter->physical_port = (u8)nic_info.phys_port;
	adapter->switch_mode = nic_info.switch_mode;
	adapter->max_tx_ques = nic_info.max_tx_ques;
	adapter->max_rx_ques = nic_info.max_rx_ques;
	adapter->capabilities = nic_info.capabilities;
	adapter->max_mac_filters = nic_info.max_mac_filters;
	adapter->max_mtu = nic_info.max_mtu;

	if (adapter->capabilities & BIT_6)
		adapter->flags |= QLCNIC_ESWITCH_ENABLED;
	else
		adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;

	return err;
}

static void
qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
		struct qlcnic_esw_func_cfg *esw_cfg)
{
	if (esw_cfg->discard_tagged)
		adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
	else
		adapter->flags |= QLCNIC_TAGGING_ENABLED;

	if (esw_cfg->vlan_id)
		adapter->pvid = esw_cfg->vlan_id;
	else
		adapter->pvid = 0;
}

static void
qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
		struct qlcnic_esw_func_cfg *esw_cfg)
{
	adapter->flags &= ~QLCNIC_MACSPOOF;
	adapter->flags &= ~QLCNIC_MAC_OVERRIDE_DISABLED;

	if (esw_cfg->mac_anti_spoof)
		adapter->flags |= QLCNIC_MACSPOOF;

	if (!esw_cfg->mac_override)
		adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED;

	qlcnic_set_netdev_features(adapter, esw_cfg);
}

static int
qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
{
	struct qlcnic_esw_func_cfg esw_cfg;

	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
		return 0;

	esw_cfg.pci_func = adapter->ahw.pci_func;
	if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
		return -EIO;
	qlcnic_set_vlan_config(adapter, &esw_cfg);
	qlcnic_set_eswitch_port_features(adapter, &esw_cfg);

	return 0;
}

static void
qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
		struct qlcnic_esw_func_cfg *esw_cfg)
{
	struct net_device *netdev = adapter->netdev;
	unsigned long features, vlan_features;

	features = (NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_IPV6_CSUM | NETIF_F_GRO);
	vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_IPV6_CSUM);

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
		features |= (NETIF_F_TSO | NETIF_F_TSO6);
		vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
	}
	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
		features |= NETIF_F_LRO;

	if (esw_cfg->offload_flags & BIT_0) {
		netdev->features |= features;
		adapter->rx_csum = 1;
		if (!(esw_cfg->offload_flags & BIT_1))
			netdev->features &= ~NETIF_F_TSO;
		if (!(esw_cfg->offload_flags & BIT_2))
			netdev->features &= ~NETIF_F_TSO6;
	} else {
		netdev->features &= ~features;
		adapter->rx_csum = 0;
	}

	netdev->vlan_features = (features & vlan_features);
}

static int
qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
{
	void __iomem *priv_op;
	u32 op_mode, priv_level;
	int err = 0;

	err = qlcnic_initialize_nic(adapter);
	if (err)
		return err;

	if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
		return 0;

	priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
	op_mode = readl(priv_op);
	priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);

	if (op_mode == QLC_DEV_DRV_DEFAULT)
		priv_level = QLCNIC_MGMT_FUNC;
	else
		priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);

	if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
		if (priv_level == QLCNIC_MGMT_FUNC) {
			adapter->op_mode = QLCNIC_MGMT_FUNC;
			err = qlcnic_init_pci_info(adapter);
			if (err)
				return err;
			/* Set privilege level for other functions */
			qlcnic_set_function_modes(adapter);
			dev_info(&adapter->pdev->dev,
				"HAL Version: %d, Management function\n",
				adapter->fw_hal_version);
		} else if (priv_level == QLCNIC_PRIV_FUNC) {
			adapter->op_mode = QLCNIC_PRIV_FUNC;
			dev_info(&adapter->pdev->dev,
				"HAL Version: %d, Privileged function\n",
				adapter->fw_hal_version);
		}
	}

	adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;

	return err;
}

static int
qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
{
	struct qlcnic_esw_func_cfg esw_cfg;
	struct qlcnic_npar_info *npar;
	u8 i;

	if (adapter->need_fw_reset)
		return 0;

	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
		if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
			continue;
		memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
		esw_cfg.pci_func = i;
		esw_cfg.offload_flags = BIT_0;
		esw_cfg.mac_override = BIT_0;
		if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
			esw_cfg.offload_flags |= (BIT_1 | BIT_2);
		if (qlcnic_config_switch_port(adapter, &esw_cfg))
			return -EIO;
		npar = &adapter->npars[i];
		npar->pvid = esw_cfg.vlan_id;
		npar->mac_override = esw_cfg.mac_override;
		npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
		npar->discard_tagged = esw_cfg.discard_tagged;
		npar->promisc_mode = esw_cfg.promisc_mode;
		npar->offload_flags = esw_cfg.offload_flags;
	}

	return 0;
}

static int
qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
		struct qlcnic_npar_info *npar, int pci_func)
{
	struct qlcnic_esw_func_cfg esw_cfg;
	esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
	esw_cfg.pci_func = pci_func;
	esw_cfg.vlan_id = npar->pvid;
	esw_cfg.mac_override = npar->mac_override;
	esw_cfg.discard_tagged = npar->discard_tagged;
	esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
	esw_cfg.offload_flags = npar->offload_flags;
	esw_cfg.promisc_mode = npar->promisc_mode;
	if (qlcnic_config_switch_port(adapter, &esw_cfg))
		return -EIO;

	esw_cfg.op_mode = QLCNIC_ADD_VLAN;
	if (qlcnic_config_switch_port(adapter, &esw_cfg))
		return -EIO;

	return 0;
}

static int
qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
{
	int i, err;
	struct qlcnic_npar_info *npar;
	struct qlcnic_info nic_info;

	if (!adapter->need_fw_reset)
		return 0;

	/* Set the NPAR config data after FW reset */
	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
		npar = &adapter->npars[i];
		if (npar->type != QLCNIC_TYPE_NIC)
			continue;
		err = qlcnic_get_nic_info(adapter, &nic_info, i);
		if (err)
			return err;
		nic_info.min_tx_bw = npar->min_bw;
		nic_info.max_tx_bw = npar->max_bw;
		err = qlcnic_set_nic_info(adapter, &nic_info);
		if (err)
			return err;

		if (npar->enable_pm) {
			err = qlcnic_config_port_mirroring(adapter,
					npar->dest_npar, 1, i);
			if (err)
				return err;
		}
		err = qlcnic_reset_eswitch_config(adapter, npar, i);
		if (err)
			return err;
	}
	return 0;
}

static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
{
	u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
	u32 npar_state;

	if (adapter->op_mode == QLCNIC_MGMT_FUNC)
		return 0;

	npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
	while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
		msleep(1000);
		npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
	}
	if (!npar_opt_timeo) {
		dev_err(&adapter->pdev->dev,
			"Timed out waiting for NPAR state to become operational\n");
		return -EIO;
	}
	return 0;
}

static int
qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
{
	int err;

	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
		adapter->op_mode != QLCNIC_MGMT_FUNC)
		return 0;

	err = qlcnic_set_default_offload_settings(adapter);
	if (err)
		return err;

	err = qlcnic_reset_npar_config(adapter);
	if (err)
		return err;

	qlcnic_dev_set_npar_ready(adapter);

	return err;
}

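/*
 * Firmware bring-up: check whether this function is allowed to load
 * firmware, load it from flash or from a file (load_fw_file), reset and
 * reinitialize if required, then mark the device ready and apply the
 * eswitch/NPAR management settings.
 */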
static int
qlcnic_start_firmware(struct qlcnic_adapter *adapter)
{
	int err;

	err = qlcnic_can_start_firmware(adapter);
	if (err < 0)
		return err;
	else if (!err)
		goto check_fw_status;

	if (load_fw_file)
		qlcnic_request_firmware(adapter);
	else {
		err = qlcnic_check_flash_fw_ver(adapter);
		if (err)
			goto err_out;

		adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
	}

	err = qlcnic_need_fw_reset(adapter);
	if (err == 0)
		goto check_fw_status;

	err = qlcnic_pinit_from_rom(adapter);
	if (err)
		goto err_out;

	err = qlcnic_load_firmware(adapter);
	if (err)
		goto err_out;

	qlcnic_release_firmware(adapter);
	QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);

check_fw_status:
	err = qlcnic_check_fw_status(adapter);
	if (err)
		goto err_out;

	QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
	qlcnic_idc_debug_info(adapter, 1);

	err = qlcnic_check_eswitch_mode(adapter);
	if (err) {
		dev_err(&adapter->pdev->dev,
			"Memory allocation failed for eswitch\n");
		goto err_out;
	}
	err = qlcnic_set_mgmt_operations(adapter);
	if (err)
		goto err_out;

	qlcnic_check_options(adapter);
	adapter->need_fw_reset = 0;

	qlcnic_release_firmware(adapter);
	return 0;

err_out:
	QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
	dev_err(&adapter->pdev->dev, "Device state set to failed\n");

	qlcnic_release_firmware(adapter);
	return err;
}

static int
qlcnic_request_irq(struct qlcnic_adapter *adapter)
{
	irq_handler_t handler;
	struct qlcnic_host_sds_ring *sds_ring;
	int err, ring;

	unsigned long flags = 0;
	struct net_device *netdev = adapter->netdev;
	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;

	if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
		handler = qlcnic_tmp_intr;
		if (!QLCNIC_IS_MSI_FAMILY(adapter))
			flags |= IRQF_SHARED;

	} else {
		if (adapter->flags & QLCNIC_MSIX_ENABLED)
			handler = qlcnic_msix_intr;
		else if (adapter->flags & QLCNIC_MSI_ENABLED)
			handler = qlcnic_msi_intr;
		else {
			flags |= IRQF_SHARED;
			handler = qlcnic_intr;
		}
	}
	adapter->irq = netdev->irq;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
		err = request_irq(sds_ring->irq, handler,
			flags, sds_ring->name, sds_ring);
		if (err)
			return err;
	}

	return 0;
}

static void
qlcnic_free_irq(struct qlcnic_adapter *adapter)
{
	int ring;
	struct qlcnic_host_sds_ring *sds_ring;

	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];
		free_irq(sds_ring->irq, sds_ring);
	}
}

static void
qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
{
	adapter->coal.flags = QLCNIC_INTR_DEFAULT;
	adapter->coal.normal.data.rx_time_us =
		QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
	adapter->coal.normal.data.rx_packets =
		QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
	adapter->coal.normal.data.tx_time_us =
		QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
	adapter->coal.normal.data.tx_packets =
		QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
}

static int
__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	int ring;
	struct qlcnic_host_rds_ring *rds_ring;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return -EIO;

	if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
		return 0;
	if (qlcnic_set_eswitch_port_config(adapter))
		return -EIO;

	if (qlcnic_fw_create_ctx(adapter))
		return -EIO;

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx.rds_rings[ring];
		qlcnic_post_rx_buffers(adapter, ring, rds_ring);
	}

	qlcnic_set_multi(netdev);
	qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);

	adapter->ahw.linkup = 0;

	if (adapter->max_sds_rings > 1)
		qlcnic_config_rss(adapter, 1);

	qlcnic_config_intr_coalesce(adapter);

	if (netdev->features & NETIF_F_LRO)
		qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);

	qlcnic_napi_enable(adapter);

	qlcnic_linkevent_request(adapter, 1);

	adapter->reset_context = 0;
	set_bit(__QLCNIC_DEV_UP, &adapter->state);
	return 0;
}

/* Usage: During resume and firmware recovery module.*/

static int
qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	int err = 0;

	rtnl_lock();
	if (netif_running(netdev))
		err = __qlcnic_up(adapter, netdev);
	rtnl_unlock();

	return err;
}

static void
__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
		return;

	smp_mb();
	spin_lock(&adapter->tx_clean_lock);
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	qlcnic_free_mac_list(adapter);

	if (adapter->fhash.fnum)
		qlcnic_delete_lb_filters(adapter);

	qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);

	qlcnic_napi_disable(adapter);

	qlcnic_fw_destroy_ctx(adapter);

	qlcnic_reset_rx_buffers_list(adapter);
	qlcnic_release_tx_buffers(adapter);
	spin_unlock(&adapter->tx_clean_lock);
}

/* Usage: During suspend and firmware recovery module */

static void
qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
{
	rtnl_lock();
	if (netif_running(netdev))
		__qlcnic_down(adapter, netdev);
	rtnl_unlock();

}

static int
qlcnic_attach(struct qlcnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err;

	if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
		return 0;

	err = qlcnic_napi_add(adapter, netdev);
	if (err)
		return err;

	err = qlcnic_alloc_sw_resources(adapter);
	if (err) {
		dev_err(&pdev->dev, "Error in setting sw resources\n");
		goto err_out_napi_del;
	}

	err = qlcnic_alloc_hw_resources(adapter);
	if (err) {
		dev_err(&pdev->dev, "Error in setting hw resources\n");
		goto err_out_free_sw;
	}

	err = qlcnic_request_irq(adapter);
	if (err) {
		dev_err(&pdev->dev, "failed to setup interrupt\n");
		goto err_out_free_hw;
	}

	qlcnic_init_coalesce_defaults(adapter);

	qlcnic_create_sysfs_entries(adapter);

	adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
	return 0;

err_out_free_hw:
	qlcnic_free_hw_resources(adapter);
err_out_free_sw:
	qlcnic_free_sw_resources(adapter);
err_out_napi_del:
	qlcnic_napi_del(adapter);
	return err;
}

static void
qlcnic_detach(struct qlcnic_adapter *adapter)
{
	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		return;

	qlcnic_remove_sysfs_entries(adapter);

	qlcnic_free_hw_resources(adapter);
	qlcnic_release_rx_buffers(adapter);
	qlcnic_free_irq(adapter);
	qlcnic_napi_del(adapter);
	qlcnic_free_sw_resources(adapter);

	adapter->is_up = 0;
}

void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_host_sds_ring *sds_ring;
	int ring;

	clear_bit(__QLCNIC_DEV_UP, &adapter->state);
	if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
		for (ring = 0; ring < adapter->max_sds_rings; ring++) {
			sds_ring = &adapter->recv_ctx.sds_rings[ring];
			qlcnic_disable_int(sds_ring);
		}
	}

	qlcnic_fw_destroy_ctx(adapter);

	qlcnic_detach(adapter);

	adapter->diag_test = 0;
	adapter->max_sds_rings = max_sds_rings;

	if (qlcnic_attach(adapter))
		goto out;

	if (netif_running(netdev))
		__qlcnic_up(adapter, netdev);
out:
	netif_device_attach(netdev);
}

int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	struct qlcnic_host_sds_ring *sds_ring;
	struct qlcnic_host_rds_ring *rds_ring;
	int ring;
	int ret;

	netif_device_detach(netdev);

	if (netif_running(netdev))
		__qlcnic_down(adapter, netdev);

	qlcnic_detach(adapter);

	adapter->max_sds_rings = 1;
	adapter->diag_test = test;

	ret = qlcnic_attach(adapter);
	if (ret) {
		netif_device_attach(netdev);
		return ret;
	}

	ret = qlcnic_fw_create_ctx(adapter);
	if (ret) {
		qlcnic_detach(adapter);
		netif_device_attach(netdev);
		return ret;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &adapter->recv_ctx.rds_rings[ring];
		qlcnic_post_rx_buffers(adapter, ring, rds_ring);
	}

	if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
		for (ring = 0; ring < adapter->max_sds_rings; ring++) {
			sds_ring = &adapter->recv_ctx.sds_rings[ring];
			qlcnic_enable_int(sds_ring);
		}
	}
	set_bit(__QLCNIC_DEV_UP, &adapter->state);

	return 0;
}

/* Reset context in hardware only */
static int
qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
		return -EBUSY;

	netif_device_detach(netdev);

	qlcnic_down(adapter, netdev);

	qlcnic_up(adapter, netdev);

	netif_device_attach(netdev);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return 0;
}

int
qlcnic_reset_context(struct qlcnic_adapter *adapter)
{
	int err = 0;
	struct net_device *netdev = adapter->netdev;

	if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
		return -EBUSY;

	if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {

		netif_device_detach(netdev);

		if (netif_running(netdev))
			__qlcnic_down(adapter, netdev);

		qlcnic_detach(adapter);

		if (netif_running(netdev)) {
			err = qlcnic_attach(adapter);
			if (!err)
				__qlcnic_up(adapter, netdev);
		}

		netif_device_attach(netdev);
	}

	clear_bit(__QLCNIC_RESETTING, &adapter->state);
	return err;
}

static int
qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
		struct net_device *netdev, u8 pci_using_dac)
{
	int err;
	struct pci_dev *pdev = adapter->pdev;

	adapter->rx_csum = 1;
	adapter->mc_enabled = 0;
	adapter->max_mc_count = 38;

	netdev->netdev_ops = &qlcnic_netdev_ops;
	netdev->watchdog_timeo = 5*HZ;

	qlcnic_change_mtu(netdev, netdev->mtu);

	SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);

	netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_IPV6_CSUM | NETIF_F_GRO | NETIF_F_HW_VLAN_RX);
	netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
		NETIF_F_IPV6_CSUM);

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
		netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
		netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
	}

	if (pci_using_dac) {
		netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
		netdev->features |= (NETIF_F_HW_VLAN_TX);

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
		netdev->features |= NETIF_F_LRO;
	netdev->irq = adapter->msix_entries[0].vector;

	netif_carrier_off(netdev);
	netif_stop_queue(netdev);

	err = register_netdev(netdev);
	if (err) {
		dev_err(&pdev->dev, "failed to register net device\n");
		return err;
	}

	return 0;
}

static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
{
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
		!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		*pci_using_dac = 1;
	else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
		!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
		*pci_using_dac = 0;
	else {
		dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
		return -EIO;
	}

	return 0;
}

static int __devinit
qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev = NULL;
	struct qlcnic_adapter *adapter = NULL;
	int err;
	uint8_t revision_id;
	uint8_t pci_using_dac;
	char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];

	err = pci_enable_device(pdev);
	if (err)
		return err;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		err = -ENODEV;
		goto err_out_disable_pdev;
	}

	err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
	if (err)
		goto err_out_disable_pdev;

	err = pci_request_regions(pdev, qlcnic_driver_name);
	if (err)
		goto err_out_disable_pdev;

	pci_set_master(pdev);
	pci_enable_pcie_error_reporting(pdev);

	netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
	if (!netdev) {
		dev_err(&pdev->dev, "failed to allocate net_device\n");
		err = -ENOMEM;
		goto err_out_free_res;
	}

	SET_NETDEV_DEV(netdev, &pdev->dev);

	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->dev_rst_time = jiffies;

	revision_id = pdev->revision;
	adapter->ahw.revision_id = revision_id;

	rwlock_init(&adapter->ahw.crb_lock);
	mutex_init(&adapter->ahw.mem_lock);

	spin_lock_init(&adapter->tx_clean_lock);
	INIT_LIST_HEAD(&adapter->mac_list);

	err = qlcnic_setup_pci_map(adapter);
	if (err)
		goto err_out_free_netdev;

	/* This will be reset for mezz cards */
	adapter->portnum = adapter->ahw.pci_func;

	err = qlcnic_get_board_info(adapter);
	if (err) {
		dev_err(&pdev->dev, "Error getting board config info.\n");
		goto err_out_iounmap;
	}

	err = qlcnic_setup_idc_param(adapter);
	if (err)
		goto err_out_iounmap;

	err = adapter->nic_ops->start_firmware(adapter);
	if (err) {
		dev_err(&pdev->dev, "Loading fw failed. Please Reboot\n");
		goto err_out_decr_ref;
	}

	if (qlcnic_read_mac_addr(adapter))
		dev_warn(&pdev->dev, "failed to read mac addr\n");

	if (adapter->portnum == 0) {
		get_brd_name(adapter, brd_name);

		pr_info("%s: %s Board Chip rev 0x%x\n",
				module_name(THIS_MODULE),
				brd_name, adapter->ahw.revision_id);
	}

	qlcnic_clear_stats(adapter);

	qlcnic_setup_intr(adapter);

	err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
	if (err)
		goto err_out_disable_msi;

	pci_set_drvdata(pdev, adapter);

	qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);

	switch (adapter->ahw.port_type) {
	case QLCNIC_GBE:
		dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
				adapter->netdev->name);
		break;
	case QLCNIC_XGBE:
		dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
				adapter->netdev->name);
		break;
	}

	qlcnic_alloc_lb_filters_mem(adapter);
	qlcnic_create_diag_entries(adapter);

	return 0;

err_out_disable_msi:
	qlcnic_teardown_intr(adapter);

err_out_decr_ref:
	qlcnic_clr_all_drv_state(adapter, 0);

err_out_iounmap:
	qlcnic_cleanup_pci_map(adapter);

err_out_free_netdev:
	free_netdev(netdev);

err_out_free_res:
	pci_release_regions(pdev);

err_out_disable_pdev:
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);
	return err;
}

static void __devexit qlcnic_remove(struct pci_dev *pdev)
{
	struct qlcnic_adapter *adapter;
	struct net_device *netdev;

	adapter = pci_get_drvdata(pdev);
	if (adapter == NULL)
		return;

	netdev = adapter->netdev;

	qlcnic_cancel_fw_work(adapter);

	unregister_netdev(netdev);

	qlcnic_detach(adapter);

	if (adapter->npars != NULL)
		kfree(adapter->npars);
	if (adapter->eswitch != NULL)
		kfree(adapter->eswitch);

	qlcnic_clr_all_drv_state(adapter, 0);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);

	qlcnic_free_lb_filters_mem(adapter);

	qlcnic_teardown_intr(adapter);

	qlcnic_remove_diag_entries(adapter);

	qlcnic_cleanup_pci_map(adapter);

	qlcnic_release_firmware(adapter);

	pci_disable_pcie_error_reporting(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	free_netdev(netdev);
}

static int __qlcnic_shutdown(struct pci_dev *pdev)
{
	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	int retval;

	netif_device_detach(netdev);

	qlcnic_cancel_fw_work(adapter);

	if (netif_running(netdev))
		qlcnic_down(adapter, netdev);

	qlcnic_clr_all_drv_state(adapter, 0);

	clear_bit(__QLCNIC_RESETTING, &adapter->state);

	retval = pci_save_state(pdev);
	if (retval)
		return retval;

	if (qlcnic_wol_supported(adapter)) {
		pci_enable_wake(pdev, PCI_D3cold, 1);
		pci_enable_wake(pdev, PCI_D3hot, 1);
	}

	return 0;
}

static void qlcnic_shutdown(struct pci_dev *pdev)
{
	if (__qlcnic_shutdown(pdev))
		return;

	pci_disable_device(pdev);
}

#ifdef CONFIG_PM
static int
qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int retval;

	retval = __qlcnic_shutdown(pdev);
	if (retval)
		return retval;

	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

static int
qlcnic_resume(struct pci_dev *pdev)
{
	struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
	struct net_device *netdev = adapter->netdev;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_power_state(pdev, PCI_D0);
	pci_set_master(pdev);
	pci_restore_state(pdev);

	err = adapter->nic_ops->start_firmware(adapter);
	if (err) {
		dev_err(&pdev->dev, "failed to start firmware\n");
		return err;
	}

	if (netif_running(netdev)) {
		err = qlcnic_up(adapter, netdev);
		if (err)
			goto done;

		qlcnic_restore_indev_addr(netdev, NETDEV_UP);
	}
done:
	netif_device_attach(netdev);
	qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
	return 0;
}
#endif

static int qlcnic_open(struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);
	int err;

	err = qlcnic_attach(adapter);
	if (err)
		return err;

	err = __qlcnic_up(adapter, netdev);
	if (err)
		goto err_out;

	netif_start_queue(netdev);

	return 0;

err_out:
	qlcnic_detach(adapter);
	return err;
}

/*
 * qlcnic_close - Disables a network interface entry point
 */
static int qlcnic_close(struct net_device *netdev)
{
	struct qlcnic_adapter *adapter = netdev_priv(netdev);

	__qlcnic_down(adapter, netdev);
	return 0;
}

static void
qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
{
	void *head;
	int i;

	if (!qlcnic_mac_learn)
		return;

	spin_lock_init(&adapter->mac_learn_lock);

	head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head),
		GFP_KERNEL);
	if (!head)
		return;

	adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
	adapter->fhash.fhead = (struct hlist_head *)head;

	for (i = 0; i < adapter->fhash.fmax; i++)
		INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
}

static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
{
	if (adapter->fhash.fmax && adapter->fhash.fhead)
		kfree(adapter->fhash.fhead);

	adapter->fhash.fhead = NULL;
	adapter->fhash.fmax = 0;
}

static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
	u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
{
	struct cmd_desc_type0 *hwdesc;
	struct qlcnic_nic_req *req;
	struct qlcnic_mac_req *mac_req;
	struct qlcnic_vlan_req *vlan_req;
	u32 producer;
	u64 word;

	producer = tx_ring->producer;
	hwdesc = &tx_ring->desc_head[tx_ring->producer];

	req = (struct qlcnic_nic_req *)hwdesc;
	memset(req, 0, sizeof(struct qlcnic_nic_req));
	req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);

	word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
	req->req_hdr = cpu_to_le64(word);

	mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
	mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
	memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);

	vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
	vlan_req->vlan_id = vlan_id;

	tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
}

#define QLCNIC_MAC_HASH(MAC)\
	((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))

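/*
 * Learn the source MAC (and, on eswitch-enabled adapters, the VLAN) of a
 * transmitted frame: look it up in the fhash table and, if it is new, post
 * a MAC-add request on the tx ring and add it to the hash list.
 */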
static void
qlcnic_send_filter(struct qlcnic_adapter *adapter,
		struct qlcnic_host_tx_ring *tx_ring,
		struct cmd_desc_type0 *first_desc,
		struct sk_buff *skb)
{
	struct ethhdr *phdr = (struct ethhdr *)(skb->data);
	struct qlcnic_filter *fil, *tmp_fil;
	struct hlist_node *tmp_hnode, *n;
	struct hlist_head *head;
	u64 src_addr = 0;
	__le16 vlan_id = 0;
	u8 hindex;

	if (!compare_ether_addr(phdr->h_source, adapter->mac_addr))
		return;

	if (adapter->fhash.fnum >= adapter->fhash.fmax)
		return;

	/* Only NPAR capable devices support vlan based learning*/
	if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
		vlan_id = first_desc->vlan_TCI;
	memcpy(&src_addr, phdr->h_source, ETH_ALEN);
	hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
	head = &(adapter->fhash.fhead[hindex]);

	hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
		if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
			tmp_fil->vlan_id == vlan_id) {
			tmp_fil->ftime = jiffies;
			return;
		}
	}

	fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
	if (!fil)
		return;

	qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);

	fil->ftime = jiffies;
	fil->vlan_id = vlan_id;
	memcpy(fil->faddr, &src_addr, ETH_ALEN);
	spin_lock(&adapter->mac_learn_lock);
	hlist_add_head(&(fil->fnode), head);
	adapter->fhash.fnum++;
	spin_unlock(&adapter->mac_learn_lock);
}

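/*
 * Set the per-descriptor opcode and flags for checksum offload and TSO.
 * For LSO frames the MAC/IP/TCP headers (plus a rebuilt VLAN header for
 * out-of-band tagged frames) are copied into the descriptor ring so the
 * firmware can use them as a template.
 */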
af19b491
AKS
1880static void
1881qlcnic_tso_check(struct net_device *netdev,
1882 struct qlcnic_host_tx_ring *tx_ring,
1883 struct cmd_desc_type0 *first_desc,
1884 struct sk_buff *skb)
1885{
1886 u8 opcode = TX_ETHER_PKT;
1887 __be16 protocol = skb->protocol;
8cf61f89
AKS
1888 u16 flags = 0;
1889 int copied, offset, copy_len, hdr_len = 0, tso = 0;
af19b491
AKS
1890 struct cmd_desc_type0 *hwdesc;
1891 struct vlan_ethhdr *vh;
8bfe8b91 1892 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2e9d722d 1893 u32 producer = tx_ring->producer;
7e56cac4
SC
1894 __le16 vlan_oob = first_desc->flags_opcode &
1895 cpu_to_le16(FLAGS_VLAN_OOB);
af19b491 1896
2e9d722d
AC
1897 if (*(skb->data) & BIT_0) {
1898 flags |= BIT_0;
1899 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
1900 }
1901
af19b491
AKS
1902 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1903 skb_shinfo(skb)->gso_size > 0) {
1904
1905 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1906
1907 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1908 first_desc->total_hdr_length = hdr_len;
1909 if (vlan_oob) {
1910 first_desc->total_hdr_length += VLAN_HLEN;
1911 first_desc->tcp_hdr_offset = VLAN_HLEN;
1912 first_desc->ip_hdr_offset = VLAN_HLEN;
1913 /* Only in case of TSO on a VLAN device */
1914 flags |= FLAGS_VLAN_TAGGED;
1915 }
1916
1917 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1918 TX_TCP_LSO6 : TX_TCP_LSO;
1919 tso = 1;
1920
1921 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1922 u8 l4proto;
1923
1924 if (protocol == cpu_to_be16(ETH_P_IP)) {
1925 l4proto = ip_hdr(skb)->protocol;
1926
1927 if (l4proto == IPPROTO_TCP)
1928 opcode = TX_TCP_PKT;
1929 else if (l4proto == IPPROTO_UDP)
1930 opcode = TX_UDP_PKT;
1931 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1932 l4proto = ipv6_hdr(skb)->nexthdr;
1933
1934 if (l4proto == IPPROTO_TCP)
1935 opcode = TX_TCPV6_PKT;
1936 else if (l4proto == IPPROTO_UDP)
1937 opcode = TX_UDPV6_PKT;
1938 }
1939 }
1940
1941 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
1942 first_desc->ip_hdr_offset += skb_network_offset(skb);
1943 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1944
1945 if (!tso)
1946 return;
1947
1948 /* For LSO, we need to copy the MAC/IP/TCP headers into
1949 * the descriptor ring
1950 */
af19b491
AKS
1951 copied = 0;
1952 offset = 2;
1953
1954 if (vlan_oob) {
1955 /* Create a TSO vlan header template for firmware */
1956
1957 hwdesc = &tx_ring->desc_head[producer];
1958 tx_ring->cmd_buf_arr[producer].skb = NULL;
1959
1960 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1961 hdr_len + VLAN_HLEN);
1962
1963 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1964 skb_copy_from_linear_data(skb, vh, 12);
1965 vh->h_vlan_proto = htons(ETH_P_8021Q);
7e56cac4
SC
1966 vh->h_vlan_TCI = (__be16)swab16((u16)first_desc->vlan_TCI);
1967
af19b491
AKS
1968 skb_copy_from_linear_data_offset(skb, 12,
1969 (char *)vh + 16, copy_len - 16);
1970
1971 copied = copy_len - VLAN_HLEN;
1972 offset = 0;
1973
1974 producer = get_next_index(producer, tx_ring->num_desc);
1975 }
1976
1977 while (copied < hdr_len) {
1978
1979 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1980 (hdr_len - copied));
1981
1982 hwdesc = &tx_ring->desc_head[producer];
1983 tx_ring->cmd_buf_arr[producer].skb = NULL;
1984
1985 skb_copy_from_linear_data_offset(skb, copied,
1986 (char *)hwdesc + offset, copy_len);
1987
1988 copied += copy_len;
1989 offset = 0;
1990
1991 producer = get_next_index(producer, tx_ring->num_desc);
1992 }
1993
1994 tx_ring->producer = producer;
1995 barrier();
8bfe8b91 1996 adapter->stats.lso_frames++;
af19b491
AKS
1997}
1998
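To make the header-copy loop above concrete, here is a small sketch of how many command descriptors the LSO header template consumes, assuming sizeof(struct cmd_desc_type0) is 64 bytes and no VLAN template (so the copy starts at offset 2, as in the non-vlan_oob path):

#include <stdio.h>

#define CMD_DESC_SIZE 64	/* assumption: 64-byte command descriptor */

/* mirrors the copy loop in qlcnic_tso_check(): the first descriptor takes
 * CMD_DESC_SIZE - 2 header bytes, every following one takes CMD_DESC_SIZE */
static int lso_hdr_descs(int hdr_len)
{
	int copied = 0, offset = 2, descs = 0;

	while (copied < hdr_len) {
		int copy_len = CMD_DESC_SIZE - offset;

		if (copy_len > hdr_len - copied)
			copy_len = hdr_len - copied;
		copied += copy_len;
		offset = 0;
		descs++;
	}
	return descs;
}

int main(void)
{
	/* e.g. 14-byte Ethernet + 20-byte IPv4 + 32-byte TCP with options */
	printf("66-byte header needs %d extra descriptor(s)\n",
	       lso_hdr_descs(66));
	return 0;
}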
1999static int
2000qlcnic_map_tx_skb(struct pci_dev *pdev,
2001 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
2002{
2003 struct qlcnic_skb_frag *nf;
2004 struct skb_frag_struct *frag;
2005 int i, nr_frags;
2006 dma_addr_t map;
2007
2008 nr_frags = skb_shinfo(skb)->nr_frags;
2009 nf = &pbuf->frag_array[0];
2010
2011 map = pci_map_single(pdev, skb->data,
2012 skb_headlen(skb), PCI_DMA_TODEVICE);
2013 if (pci_dma_mapping_error(pdev, map))
2014 goto out_err;
2015
2016 nf->dma = map;
2017 nf->length = skb_headlen(skb);
2018
2019 for (i = 0; i < nr_frags; i++) {
2020 frag = &skb_shinfo(skb)->frags[i];
2021 nf = &pbuf->frag_array[i+1];
2022
2023 map = pci_map_page(pdev, frag->page, frag->page_offset,
2024 frag->size, PCI_DMA_TODEVICE);
2025 if (pci_dma_mapping_error(pdev, map))
2026 goto unwind;
2027
2028 nf->dma = map;
2029 nf->length = frag->size;
2030 }
2031
2032 return 0;
2033
2034unwind:
2035 while (--i >= 0) {
2036 nf = &pbuf->frag_array[i+1];
2037 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
2038 }
2039
2040 nf = &pbuf->frag_array[0];
2041 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
2042
2043out_err:
2044 return -ENOMEM;
2045}
2046
8cf61f89
AKS
2047static int
2048qlcnic_check_tx_tagging(struct qlcnic_adapter *adapter,
2049 struct sk_buff *skb,
2050 struct cmd_desc_type0 *first_desc)
2051{
2052 u8 opcode = 0;
2053 u16 flags = 0;
2054 __be16 protocol = skb->protocol;
2055 struct vlan_ethhdr *vh;
2056
2057 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
2058 vh = (struct vlan_ethhdr *)skb->data;
2059 protocol = vh->h_vlan_encapsulated_proto;
2060 flags = FLAGS_VLAN_TAGGED;
2061 qlcnic_set_tx_vlan_tci(first_desc, ntohs(vh->h_vlan_TCI));
2062 } else if (vlan_tx_tag_present(skb)) {
2063 flags = FLAGS_VLAN_OOB;
2064 qlcnic_set_tx_vlan_tci(first_desc, vlan_tx_tag_get(skb));
2065 }
2066 if (unlikely(adapter->pvid)) {
2067 if (first_desc->vlan_TCI &&
2068 !(adapter->flags & QLCNIC_TAGGING_ENABLED))
2069 return -EIO;
2070 if (first_desc->vlan_TCI &&
2071 (adapter->flags & QLCNIC_TAGGING_ENABLED))
2072 goto set_flags;
2073
2074 flags = FLAGS_VLAN_OOB;
2075 qlcnic_set_tx_vlan_tci(first_desc, adapter->pvid);
2076 }
2077set_flags:
2078 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
2079 return 0;
2080}
2081
af19b491
AKS
2082static inline void
2083qlcnic_clear_cmddesc(u64 *desc)
2084{
2085 desc[0] = 0ULL;
2086 desc[2] = 0ULL;
8cf61f89 2087 desc[7] = 0ULL;
af19b491
AKS
2088}
2089
cdaff185 2090netdev_tx_t
af19b491
AKS
2091qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2092{
2093 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2094 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2095 struct qlcnic_cmd_buffer *pbuf;
2096 struct qlcnic_skb_frag *buffrag;
2097 struct cmd_desc_type0 *hwdesc, *first_desc;
2098 struct pci_dev *pdev;
dcb50aff 2099 struct ethhdr *phdr;
af19b491
AKS
2100 int i, k;
2101
2102 u32 producer;
2103 int frag_count, no_of_desc;
2104 u32 num_txd = tx_ring->num_desc;
2105
780ab790
AKS
2106 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
2107 netif_stop_queue(netdev);
2108 return NETDEV_TX_BUSY;
2109 }
2110
fe4d434d 2111 if (adapter->flags & QLCNIC_MACSPOOF) {
dcb50aff
RB
2112 phdr = (struct ethhdr *)skb->data;
2113 if (compare_ether_addr(phdr->h_source,
fe4d434d
SC
2114 adapter->mac_addr))
2115 goto drop_packet;
2116 }
2117
af19b491
AKS
2118 frag_count = skb_shinfo(skb)->nr_frags + 1;
2119
2120 /* 4 fragments per cmd desc */
2121 no_of_desc = (frag_count + 3) >> 2;
2122
ef71ff83 2123 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
af19b491 2124 netif_stop_queue(netdev);
ef71ff83
RB
2125 smp_mb();
2126 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
2127 netif_start_queue(netdev);
2128 else {
2129 adapter->stats.xmit_off++;
2130 return NETDEV_TX_BUSY;
2131 }
af19b491
AKS
2132 }
2133
2134 producer = tx_ring->producer;
2135 pbuf = &tx_ring->cmd_buf_arr[producer];
2136
2137 pdev = adapter->pdev;
2138
8cf61f89
AKS
2139 first_desc = hwdesc = &tx_ring->desc_head[producer];
2140 qlcnic_clear_cmddesc((u64 *)hwdesc);
2141
2142 if (qlcnic_check_tx_tagging(adapter, skb, first_desc))
2143 goto drop_packet;
2144
8ae6df97
AKS
2145 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
2146 adapter->stats.tx_dma_map_error++;
af19b491 2147 goto drop_packet;
8ae6df97 2148 }
af19b491
AKS
2149
2150 pbuf->skb = skb;
2151 pbuf->frag_count = frag_count;
2152
af19b491
AKS
2153 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
2154 qlcnic_set_tx_port(first_desc, adapter->portnum);
2155
2156 for (i = 0; i < frag_count; i++) {
2157
2158 k = i % 4;
2159
2160 if ((k == 0) && (i > 0)) {
2161 /* move to next desc. */
2162 producer = get_next_index(producer, num_txd);
2163 hwdesc = &tx_ring->desc_head[producer];
2164 qlcnic_clear_cmddesc((u64 *)hwdesc);
2165 tx_ring->cmd_buf_arr[producer].skb = NULL;
2166 }
2167
2168 buffrag = &pbuf->frag_array[i];
2169
2170 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
2171 switch (k) {
2172 case 0:
2173 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
2174 break;
2175 case 1:
2176 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
2177 break;
2178 case 2:
2179 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
2180 break;
2181 case 3:
2182 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
2183 break;
2184 }
2185 }
2186
2187 tx_ring->producer = get_next_index(producer, num_txd);
2188
2189 qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
2190
b5e5492c
AKS
2191 if (qlcnic_mac_learn)
2192 qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
2193
af19b491
AKS
2194 qlcnic_update_cmd_producer(adapter, tx_ring);
2195
2196 adapter->stats.txbytes += skb->len;
2197 adapter->stats.xmitcalled++;
2198
2199 return NETDEV_TX_OK;
2200
2201drop_packet:
2202 adapter->stats.txdropped++;
2203 dev_kfree_skb_any(skb);
2204 return NETDEV_TX_OK;
2205}
2206
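The "4 fragments per cmd desc" packing above amounts to a simple rounding; a quick sketch (frag_count is the linear part plus each page fragment, as in qlcnic_xmit_frame()):

#include <stdio.h>

/* same rounding as the driver: (frag_count + 3) >> 2 */
static int descs_needed(int nr_frags)
{
	int frag_count = nr_frags + 1;	/* +1 for the linear skb data */

	return (frag_count + 3) >> 2;
}

int main(void)
{
	printf("0 page frags -> %d descriptor(s)\n", descs_needed(0)); /* 1 */
	printf("3 page frags -> %d descriptor(s)\n", descs_needed(3)); /* 1 */
	printf("4 page frags -> %d descriptor(s)\n", descs_needed(4)); /* 2 */
	printf("8 page frags -> %d descriptor(s)\n", descs_needed(8)); /* 3 */
	return 0;
}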
2207static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
2208{
2209 struct net_device *netdev = adapter->netdev;
2210 u32 temp, temp_state, temp_val;
2211 int rv = 0;
2212
2213 temp = QLCRD32(adapter, CRB_TEMP_STATE);
2214
2215 temp_state = qlcnic_get_temp_state(temp);
2216 temp_val = qlcnic_get_temp_val(temp);
2217
2218 if (temp_state == QLCNIC_TEMP_PANIC) {
2219 dev_err(&netdev->dev,
2220 "Device temperature %d degrees C exceeds"
2221 " maximum allowed. Hardware has been shut down.\n",
2222 temp_val);
2223 rv = 1;
2224 } else if (temp_state == QLCNIC_TEMP_WARN) {
2225 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
2226 dev_err(&netdev->dev,
2227 "Device temperature %d degrees C "
2228 "exceeds operating range."
2229 " Immediate action needed.\n",
2230 temp_val);
2231 }
2232 } else {
2233 if (adapter->temp == QLCNIC_TEMP_WARN) {
2234 dev_info(&netdev->dev,
2235 "Device temperature is now %d degrees C"
2236 " in normal range.\n", temp_val);
2237 }
2238 }
2239 adapter->temp = temp_state;
2240 return rv;
2241}
2242
2243void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
2244{
2245 struct net_device *netdev = adapter->netdev;
2246
2247 if (adapter->ahw.linkup && !linkup) {
69324275 2248 netdev_info(netdev, "NIC Link is down\n");
af19b491
AKS
2249 adapter->ahw.linkup = 0;
2250 if (netif_running(netdev)) {
2251 netif_carrier_off(netdev);
2252 netif_stop_queue(netdev);
2253 }
2254 } else if (!adapter->ahw.linkup && linkup) {
69324275 2255 netdev_info(netdev, "NIC Link is up\n");
af19b491
AKS
2256 adapter->ahw.linkup = 1;
2257 if (netif_running(netdev)) {
2258 netif_carrier_on(netdev);
2259 netif_wake_queue(netdev);
2260 }
2261 }
2262}
2263
2264static void qlcnic_tx_timeout(struct net_device *netdev)
2265{
2266 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2267
2268 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2269 return;
2270
2271 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
af19b491
AKS
2272
2273 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
68bf1c68
AKS
2274 adapter->need_fw_reset = 1;
2275 else
2276 adapter->reset_context = 1;
af19b491
AKS
2277}
2278
2279static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
2280{
2281 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2282 struct net_device_stats *stats = &netdev->stats;
2283
af19b491
AKS
2284 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
2285 stats->tx_packets = adapter->stats.xmitfinished;
7e382594 2286 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
af19b491
AKS
2287 stats->tx_bytes = adapter->stats.txbytes;
2288 stats->rx_dropped = adapter->stats.rxdropped;
2289 stats->tx_dropped = adapter->stats.txdropped;
2290
2291 return stats;
2292}
2293
7eb9855d 2294static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
af19b491 2295{
af19b491
AKS
2296 u32 status;
2297
2298 status = readl(adapter->isr_int_vec);
2299
2300 if (!(status & adapter->int_vec_bit))
2301 return IRQ_NONE;
2302
2303 /* check interrupt state machine, to be sure */
2304 status = readl(adapter->crb_int_state_reg);
2305 if (!ISR_LEGACY_INT_TRIGGERED(status))
2306 return IRQ_NONE;
2307
2308 writel(0xffffffff, adapter->tgt_status_reg);
2309 /* read twice to ensure write is flushed */
2310 readl(adapter->isr_int_vec);
2311 readl(adapter->isr_int_vec);
2312
7eb9855d
AKS
2313 return IRQ_HANDLED;
2314}
2315
2316static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
2317{
2318 struct qlcnic_host_sds_ring *sds_ring = data;
2319 struct qlcnic_adapter *adapter = sds_ring->adapter;
2320
2321 if (adapter->flags & QLCNIC_MSIX_ENABLED)
2322 goto done;
2323 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
2324 writel(0xffffffff, adapter->tgt_status_reg);
2325 goto done;
2326 }
2327
2328 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2329 return IRQ_NONE;
2330
2331done:
2332 adapter->diag_cnt++;
2333 qlcnic_enable_int(sds_ring);
2334 return IRQ_HANDLED;
2335}
2336
2337static irqreturn_t qlcnic_intr(int irq, void *data)
2338{
2339 struct qlcnic_host_sds_ring *sds_ring = data;
2340 struct qlcnic_adapter *adapter = sds_ring->adapter;
2341
2342 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2343 return IRQ_NONE;
2344
af19b491
AKS
2345 napi_schedule(&sds_ring->napi);
2346
2347 return IRQ_HANDLED;
2348}
2349
2350static irqreturn_t qlcnic_msi_intr(int irq, void *data)
2351{
2352 struct qlcnic_host_sds_ring *sds_ring = data;
2353 struct qlcnic_adapter *adapter = sds_ring->adapter;
2354
2355 /* clear interrupt */
2356 writel(0xffffffff, adapter->tgt_status_reg);
2357
2358 napi_schedule(&sds_ring->napi);
2359 return IRQ_HANDLED;
2360}
2361
2362static irqreturn_t qlcnic_msix_intr(int irq, void *data)
2363{
2364 struct qlcnic_host_sds_ring *sds_ring = data;
2365
2366 napi_schedule(&sds_ring->napi);
2367 return IRQ_HANDLED;
2368}
2369
2370static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
2371{
2372 u32 sw_consumer, hw_consumer;
2373 int count = 0, i;
2374 struct qlcnic_cmd_buffer *buffer;
2375 struct pci_dev *pdev = adapter->pdev;
2376 struct net_device *netdev = adapter->netdev;
2377 struct qlcnic_skb_frag *frag;
2378 int done;
2379 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2380
2381 if (!spin_trylock(&adapter->tx_clean_lock))
2382 return 1;
2383
2384 sw_consumer = tx_ring->sw_consumer;
2385 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2386
2387 while (sw_consumer != hw_consumer) {
2388 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
2389 if (buffer->skb) {
2390 frag = &buffer->frag_array[0];
2391 pci_unmap_single(pdev, frag->dma, frag->length,
2392 PCI_DMA_TODEVICE);
2393 frag->dma = 0ULL;
2394 for (i = 1; i < buffer->frag_count; i++) {
2395 frag++;
2396 pci_unmap_page(pdev, frag->dma, frag->length,
2397 PCI_DMA_TODEVICE);
2398 frag->dma = 0ULL;
2399 }
2400
2401 adapter->stats.xmitfinished++;
2402 dev_kfree_skb_any(buffer->skb);
2403 buffer->skb = NULL;
2404 }
2405
2406 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
2407 if (++count >= MAX_STATUS_HANDLE)
2408 break;
2409 }
2410
2411 if (count && netif_running(netdev)) {
2412 tx_ring->sw_consumer = sw_consumer;
2413
2414 smp_mb();
2415
2416 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
af19b491
AKS
2417 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
2418 netif_wake_queue(netdev);
8bfe8b91 2419 adapter->stats.xmit_on++;
af19b491 2420 }
af19b491 2421 }
ef71ff83 2422 adapter->tx_timeo_cnt = 0;
af19b491
AKS
2423 }
2424 /*
2425 * If everything is freed up to the consumer, check if the ring is full.
2426 * If the ring is full then check if more needs to be freed and
2427 * schedule the callback again.
2428 *
2429 * This happens when there are 2 CPUs. One could be freeing and the
2430 * other filling it. If the ring is full when we get out of here and
2431 * the card has already interrupted the host then the host can miss the
2432 * interrupt.
2433 *
2434 * There is still a possible race condition and the host could miss an
2435 * interrupt. The card has to take care of this.
2436 */
2437 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2438 done = (sw_consumer == hw_consumer);
2439 spin_unlock(&adapter->tx_clean_lock);
2440
2441 return done;
2442}
2443
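The producer/consumer bookkeeping above relies on a power-of-two ring. The following is a hedged model only (get_next_index() and qlcnic_tx_avail() are defined elsewhere in the driver; the wrap and the one-slot gap below are assumptions about their behaviour):

#include <stdio.h>

#define NUM_TXD 256	/* assumed power-of-two ring size */

/* assumed equivalent of get_next_index() for a power-of-two ring */
static unsigned int next_index(unsigned int index)
{
	return (index + 1) & (NUM_TXD - 1);
}

/* model of the available-slot check: keep one slot free so that
 * producer == consumer always means "empty", never "full" */
static unsigned int tx_avail(unsigned int producer, unsigned int consumer)
{
	if (consumer > producer)
		return consumer - producer - 1;
	return NUM_TXD - producer + consumer - 1;
}

int main(void)
{
	printf("index after 255 wraps to %u\n", next_index(255));
	printf("producer 250, consumer 10 -> %u free slots\n",
	       tx_avail(250, 10));
	return 0;
}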
2444static int qlcnic_poll(struct napi_struct *napi, int budget)
2445{
2446 struct qlcnic_host_sds_ring *sds_ring =
2447 container_of(napi, struct qlcnic_host_sds_ring, napi);
2448
2449 struct qlcnic_adapter *adapter = sds_ring->adapter;
2450
2451 int tx_complete;
2452 int work_done;
2453
2454 tx_complete = qlcnic_process_cmd_ring(adapter);
2455
2456 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2457
2458 if ((work_done < budget) && tx_complete) {
2459 napi_complete(&sds_ring->napi);
2460 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2461 qlcnic_enable_int(sds_ring);
2462 }
2463
2464 return work_done;
2465}
2466
8f891387 2467static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2468{
2469 struct qlcnic_host_sds_ring *sds_ring =
2470 container_of(napi, struct qlcnic_host_sds_ring, napi);
2471
2472 struct qlcnic_adapter *adapter = sds_ring->adapter;
2473 int work_done;
2474
2475 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2476
2477 if (work_done < budget) {
2478 napi_complete(&sds_ring->napi);
2479 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2480 qlcnic_enable_int(sds_ring);
2481 }
2482
2483 return work_done;
2484}
2485
af19b491
AKS
2486#ifdef CONFIG_NET_POLL_CONTROLLER
2487static void qlcnic_poll_controller(struct net_device *netdev)
2488{
bf82791e
YL
2489 int ring;
2490 struct qlcnic_host_sds_ring *sds_ring;
af19b491 2491 struct qlcnic_adapter *adapter = netdev_priv(netdev);
bf82791e
YL
2492 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
2493
af19b491 2494 disable_irq(adapter->irq);
bf82791e
YL
2495 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
2496 sds_ring = &recv_ctx->sds_rings[ring];
2497 qlcnic_intr(adapter->irq, sds_ring);
2498 }
af19b491
AKS
2499 enable_irq(adapter->irq);
2500}
2501#endif
2502
6df900e9
SC
2503static void
2504qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
2505{
2506 u32 val;
2507
2508 val = adapter->portnum & 0xf;
2509 val |= encoding << 7;
2510 val |= (jiffies - adapter->dev_rst_time) << 8;
2511
2512 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
2513 adapter->dev_rst_time = jiffies;
2514}
2515
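Since the scratch word written above packs three fields into one register, a small decode helper can be useful when looking at a QLCNIC_CRB_DRV_SCRATCH value from a register dump. This sketch simply mirrors the packing in qlcnic_idc_debug_info() (encoding is 0 or 1 in this driver):

#include <stdio.h>
#include <stdint.h>

/* bits 0-3: port number, bit 7: encoding, bits 8-31: jiffies since reset */
static void decode_idc_scratch(uint32_t val)
{
	printf("port=%u encoding=%u jiffies_since_reset=%u\n",
	       val & 0xf, (val >> 7) & 0x1, val >> 8);
}

int main(void)
{
	/* port 3, encoding 1, 250 jiffies since the last reset */
	decode_idc_scratch((3 & 0xf) | (1u << 7) | (250u << 8));
	return 0;
}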
ade91f8e
AKS
2516static int
2517qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
af19b491
AKS
2518{
2519 u32 val;
2520
2521 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
2522 state != QLCNIC_DEV_NEED_QUISCENT);
2523
2524 if (qlcnic_api_lock(adapter))
ade91f8e 2525 return -EIO;
af19b491
AKS
2526
2527 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2528
2529 if (state == QLCNIC_DEV_NEED_RESET)
6d2a4724 2530 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
af19b491 2531 else if (state == QLCNIC_DEV_NEED_QUISCENT)
6d2a4724 2532 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
af19b491
AKS
2533
2534 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2535
2536 qlcnic_api_unlock(adapter);
ade91f8e
AKS
2537
2538 return 0;
af19b491
AKS
2539}
2540
1b95a839
AKS
2541static int
2542qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2543{
2544 u32 val;
2545
2546 if (qlcnic_api_lock(adapter))
2547 return -EBUSY;
2548
2549 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2550 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
1b95a839
AKS
2551 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2552
2553 qlcnic_api_unlock(adapter);
2554
2555 return 0;
2556}
2557
af19b491 2558static void
21854f02 2559qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
af19b491
AKS
2560{
2561 u32 val;
2562
2563 if (qlcnic_api_lock(adapter))
2564 goto err;
2565
31018e06 2566 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724 2567 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
31018e06 2568 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491 2569
21854f02
AKS
2570 if (failed) {
2571 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
2572 dev_info(&adapter->pdev->dev,
2573 "Device state set to Failed. Please Reboot\n");
2574 } else if (!(val & 0x11111111))
af19b491
AKS
2575 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2576
2577 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2578 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
af19b491
AKS
2579 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2580
2581 qlcnic_api_unlock(adapter);
2582err:
2583 adapter->fw_fail_cnt = 0;
2584 clear_bit(__QLCNIC_START_FW, &adapter->state);
2585 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2586}
2587
f73dfc50 2588/* Grab the API lock before checking state */
af19b491
AKS
2589static int
2590qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2591{
2592 int act, state;
2593
2594 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
31018e06 2595 act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
af19b491
AKS
2596
2597 if (((state & 0x11111111) == (act & 0x11111111)) ||
2598 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2599 return 0;
2600 else
2601 return 1;
2602}
2603
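The 0x11111111 masks above work because each PCI function owns one nibble of the DRV_ACTIVE and DRV_STATE registers. A sketch of that layout follows (the exact bit assignments inside a nibble belong to the QLC_DEV_SET_* macros; here bit 0 is assumed to be the reset ack and bit 1 the quiescent ack, consistent with the state >> 1 comparison):

#include <stdio.h>
#include <stdint.h>

/* one nibble per PCI function, as the 0x11111111 masks imply */
static uint32_t set_func_bit(uint32_t reg, int portnum, int bit)
{
	return reg | (1u << (portnum * 4 + bit));
}

int main(void)
{
	uint32_t drv_active = 0, drv_state = 0;

	drv_active = set_func_bit(drv_active, 0, 0);	/* function 0 active */
	drv_active = set_func_bit(drv_active, 2, 0);	/* function 2 active */
	drv_state  = set_func_bit(drv_state, 0, 0);	/* only function 0 acked */

	/* same test as qlcnic_check_drv_state(): did every active function ack? */
	printf("all active functions acked: %s\n",
	       (drv_state & 0x11111111) == (drv_active & 0x11111111) ?
	       "yes" : "no");
	return 0;
}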
96f8118c
SC
2604static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
2605{
2606 u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
2607
2608 if (val != QLCNIC_DRV_IDC_VER) {
2609 dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
2610 " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
2611 }
2612
2613 return 0;
2614}
2615
af19b491
AKS
2616static int
2617qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2618{
2619 u32 val, prev_state;
aa5e18c0 2620 u8 dev_init_timeo = adapter->dev_init_timeo;
6d2a4724 2621 u8 portnum = adapter->portnum;
96f8118c 2622 u8 ret;
af19b491 2623
f73dfc50
AKS
2624 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2625 return 1;
2626
af19b491
AKS
2627 if (qlcnic_api_lock(adapter))
2628 return -1;
2629
31018e06 2630 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724
AKS
2631 if (!(val & (1 << (portnum * 4)))) {
2632 QLC_DEV_SET_REF_CNT(val, portnum);
31018e06 2633 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491
AKS
2634 }
2635
2636 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
65b5b420 2637 QLCDB(adapter, HW, "Device state = %u\n", prev_state);
af19b491
AKS
2638
2639 switch (prev_state) {
2640 case QLCNIC_DEV_COLD:
bbd8c6a4 2641 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
96f8118c 2642 QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
6df900e9 2643 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2644 qlcnic_api_unlock(adapter);
2645 return 1;
2646
2647 case QLCNIC_DEV_READY:
96f8118c 2648 ret = qlcnic_check_idc_ver(adapter);
af19b491 2649 qlcnic_api_unlock(adapter);
96f8118c 2650 return ret;
af19b491
AKS
2651
2652 case QLCNIC_DEV_NEED_RESET:
2653 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2654 QLC_DEV_SET_RST_RDY(val, portnum);
af19b491
AKS
2655 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2656 break;
2657
2658 case QLCNIC_DEV_NEED_QUISCENT:
2659 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2660 QLC_DEV_SET_QSCNT_RDY(val, portnum);
af19b491
AKS
2661 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2662 break;
2663
2664 case QLCNIC_DEV_FAILED:
a7fc948f 2665 dev_err(&adapter->pdev->dev, "Device in failed state.\n");
af19b491
AKS
2666 qlcnic_api_unlock(adapter);
2667 return -1;
bbd8c6a4
AKS
2668
2669 case QLCNIC_DEV_INITIALIZING:
2670 case QLCNIC_DEV_QUISCENT:
2671 break;
af19b491
AKS
2672 }
2673
2674 qlcnic_api_unlock(adapter);
aa5e18c0
SC
2675
2676 do {
af19b491 2677 msleep(1000);
a5e463d0
SC
2678 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2679
2680 if (prev_state == QLCNIC_DEV_QUISCENT)
2681 continue;
2682 } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
af19b491 2683
65b5b420
AKS
2684 if (!dev_init_timeo) {
2685 dev_err(&adapter->pdev->dev,
2686 "Waiting for device to initialize timeout\n");
af19b491 2687 return -1;
65b5b420 2688 }
af19b491
AKS
2689
2690 if (qlcnic_api_lock(adapter))
2691 return -1;
2692
2693 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2694 QLC_DEV_CLR_RST_QSCNT(val, portnum);
af19b491
AKS
2695 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2696
96f8118c 2697 ret = qlcnic_check_idc_ver(adapter);
af19b491
AKS
2698 qlcnic_api_unlock(adapter);
2699
96f8118c 2700 return ret;
af19b491
AKS
2701}
2702
2703static void
2704qlcnic_fwinit_work(struct work_struct *work)
2705{
2706 struct qlcnic_adapter *adapter = container_of(work,
2707 struct qlcnic_adapter, fw_work.work);
3c4b23b1 2708 u32 dev_state = 0xf;
af19b491 2709
f73dfc50
AKS
2710 if (qlcnic_api_lock(adapter))
2711 goto err_ret;
af19b491 2712
a5e463d0
SC
2713 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2714 if (dev_state == QLCNIC_DEV_QUISCENT) {
2715 qlcnic_api_unlock(adapter);
2716 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2717 FW_POLL_DELAY * 2);
2718 return;
2719 }
2720
9f26f547 2721 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
3c4b23b1
AKS
2722 qlcnic_api_unlock(adapter);
2723 goto wait_npar;
9f26f547
AC
2724 }
2725
f73dfc50
AKS
2726 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
2727 dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
2728 adapter->reset_ack_timeo);
2729 goto skip_ack_check;
2730 }
2731
2732 if (!qlcnic_check_drv_state(adapter)) {
2733skip_ack_check:
2734 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
a5e463d0
SC
2735
2736 if (dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2737 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2738 QLCNIC_DEV_QUISCENT);
2739 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2740 FW_POLL_DELAY * 2);
2741 QLCDB(adapter, DRV, "Quiescing the driver\n");
6df900e9
SC
2742 qlcnic_idc_debug_info(adapter, 0);
2743
a5e463d0
SC
2744 qlcnic_api_unlock(adapter);
2745 return;
2746 }
2747
f73dfc50
AKS
2748 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2749 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2750 QLCNIC_DEV_INITIALIZING);
2751 set_bit(__QLCNIC_START_FW, &adapter->state);
2752 QLCDB(adapter, DRV, "Restarting fw\n");
6df900e9 2753 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2754 }
2755
f73dfc50
AKS
2756 qlcnic_api_unlock(adapter);
2757
9f26f547 2758 if (!adapter->nic_ops->start_firmware(adapter)) {
af19b491 2759 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2760 adapter->fw_wait_cnt = 0;
af19b491
AKS
2761 return;
2762 }
af19b491
AKS
2763 goto err_ret;
2764 }
2765
f73dfc50 2766 qlcnic_api_unlock(adapter);
aa5e18c0 2767
9f26f547 2768wait_npar:
af19b491 2769 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
f73dfc50 2770 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
65b5b420 2771
af19b491 2772 switch (dev_state) {
3c4b23b1 2773 case QLCNIC_DEV_READY:
9f26f547 2774 if (!adapter->nic_ops->start_firmware(adapter)) {
f73dfc50 2775 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2776 adapter->fw_wait_cnt = 0;
f73dfc50
AKS
2777 return;
2778 }
3c4b23b1
AKS
2779 case QLCNIC_DEV_FAILED:
2780 break;
2781 default:
2782 qlcnic_schedule_work(adapter,
2783 qlcnic_fwinit_work, FW_POLL_DELAY);
2784 return;
af19b491
AKS
2785 }
2786
2787err_ret:
f73dfc50
AKS
2788 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
2789 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
34ce3626 2790 netif_device_attach(adapter->netdev);
21854f02 2791 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
2792}
2793
2794static void
2795qlcnic_detach_work(struct work_struct *work)
2796{
2797 struct qlcnic_adapter *adapter = container_of(work,
2798 struct qlcnic_adapter, fw_work.work);
2799 struct net_device *netdev = adapter->netdev;
2800 u32 status;
2801
2802 netif_device_detach(netdev);
2803
2804 qlcnic_down(adapter, netdev);
2805
af19b491
AKS
2806 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2807
2808 if (status & QLCNIC_RCODE_FATAL_ERROR)
2809 goto err_ret;
2810
2811 if (adapter->temp == QLCNIC_TEMP_PANIC)
2812 goto err_ret;
2813
ade91f8e
AKS
2814 if (qlcnic_set_drv_state(adapter, adapter->dev_state))
2815 goto err_ret;
af19b491
AKS
2816
2817 adapter->fw_wait_cnt = 0;
2818
2819 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2820
2821 return;
2822
2823err_ret:
65b5b420
AKS
2824 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
2825 status, adapter->temp);
34ce3626 2826 netif_device_attach(netdev);
21854f02 2827 qlcnic_clr_all_drv_state(adapter, 1);
af19b491
AKS
2828}
2829
3c4b23b1
AKS
2830/* Transition NPAR state to non-operational */
2831static void
2832qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
2833{
2834 u32 state;
2835
2836 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2837 if (state == QLCNIC_DEV_NPAR_NON_OPER)
2838 return;
2839
2840 if (qlcnic_api_lock(adapter))
2841 return;
2842 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2843 qlcnic_api_unlock(adapter);
2844}
2845
f73dfc50 2846/* Transition to RESET state from READY state only */
af19b491
AKS
2847static void
2848qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2849{
2850 u32 state;
2851
cea8975e 2852 adapter->need_fw_reset = 1;
af19b491
AKS
2853 if (qlcnic_api_lock(adapter))
2854 return;
2855
2856 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2857
f73dfc50 2858 if (state == QLCNIC_DEV_READY) {
af19b491 2859 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
65b5b420 2860 QLCDB(adapter, DRV, "NEED_RESET state set\n");
6df900e9 2861 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2862 }
2863
3c4b23b1 2864 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
af19b491
AKS
2865 qlcnic_api_unlock(adapter);
2866}
2867
9f26f547
AC
2868 /* Transition to NPAR READY state from NPAR NOT READY state */
2869static void
2870qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
2871{
9f26f547
AC
2872 if (qlcnic_api_lock(adapter))
2873 return;
2874
3c4b23b1
AKS
2875 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
2876 QLCDB(adapter, DRV, "NPAR operational state set\n");
9f26f547
AC
2877
2878 qlcnic_api_unlock(adapter);
2879}
2880
af19b491
AKS
2881static void
2882qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2883 work_func_t func, int delay)
2884{
451724c8
SC
2885 if (test_bit(__QLCNIC_AER, &adapter->state))
2886 return;
2887
af19b491
AKS
2888 INIT_DELAYED_WORK(&adapter->fw_work, func);
2889 schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay));
2890}
2891
2892static void
2893qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
2894{
2895 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2896 msleep(10);
2897
2898 cancel_delayed_work_sync(&adapter->fw_work);
2899}
2900
2901static void
2902qlcnic_attach_work(struct work_struct *work)
2903{
2904 struct qlcnic_adapter *adapter = container_of(work,
2905 struct qlcnic_adapter, fw_work.work);
2906 struct net_device *netdev = adapter->netdev;
b18971d1 2907 u32 npar_state;
af19b491 2908
b18971d1
AKS
2909 if (adapter->op_mode != QLCNIC_MGMT_FUNC) {
2910 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2911 if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
2912 qlcnic_clr_all_drv_state(adapter, 0);
2913 else if (npar_state != QLCNIC_DEV_NPAR_OPER)
2914 qlcnic_schedule_work(adapter, qlcnic_attach_work,
2915 FW_POLL_DELAY);
2916 else
2917 goto attach;
2918 QLCDB(adapter, DRV, "Waiting for NPAR state to become operational\n");
2919 return;
2920 }
2921attach:
af19b491 2922 if (netif_running(netdev)) {
52486a3a 2923 if (qlcnic_up(adapter, netdev))
af19b491 2924 goto done;
af19b491 2925
aec1e845 2926 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
af19b491
AKS
2927 }
2928
af19b491 2929done:
34ce3626 2930 netif_device_attach(netdev);
af19b491
AKS
2931 adapter->fw_fail_cnt = 0;
2932 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1b95a839
AKS
2933
2934 if (!qlcnic_clr_drv_state(adapter))
2935 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2936 FW_POLL_DELAY);
af19b491
AKS
2937}
2938
2939static int
2940qlcnic_check_health(struct qlcnic_adapter *adapter)
2941{
4e70812b 2942 u32 state = 0, heartbeat;
af19b491
AKS
2943 struct net_device *netdev = adapter->netdev;
2944
2945 if (qlcnic_check_temp(adapter))
2946 goto detach;
2947
2372a5f1 2948 if (adapter->need_fw_reset)
af19b491 2949 qlcnic_dev_request_reset(adapter);
af19b491
AKS
2950
2951 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
3c4b23b1
AKS
2952 if (state == QLCNIC_DEV_NEED_RESET ||
2953 state == QLCNIC_DEV_NEED_QUISCENT) {
2954 qlcnic_set_npar_non_operational(adapter);
af19b491 2955 adapter->need_fw_reset = 1;
3c4b23b1 2956 }
af19b491 2957
4e70812b
SC
2958 heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
2959 if (heartbeat != adapter->heartbeat) {
2960 adapter->heartbeat = heartbeat;
af19b491
AKS
2961 adapter->fw_fail_cnt = 0;
2962 if (adapter->need_fw_reset)
2963 goto detach;
68bf1c68 2964
0df170b6
AKS
2965 if (adapter->reset_context &&
2966 auto_fw_reset == AUTO_FW_RESET_ENABLED) {
68bf1c68
AKS
2967 qlcnic_reset_hw_context(adapter);
2968 adapter->netdev->trans_start = jiffies;
2969 }
2970
af19b491
AKS
2971 return 0;
2972 }
2973
2974 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
2975 return 0;
2976
2977 qlcnic_dev_request_reset(adapter);
2978
0df170b6
AKS
2979 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED))
2980 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
af19b491
AKS
2981
2982 dev_info(&netdev->dev, "firmware hang detected\n");
2983
2984detach:
2985 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
2986 QLCNIC_DEV_NEED_RESET;
2987
2988 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
65b5b420
AKS
2989 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
2990
af19b491 2991 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
65b5b420
AKS
2992 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
2993 }
af19b491
AKS
2994
2995 return 1;
2996}
2997
2998static void
2999qlcnic_fw_poll_work(struct work_struct *work)
3000{
3001 struct qlcnic_adapter *adapter = container_of(work,
3002 struct qlcnic_adapter, fw_work.work);
3003
3004 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
3005 goto reschedule;
3006
3007
3008 if (qlcnic_check_health(adapter))
3009 return;
3010
b5e5492c
AKS
3011 if (adapter->fhash.fnum)
3012 qlcnic_prune_lb_filters(adapter);
3013
af19b491
AKS
3014reschedule:
3015 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
3016}
3017
451724c8
SC
3018static int qlcnic_is_first_func(struct pci_dev *pdev)
3019{
3020 struct pci_dev *oth_pdev;
3021 int val = pdev->devfn;
3022
3023 while (val-- > 0) {
3024 oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
3025 (pdev->bus), pdev->bus->number,
3026 PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
bfc978fa
AKS
3027 if (!oth_pdev)
3028 continue;
451724c8 3029
bfc978fa
AKS
3030 if (oth_pdev->current_state != PCI_D3cold) {
3031 pci_dev_put(oth_pdev);
451724c8 3032 return 0;
bfc978fa
AKS
3033 }
3034 pci_dev_put(oth_pdev);
451724c8
SC
3035 }
3036 return 1;
3037}
3038
3039static int qlcnic_attach_func(struct pci_dev *pdev)
3040{
3041 int err, first_func;
3042 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3043 struct net_device *netdev = adapter->netdev;
3044
3045 pdev->error_state = pci_channel_io_normal;
3046
3047 err = pci_enable_device(pdev);
3048 if (err)
3049 return err;
3050
3051 pci_set_power_state(pdev, PCI_D0);
3052 pci_set_master(pdev);
3053 pci_restore_state(pdev);
3054
3055 first_func = qlcnic_is_first_func(pdev);
3056
3057 if (qlcnic_api_lock(adapter))
3058 return -EINVAL;
3059
933fce12 3060 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
451724c8
SC
3061 adapter->need_fw_reset = 1;
3062 set_bit(__QLCNIC_START_FW, &adapter->state);
3063 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
3064 QLCDB(adapter, DRV, "Restarting fw\n");
3065 }
3066 qlcnic_api_unlock(adapter);
3067
3068 err = adapter->nic_ops->start_firmware(adapter);
3069 if (err)
3070 return err;
3071
3072 qlcnic_clr_drv_state(adapter);
3073 qlcnic_setup_intr(adapter);
3074
3075 if (netif_running(netdev)) {
3076 err = qlcnic_attach(adapter);
3077 if (err) {
21854f02 3078 qlcnic_clr_all_drv_state(adapter, 1);
451724c8
SC
3079 clear_bit(__QLCNIC_AER, &adapter->state);
3080 netif_device_attach(netdev);
3081 return err;
3082 }
3083
3084 err = qlcnic_up(adapter, netdev);
3085 if (err)
3086 goto done;
3087
aec1e845 3088 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
451724c8
SC
3089 }
3090 done:
3091 netif_device_attach(netdev);
3092 return err;
3093}
3094
3095static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
3096 pci_channel_state_t state)
3097{
3098 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3099 struct net_device *netdev = adapter->netdev;
3100
3101 if (state == pci_channel_io_perm_failure)
3102 return PCI_ERS_RESULT_DISCONNECT;
3103
3104 if (state == pci_channel_io_normal)
3105 return PCI_ERS_RESULT_RECOVERED;
3106
3107 set_bit(__QLCNIC_AER, &adapter->state);
3108 netif_device_detach(netdev);
3109
3110 cancel_delayed_work_sync(&adapter->fw_work);
3111
3112 if (netif_running(netdev))
3113 qlcnic_down(adapter, netdev);
3114
3115 qlcnic_detach(adapter);
3116 qlcnic_teardown_intr(adapter);
3117
3118 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3119
3120 pci_save_state(pdev);
3121 pci_disable_device(pdev);
3122
3123 return PCI_ERS_RESULT_NEED_RESET;
3124}
3125
3126static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
3127{
3128 return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
3129 PCI_ERS_RESULT_RECOVERED;
3130}
3131
3132static void qlcnic_io_resume(struct pci_dev *pdev)
3133{
3134 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3135
3136 pci_cleanup_aer_uncorrect_error_status(pdev);
3137
3138 if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
3139 test_and_clear_bit(__QLCNIC_AER, &adapter->state))
3140 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
3141 FW_POLL_DELAY);
3142}
3143
87eb743b
AC
3144static int
3145qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
3146{
3147 int err;
3148
3149 err = qlcnic_can_start_firmware(adapter);
3150 if (err)
3151 return err;
3152
78f84e1a
AKS
3153 err = qlcnic_check_npar_opertional(adapter);
3154 if (err)
3155 return err;
3c4b23b1 3156
174240a8
RB
3157 err = qlcnic_initialize_nic(adapter);
3158 if (err)
3159 return err;
3160
87eb743b
AC
3161 qlcnic_check_options(adapter);
3162
7373373d
RB
3163 err = qlcnic_set_eswitch_port_config(adapter);
3164 if (err)
3165 return err;
3166
87eb743b
AC
3167 adapter->need_fw_reset = 0;
3168
3169 return err;
3170}
3171
3172static int
3173qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
3174{
3175 return -EOPNOTSUPP;
3176}
3177
3178static int
3179qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
3180{
3181 return -EOPNOTSUPP;
3182}
3183
af19b491
AKS
3184static ssize_t
3185qlcnic_store_bridged_mode(struct device *dev,
3186 struct device_attribute *attr, const char *buf, size_t len)
3187{
3188 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3189 unsigned long new;
3190 int ret = -EINVAL;
3191
3192 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
3193 goto err_out;
3194
8a15ad1f 3195 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
3196 goto err_out;
3197
3198 if (strict_strtoul(buf, 2, &new))
3199 goto err_out;
3200
2e9d722d 3201 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
af19b491
AKS
3202 ret = len;
3203
3204err_out:
3205 return ret;
3206}
3207
3208static ssize_t
3209qlcnic_show_bridged_mode(struct device *dev,
3210 struct device_attribute *attr, char *buf)
3211{
3212 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3213 int bridged_mode = 0;
3214
3215 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3216 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
3217
3218 return sprintf(buf, "%d\n", bridged_mode);
3219}
3220
3221static struct device_attribute dev_attr_bridged_mode = {
3222 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
3223 .show = qlcnic_show_bridged_mode,
3224 .store = qlcnic_store_bridged_mode,
3225};
3226
3227static ssize_t
3228qlcnic_store_diag_mode(struct device *dev,
3229 struct device_attribute *attr, const char *buf, size_t len)
3230{
3231 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3232 unsigned long new;
3233
3234 if (strict_strtoul(buf, 2, &new))
3235 return -EINVAL;
3236
3237 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
3238 adapter->flags ^= QLCNIC_DIAG_ENABLED;
3239
3240 return len;
3241}
3242
3243static ssize_t
3244qlcnic_show_diag_mode(struct device *dev,
3245 struct device_attribute *attr, char *buf)
3246{
3247 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3248
3249 return sprintf(buf, "%d\n",
3250 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
3251}
3252
3253static struct device_attribute dev_attr_diag_mode = {
3254 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
3255 .show = qlcnic_show_diag_mode,
3256 .store = qlcnic_store_diag_mode,
3257};
3258
3259static int
3260qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
3261 loff_t offset, size_t size)
3262{
897e8c7c
DP
3263 size_t crb_size = 4;
3264
af19b491
AKS
3265 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3266 return -EIO;
3267
897e8c7c
DP
3268 if (offset < QLCNIC_PCI_CRBSPACE) {
3269 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
3270 QLCNIC_PCI_CAMQM_END))
3271 crb_size = 8;
3272 else
3273 return -EINVAL;
3274 }
af19b491 3275
897e8c7c
DP
3276 if ((size != crb_size) || (offset & (crb_size-1)))
3277 return -EINVAL;
af19b491
AKS
3278
3279 return 0;
3280}
3281
3282static ssize_t
2c3c8bea
CW
3283qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
3284 struct bin_attribute *attr,
af19b491
AKS
3285 char *buf, loff_t offset, size_t size)
3286{
3287 struct device *dev = container_of(kobj, struct device, kobj);
3288 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3289 u32 data;
897e8c7c 3290 u64 qmdata;
af19b491
AKS
3291 int ret;
3292
3293 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3294 if (ret != 0)
3295 return ret;
3296
897e8c7c
DP
3297 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3298 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
3299 memcpy(buf, &qmdata, size);
3300 } else {
3301 data = QLCRD32(adapter, offset);
3302 memcpy(buf, &data, size);
3303 }
af19b491
AKS
3304 return size;
3305}
3306
3307static ssize_t
2c3c8bea
CW
3308qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
3309 struct bin_attribute *attr,
af19b491
AKS
3310 char *buf, loff_t offset, size_t size)
3311{
3312 struct device *dev = container_of(kobj, struct device, kobj);
3313 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3314 u32 data;
897e8c7c 3315 u64 qmdata;
af19b491
AKS
3316 int ret;
3317
3318 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3319 if (ret != 0)
3320 return ret;
3321
897e8c7c
DP
3322 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3323 memcpy(&qmdata, buf, size);
3324 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
3325 } else {
3326 memcpy(&data, buf, size);
3327 QLCWR32(adapter, offset, data);
3328 }
af19b491
AKS
3329 return size;
3330}
3331
3332static int
3333qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
3334 loff_t offset, size_t size)
3335{
3336 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3337 return -EIO;
3338
3339 if ((size != 8) || (offset & 0x7))
3340 return -EIO;
3341
3342 return 0;
3343}
3344
3345static ssize_t
2c3c8bea
CW
3346qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
3347 struct bin_attribute *attr,
af19b491
AKS
3348 char *buf, loff_t offset, size_t size)
3349{
3350 struct device *dev = container_of(kobj, struct device, kobj);
3351 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3352 u64 data;
3353 int ret;
3354
3355 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3356 if (ret != 0)
3357 return ret;
3358
3359 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
3360 return -EIO;
3361
3362 memcpy(buf, &data, size);
3363
3364 return size;
3365}
3366
3367static ssize_t
2c3c8bea
CW
3368qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
3369 struct bin_attribute *attr,
af19b491
AKS
3370 char *buf, loff_t offset, size_t size)
3371{
3372 struct device *dev = container_of(kobj, struct device, kobj);
3373 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3374 u64 data;
3375 int ret;
3376
3377 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3378 if (ret != 0)
3379 return ret;
3380
3381 memcpy(&data, buf, size);
3382
3383 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
3384 return -EIO;
3385
3386 return size;
3387}
3388
3389
3390static struct bin_attribute bin_attr_crb = {
3391 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
3392 .size = 0,
3393 .read = qlcnic_sysfs_read_crb,
3394 .write = qlcnic_sysfs_write_crb,
3395};
3396
3397static struct bin_attribute bin_attr_mem = {
3398 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
3399 .size = 0,
3400 .read = qlcnic_sysfs_read_mem,
3401 .write = qlcnic_sysfs_write_mem,
3402};
3403
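The "crb" and "mem" attributes above are binary sysfs files, and qlcnic_sysfs_validate_mem() only accepts 8-byte accesses at 8-byte-aligned offsets once diag_mode has been enabled via the attribute above. A hedged user-space sketch (the sysfs path and the offset below are examples only and depend on where the adapter sits and what you want to peek at):

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* example path only; substitute the adapter's real PCI address */
	const char *path = "/sys/bus/pci/devices/0000:03:00.0/mem";
	uint64_t data;
	off_t offset = 0x100000;	/* must be 8-byte aligned */
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return 1;
	/* anything other than an aligned 8-byte access returns -EIO */
	if (pread(fd, &data, sizeof(data), offset) == (ssize_t)sizeof(data))
		printf("0x%llx\n", (unsigned long long)data);
	close(fd);
	return 0;
}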
cea8975e 3404static int
346fe763
RB
3405validate_pm_config(struct qlcnic_adapter *adapter,
3406 struct qlcnic_pm_func_cfg *pm_cfg, int count)
3407{
3408
3409 u8 src_pci_func, s_esw_id, d_esw_id;
3410 u8 dest_pci_func;
3411 int i;
3412
3413 for (i = 0; i < count; i++) {
3414 src_pci_func = pm_cfg[i].pci_func;
3415 dest_pci_func = pm_cfg[i].dest_npar;
3416 if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
3417 || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
3418 return QL_STATUS_INVALID_PARAM;
3419
3420 if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
3421 return QL_STATUS_INVALID_PARAM;
3422
3423 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
3424 return QL_STATUS_INVALID_PARAM;
3425
346fe763
RB
3426 s_esw_id = adapter->npars[src_pci_func].phy_port;
3427 d_esw_id = adapter->npars[dest_pci_func].phy_port;
3428
3429 if (s_esw_id != d_esw_id)
3430 return QL_STATUS_INVALID_PARAM;
3431
3432 }
3433 return 0;
3434
3435}
3436
3437static ssize_t
3438qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3439 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3440{
3441 struct device *dev = container_of(kobj, struct device, kobj);
3442 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3443 struct qlcnic_pm_func_cfg *pm_cfg;
3444 u32 id, action, pci_func;
3445 int count, rem, i, ret;
3446
3447 count = size / sizeof(struct qlcnic_pm_func_cfg);
3448 rem = size % sizeof(struct qlcnic_pm_func_cfg);
3449 if (rem)
3450 return QL_STATUS_INVALID_PARAM;
3451
3452 pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
3453
3454 ret = validate_pm_config(adapter, pm_cfg, count);
3455 if (ret)
3456 return ret;
3457 for (i = 0; i < count; i++) {
3458 pci_func = pm_cfg[i].pci_func;
4e8acb01 3459 action = !!pm_cfg[i].action;
346fe763
RB
3460 id = adapter->npars[pci_func].phy_port;
3461 ret = qlcnic_config_port_mirroring(adapter, id,
3462 action, pci_func);
3463 if (ret)
3464 return ret;
3465 }
3466
3467 for (i = 0; i < count; i++) {
3468 pci_func = pm_cfg[i].pci_func;
3469 id = adapter->npars[pci_func].phy_port;
4e8acb01 3470 adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
346fe763
RB
3471 adapter->npars[pci_func].dest_npar = id;
3472 }
3473 return size;
3474}
3475
3476static ssize_t
3477qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
3478 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3479{
3480 struct device *dev = container_of(kobj, struct device, kobj);
3481 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3482 struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
3483 int i;
3484
3485 if (size != sizeof(pm_cfg))
3486 return QL_STATUS_INVALID_PARAM;
3487
3488 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3489 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3490 continue;
3491 pm_cfg[i].action = adapter->npars[i].enable_pm;
3492 pm_cfg[i].dest_npar = 0;
3493 pm_cfg[i].pci_func = i;
3494 }
3495 memcpy(buf, &pm_cfg, size);
3496
3497 return size;
3498}
3499
cea8975e 3500static int
346fe763 3501validate_esw_config(struct qlcnic_adapter *adapter,
4e8acb01 3502 struct qlcnic_esw_func_cfg *esw_cfg, int count)
346fe763 3503{
7613c87b 3504 u32 op_mode;
346fe763
RB
3505 u8 pci_func;
3506 int i;
7613c87b
RB
3507
3508 op_mode = readl(adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE);
3509
346fe763
RB
3510 for (i = 0; i < count; i++) {
3511 pci_func = esw_cfg[i].pci_func;
3512 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3513 return QL_STATUS_INVALID_PARAM;
3514
4e8acb01
RB
3515 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3516 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3517 return QL_STATUS_INVALID_PARAM;
346fe763 3518
4e8acb01
RB
3519 switch (esw_cfg[i].op_mode) {
3520 case QLCNIC_PORT_DEFAULTS:
7613c87b 3521 if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
7373373d 3522 QLCNIC_NON_PRIV_FUNC) {
7613c87b 3523 esw_cfg[i].mac_anti_spoof = 0;
7373373d
RB
3524 esw_cfg[i].mac_override = 1;
3525 }
4e8acb01
RB
3526 break;
3527 case QLCNIC_ADD_VLAN:
346fe763
RB
3528 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3529 return QL_STATUS_INVALID_PARAM;
4e8acb01
RB
3530 if (!esw_cfg[i].op_type)
3531 return QL_STATUS_INVALID_PARAM;
3532 break;
3533 case QLCNIC_DEL_VLAN:
4e8acb01
RB
3534 if (!esw_cfg[i].op_type)
3535 return QL_STATUS_INVALID_PARAM;
3536 break;
3537 default:
346fe763 3538 return QL_STATUS_INVALID_PARAM;
4e8acb01 3539 }
346fe763 3540 }
346fe763
RB
3541 return 0;
3542}
3543
3544static ssize_t
3545qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3546 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3547{
3548 struct device *dev = container_of(kobj, struct device, kobj);
3549 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3550 struct qlcnic_esw_func_cfg *esw_cfg;
4e8acb01 3551 struct qlcnic_npar_info *npar;
346fe763 3552 int count, rem, i, ret;
0325d69b 3553 u8 pci_func, op_mode = 0;
346fe763
RB
3554
3555 count = size / sizeof(struct qlcnic_esw_func_cfg);
3556 rem = size % sizeof(struct qlcnic_esw_func_cfg);
3557 if (rem)
3558 return QL_STATUS_INVALID_PARAM;
3559
3560 esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
3561 ret = validate_esw_config(adapter, esw_cfg, count);
3562 if (ret)
3563 return ret;
3564
3565 for (i = 0; i < count; i++) {
0325d69b
RB
3566 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3567 if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
3568 return QL_STATUS_INVALID_PARAM;
e9a47700
RB
3569
3570 if (adapter->ahw.pci_func != esw_cfg[i].pci_func)
3571 continue;
3572
3573 op_mode = esw_cfg[i].op_mode;
3574 qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
3575 esw_cfg[i].op_mode = op_mode;
3576 esw_cfg[i].pci_func = adapter->ahw.pci_func;
3577
3578 switch (esw_cfg[i].op_mode) {
3579 case QLCNIC_PORT_DEFAULTS:
3580 qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
3581 break;
8cf61f89
AKS
3582 case QLCNIC_ADD_VLAN:
3583 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3584 break;
3585 case QLCNIC_DEL_VLAN:
3586 esw_cfg[i].vlan_id = 0;
3587 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3588 break;
0325d69b 3589 }
346fe763
RB
3590 }
3591
0325d69b
RB
3592 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3593 goto out;
e9a47700 3594
346fe763
RB
3595 for (i = 0; i < count; i++) {
3596 pci_func = esw_cfg[i].pci_func;
4e8acb01
RB
3597 npar = &adapter->npars[pci_func];
3598 switch (esw_cfg[i].op_mode) {
3599 case QLCNIC_PORT_DEFAULTS:
3600 npar->promisc_mode = esw_cfg[i].promisc_mode;
7373373d 3601 npar->mac_override = esw_cfg[i].mac_override;
4e8acb01
RB
3602 npar->offload_flags = esw_cfg[i].offload_flags;
3603 npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
3604 npar->discard_tagged = esw_cfg[i].discard_tagged;
3605 break;
3606 case QLCNIC_ADD_VLAN:
3607 npar->pvid = esw_cfg[i].vlan_id;
3608 break;
3609 case QLCNIC_DEL_VLAN:
3610 npar->pvid = 0;
3611 break;
3612 }
346fe763 3613 }
0325d69b 3614out:
346fe763
RB
3615 return size;
3616}
3617
3618static ssize_t
3619qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
3620 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3621{
3622 struct device *dev = container_of(kobj, struct device, kobj);
3623 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3624 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
4e8acb01 3625 u8 i;
346fe763
RB
3626
3627 if (size != sizeof(esw_cfg))
3628 return QL_STATUS_INVALID_PARAM;
3629
3630 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3631 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3632 continue;
4e8acb01
RB
3633 esw_cfg[i].pci_func = i;
3634 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
3635 return QL_STATUS_INVALID_PARAM;
346fe763
RB
3636 }
3637 memcpy(buf, &esw_cfg, size);
3638
3639 return size;
3640}
3641
cea8975e 3642static int
346fe763
RB
3643validate_npar_config(struct qlcnic_adapter *adapter,
3644 struct qlcnic_npar_func_cfg *np_cfg, int count)
3645{
3646 u8 pci_func, i;
3647
3648 for (i = 0; i < count; i++) {
3649 pci_func = np_cfg[i].pci_func;
3650 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3651 return QL_STATUS_INVALID_PARAM;
3652
3653 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3654 return QL_STATUS_INVALID_PARAM;
3655
3656 if (!IS_VALID_BW(np_cfg[i].min_bw)
3657 || !IS_VALID_BW(np_cfg[i].max_bw)
3658 || !IS_VALID_RX_QUEUES(np_cfg[i].max_rx_queues)
3659 || !IS_VALID_TX_QUEUES(np_cfg[i].max_tx_queues))
3660 return QL_STATUS_INVALID_PARAM;
3661 }
3662 return 0;
3663}
3664
3665static ssize_t
3666qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
3667 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3668{
3669 struct device *dev = container_of(kobj, struct device, kobj);
3670 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3671 struct qlcnic_info nic_info;
3672 struct qlcnic_npar_func_cfg *np_cfg;
3673 int i, count, rem, ret;
3674 u8 pci_func;
3675
3676 count = size / sizeof(struct qlcnic_npar_func_cfg);
3677 rem = size % sizeof(struct qlcnic_npar_func_cfg);
3678 if (rem)
3679 return QL_STATUS_INVALID_PARAM;
3680
3681 np_cfg = (struct qlcnic_npar_func_cfg *) buf;
3682 ret = validate_npar_config(adapter, np_cfg, count);
3683 if (ret)
3684 return ret;
3685
3686 for (i = 0; i < count ; i++) {
3687 pci_func = np_cfg[i].pci_func;
3688 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
3689 if (ret)
3690 return ret;
3691 nic_info.pci_func = pci_func;
3692 nic_info.min_tx_bw = np_cfg[i].min_bw;
3693 nic_info.max_tx_bw = np_cfg[i].max_bw;
3694 ret = qlcnic_set_nic_info(adapter, &nic_info);
3695 if (ret)
3696 return ret;
cea8975e
AC
3697 adapter->npars[i].min_bw = nic_info.min_tx_bw;
3698 adapter->npars[i].max_bw = nic_info.max_tx_bw;
346fe763
RB
3699 }
3700
3701 return size;
3702
3703}
3704static ssize_t
3705qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
3706 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3707{
3708 struct device *dev = container_of(kobj, struct device, kobj);
3709 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3710 struct qlcnic_info nic_info;
3711 struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
3712 int i, ret;
3713
3714 if (size != sizeof(np_cfg))
3715 return QL_STATUS_INVALID_PARAM;
3716
3717 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3718 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3719 continue;
3720 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
3721 if (ret)
3722 return ret;
3723
3724 np_cfg[i].pci_func = i;
a1c0c459 3725 np_cfg[i].op_mode = (u8)nic_info.op_mode;
346fe763
RB
3726 np_cfg[i].port_num = nic_info.phys_port;
3727 np_cfg[i].fw_capab = nic_info.capabilities;
3728 np_cfg[i].min_bw = nic_info.min_tx_bw ;
3729 np_cfg[i].max_bw = nic_info.max_tx_bw;
3730 np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
3731 np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
3732 }
3733 memcpy(buf, &np_cfg, size);
3734 return size;
3735}
3736
b6021212
AKS
3737static ssize_t
3738qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
3739 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3740{
3741 struct device *dev = container_of(kobj, struct device, kobj);
3742 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3743 struct qlcnic_esw_statistics port_stats;
3744 int ret;
3745
3746 if (size != sizeof(struct qlcnic_esw_statistics))
3747 return QL_STATUS_INVALID_PARAM;
3748
3749 if (offset >= QLCNIC_MAX_PCI_FUNC)
3750 return QL_STATUS_INVALID_PARAM;
3751
3752 memset(&port_stats, 0, size);
3753 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3754 &port_stats.rx);
3755 if (ret)
3756 return ret;
3757
3758 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3759 &port_stats.tx);
3760 if (ret)
3761 return ret;
3762
3763 memcpy(buf, &port_stats, size);
3764 return size;
3765}
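/*
 * Usage sketch (illustration only): since the handler treats the file offset
 * as the PCI function number, userspace selects a function with pread()/
 * pwrite() instead of seeking through a byte stream.  ESW_STATS_SIZE and the
 * device path are assumptions; the required length is exactly
 * sizeof(struct qlcnic_esw_statistics) from qlcnic.h.
 *
 *	char stats[ESW_STATS_SIZE];
 *	int fd = open("/sys/bus/pci/devices/0000:03:00.0/port_stats", O_RDWR);
 *	pread(fd, stats, sizeof(stats), 2);	// RX/TX counters of PCI function 2
 *	pwrite(fd, stats, sizeof(stats), 2);	// a write clears that function's counters
 *	close(fd);
 */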
3766
3767static ssize_t
3768qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
3769 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3770{
3771 struct device *dev = container_of(kobj, struct device, kobj);
3772 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3773 struct qlcnic_esw_statistics esw_stats;
3774 int ret;
3775
3776 if (size != sizeof(struct qlcnic_esw_statistics))
3777 return QL_STATUS_INVALID_PARAM;
3778
3779 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3780 return QL_STATUS_INVALID_PARAM;
3781
3782 memset(&esw_stats, 0, size);
3783 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3784 &esw_stats.rx);
3785 if (ret)
3786 return ret;
3787
3788 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3789 &esw_stats.tx);
3790 if (ret)
3791 return ret;
3792
3793 memcpy(buf, &esw_stats, size);
3794 return size;
3795}
3796
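/*
 * The two clear handlers below back the .write callbacks of "esw_stats" and
 * "port_stats": the written data itself is ignored, the file offset selects
 * the eswitch port or PCI function, and its firmware RX and TX counters are
 * reset.
 */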
3797static ssize_t
3798qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
3799 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3800{
3801 struct device *dev = container_of(kobj, struct device, kobj);
3802 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3803 int ret;
3804
3805 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3806 return QL_STATUS_INVALID_PARAM;
3807
3808 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3809 QLCNIC_QUERY_RX_COUNTER);
3810 if (ret)
3811 return ret;
3812
3813 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3814 QLCNIC_QUERY_TX_COUNTER);
3815 if (ret)
3816 return ret;
3817
3818 return size;
3819}
3820
3821static ssize_t
3822qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
3823 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3824{
3825
3826 struct device *dev = container_of(kobj, struct device, kobj);
3827 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3828 int ret;
3829
3830 if (offset >= QLCNIC_MAX_PCI_FUNC)
3831 return QL_STATUS_INVALID_PARAM;
3832
3833 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3834 QLCNIC_QUERY_RX_COUNTER);
3835 if (ret)
3836 return ret;
3837
3838 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3839 QLCNIC_QUERY_TX_COUNTER);
3840 if (ret)
3841 return ret;
3842
3843 return size;
3844}
3845
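/*
 * "pci_config" read handler: queries the firmware's PCI function table into a
 * temporary qlcnic_pci_info array and repacks it as one qlcnic_pci_func_cfg
 * record per possible function (type, default port, TX bandwidth limits and
 * default MAC address).
 */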
3846static ssize_t
3847qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
3848 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3849{
3850 struct device *dev = container_of(kobj, struct device, kobj);
3851 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3852 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
3853	struct qlcnic_pci_info *pci_info;
3854 int i, ret;
3855
3856 if (size != sizeof(pci_cfg))
3857 return QL_STATUS_INVALID_PARAM;
3858
3859 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
3860 if (!pci_info)
3861 return -ENOMEM;
3862
3863	ret = qlcnic_get_pci_info(adapter, pci_info);
3864 if (ret) {
3865 kfree(pci_info);
3866		return ret;
3867	}
3868
3869	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3870 pci_cfg[i].pci_func = pci_info[i].id;
3871 pci_cfg[i].func_type = pci_info[i].type;
3872 pci_cfg[i].port_num = pci_info[i].default_port;
3873 pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
3874 pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
3875 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
3876 }
3877 memcpy(buf, &pci_cfg, size);
3878	kfree(pci_info);
3879	return size;
3880}
3881static struct bin_attribute bin_attr_npar_config = {
3882 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
3883 .size = 0,
3884 .read = qlcnic_sysfs_read_npar_config,
3885 .write = qlcnic_sysfs_write_npar_config,
3886};
3887
3888static struct bin_attribute bin_attr_pci_config = {
3889 .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
3890 .size = 0,
3891 .read = qlcnic_sysfs_read_pci_config,
3892 .write = NULL,
3893};
3894
3895static struct bin_attribute bin_attr_port_stats = {
3896 .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
3897 .size = 0,
3898 .read = qlcnic_sysfs_get_port_stats,
3899 .write = qlcnic_sysfs_clear_port_stats,
3900};
3901
3902static struct bin_attribute bin_attr_esw_stats = {
3903 .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
3904 .size = 0,
3905 .read = qlcnic_sysfs_get_esw_stats,
3906 .write = qlcnic_sysfs_clear_esw_stats,
3907};
3908
3909static struct bin_attribute bin_attr_esw_config = {
3910 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
3911 .size = 0,
3912 .read = qlcnic_sysfs_read_esw_config,
3913 .write = qlcnic_sysfs_write_esw_config,
3914};
3915
3916static struct bin_attribute bin_attr_pm_config = {
3917 .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
3918 .size = 0,
3919 .read = qlcnic_sysfs_read_pm_config,
3920 .write = qlcnic_sysfs_write_pm_config,
3921};
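/*
 * All of the binary attributes above are declared with .size = 0, so sysfs
 * does not clamp or enforce a file size; each handler validates the transfer
 * length and, where relevant, the offset itself.  Pairing .read and .write on
 * the same node couples a "get" with either a "set" or a "clear" operation.
 */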
3922
3923static void
3924qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
3925{
3926 struct device *dev = &adapter->pdev->dev;
3927
3928 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3929 if (device_create_file(dev, &dev_attr_bridged_mode))
3930 dev_warn(dev,
3931 "failed to create bridged_mode sysfs entry\n");
3932}
3933
3934static void
3935qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
3936{
3937 struct device *dev = &adapter->pdev->dev;
3938
3939 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3940 device_remove_file(dev, &dev_attr_bridged_mode);
3941}
3942
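/*
 * Diagnostic/management sysfs nodes are created according to privilege:
 * every function gets "port_stats"; privileged functions add "diag_mode",
 * "crb" and "mem"; eswitch-enabled adapters add "esw_config"; and only the
 * management function exposes "pci_config", "npar_config", "pm_config" and
 * "esw_stats".  qlcnic_remove_diag_entries() mirrors the same checks.
 */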
3943static void
3944qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
3945{
3946 struct device *dev = &adapter->pdev->dev;
3947
3948 if (device_create_bin_file(dev, &bin_attr_port_stats))
3949		dev_info(dev, "failed to create port stats sysfs entry\n");
3950
3951 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3952 return;
3953 if (device_create_file(dev, &dev_attr_diag_mode))
3954 dev_info(dev, "failed to create diag_mode sysfs entry\n");
3955 if (device_create_bin_file(dev, &bin_attr_crb))
3956 dev_info(dev, "failed to create crb sysfs entry\n");
3957 if (device_create_bin_file(dev, &bin_attr_mem))
3958 dev_info(dev, "failed to create mem sysfs entry\n");
3959 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
3960 return;
3961 if (device_create_bin_file(dev, &bin_attr_esw_config))
3962		dev_info(dev, "failed to create esw config sysfs entry\n");
3963 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3964 return;
3965 if (device_create_bin_file(dev, &bin_attr_pci_config))
3966		dev_info(dev, "failed to create pci config sysfs entry\n");
3967	if (device_create_bin_file(dev, &bin_attr_npar_config))
3968		dev_info(dev, "failed to create npar config sysfs entry\n");
3969 if (device_create_bin_file(dev, &bin_attr_pm_config))
3970		dev_info(dev, "failed to create pm config sysfs entry\n");
3971 if (device_create_bin_file(dev, &bin_attr_esw_stats))
3972		dev_info(dev, "failed to create eswitch stats sysfs entry\n");
3973}
3974
3975static void
3976qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
3977{
3978 struct device *dev = &adapter->pdev->dev;
3979
3980 device_remove_bin_file(dev, &bin_attr_port_stats);
3981
3982 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3983 return;
3984 device_remove_file(dev, &dev_attr_diag_mode);
3985 device_remove_bin_file(dev, &bin_attr_crb);
3986 device_remove_bin_file(dev, &bin_attr_mem);
3987 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
3988 return;
3989 device_remove_bin_file(dev, &bin_attr_esw_config);
3990 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3991 return;
3992 device_remove_bin_file(dev, &bin_attr_pci_config);
3993 device_remove_bin_file(dev, &bin_attr_npar_config);
3994	device_remove_bin_file(dev, &bin_attr_pm_config);
3995	device_remove_bin_file(dev, &bin_attr_esw_stats);
3996}
3997
3998#ifdef CONFIG_INET
3999
4000#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
4001
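/*
 * Walk every IPv4 address configured on @dev and program it in (NETDEV_UP)
 * or remove it from (NETDEV_DOWN) the adapter firmware via
 * qlcnic_config_ipaddr().
 */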
4002static void
4003qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
4004 struct net_device *dev, unsigned long event)
4005{
4006 struct in_device *indev;
4007
4008 indev = in_dev_get(dev);
4009 if (!indev)
4010 return;
4011
4012 for_ifa(indev) {
4013 switch (event) {
4014 case NETDEV_UP:
4015 qlcnic_config_ipaddr(adapter,
4016 ifa->ifa_address, QLCNIC_IP_UP);
4017 break;
4018 case NETDEV_DOWN:
4019 qlcnic_config_ipaddr(adapter,
4020 ifa->ifa_address, QLCNIC_IP_DOWN);
4021 break;
4022 default:
4023 break;
4024 }
4025 } endfor_ifa(indev);
4026
4027 in_dev_put(indev);
4028}
4029
4030static void
4031qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
4032{
4033 struct qlcnic_adapter *adapter = netdev_priv(netdev);
4034 struct net_device *dev;
4035 u16 vid;
4036
4037 qlcnic_config_indev_addr(adapter, netdev, event);
4038
4039 if (!adapter->vlgrp)
4040 return;
4041
4042 for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
4043 dev = vlan_group_get_device(adapter->vlgrp, vid);
4044 if (!dev)
4045 continue;
4046
4047 qlcnic_config_indev_addr(adapter, dev, event);
4048 }
4049}
4050
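/*
 * Netdevice/inetaddr notifier callbacks: VLAN devices are unwrapped to their
 * real device, events for foreign netdevs or adapters that are not up are
 * ignored, and IPv4 address changes are forwarded to the firmware through
 * qlcnic_config_indev_addr()/qlcnic_config_ipaddr().
 */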
4051static int qlcnic_netdev_event(struct notifier_block *this,
4052 unsigned long event, void *ptr)
4053{
4054 struct qlcnic_adapter *adapter;
4055 struct net_device *dev = (struct net_device *)ptr;
4056
4057recheck:
4058 if (dev == NULL)
4059 goto done;
4060
4061 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4062 dev = vlan_dev_real_dev(dev);
4063 goto recheck;
4064 }
4065
4066 if (!is_qlcnic_netdev(dev))
4067 goto done;
4068
4069 adapter = netdev_priv(dev);
4070
4071 if (!adapter)
4072 goto done;
4073
4074	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
4075 goto done;
4076
4077	qlcnic_config_indev_addr(adapter, dev, event);
4078done:
4079 return NOTIFY_DONE;
4080}
4081
4082static int
4083qlcnic_inetaddr_event(struct notifier_block *this,
4084 unsigned long event, void *ptr)
4085{
4086 struct qlcnic_adapter *adapter;
4087 struct net_device *dev;
4088
4089 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
4090
4091 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
4092
4093recheck:
4094	if (dev == NULL)
4095 goto done;
4096
4097 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4098 dev = vlan_dev_real_dev(dev);
4099 goto recheck;
4100 }
4101
4102 if (!is_qlcnic_netdev(dev))
4103 goto done;
4104
4105 adapter = netdev_priv(dev);
4106
4107	if (!adapter)
4108 goto done;
4109
4110	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
4111 goto done;
4112
4113 switch (event) {
4114 case NETDEV_UP:
4115 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
4116 break;
4117 case NETDEV_DOWN:
4118 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
4119 break;
4120 default:
4121 break;
4122 }
4123
4124done:
4125 return NOTIFY_DONE;
4126}
4127
4128static struct notifier_block qlcnic_netdev_cb = {
4129 .notifier_call = qlcnic_netdev_event,
4130};
4131
4132static struct notifier_block qlcnic_inetaddr_cb = {
4133 .notifier_call = qlcnic_inetaddr_event,
4134};
4135#else
4136static void
4137qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
4138{ }
4139#endif
4140static struct pci_error_handlers qlcnic_err_handler = {
4141 .error_detected = qlcnic_io_error_detected,
4142 .slot_reset = qlcnic_io_slot_reset,
4143 .resume = qlcnic_io_resume,
4144};
4145
4146static struct pci_driver qlcnic_driver = {
4147 .name = qlcnic_driver_name,
4148 .id_table = qlcnic_pci_tbl,
4149 .probe = qlcnic_probe,
4150 .remove = __devexit_p(qlcnic_remove),
4151#ifdef CONFIG_PM
4152 .suspend = qlcnic_suspend,
4153 .resume = qlcnic_resume,
4154#endif
4155 .shutdown = qlcnic_shutdown,
4156 .err_handler = &qlcnic_err_handler
4157
4158};
4159
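/*
 * Module init/exit: the netdevice and inetaddr notifiers are registered
 * before the PCI driver and unregistered again both on normal unload and
 * when pci_register_driver() fails.
 */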
4160static int __init qlcnic_init_module(void)
4161{
4162	int ret;
4163
4164 printk(KERN_INFO "%s\n", qlcnic_driver_string);
4165
4166#ifdef CONFIG_INET
4167 register_netdevice_notifier(&qlcnic_netdev_cb);
4168 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
4169#endif
4170
4171 ret = pci_register_driver(&qlcnic_driver);
4172 if (ret) {
4173#ifdef CONFIG_INET
4174 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4175 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4176#endif
4177 }
4178
4179	return ret;
4180}
4181
4182module_init(qlcnic_init_module);
4183
4184static void __exit qlcnic_exit_module(void)
4185{
4186
4187 pci_unregister_driver(&qlcnic_driver);
4188
4189#ifdef CONFIG_INET
4190 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4191 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4192#endif
4193}
4194
4195module_exit(qlcnic_exit_module);