af19b491
AKS
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
5a0e3ad6 25#include <linux/slab.h>
af19b491
AKS
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28
29#include "qlcnic.h"
30
7e56cac4 31#include <linux/swab.h>
af19b491
AKS
32#include <linux/dma-mapping.h>
33#include <linux/if_vlan.h>
34#include <net/ip.h>
35#include <linux/ipv6.h>
36#include <linux/inetdevice.h>
37#include <linux/sysfs.h>
451724c8 38#include <linux/aer.h>
af19b491 39
7f9a0c34 40MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
af19b491
AKS
41MODULE_LICENSE("GPL");
42MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
43MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
44
45char qlcnic_driver_name[] = "qlcnic";
7f9a0c34
SV
46static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
47 "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
af19b491 48
f7ec804a 49static struct workqueue_struct *qlcnic_wq;
b5e5492c
AKS
50static int qlcnic_mac_learn;
51module_param(qlcnic_mac_learn, int, 0644);
52MODULE_PARM_DESC(qlcnic_mac_learn, "MAC filter (0=disabled, 1=enabled)");
53
af19b491
AKS
54static int use_msi = 1;
55module_param(use_msi, int, 0644);
56MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
57
58static int use_msi_x = 1;
59module_param(use_msi_x, int, 0644);
60MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
61
62static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
63module_param(auto_fw_reset, int, 0644);
64MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
65
4d5bdb38
AKS
66static int load_fw_file;
67module_param(load_fw_file, int, 0644);
68MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
69
2e9d722d
AC
70static int qlcnic_config_npars;
71module_param(qlcnic_config_npars, int, 0644);
72MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
73
af19b491
AKS
74static int __devinit qlcnic_probe(struct pci_dev *pdev,
75 const struct pci_device_id *ent);
76static void __devexit qlcnic_remove(struct pci_dev *pdev);
77static int qlcnic_open(struct net_device *netdev);
78static int qlcnic_close(struct net_device *netdev);
af19b491 79static void qlcnic_tx_timeout(struct net_device *netdev);
af19b491
AKS
80static void qlcnic_attach_work(struct work_struct *work);
81static void qlcnic_fwinit_work(struct work_struct *work);
82static void qlcnic_fw_poll_work(struct work_struct *work);
83static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
84 work_func_t func, int delay);
85static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
86static int qlcnic_poll(struct napi_struct *napi, int budget);
8f891387 87static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
af19b491
AKS
88#ifdef CONFIG_NET_POLL_CONTROLLER
89static void qlcnic_poll_controller(struct net_device *netdev);
90#endif
91
92static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
93static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
94static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
95static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
96
6df900e9 97static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
21854f02 98static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
af19b491
AKS
99static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
100
7eb9855d 101static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
af19b491
AKS
102static irqreturn_t qlcnic_intr(int irq, void *data);
103static irqreturn_t qlcnic_msi_intr(int irq, void *data);
104static irqreturn_t qlcnic_msix_intr(int irq, void *data);
105
106static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
aec1e845 107static void qlcnic_restore_indev_addr(struct net_device *dev, unsigned long);
9f26f547
AC
108static int qlcnic_start_firmware(struct qlcnic_adapter *);
109
b5e5492c
AKS
110static void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter);
111static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
9f26f547 112static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
9f26f547
AC
113static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
114static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
115static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
0325d69b
RB
116static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
117 struct qlcnic_esw_func_cfg *);
af19b491
AKS
118/* PCI Device ID Table */
119#define ENTRY(device) \
120 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
121 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
122
123#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
124
6a902881 125static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
af19b491
AKS
126 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
127 {0,}
128};
129
130MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
131
132
133void
134qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
135 struct qlcnic_host_tx_ring *tx_ring)
136{
137 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
af19b491
AKS
138}
139
140static const u32 msi_tgt_status[8] = {
141 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
142 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
143 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
144 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
145};
146
147static const
148struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
149
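/*
 * Per-ring interrupt mask helpers: writing 0 to the ring's CRB interrupt
 * mask disables events for that SDS ring, writing 1 re-enables them.  For
 * legacy INTx (non-MSI family) the enable path also writes the target mask
 * register so the function's shared interrupt line is unmasked.
 */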
150static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
151{
152 writel(0, sds_ring->crb_intr_mask);
153}
154
155static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
156{
157 struct qlcnic_adapter *adapter = sds_ring->adapter;
158
159 writel(0x1, sds_ring->crb_intr_mask);
160
161 if (!QLCNIC_IS_MSI_FAMILY(adapter))
162 writel(0xfbff, adapter->tgt_mask_reg);
163}
164
165static int
166qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
167{
168 int size = sizeof(struct qlcnic_host_sds_ring) * count;
169
170 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
171
807540ba 172 return recv_ctx->sds_rings == NULL;
af19b491
AKS
173}
174
175static void
176qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
177{
178 if (recv_ctx->sds_rings != NULL)
179 kfree(recv_ctx->sds_rings);
180
181 recv_ctx->sds_rings = NULL;
182}
183
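/*
 * Allocate the SDS ring array and register one NAPI context per ring.
 * The last ring is registered with qlcnic_poll(), which also reaps TX
 * completions and gets a proportionally smaller weight; the remaining
 * rings use the RX-only qlcnic_rx_poll() handler.
 */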
184static int
185qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
186{
187 int ring;
188 struct qlcnic_host_sds_ring *sds_ring;
189 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
190
191 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
192 return -ENOMEM;
193
194 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
195 sds_ring = &recv_ctx->sds_rings[ring];
8f891387 196
197 if (ring == adapter->max_sds_rings - 1)
198 netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
199 QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
200 else
201 netif_napi_add(netdev, &sds_ring->napi,
202 qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
af19b491
AKS
203 }
204
205 return 0;
206}
207
208static void
209qlcnic_napi_del(struct qlcnic_adapter *adapter)
210{
211 int ring;
212 struct qlcnic_host_sds_ring *sds_ring;
213 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
214
215 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
216 sds_ring = &recv_ctx->sds_rings[ring];
217 netif_napi_del(&sds_ring->napi);
218 }
219
220 qlcnic_free_sds_rings(&adapter->recv_ctx);
221}
222
223static void
224qlcnic_napi_enable(struct qlcnic_adapter *adapter)
225{
226 int ring;
227 struct qlcnic_host_sds_ring *sds_ring;
228 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
229
780ab790
AKS
230 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
231 return;
232
af19b491
AKS
233 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
234 sds_ring = &recv_ctx->sds_rings[ring];
235 napi_enable(&sds_ring->napi);
236 qlcnic_enable_int(sds_ring);
237 }
238}
239
240static void
241qlcnic_napi_disable(struct qlcnic_adapter *adapter)
242{
243 int ring;
244 struct qlcnic_host_sds_ring *sds_ring;
245 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
246
780ab790
AKS
247 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
248 return;
249
af19b491
AKS
250 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
251 sds_ring = &recv_ctx->sds_rings[ring];
252 qlcnic_disable_int(sds_ring);
253 napi_synchronize(&sds_ring->napi);
254 napi_disable(&sds_ring->napi);
255 }
256}
257
258static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
259{
260 memset(&adapter->stats, 0, sizeof(adapter->stats));
af19b491
AKS
261}
262
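/*
 * Toggle MSI-X directly through PCI config space: when enabling, the
 * PCI_MSIX_FLAGS_ENABLE bit is OR'ed into the capability's message
 * control dword; when disabling, the control dword is simply zeroed.
 */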
af19b491
AKS
263static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
264{
265 u32 control;
266 int pos;
267
268 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
269 if (pos) {
270 pci_read_config_dword(pdev, pos, &control);
271 if (enable)
272 control |= PCI_MSIX_FLAGS_ENABLE;
273 else
274 control = 0;
275 pci_write_config_dword(pdev, pos, control);
276 }
277}
278
279static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
280{
281 int i;
282
283 for (i = 0; i < count; i++)
284 adapter->msix_entries[i].entry = i;
285}
286
287static int
288qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
289{
2e9d722d 290 u8 mac_addr[ETH_ALEN];
af19b491
AKS
291 struct net_device *netdev = adapter->netdev;
292 struct pci_dev *pdev = adapter->pdev;
293
da48e6c3 294 if (qlcnic_get_mac_address(adapter, mac_addr) != 0)
af19b491
AKS
295 return -EIO;
296
2e9d722d 297 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
af19b491
AKS
298 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
299 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
300
301 /* set station address */
302
303 if (!is_valid_ether_addr(netdev->perm_addr))
304 dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
305 netdev->dev_addr);
306
307 return 0;
308}
309
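/*
 * ndo_set_mac_address handler: refused while the eswitch configuration
 * disallows MAC override.  If the interface is up, the device is briefly
 * detached and NAPI disabled so the new address and the multicast list
 * can be reprogrammed safely before traffic resumes.
 */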
310static int qlcnic_set_mac(struct net_device *netdev, void *p)
311{
312 struct qlcnic_adapter *adapter = netdev_priv(netdev);
313 struct sockaddr *addr = p;
314
7373373d
RB
315 if ((adapter->flags & QLCNIC_MAC_OVERRIDE_DISABLED))
316 return -EOPNOTSUPP;
317
af19b491
AKS
318 if (!is_valid_ether_addr(addr->sa_data))
319 return -EINVAL;
320
8a15ad1f 321 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
af19b491
AKS
322 netif_device_detach(netdev);
323 qlcnic_napi_disable(adapter);
324 }
325
326 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
327 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
328 qlcnic_set_multi(adapter->netdev);
329
8a15ad1f 330 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
af19b491
AKS
331 netif_device_attach(netdev);
332 qlcnic_napi_enable(adapter);
333 }
334 return 0;
335}
336
d5790663
AKS
337static void qlcnic_vlan_rx_register(struct net_device *netdev,
338 struct vlan_group *grp)
339{
340 struct qlcnic_adapter *adapter = netdev_priv(netdev);
341 adapter->vlgrp = grp;
342}
343
af19b491
AKS
344static const struct net_device_ops qlcnic_netdev_ops = {
345 .ndo_open = qlcnic_open,
346 .ndo_stop = qlcnic_close,
347 .ndo_start_xmit = qlcnic_xmit_frame,
348 .ndo_get_stats = qlcnic_get_stats,
349 .ndo_validate_addr = eth_validate_addr,
350 .ndo_set_multicast_list = qlcnic_set_multi,
351 .ndo_set_mac_address = qlcnic_set_mac,
352 .ndo_change_mtu = qlcnic_change_mtu,
353 .ndo_tx_timeout = qlcnic_tx_timeout,
d5790663 354 .ndo_vlan_rx_register = qlcnic_vlan_rx_register,
af19b491
AKS
355#ifdef CONFIG_NET_POLL_CONTROLLER
356 .ndo_poll_controller = qlcnic_poll_controller,
357#endif
358};
359
2e9d722d 360static struct qlcnic_nic_template qlcnic_ops = {
2e9d722d
AC
361 .config_bridged_mode = qlcnic_config_bridged_mode,
362 .config_led = qlcnic_config_led,
9f26f547
AC
363 .start_firmware = qlcnic_start_firmware
364};
365
366static struct qlcnic_nic_template qlcnic_vf_ops = {
9f26f547
AC
367 .config_bridged_mode = qlcnicvf_config_bridged_mode,
368 .config_led = qlcnicvf_config_led,
9f26f547 369 .start_firmware = qlcnicvf_start_firmware
2e9d722d
AC
370};
371
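/*
 * Interrupt setup policy: try MSI-X first (one vector per RSS ring when
 * RSS is supported), fall back to single-vector MSI if that fails, and
 * finally to shared legacy INTx using the per-function CRB status and
 * mask registers.
 */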
af19b491
AKS
372static void
373qlcnic_setup_intr(struct qlcnic_adapter *adapter)
374{
375 const struct qlcnic_legacy_intr_set *legacy_intrp;
376 struct pci_dev *pdev = adapter->pdev;
377 int err, num_msix;
378
379 if (adapter->rss_supported) {
380 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
381 MSIX_ENTRIES_PER_ADAPTER : 2;
382 } else
383 num_msix = 1;
384
385 adapter->max_sds_rings = 1;
386
387 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
388
389 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
390
391 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
392 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
393 legacy_intrp->tgt_status_reg);
394 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
395 legacy_intrp->tgt_mask_reg);
396 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
397
398 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
399 ISR_INT_STATE_REG);
400
401 qlcnic_set_msix_bit(pdev, 0);
402
403 if (adapter->msix_supported) {
404
405 qlcnic_init_msix_entries(adapter, num_msix);
406 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
407 if (err == 0) {
408 adapter->flags |= QLCNIC_MSIX_ENABLED;
409 qlcnic_set_msix_bit(pdev, 1);
410
411 if (adapter->rss_supported)
412 adapter->max_sds_rings = num_msix;
413
414 dev_info(&pdev->dev, "using msi-x interrupts\n");
415 return;
416 }
417
418 if (err > 0)
419 pci_disable_msix(pdev);
420
421 /* fall through for msi */
422 }
423
424 if (use_msi && !pci_enable_msi(pdev)) {
425 adapter->flags |= QLCNIC_MSI_ENABLED;
426 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
427 msi_tgt_status[adapter->ahw.pci_func]);
428 dev_info(&pdev->dev, "using msi interrupts\n");
429 adapter->msix_entries[0].vector = pdev->irq;
430 return;
431 }
432
433 dev_info(&pdev->dev, "using legacy interrupts\n");
434 adapter->msix_entries[0].vector = pdev->irq;
435}
436
437static void
438qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
439{
440 if (adapter->flags & QLCNIC_MSIX_ENABLED)
441 pci_disable_msix(adapter->pdev);
442 if (adapter->flags & QLCNIC_MSI_ENABLED)
443 pci_disable_msi(adapter->pdev);
444}
445
446static void
447qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
448{
449 if (adapter->ahw.pci_base0 != NULL)
450 iounmap(adapter->ahw.pci_base0);
451}
452
346fe763
RB
453static int
454qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
455{
e88db3bd 456 struct qlcnic_pci_info *pci_info;
900853a4 457 int i, ret = 0;
346fe763
RB
458 u8 pfn;
459
e88db3bd
DC
460 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
461 if (!pci_info)
462 return -ENOMEM;
463
ca315ac2 464 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
346fe763 465 QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
e88db3bd 466 if (!adapter->npars) {
900853a4 467 ret = -ENOMEM;
e88db3bd
DC
468 goto err_pci_info;
469 }
346fe763 470
ca315ac2 471 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
346fe763
RB
472 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
473 if (!adapter->eswitch) {
900853a4 474 ret = -ENOMEM;
ca315ac2 475 goto err_npars;
346fe763
RB
476 }
477
478 ret = qlcnic_get_pci_info(adapter, pci_info);
ca315ac2
DC
479 if (ret)
480 goto err_eswitch;
346fe763 481
ca315ac2
DC
482 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
483 pfn = pci_info[i].id;
484 if (pfn > QLCNIC_MAX_PCI_FUNC)
485 return QL_STATUS_INVALID_PARAM;
a1c0c459
SC
486 adapter->npars[pfn].active = (u8)pci_info[i].active;
487 adapter->npars[pfn].type = (u8)pci_info[i].type;
488 adapter->npars[pfn].phy_port = (u8)pci_info[i].default_port;
ca315ac2
DC
489 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
490 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
346fe763
RB
491 }
492
ca315ac2
DC
493 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
494 adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
495
e88db3bd 496 kfree(pci_info);
ca315ac2
DC
497 return 0;
498
499err_eswitch:
346fe763
RB
500 kfree(adapter->eswitch);
501 adapter->eswitch = NULL;
ca315ac2 502err_npars:
346fe763 503 kfree(adapter->npars);
ca315ac2 504 adapter->npars = NULL;
e88db3bd
DC
505err_pci_info:
506 kfree(pci_info);
346fe763
RB
507
508 return ret;
509}
510
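/*
 * Program the QLCNIC_DRV_OP_MODE register: with qlcnic_config_npars the
 * requested privilege level is applied to every other NIC-type function,
 * otherwise only this function's field is set to QLCNIC_MGMT_FUNC.  The
 * register update is serialized with qlcnic_api_lock().
 */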
2e9d722d
AC
511static int
512qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
513{
514 u8 id;
515 u32 ref_count;
516 int i, ret = 1;
517 u32 data = QLCNIC_MGMT_FUNC;
518 void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
519
520 /* If other drivers are not in use set their privilege level */
31018e06 521 ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
2e9d722d
AC
522 ret = qlcnic_api_lock(adapter);
523 if (ret)
524 goto err_lock;
2e9d722d 525
0e33c664
AC
526 if (qlcnic_config_npars) {
527 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
346fe763 528 id = i;
0e33c664
AC
529 if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
530 id == adapter->ahw.pci_func)
531 continue;
532 data |= (qlcnic_config_npars &
533 QLC_DEV_SET_DRV(0xf, id));
534 }
535 } else {
536 data = readl(priv_op);
537 data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw.pci_func)) |
538 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
539 adapter->ahw.pci_func));
2e9d722d
AC
540 }
541 writel(data, priv_op);
2e9d722d
AC
542 qlcnic_api_unlock(adapter);
543err_lock:
544 return ret;
545}
546
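/*
 * Read the firmware HAL version, derive the PCI function number from the
 * MSI-X table offset, determine the function's privilege level from the
 * driver-op-mode register, and select the nic_ops accordingly
 * (qlcnic_vf_ops for non-privileged functions, qlcnic_ops otherwise).
 */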
0866d96d
AC
547static void
548qlcnic_check_vf(struct qlcnic_adapter *adapter)
2e9d722d
AC
549{
550 void __iomem *msix_base_addr;
551 void __iomem *priv_op;
552 u32 func;
553 u32 msix_base;
554 u32 op_mode, priv_level;
555
556 /* Determine FW API version */
557 adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API);
2e9d722d
AC
558
559 /* Find PCI function number */
560 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
561 msix_base_addr = adapter->ahw.pci_base0 + QLCNIC_MSIX_BASE;
562 msix_base = readl(msix_base_addr);
563 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
564 adapter->ahw.pci_func = func;
565
566 /* Determine function privilege level */
567 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
568 op_mode = readl(priv_op);
0e33c664 569 if (op_mode == QLC_DEV_DRV_DEFAULT)
2e9d722d 570 priv_level = QLCNIC_MGMT_FUNC;
0e33c664 571 else
2e9d722d
AC
572 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
573
0866d96d 574 if (priv_level == QLCNIC_NON_PRIV_FUNC) {
9f26f547
AC
575 adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
576 dev_info(&adapter->pdev->dev,
577 "HAL Version: %d Non Privileged function\n",
578 adapter->fw_hal_version);
579 adapter->nic_ops = &qlcnic_vf_ops;
0866d96d
AC
580 } else
581 adapter->nic_ops = &qlcnic_ops;
2e9d722d
AC
582}
583
af19b491
AKS
584static int
585qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
586{
587 void __iomem *mem_ptr0 = NULL;
588 resource_size_t mem_base;
589 unsigned long mem_len, pci_len0 = 0;
590
591 struct pci_dev *pdev = adapter->pdev;
af19b491 592
af19b491
AKS
593 /* remap phys address */
594 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
595 mem_len = pci_resource_len(pdev, 0);
596
597 if (mem_len == QLCNIC_PCI_2MB_SIZE) {
598
599 mem_ptr0 = pci_ioremap_bar(pdev, 0);
600 if (mem_ptr0 == NULL) {
601 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
602 return -EIO;
603 }
604 pci_len0 = mem_len;
605 } else {
606 return -EIO;
607 }
608
609 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
610
611 adapter->ahw.pci_base0 = mem_ptr0;
612 adapter->ahw.pci_len0 = pci_len0;
613
0866d96d 614 qlcnic_check_vf(adapter);
2e9d722d 615
af19b491 616 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
2e9d722d 617 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func)));
af19b491
AKS
618
619 return 0;
620}
621
622static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
623{
624 struct pci_dev *pdev = adapter->pdev;
625 int i, found = 0;
626
627 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
628 if (qlcnic_boards[i].vendor == pdev->vendor &&
629 qlcnic_boards[i].device == pdev->device &&
630 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
631 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
02f6e46f
SC
 632 sprintf(name, "%pM: %s",
633 adapter->mac_addr,
634 qlcnic_boards[i].short_name);
af19b491
AKS
635 found = 1;
636 break;
637 }
638
639 }
640
641 if (!found)
7f9a0c34 642 sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
af19b491
AKS
643}
644
645static void
646qlcnic_check_options(struct qlcnic_adapter *adapter)
647{
648 u32 fw_major, fw_minor, fw_build;
af19b491 649 struct pci_dev *pdev = adapter->pdev;
af19b491
AKS
650
651 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
652 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
653 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
654
655 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
656
251a84c9
AKS
657 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
658 fw_major, fw_minor, fw_build);
af19b491 659 if (adapter->ahw.port_type == QLCNIC_XGBE) {
90d19005
SC
660 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
661 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_VF;
662 adapter->max_rxd = MAX_RCV_DESCRIPTORS_VF;
663 } else {
664 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
665 adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
666 }
667
af19b491 668 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
90d19005
SC
669 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
670
af19b491
AKS
671 } else if (adapter->ahw.port_type == QLCNIC_GBE) {
672 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
673 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
90d19005
SC
674 adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
675 adapter->max_rxd = MAX_RCV_DESCRIPTORS_1G;
af19b491
AKS
676 }
677
678 adapter->msix_supported = !!use_msi_x;
679 adapter->rss_supported = !!use_msi_x;
680
681 adapter->num_txd = MAX_CMD_DESCRIPTORS;
682
251b036a 683 adapter->max_rds_rings = MAX_RDS_RINGS;
af19b491
AKS
684}
685
174240a8
RB
686static int
687qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
688{
689 int err;
690 struct qlcnic_info nic_info;
691
692 err = qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func);
693 if (err)
694 return err;
695
a1c0c459 696 adapter->physical_port = (u8)nic_info.phys_port;
174240a8
RB
697 adapter->switch_mode = nic_info.switch_mode;
698 adapter->max_tx_ques = nic_info.max_tx_ques;
699 adapter->max_rx_ques = nic_info.max_rx_ques;
700 adapter->capabilities = nic_info.capabilities;
701 adapter->max_mac_filters = nic_info.max_mac_filters;
702 adapter->max_mtu = nic_info.max_mtu;
703
704 if (adapter->capabilities & BIT_6)
705 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
706 else
707 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
708
709 return err;
710}
711
8cf61f89
AKS
712static void
713qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
714 struct qlcnic_esw_func_cfg *esw_cfg)
715{
716 if (esw_cfg->discard_tagged)
717 adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
718 else
719 adapter->flags |= QLCNIC_TAGGING_ENABLED;
720
721 if (esw_cfg->vlan_id)
722 adapter->pvid = esw_cfg->vlan_id;
723 else
724 adapter->pvid = 0;
725}
726
0325d69b
RB
727static void
728qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
729 struct qlcnic_esw_func_cfg *esw_cfg)
730{
ee07c1a7
RB
731 adapter->flags &= ~(QLCNIC_MACSPOOF | QLCNIC_MAC_OVERRIDE_DISABLED |
732 QLCNIC_PROMISC_DISABLED);
7613c87b
RB
733
734 if (esw_cfg->mac_anti_spoof)
735 adapter->flags |= QLCNIC_MACSPOOF;
fe4d434d 736
7373373d
RB
737 if (!esw_cfg->mac_override)
738 adapter->flags |= QLCNIC_MAC_OVERRIDE_DISABLED;
739
ee07c1a7
RB
740 if (!esw_cfg->promisc_mode)
741 adapter->flags |= QLCNIC_PROMISC_DISABLED;
742
0325d69b
RB
743 qlcnic_set_netdev_features(adapter, esw_cfg);
744}
745
746static int
747qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
748{
749 struct qlcnic_esw_func_cfg esw_cfg;
750
751 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
752 return 0;
753
754 esw_cfg.pci_func = adapter->ahw.pci_func;
755 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
756 return -EIO;
8cf61f89 757 qlcnic_set_vlan_config(adapter, &esw_cfg);
0325d69b
RB
758 qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
759
760 return 0;
761}
762
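/*
 * Apply the eswitch port offload flags to the net_device: BIT_0 enables
 * the checksum/GRO (and, when the firmware is capable, TSO/LRO) feature
 * set, with TSO and TSO6 individually gated by BIT_1 and BIT_2; clearing
 * BIT_0 removes those features and turns off RX checksumming.
 */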
763static void
764qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
765 struct qlcnic_esw_func_cfg *esw_cfg)
766{
767 struct net_device *netdev = adapter->netdev;
768 unsigned long features, vlan_features;
769
770 features = (NETIF_F_SG | NETIF_F_IP_CSUM |
771 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
772 vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
773 NETIF_F_IPV6_CSUM);
774
775 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
776 features |= (NETIF_F_TSO | NETIF_F_TSO6);
777 vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
778 }
779 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
780 features |= NETIF_F_LRO;
781
782 if (esw_cfg->offload_flags & BIT_0) {
783 netdev->features |= features;
784 adapter->rx_csum = 1;
785 if (!(esw_cfg->offload_flags & BIT_1))
786 netdev->features &= ~NETIF_F_TSO;
787 if (!(esw_cfg->offload_flags & BIT_2))
788 netdev->features &= ~NETIF_F_TSO6;
789 } else {
790 netdev->features &= ~features;
791 adapter->rx_csum = 0;
792 }
793
794 netdev->vlan_features = (features & vlan_features);
795}
796
0866d96d
AC
797static int
798qlcnic_check_eswitch_mode(struct qlcnic_adapter *adapter)
799{
800 void __iomem *priv_op;
801 u32 op_mode, priv_level;
802 int err = 0;
803
174240a8
RB
804 err = qlcnic_initialize_nic(adapter);
805 if (err)
806 return err;
807
0866d96d
AC
808 if (adapter->flags & QLCNIC_ADAPTER_INITIALIZED)
809 return 0;
810
811 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
812 op_mode = readl(priv_op);
813 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
814
815 if (op_mode == QLC_DEV_DRV_DEFAULT)
816 priv_level = QLCNIC_MGMT_FUNC;
817 else
818 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
819
174240a8 820 if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
0866d96d
AC
821 if (priv_level == QLCNIC_MGMT_FUNC) {
822 adapter->op_mode = QLCNIC_MGMT_FUNC;
823 err = qlcnic_init_pci_info(adapter);
824 if (err)
825 return err;
826 /* Set privilege level for other functions */
827 qlcnic_set_function_modes(adapter);
828 dev_info(&adapter->pdev->dev,
829 "HAL Version: %d, Management function\n",
830 adapter->fw_hal_version);
831 } else if (priv_level == QLCNIC_PRIV_FUNC) {
832 adapter->op_mode = QLCNIC_PRIV_FUNC;
833 dev_info(&adapter->pdev->dev,
834 "HAL Version: %d, Privileged function\n",
835 adapter->fw_hal_version);
836 }
174240a8 837 }
0866d96d
AC
838
839 adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
840
841 return err;
842}
843
0325d69b
RB
844static int
845qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
846{
847 struct qlcnic_esw_func_cfg esw_cfg;
848 struct qlcnic_npar_info *npar;
849 u8 i;
850
174240a8 851 if (adapter->need_fw_reset)
0325d69b
RB
852 return 0;
853
854 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
855 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
856 continue;
857 memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
858 esw_cfg.pci_func = i;
859 esw_cfg.offload_flags = BIT_0;
7373373d 860 esw_cfg.mac_override = BIT_0;
ee07c1a7 861 esw_cfg.promisc_mode = BIT_0;
0325d69b
RB
862 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
863 esw_cfg.offload_flags |= (BIT_1 | BIT_2);
864 if (qlcnic_config_switch_port(adapter, &esw_cfg))
865 return -EIO;
866 npar = &adapter->npars[i];
867 npar->pvid = esw_cfg.vlan_id;
7373373d 868 npar->mac_override = esw_cfg.mac_override;
0325d69b
RB
869 npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
870 npar->discard_tagged = esw_cfg.discard_tagged;
871 npar->promisc_mode = esw_cfg.promisc_mode;
872 npar->offload_flags = esw_cfg.offload_flags;
873 }
874
875 return 0;
876}
877
4e8acb01
RB
878static int
879qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
880 struct qlcnic_npar_info *npar, int pci_func)
881{
882 struct qlcnic_esw_func_cfg esw_cfg;
883 esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
884 esw_cfg.pci_func = pci_func;
885 esw_cfg.vlan_id = npar->pvid;
7373373d 886 esw_cfg.mac_override = npar->mac_override;
4e8acb01
RB
887 esw_cfg.discard_tagged = npar->discard_tagged;
888 esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
889 esw_cfg.offload_flags = npar->offload_flags;
890 esw_cfg.promisc_mode = npar->promisc_mode;
891 if (qlcnic_config_switch_port(adapter, &esw_cfg))
892 return -EIO;
893
894 esw_cfg.op_mode = QLCNIC_ADD_VLAN;
895 if (qlcnic_config_switch_port(adapter, &esw_cfg))
896 return -EIO;
897
898 return 0;
899}
900
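/*
 * After a firmware reset, replay the cached NPAR configuration: per-function
 * TX bandwidth limits, port mirroring and eswitch port settings are pushed
 * back to the firmware from adapter->npars.
 */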
cea8975e
AC
901static int
902qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
903{
4e8acb01 904 int i, err;
cea8975e
AC
905 struct qlcnic_npar_info *npar;
906 struct qlcnic_info nic_info;
907
174240a8 908 if (!adapter->need_fw_reset)
cea8975e
AC
909 return 0;
910
4e8acb01
RB
911 /* Set the NPAR config data after FW reset */
912 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
913 npar = &adapter->npars[i];
914 if (npar->type != QLCNIC_TYPE_NIC)
915 continue;
916 err = qlcnic_get_nic_info(adapter, &nic_info, i);
917 if (err)
918 return err;
919 nic_info.min_tx_bw = npar->min_bw;
920 nic_info.max_tx_bw = npar->max_bw;
921 err = qlcnic_set_nic_info(adapter, &nic_info);
922 if (err)
923 return err;
cea8975e 924
4e8acb01
RB
925 if (npar->enable_pm) {
926 err = qlcnic_config_port_mirroring(adapter,
927 npar->dest_npar, 1, i);
928 if (err)
929 return err;
cea8975e 930 }
4e8acb01
RB
931 err = qlcnic_reset_eswitch_config(adapter, npar, i);
932 if (err)
933 return err;
cea8975e 934 }
4e8acb01 935 return 0;
cea8975e
AC
936}
937
78f84e1a
AKS
938static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
939{
940 u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
941 u32 npar_state;
942
943 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
944 return 0;
945
946 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
947 while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
948 msleep(1000);
949 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
950 }
951 if (!npar_opt_timeo) {
952 dev_err(&adapter->pdev->dev,
 953 "Timed out waiting for NPAR state to become operational\n");
954 return -EIO;
955 }
956 return 0;
957}
958
174240a8
RB
959static int
960qlcnic_set_mgmt_operations(struct qlcnic_adapter *adapter)
961{
962 int err;
963
964 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
965 adapter->op_mode != QLCNIC_MGMT_FUNC)
966 return 0;
967
968 err = qlcnic_set_default_offload_settings(adapter);
969 if (err)
970 return err;
971
972 err = qlcnic_reset_npar_config(adapter);
973 if (err)
974 return err;
975
976 qlcnic_dev_set_npar_ready(adapter);
977
978 return err;
979}
980
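/*
 * Bring the firmware up: if this function wins the right to initialize
 * the device it loads the image (from flash, or from a firmware file when
 * load_fw_file is set), initializes and boots it, then marks the device
 * READY; non-initializing functions skip straight to the firmware status
 * check.  Eswitch/NPAR management setup and option parsing follow in both
 * cases.
 */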
af19b491
AKS
981static int
982qlcnic_start_firmware(struct qlcnic_adapter *adapter)
983{
d4066833 984 int err;
af19b491 985
aa5e18c0
SC
986 err = qlcnic_can_start_firmware(adapter);
987 if (err < 0)
988 return err;
989 else if (!err)
d4066833 990 goto check_fw_status;
af19b491 991
4d5bdb38
AKS
992 if (load_fw_file)
993 qlcnic_request_firmware(adapter);
8f891387 994 else {
8cfdce08
SC
995 err = qlcnic_check_flash_fw_ver(adapter);
996 if (err)
8f891387 997 goto err_out;
998
4d5bdb38 999 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
8f891387 1000 }
af19b491
AKS
1001
1002 err = qlcnic_need_fw_reset(adapter);
af19b491 1003 if (err == 0)
4e70812b 1004 goto check_fw_status;
af19b491 1005
d4066833
SC
1006 err = qlcnic_pinit_from_rom(adapter);
1007 if (err)
1008 goto err_out;
af19b491
AKS
1009
1010 err = qlcnic_load_firmware(adapter);
1011 if (err)
1012 goto err_out;
1013
1014 qlcnic_release_firmware(adapter);
d4066833 1015 QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);
af19b491 1016
d4066833
SC
1017check_fw_status:
1018 err = qlcnic_check_fw_status(adapter);
af19b491
AKS
1019 if (err)
1020 goto err_out;
1021
1022 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
6df900e9 1023 qlcnic_idc_debug_info(adapter, 1);
b18971d1 1024
0866d96d
AC
1025 err = qlcnic_check_eswitch_mode(adapter);
1026 if (err) {
1027 dev_err(&adapter->pdev->dev,
1028 "Memory allocation failed for eswitch\n");
1029 goto err_out;
1030 }
174240a8
RB
1031 err = qlcnic_set_mgmt_operations(adapter);
1032 if (err)
1033 goto err_out;
1034
1035 qlcnic_check_options(adapter);
af19b491
AKS
1036 adapter->need_fw_reset = 0;
1037
a7fc948f
AKS
1038 qlcnic_release_firmware(adapter);
1039 return 0;
af19b491
AKS
1040
1041err_out:
a7fc948f
AKS
1042 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
1043 dev_err(&adapter->pdev->dev, "Device state set to failed\n");
0866d96d 1044
af19b491
AKS
1045 qlcnic_release_firmware(adapter);
1046 return err;
1047}
1048
1049static int
1050qlcnic_request_irq(struct qlcnic_adapter *adapter)
1051{
1052 irq_handler_t handler;
1053 struct qlcnic_host_sds_ring *sds_ring;
1054 int err, ring;
1055
1056 unsigned long flags = 0;
1057 struct net_device *netdev = adapter->netdev;
1058 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1059
7eb9855d
AKS
1060 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1061 handler = qlcnic_tmp_intr;
1062 if (!QLCNIC_IS_MSI_FAMILY(adapter))
1063 flags |= IRQF_SHARED;
1064
1065 } else {
1066 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1067 handler = qlcnic_msix_intr;
1068 else if (adapter->flags & QLCNIC_MSI_ENABLED)
1069 handler = qlcnic_msi_intr;
1070 else {
1071 flags |= IRQF_SHARED;
1072 handler = qlcnic_intr;
1073 }
af19b491
AKS
1074 }
1075 adapter->irq = netdev->irq;
1076
1077 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1078 sds_ring = &recv_ctx->sds_rings[ring];
1079 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
1080 err = request_irq(sds_ring->irq, handler,
1081 flags, sds_ring->name, sds_ring);
1082 if (err)
1083 return err;
1084 }
1085
1086 return 0;
1087}
1088
1089static void
1090qlcnic_free_irq(struct qlcnic_adapter *adapter)
1091{
1092 int ring;
1093 struct qlcnic_host_sds_ring *sds_ring;
1094
1095 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1096
1097 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1098 sds_ring = &recv_ctx->sds_rings[ring];
1099 free_irq(sds_ring->irq, sds_ring);
1100 }
1101}
1102
1103static void
1104qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
1105{
1106 adapter->coal.flags = QLCNIC_INTR_DEFAULT;
1107 adapter->coal.normal.data.rx_time_us =
1108 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
1109 adapter->coal.normal.data.rx_packets =
1110 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
1111 adapter->coal.normal.data.tx_time_us =
1112 QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
1113 adapter->coal.normal.data.tx_packets =
1114 QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
1115}
1116
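/*
 * Bring the data path up: apply the eswitch port configuration, create
 * the firmware RX/TX context, post RX buffers, program the multicast
 * list and MTU, configure RSS, interrupt coalescing and (if enabled)
 * hardware LRO, then enable NAPI and request a link event.
 */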
1117static int
1118__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1119{
8a15ad1f
AKS
1120 int ring;
1121 struct qlcnic_host_rds_ring *rds_ring;
1122
af19b491
AKS
1123 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1124 return -EIO;
1125
8a15ad1f
AKS
1126 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1127 return 0;
0325d69b
RB
1128 if (qlcnic_set_eswitch_port_config(adapter))
1129 return -EIO;
8a15ad1f
AKS
1130
1131 if (qlcnic_fw_create_ctx(adapter))
1132 return -EIO;
1133
1134 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1135 rds_ring = &adapter->recv_ctx.rds_rings[ring];
1136 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
1137 }
1138
af19b491
AKS
1139 qlcnic_set_multi(netdev);
1140 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
1141
1142 adapter->ahw.linkup = 0;
1143
1144 if (adapter->max_sds_rings > 1)
1145 qlcnic_config_rss(adapter, 1);
1146
1147 qlcnic_config_intr_coalesce(adapter);
1148
24763d80 1149 if (netdev->features & NETIF_F_LRO)
af19b491
AKS
1150 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
1151
1152 qlcnic_napi_enable(adapter);
1153
1154 qlcnic_linkevent_request(adapter, 1);
1155
68bf1c68 1156 adapter->reset_context = 0;
af19b491
AKS
1157 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1158 return 0;
1159}
1160
1161/* Used during resume and by the firmware recovery module. */
1162
1163static int
1164qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1165{
1166 int err = 0;
1167
1168 rtnl_lock();
1169 if (netif_running(netdev))
1170 err = __qlcnic_up(adapter, netdev);
1171 rtnl_unlock();
1172
1173 return err;
1174}
1175
1176static void
1177__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1178{
1179 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1180 return;
1181
1182 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
1183 return;
1184
1185 smp_mb();
1186 spin_lock(&adapter->tx_clean_lock);
1187 netif_carrier_off(netdev);
1188 netif_tx_disable(netdev);
1189
1190 qlcnic_free_mac_list(adapter);
1191
b5e5492c
AKS
1192 if (adapter->fhash.fnum)
1193 qlcnic_delete_lb_filters(adapter);
1194
af19b491
AKS
1195 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
1196
1197 qlcnic_napi_disable(adapter);
1198
8a15ad1f
AKS
1199 qlcnic_fw_destroy_ctx(adapter);
1200
1201 qlcnic_reset_rx_buffers_list(adapter);
af19b491
AKS
1202 qlcnic_release_tx_buffers(adapter);
1203 spin_unlock(&adapter->tx_clean_lock);
1204}
1205
1206/* Used during suspend and by the firmware recovery module */
1207
1208static void
1209qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1210{
1211 rtnl_lock();
1212 if (netif_running(netdev))
1213 __qlcnic_down(adapter, netdev);
1214 rtnl_unlock();
1215
1216}
1217
1218static int
1219qlcnic_attach(struct qlcnic_adapter *adapter)
1220{
1221 struct net_device *netdev = adapter->netdev;
1222 struct pci_dev *pdev = adapter->pdev;
8a15ad1f 1223 int err;
af19b491
AKS
1224
1225 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
1226 return 0;
1227
af19b491
AKS
1228 err = qlcnic_napi_add(adapter, netdev);
1229 if (err)
1230 return err;
1231
1232 err = qlcnic_alloc_sw_resources(adapter);
1233 if (err) {
1234 dev_err(&pdev->dev, "Error in setting sw resources\n");
8a15ad1f 1235 goto err_out_napi_del;
af19b491
AKS
1236 }
1237
1238 err = qlcnic_alloc_hw_resources(adapter);
1239 if (err) {
1240 dev_err(&pdev->dev, "Error in setting hw resources\n");
1241 goto err_out_free_sw;
1242 }
1243
af19b491
AKS
1244 err = qlcnic_request_irq(adapter);
1245 if (err) {
1246 dev_err(&pdev->dev, "failed to setup interrupt\n");
8a15ad1f 1247 goto err_out_free_hw;
af19b491
AKS
1248 }
1249
1250 qlcnic_init_coalesce_defaults(adapter);
1251
1252 qlcnic_create_sysfs_entries(adapter);
1253
1254 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
1255 return 0;
1256
8a15ad1f 1257err_out_free_hw:
af19b491
AKS
1258 qlcnic_free_hw_resources(adapter);
1259err_out_free_sw:
1260 qlcnic_free_sw_resources(adapter);
8a15ad1f
AKS
1261err_out_napi_del:
1262 qlcnic_napi_del(adapter);
af19b491
AKS
1263 return err;
1264}
1265
1266static void
1267qlcnic_detach(struct qlcnic_adapter *adapter)
1268{
1269 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1270 return;
1271
1272 qlcnic_remove_sysfs_entries(adapter);
1273
1274 qlcnic_free_hw_resources(adapter);
1275 qlcnic_release_rx_buffers(adapter);
1276 qlcnic_free_irq(adapter);
1277 qlcnic_napi_del(adapter);
1278 qlcnic_free_sw_resources(adapter);
1279
1280 adapter->is_up = 0;
1281}
1282
7eb9855d
AKS
1283void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
1284{
1285 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1286 struct qlcnic_host_sds_ring *sds_ring;
1287 int ring;
1288
78ad3892 1289 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
cdaff185
AKS
1290 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1291 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1292 sds_ring = &adapter->recv_ctx.sds_rings[ring];
1293 qlcnic_disable_int(sds_ring);
1294 }
7eb9855d
AKS
1295 }
1296
8a15ad1f
AKS
1297 qlcnic_fw_destroy_ctx(adapter);
1298
7eb9855d
AKS
1299 qlcnic_detach(adapter);
1300
1301 adapter->diag_test = 0;
1302 adapter->max_sds_rings = max_sds_rings;
1303
1304 if (qlcnic_attach(adapter))
34ce3626 1305 goto out;
7eb9855d
AKS
1306
1307 if (netif_running(netdev))
1308 __qlcnic_up(adapter, netdev);
34ce3626 1309out:
7eb9855d
AKS
1310 netif_device_attach(netdev);
1311}
1312
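/*
 * Set up a minimal context for a diagnostic test: the normal data path is
 * torn down, the adapter is re-attached with a single SDS ring in diag
 * mode, the firmware context is recreated, RX buffers are posted and, for
 * the interrupt test, the ring interrupts are enabled.
 * qlcnic_diag_free_res() undoes this and restores the data path.
 */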
1313int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1314{
1315 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1316 struct qlcnic_host_sds_ring *sds_ring;
8a15ad1f 1317 struct qlcnic_host_rds_ring *rds_ring;
7eb9855d
AKS
1318 int ring;
1319 int ret;
1320
1321 netif_device_detach(netdev);
1322
1323 if (netif_running(netdev))
1324 __qlcnic_down(adapter, netdev);
1325
1326 qlcnic_detach(adapter);
1327
1328 adapter->max_sds_rings = 1;
1329 adapter->diag_test = test;
1330
1331 ret = qlcnic_attach(adapter);
34ce3626
AKS
1332 if (ret) {
1333 netif_device_attach(netdev);
7eb9855d 1334 return ret;
34ce3626 1335 }
7eb9855d 1336
8a15ad1f
AKS
1337 ret = qlcnic_fw_create_ctx(adapter);
1338 if (ret) {
1339 qlcnic_detach(adapter);
57e46248 1340 netif_device_attach(netdev);
8a15ad1f
AKS
1341 return ret;
1342 }
1343
1344 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1345 rds_ring = &adapter->recv_ctx.rds_rings[ring];
1346 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
1347 }
1348
cdaff185
AKS
1349 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1350 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1351 sds_ring = &adapter->recv_ctx.sds_rings[ring];
1352 qlcnic_enable_int(sds_ring);
1353 }
7eb9855d 1354 }
78ad3892 1355 set_bit(__QLCNIC_DEV_UP, &adapter->state);
7eb9855d
AKS
1356
1357 return 0;
1358}
1359
68bf1c68
AKS
1360/* Reset context in hardware only */
1361static int
1362qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
1363{
1364 struct net_device *netdev = adapter->netdev;
1365
1366 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1367 return -EBUSY;
1368
1369 netif_device_detach(netdev);
1370
1371 qlcnic_down(adapter, netdev);
1372
1373 qlcnic_up(adapter, netdev);
1374
1375 netif_device_attach(netdev);
1376
1377 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1378 return 0;
1379}
1380
af19b491
AKS
1381int
1382qlcnic_reset_context(struct qlcnic_adapter *adapter)
1383{
1384 int err = 0;
1385 struct net_device *netdev = adapter->netdev;
1386
1387 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1388 return -EBUSY;
1389
1390 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
1391
1392 netif_device_detach(netdev);
1393
1394 if (netif_running(netdev))
1395 __qlcnic_down(adapter, netdev);
1396
1397 qlcnic_detach(adapter);
1398
1399 if (netif_running(netdev)) {
1400 err = qlcnic_attach(adapter);
1401 if (!err)
34ce3626 1402 __qlcnic_up(adapter, netdev);
af19b491
AKS
1403 }
1404
1405 netif_device_attach(netdev);
1406 }
1407
af19b491
AKS
1408 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1409 return err;
1410}
1411
1412static int
1413qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1bb09fb9 1414 struct net_device *netdev, u8 pci_using_dac)
af19b491
AKS
1415{
1416 int err;
1417 struct pci_dev *pdev = adapter->pdev;
1418
1419 adapter->rx_csum = 1;
1420 adapter->mc_enabled = 0;
1421 adapter->max_mc_count = 38;
1422
1423 netdev->netdev_ops = &qlcnic_netdev_ops;
ef71ff83 1424 netdev->watchdog_timeo = 5*HZ;
af19b491
AKS
1425
1426 qlcnic_change_mtu(netdev, netdev->mtu);
1427
1428 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1429
2e9d722d 1430 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
d5790663 1431 NETIF_F_IPV6_CSUM | NETIF_F_GRO | NETIF_F_HW_VLAN_RX);
2e9d722d 1432 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
ac8d0c4f
AC
1433 NETIF_F_IPV6_CSUM);
1434
1435 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
1436 netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
1437 netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
1438 }
af19b491 1439
1bb09fb9 1440 if (pci_using_dac) {
af19b491
AKS
1441 netdev->features |= NETIF_F_HIGHDMA;
1442 netdev->vlan_features |= NETIF_F_HIGHDMA;
1443 }
1444
1445 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
1446 netdev->features |= (NETIF_F_HW_VLAN_TX);
1447
1448 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
1449 netdev->features |= NETIF_F_LRO;
af19b491
AKS
1450 netdev->irq = adapter->msix_entries[0].vector;
1451
af19b491 1452 netif_carrier_off(netdev);
af19b491
AKS
1453
1454 err = register_netdev(netdev);
1455 if (err) {
1456 dev_err(&pdev->dev, "failed to register net device\n");
1457 return err;
1458 }
1459
1460 return 0;
1461}
1462
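/*
 * Prefer a 64-bit DMA mask and fall back to 32-bit; *pci_using_dac
 * records which one succeeded so the HIGHDMA netdev features can be set.
 */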
1bb09fb9
AKS
1463static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
1464{
1465 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1466 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1467 *pci_using_dac = 1;
1468 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
1469 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1470 *pci_using_dac = 0;
1471 else {
1472 dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
1473 return -EIO;
1474 }
1475
1476 return 0;
1477}
1478
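/*
 * PCI probe: enable the device, set the DMA mask, map BAR 0, read the
 * board configuration, start the firmware through the per-mode nic_ops,
 * set up interrupts, register the net_device, and schedule the periodic
 * firmware poll work that monitors device state.
 */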
af19b491
AKS
1479static int __devinit
1480qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1481{
1482 struct net_device *netdev = NULL;
1483 struct qlcnic_adapter *adapter = NULL;
1484 int err;
af19b491 1485 uint8_t revision_id;
1bb09fb9 1486 uint8_t pci_using_dac;
da48e6c3 1487 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
af19b491
AKS
1488
1489 err = pci_enable_device(pdev);
1490 if (err)
1491 return err;
1492
1493 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1494 err = -ENODEV;
1495 goto err_out_disable_pdev;
1496 }
1497
1bb09fb9
AKS
1498 err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
1499 if (err)
1500 goto err_out_disable_pdev;
1501
af19b491
AKS
1502 err = pci_request_regions(pdev, qlcnic_driver_name);
1503 if (err)
1504 goto err_out_disable_pdev;
1505
1506 pci_set_master(pdev);
451724c8 1507 pci_enable_pcie_error_reporting(pdev);
af19b491
AKS
1508
1509 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1510 if (!netdev) {
1511 dev_err(&pdev->dev, "failed to allocate net_device\n");
1512 err = -ENOMEM;
1513 goto err_out_free_res;
1514 }
1515
1516 SET_NETDEV_DEV(netdev, &pdev->dev);
1517
1518 adapter = netdev_priv(netdev);
1519 adapter->netdev = netdev;
1520 adapter->pdev = pdev;
6df900e9 1521 adapter->dev_rst_time = jiffies;
af19b491
AKS
1522
1523 revision_id = pdev->revision;
1524 adapter->ahw.revision_id = revision_id;
1525
1526 rwlock_init(&adapter->ahw.crb_lock);
1527 mutex_init(&adapter->ahw.mem_lock);
1528
1529 spin_lock_init(&adapter->tx_clean_lock);
1530 INIT_LIST_HEAD(&adapter->mac_list);
1531
1532 err = qlcnic_setup_pci_map(adapter);
1533 if (err)
1534 goto err_out_free_netdev;
1535
1536 /* This will be reset for mezz cards */
2e9d722d 1537 adapter->portnum = adapter->ahw.pci_func;
af19b491
AKS
1538
1539 err = qlcnic_get_board_info(adapter);
1540 if (err) {
1541 dev_err(&pdev->dev, "Error getting board config info.\n");
1542 goto err_out_iounmap;
1543 }
1544
8cfdce08
SC
1545 err = qlcnic_setup_idc_param(adapter);
1546 if (err)
b3a24649 1547 goto err_out_iounmap;
af19b491 1548
9f26f547 1549 err = adapter->nic_ops->start_firmware(adapter);
a7fc948f
AKS
1550 if (err) {
 1551 dev_err(&pdev->dev, "Loading fw failed. Please reboot\n");
af19b491 1552 goto err_out_decr_ref;
a7fc948f 1553 }
af19b491 1554
da48e6c3
RB
1555 if (qlcnic_read_mac_addr(adapter))
1556 dev_warn(&pdev->dev, "failed to read mac addr\n");
1557
1558 if (adapter->portnum == 0) {
1559 get_brd_name(adapter, brd_name);
1560
1561 pr_info("%s: %s Board Chip rev 0x%x\n",
1562 module_name(THIS_MODULE),
1563 brd_name, adapter->ahw.revision_id);
1564 }
1565
af19b491
AKS
1566 qlcnic_clear_stats(adapter);
1567
1568 qlcnic_setup_intr(adapter);
1569
1bb09fb9 1570 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
af19b491
AKS
1571 if (err)
1572 goto err_out_disable_msi;
1573
1574 pci_set_drvdata(pdev, adapter);
1575
1576 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1577
1578 switch (adapter->ahw.port_type) {
1579 case QLCNIC_GBE:
1580 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1581 adapter->netdev->name);
1582 break;
1583 case QLCNIC_XGBE:
1584 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1585 adapter->netdev->name);
1586 break;
1587 }
1588
b5e5492c 1589 qlcnic_alloc_lb_filters_mem(adapter);
af19b491
AKS
1590 qlcnic_create_diag_entries(adapter);
1591
1592 return 0;
1593
1594err_out_disable_msi:
1595 qlcnic_teardown_intr(adapter);
1596
1597err_out_decr_ref:
21854f02 1598 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
1599
1600err_out_iounmap:
1601 qlcnic_cleanup_pci_map(adapter);
1602
1603err_out_free_netdev:
1604 free_netdev(netdev);
1605
1606err_out_free_res:
1607 pci_release_regions(pdev);
1608
1609err_out_disable_pdev:
1610 pci_set_drvdata(pdev, NULL);
1611 pci_disable_device(pdev);
1612 return err;
1613}
1614
1615static void __devexit qlcnic_remove(struct pci_dev *pdev)
1616{
1617 struct qlcnic_adapter *adapter;
1618 struct net_device *netdev;
1619
1620 adapter = pci_get_drvdata(pdev);
1621 if (adapter == NULL)
1622 return;
1623
1624 netdev = adapter->netdev;
1625
1626 qlcnic_cancel_fw_work(adapter);
1627
1628 unregister_netdev(netdev);
1629
af19b491
AKS
1630 qlcnic_detach(adapter);
1631
2e9d722d
AC
1632 if (adapter->npars != NULL)
1633 kfree(adapter->npars);
1634 if (adapter->eswitch != NULL)
1635 kfree(adapter->eswitch);
1636
21854f02 1637 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
1638
1639 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1640
b5e5492c
AKS
1641 qlcnic_free_lb_filters_mem(adapter);
1642
af19b491
AKS
1643 qlcnic_teardown_intr(adapter);
1644
1645 qlcnic_remove_diag_entries(adapter);
1646
1647 qlcnic_cleanup_pci_map(adapter);
1648
1649 qlcnic_release_firmware(adapter);
1650
451724c8 1651 pci_disable_pcie_error_reporting(pdev);
af19b491
AKS
1652 pci_release_regions(pdev);
1653 pci_disable_device(pdev);
1654 pci_set_drvdata(pdev, NULL);
1655
1656 free_netdev(netdev);
1657}
1658static int __qlcnic_shutdown(struct pci_dev *pdev)
1659{
1660 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1661 struct net_device *netdev = adapter->netdev;
1662 int retval;
1663
1664 netif_device_detach(netdev);
1665
1666 qlcnic_cancel_fw_work(adapter);
1667
1668 if (netif_running(netdev))
1669 qlcnic_down(adapter, netdev);
1670
21854f02 1671 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
1672
1673 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1674
1675 retval = pci_save_state(pdev);
1676 if (retval)
1677 return retval;
1678
1679 if (qlcnic_wol_supported(adapter)) {
1680 pci_enable_wake(pdev, PCI_D3cold, 1);
1681 pci_enable_wake(pdev, PCI_D3hot, 1);
1682 }
1683
1684 return 0;
1685}
1686
1687static void qlcnic_shutdown(struct pci_dev *pdev)
1688{
1689 if (__qlcnic_shutdown(pdev))
1690 return;
1691
1692 pci_disable_device(pdev);
1693}
1694
1695#ifdef CONFIG_PM
1696static int
1697qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1698{
1699 int retval;
1700
1701 retval = __qlcnic_shutdown(pdev);
1702 if (retval)
1703 return retval;
1704
1705 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1706 return 0;
1707}
1708
1709static int
1710qlcnic_resume(struct pci_dev *pdev)
1711{
1712 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1713 struct net_device *netdev = adapter->netdev;
1714 int err;
1715
1716 err = pci_enable_device(pdev);
1717 if (err)
1718 return err;
1719
1720 pci_set_power_state(pdev, PCI_D0);
1721 pci_set_master(pdev);
1722 pci_restore_state(pdev);
1723
9f26f547 1724 err = adapter->nic_ops->start_firmware(adapter);
af19b491
AKS
1725 if (err) {
1726 dev_err(&pdev->dev, "failed to start firmware\n");
1727 return err;
1728 }
1729
1730 if (netif_running(netdev)) {
af19b491
AKS
1731 err = qlcnic_up(adapter, netdev);
1732 if (err)
52486a3a 1733 goto done;
af19b491 1734
aec1e845 1735 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
af19b491 1736 }
52486a3a 1737done:
af19b491
AKS
1738 netif_device_attach(netdev);
1739 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1740 return 0;
af19b491
AKS
1741}
1742#endif
1743
1744static int qlcnic_open(struct net_device *netdev)
1745{
1746 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1747 int err;
1748
af19b491
AKS
1749 err = qlcnic_attach(adapter);
1750 if (err)
1751 return err;
1752
1753 err = __qlcnic_up(adapter, netdev);
1754 if (err)
1755 goto err_out;
1756
1757 netif_start_queue(netdev);
1758
1759 return 0;
1760
1761err_out:
1762 qlcnic_detach(adapter);
1763 return err;
1764}
1765
1766/*
1767 * qlcnic_close - Disables a network interface entry point
1768 */
1769static int qlcnic_close(struct net_device *netdev)
1770{
1771 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1772
1773 __qlcnic_down(adapter, netdev);
1774 return 0;
1775}
1776
b5e5492c
AKS
1777static void
1778qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
1779{
1780 void *head;
1781 int i;
1782
1783 if (!qlcnic_mac_learn)
1784 return;
1785
1786 spin_lock_init(&adapter->mac_learn_lock);
1787
1788 head = kcalloc(QLCNIC_LB_MAX_FILTERS, sizeof(struct hlist_head),
1789 GFP_KERNEL);
1790 if (!head)
1791 return;
1792
1793 adapter->fhash.fmax = QLCNIC_LB_MAX_FILTERS;
1794 adapter->fhash.fhead = (struct hlist_head *)head;
1795
1796 for (i = 0; i < adapter->fhash.fmax; i++)
1797 INIT_HLIST_HEAD(&adapter->fhash.fhead[i]);
1798}
1799
1800static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter)
1801{
1802 if (adapter->fhash.fmax && adapter->fhash.fhead)
1803 kfree(adapter->fhash.fhead);
1804
1805 adapter->fhash.fhead = NULL;
1806 adapter->fhash.fmax = 0;
1807}
1808
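/*
 * Post a MAC-learning request on the TX ring: a qlcnic_nic_req descriptor
 * carrying the source MAC address (and the VLAN id when non-zero) is
 * queued so the firmware adds the address to its filter table.
 */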
1809static void qlcnic_change_filter(struct qlcnic_adapter *adapter,
7e56cac4 1810 u64 uaddr, __le16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
b5e5492c
AKS
1811{
1812 struct cmd_desc_type0 *hwdesc;
1813 struct qlcnic_nic_req *req;
1814 struct qlcnic_mac_req *mac_req;
7e56cac4 1815 struct qlcnic_vlan_req *vlan_req;
b5e5492c
AKS
1816 u32 producer;
1817 u64 word;
1818
1819 producer = tx_ring->producer;
1820 hwdesc = &tx_ring->desc_head[tx_ring->producer];
1821
1822 req = (struct qlcnic_nic_req *)hwdesc;
1823 memset(req, 0, sizeof(struct qlcnic_nic_req));
1824 req->qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
1825
1826 word = QLCNIC_MAC_EVENT | ((u64)(adapter->portnum) << 16);
1827 req->req_hdr = cpu_to_le64(word);
1828
1829 mac_req = (struct qlcnic_mac_req *)&(req->words[0]);
03c5d770 1830 mac_req->op = vlan_id ? QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_ADD;
b5e5492c
AKS
1831 memcpy(mac_req->mac_addr, &uaddr, ETH_ALEN);
1832
7e56cac4
SC
1833 vlan_req = (struct qlcnic_vlan_req *)&req->words[1];
1834 vlan_req->vlan_id = vlan_id;
03c5d770 1835
b5e5492c
AKS
1836 tx_ring->producer = get_next_index(producer, tx_ring->num_desc);
1837}
1838
1839#define QLCNIC_MAC_HASH(MAC)\
1840 ((((MAC) & 0x70000) >> 0x10) | (((MAC) & 0x70000000000ULL) >> 0x25))
1841
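/*
 * Software side of MAC learning: look up the frame's source address (and
 * VLAN on eswitch-enabled devices) in the driver hash table, refresh or
 * re-add an aging entry, and for a new address allocate a filter node and
 * notify the firmware via qlcnic_change_filter().
 */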
1842static void
1843qlcnic_send_filter(struct qlcnic_adapter *adapter,
1844 struct qlcnic_host_tx_ring *tx_ring,
1845 struct cmd_desc_type0 *first_desc,
1846 struct sk_buff *skb)
1847{
1848 struct ethhdr *phdr = (struct ethhdr *)(skb->data);
1849 struct qlcnic_filter *fil, *tmp_fil;
1850 struct hlist_node *tmp_hnode, *n;
1851 struct hlist_head *head;
1852 u64 src_addr = 0;
7e56cac4 1853 __le16 vlan_id = 0;
b5e5492c
AKS
1854 u8 hindex;
1855
1856 if (!compare_ether_addr(phdr->h_source, adapter->mac_addr))
1857 return;
1858
1859 if (adapter->fhash.fnum >= adapter->fhash.fmax)
1860 return;
1861
03c5d770
AKS
 1862 /* Only NPAR capable devices support VLAN based learning */
1863 if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
1864 vlan_id = first_desc->vlan_TCI;
b5e5492c
AKS
1865 memcpy(&src_addr, phdr->h_source, ETH_ALEN);
1866 hindex = QLCNIC_MAC_HASH(src_addr) & (QLCNIC_LB_MAX_FILTERS - 1);
1867 head = &(adapter->fhash.fhead[hindex]);
1868
1869 hlist_for_each_entry_safe(tmp_fil, tmp_hnode, n, head, fnode) {
03c5d770
AKS
1870 if (!memcmp(tmp_fil->faddr, &src_addr, ETH_ALEN) &&
1871 tmp_fil->vlan_id == vlan_id) {
e5edb7b1 1872
1873 if (jiffies >
1874 (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
1875 qlcnic_change_filter(adapter, src_addr, vlan_id,
1876 tx_ring);
b5e5492c
AKS
1877 tmp_fil->ftime = jiffies;
1878 return;
1879 }
1880 }
1881
1882 fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
1883 if (!fil)
1884 return;
1885
03c5d770 1886 qlcnic_change_filter(adapter, src_addr, vlan_id, tx_ring);
b5e5492c
AKS
1887
1888 fil->ftime = jiffies;
03c5d770 1889 fil->vlan_id = vlan_id;
b5e5492c
AKS
1890 memcpy(fil->faddr, &src_addr, ETH_ALEN);
1891 spin_lock(&adapter->mac_learn_lock);
1892 hlist_add_head(&(fil->fnode), head);
1893 adapter->fhash.fnum++;
1894 spin_unlock(&adapter->mac_learn_lock);
1895}
1896
af19b491
AKS
1897static void
1898qlcnic_tso_check(struct net_device *netdev,
1899 struct qlcnic_host_tx_ring *tx_ring,
1900 struct cmd_desc_type0 *first_desc,
1901 struct sk_buff *skb)
1902{
1903 u8 opcode = TX_ETHER_PKT;
1904 __be16 protocol = skb->protocol;
8cf61f89
AKS
1905 u16 flags = 0;
1906 int copied, offset, copy_len, hdr_len = 0, tso = 0;
af19b491
AKS
1907 struct cmd_desc_type0 *hwdesc;
1908 struct vlan_ethhdr *vh;
8bfe8b91 1909 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2e9d722d 1910 u32 producer = tx_ring->producer;
7e56cac4
SC
1911 __le16 vlan_oob = first_desc->flags_opcode &
1912 cpu_to_le16(FLAGS_VLAN_OOB);
af19b491 1913
2e9d722d
AC
1914 if (*(skb->data) & BIT_0) {
1915 flags |= BIT_0;
1916 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
1917 }
1918
af19b491
AKS
1919 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1920 skb_shinfo(skb)->gso_size > 0) {
1921
1922 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1923
1924 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1925 first_desc->total_hdr_length = hdr_len;
1926 if (vlan_oob) {
1927 first_desc->total_hdr_length += VLAN_HLEN;
1928 first_desc->tcp_hdr_offset = VLAN_HLEN;
1929 first_desc->ip_hdr_offset = VLAN_HLEN;
1930 /* Only in case of TSO on vlan device */
1931 flags |= FLAGS_VLAN_TAGGED;
1932 }
1933
1934 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1935 TX_TCP_LSO6 : TX_TCP_LSO;
1936 tso = 1;
1937
1938 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1939 u8 l4proto;
1940
1941 if (protocol == cpu_to_be16(ETH_P_IP)) {
1942 l4proto = ip_hdr(skb)->protocol;
1943
1944 if (l4proto == IPPROTO_TCP)
1945 opcode = TX_TCP_PKT;
1946 else if (l4proto == IPPROTO_UDP)
1947 opcode = TX_UDP_PKT;
1948 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1949 l4proto = ipv6_hdr(skb)->nexthdr;
1950
1951 if (l4proto == IPPROTO_TCP)
1952 opcode = TX_TCPV6_PKT;
1953 else if (l4proto == IPPROTO_UDP)
1954 opcode = TX_UDPV6_PKT;
1955 }
1956 }
1957
1958 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
1959 first_desc->ip_hdr_offset += skb_network_offset(skb);
1960 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1961
1962 if (!tso)
1963 return;
1964
1965 /* For LSO, we need to copy the MAC/IP/TCP headers into
1966 * the descriptor ring
1967 */
af19b491
AKS
1968 copied = 0;
1969 offset = 2;
1970
1971 if (vlan_oob) {
1972 /* Create a TSO vlan header template for firmware */
1973
1974 hwdesc = &tx_ring->desc_head[producer];
1975 tx_ring->cmd_buf_arr[producer].skb = NULL;
1976
1977 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1978 hdr_len + VLAN_HLEN);
1979
1980 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1981 skb_copy_from_linear_data(skb, vh, 12);
1982 vh->h_vlan_proto = htons(ETH_P_8021Q);
7e56cac4
SC
1983 vh->h_vlan_TCI = (__be16)swab16((u16)first_desc->vlan_TCI);
1984
af19b491
AKS
1985 skb_copy_from_linear_data_offset(skb, 12,
1986 (char *)vh + 16, copy_len - 16);
1987
1988 copied = copy_len - VLAN_HLEN;
1989 offset = 0;
1990
1991 producer = get_next_index(producer, tx_ring->num_desc);
1992 }
1993
1994 while (copied < hdr_len) {
1995
1996 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1997 (hdr_len - copied));
1998
1999 hwdesc = &tx_ring->desc_head[producer];
2000 tx_ring->cmd_buf_arr[producer].skb = NULL;
2001
2002 skb_copy_from_linear_data_offset(skb, copied,
2003 (char *)hwdesc + offset, copy_len);
2004
2005 copied += copy_len;
2006 offset = 0;
2007
2008 producer = get_next_index(producer, tx_ring->num_desc);
2009 }
2010
2011 tx_ring->producer = producer;
2012 barrier();
8bfe8b91 2013 adapter->stats.lso_frames++;
af19b491
AKS
2014}
2015
2016static int
2017qlcnic_map_tx_skb(struct pci_dev *pdev,
2018 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
2019{
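	/*
	 * Map the skb head and every page fragment for DMA: frag_array[0]
	 * holds the head mapping, frag_array[i+1] holds fragment i.  On any
	 * mapping failure, unwind whatever was mapped so far and return
	 * -ENOMEM so the caller can drop the packet.
	 */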
2020 struct qlcnic_skb_frag *nf;
2021 struct skb_frag_struct *frag;
2022 int i, nr_frags;
2023 dma_addr_t map;
2024
2025 nr_frags = skb_shinfo(skb)->nr_frags;
2026 nf = &pbuf->frag_array[0];
2027
2028 map = pci_map_single(pdev, skb->data,
2029 skb_headlen(skb), PCI_DMA_TODEVICE);
2030 if (pci_dma_mapping_error(pdev, map))
2031 goto out_err;
2032
2033 nf->dma = map;
2034 nf->length = skb_headlen(skb);
2035
2036 for (i = 0; i < nr_frags; i++) {
2037 frag = &skb_shinfo(skb)->frags[i];
2038 nf = &pbuf->frag_array[i+1];
2039
2040 map = pci_map_page(pdev, frag->page, frag->page_offset,
2041 frag->size, PCI_DMA_TODEVICE);
2042 if (pci_dma_mapping_error(pdev, map))
2043 goto unwind;
2044
2045 nf->dma = map;
2046 nf->length = frag->size;
2047 }
2048
2049 return 0;
2050
2051unwind:
2052 while (--i >= 0) {
2053 nf = &pbuf->frag_array[i+1];
2054 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
2055 }
2056
2057 nf = &pbuf->frag_array[0];
2058 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
2059
2060out_err:
2061 return -ENOMEM;
2062}
2063
8cf61f89
AKS
2064static int
2065qlcnic_check_tx_tagging(struct qlcnic_adapter *adapter,
2066 struct sk_buff *skb,
2067 struct cmd_desc_type0 *first_desc)
2068{
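	/*
	 * Three tagging cases, as inferred from the code below: an 802.1Q
	 * header already present in the frame (copy its TCI into the
	 * descriptor and mark FLAGS_VLAN_TAGGED), an out-of-band tag supplied
	 * by the stack (FLAGS_VLAN_OOB), or no tag at all.  When a PVID is
	 * configured, untagged traffic gets the PVID inserted out-of-band and
	 * tagged traffic is rejected unless QLCNIC_TAGGING_ENABLED is set.
	 */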
2069 u8 opcode = 0;
2070 u16 flags = 0;
2071 __be16 protocol = skb->protocol;
2072 struct vlan_ethhdr *vh;
2073
2074 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
2075 vh = (struct vlan_ethhdr *)skb->data;
2076 protocol = vh->h_vlan_encapsulated_proto;
2077 flags = FLAGS_VLAN_TAGGED;
2078 qlcnic_set_tx_vlan_tci(first_desc, ntohs(vh->h_vlan_TCI));
2079 } else if (vlan_tx_tag_present(skb)) {
2080 flags = FLAGS_VLAN_OOB;
2081 qlcnic_set_tx_vlan_tci(first_desc, vlan_tx_tag_get(skb));
2082 }
2083 if (unlikely(adapter->pvid)) {
2084 if (first_desc->vlan_TCI &&
2085 !(adapter->flags & QLCNIC_TAGGING_ENABLED))
2086 return -EIO;
2087 if (first_desc->vlan_TCI &&
2088 (adapter->flags & QLCNIC_TAGGING_ENABLED))
2089 goto set_flags;
2090
2091 flags = FLAGS_VLAN_OOB;
2092 qlcnic_set_tx_vlan_tci(first_desc, adapter->pvid);
2093 }
2094set_flags:
2095 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
2096 return 0;
2097}
2098
af19b491
AKS
2099static inline void
2100qlcnic_clear_cmddesc(u64 *desc)
2101{
2102 desc[0] = 0ULL;
2103 desc[2] = 0ULL;
8cf61f89 2104 desc[7] = 0ULL;
af19b491
AKS
2105}
2106
cdaff185 2107netdev_tx_t
af19b491
AKS
2108qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
2109{
2110 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2111 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2112 struct qlcnic_cmd_buffer *pbuf;
2113 struct qlcnic_skb_frag *buffrag;
2114 struct cmd_desc_type0 *hwdesc, *first_desc;
2115 struct pci_dev *pdev;
dcb50aff 2116 struct ethhdr *phdr;
af19b491
AKS
2117 int i, k;
2118
2119 u32 producer;
2120 int frag_count, no_of_desc;
2121 u32 num_txd = tx_ring->num_desc;
2122
780ab790
AKS
2123 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
2124 netif_stop_queue(netdev);
2125 return NETDEV_TX_BUSY;
2126 }
2127
fe4d434d 2128 if (adapter->flags & QLCNIC_MACSPOOF) {
dcb50aff
RB
2129 phdr = (struct ethhdr *)skb->data;
2130 if (compare_ether_addr(phdr->h_source,
fe4d434d
SC
2131 adapter->mac_addr))
2132 goto drop_packet;
2133 }
2134
af19b491
AKS
2135 frag_count = skb_shinfo(skb)->nr_frags + 1;
2136
2137	/* 4 fragments per cmd desc */
2138 no_of_desc = (frag_count + 3) >> 2;
2139
ef71ff83 2140 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
af19b491 2141 netif_stop_queue(netdev);
ef71ff83
RB
2142 smp_mb();
2143 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
2144 netif_start_queue(netdev);
2145 else {
2146 adapter->stats.xmit_off++;
2147 return NETDEV_TX_BUSY;
2148 }
af19b491
AKS
2149 }
2150
2151 producer = tx_ring->producer;
2152 pbuf = &tx_ring->cmd_buf_arr[producer];
2153
2154 pdev = adapter->pdev;
2155
8cf61f89
AKS
2156 first_desc = hwdesc = &tx_ring->desc_head[producer];
2157 qlcnic_clear_cmddesc((u64 *)hwdesc);
2158
2159 if (qlcnic_check_tx_tagging(adapter, skb, first_desc))
2160 goto drop_packet;
2161
8ae6df97
AKS
2162 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
2163 adapter->stats.tx_dma_map_error++;
af19b491 2164 goto drop_packet;
8ae6df97 2165 }
af19b491
AKS
2166
2167 pbuf->skb = skb;
2168 pbuf->frag_count = frag_count;
2169
af19b491
AKS
2170 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
2171 qlcnic_set_tx_port(first_desc, adapter->portnum);
2172
2173 for (i = 0; i < frag_count; i++) {
2174
2175 k = i % 4;
2176
2177 if ((k == 0) && (i > 0)) {
2178			/* move to next desc. */
2179 producer = get_next_index(producer, num_txd);
2180 hwdesc = &tx_ring->desc_head[producer];
2181 qlcnic_clear_cmddesc((u64 *)hwdesc);
2182 tx_ring->cmd_buf_arr[producer].skb = NULL;
2183 }
2184
2185 buffrag = &pbuf->frag_array[i];
2186
2187 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
2188 switch (k) {
2189 case 0:
2190 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
2191 break;
2192 case 1:
2193 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
2194 break;
2195 case 2:
2196 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
2197 break;
2198 case 3:
2199 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
2200 break;
2201 }
2202 }
2203
2204 tx_ring->producer = get_next_index(producer, num_txd);
2205
2206 qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
2207
b5e5492c
AKS
2208 if (qlcnic_mac_learn)
2209 qlcnic_send_filter(adapter, tx_ring, first_desc, skb);
2210
af19b491
AKS
2211 qlcnic_update_cmd_producer(adapter, tx_ring);
2212
2213 adapter->stats.txbytes += skb->len;
2214 adapter->stats.xmitcalled++;
2215
2216 return NETDEV_TX_OK;
2217
2218drop_packet:
2219 adapter->stats.txdropped++;
2220 dev_kfree_skb_any(skb);
2221 return NETDEV_TX_OK;
2222}
2223
2224static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
2225{
2226 struct net_device *netdev = adapter->netdev;
2227 u32 temp, temp_state, temp_val;
2228 int rv = 0;
2229
2230 temp = QLCRD32(adapter, CRB_TEMP_STATE);
2231
2232 temp_state = qlcnic_get_temp_state(temp);
2233 temp_val = qlcnic_get_temp_val(temp);
2234
2235 if (temp_state == QLCNIC_TEMP_PANIC) {
2236 dev_err(&netdev->dev,
2237 "Device temperature %d degrees C exceeds"
2238 " maximum allowed. Hardware has been shut down.\n",
2239 temp_val);
2240 rv = 1;
2241 } else if (temp_state == QLCNIC_TEMP_WARN) {
2242 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
2243 dev_err(&netdev->dev,
2244 "Device temperature %d degrees C "
2245 "exceeds operating range."
2246 " Immediate action needed.\n",
2247 temp_val);
2248 }
2249 } else {
2250 if (adapter->temp == QLCNIC_TEMP_WARN) {
2251 dev_info(&netdev->dev,
2252 "Device temperature is now %d degrees C"
2253 " in normal range.\n", temp_val);
2254 }
2255 }
2256 adapter->temp = temp_state;
2257 return rv;
2258}
2259
2260void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
2261{
2262 struct net_device *netdev = adapter->netdev;
2263
2264 if (adapter->ahw.linkup && !linkup) {
69324275 2265 netdev_info(netdev, "NIC Link is down\n");
af19b491
AKS
2266 adapter->ahw.linkup = 0;
2267 if (netif_running(netdev)) {
2268 netif_carrier_off(netdev);
2269 netif_stop_queue(netdev);
2270 }
2271 } else if (!adapter->ahw.linkup && linkup) {
69324275 2272 netdev_info(netdev, "NIC Link is up\n");
af19b491
AKS
2273 adapter->ahw.linkup = 1;
2274 if (netif_running(netdev)) {
2275 netif_carrier_on(netdev);
2276 netif_wake_queue(netdev);
2277 }
2278 }
2279}
2280
2281static void qlcnic_tx_timeout(struct net_device *netdev)
2282{
2283 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2284
2285 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2286 return;
2287
2288 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
af19b491
AKS
2289
2290 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
68bf1c68
AKS
2291 adapter->need_fw_reset = 1;
2292 else
2293 adapter->reset_context = 1;
af19b491
AKS
2294}
2295
2296static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
2297{
2298 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2299 struct net_device_stats *stats = &netdev->stats;
2300
af19b491
AKS
2301 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
2302 stats->tx_packets = adapter->stats.xmitfinished;
7e382594 2303 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
af19b491
AKS
2304 stats->tx_bytes = adapter->stats.txbytes;
2305 stats->rx_dropped = adapter->stats.rxdropped;
2306 stats->tx_dropped = adapter->stats.txdropped;
2307
2308 return stats;
2309}
2310
7eb9855d 2311static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
af19b491 2312{
af19b491
AKS
2313 u32 status;
2314
2315 status = readl(adapter->isr_int_vec);
2316
2317 if (!(status & adapter->int_vec_bit))
2318 return IRQ_NONE;
2319
2320 /* check interrupt state machine, to be sure */
2321 status = readl(adapter->crb_int_state_reg);
2322 if (!ISR_LEGACY_INT_TRIGGERED(status))
2323 return IRQ_NONE;
2324
2325 writel(0xffffffff, adapter->tgt_status_reg);
2326 /* read twice to ensure write is flushed */
2327 readl(adapter->isr_int_vec);
2328 readl(adapter->isr_int_vec);
2329
7eb9855d
AKS
2330 return IRQ_HANDLED;
2331}
2332
2333static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
2334{
2335 struct qlcnic_host_sds_ring *sds_ring = data;
2336 struct qlcnic_adapter *adapter = sds_ring->adapter;
2337
2338 if (adapter->flags & QLCNIC_MSIX_ENABLED)
2339 goto done;
2340 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
2341 writel(0xffffffff, adapter->tgt_status_reg);
2342 goto done;
2343 }
2344
2345 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2346 return IRQ_NONE;
2347
2348done:
2349 adapter->diag_cnt++;
2350 qlcnic_enable_int(sds_ring);
2351 return IRQ_HANDLED;
2352}
2353
2354static irqreturn_t qlcnic_intr(int irq, void *data)
2355{
2356 struct qlcnic_host_sds_ring *sds_ring = data;
2357 struct qlcnic_adapter *adapter = sds_ring->adapter;
2358
2359 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2360 return IRQ_NONE;
2361
af19b491
AKS
2362 napi_schedule(&sds_ring->napi);
2363
2364 return IRQ_HANDLED;
2365}
2366
2367static irqreturn_t qlcnic_msi_intr(int irq, void *data)
2368{
2369 struct qlcnic_host_sds_ring *sds_ring = data;
2370 struct qlcnic_adapter *adapter = sds_ring->adapter;
2371
2372 /* clear interrupt */
2373 writel(0xffffffff, adapter->tgt_status_reg);
2374
2375 napi_schedule(&sds_ring->napi);
2376 return IRQ_HANDLED;
2377}
2378
2379static irqreturn_t qlcnic_msix_intr(int irq, void *data)
2380{
2381 struct qlcnic_host_sds_ring *sds_ring = data;
2382
2383 napi_schedule(&sds_ring->napi);
2384 return IRQ_HANDLED;
2385}
2386
2387static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
2388{
2389 u32 sw_consumer, hw_consumer;
2390 int count = 0, i;
2391 struct qlcnic_cmd_buffer *buffer;
2392 struct pci_dev *pdev = adapter->pdev;
2393 struct net_device *netdev = adapter->netdev;
2394 struct qlcnic_skb_frag *frag;
2395 int done;
2396 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2397
2398 if (!spin_trylock(&adapter->tx_clean_lock))
2399 return 1;
2400
2401 sw_consumer = tx_ring->sw_consumer;
2402 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2403
2404 while (sw_consumer != hw_consumer) {
2405 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
2406 if (buffer->skb) {
2407 frag = &buffer->frag_array[0];
2408 pci_unmap_single(pdev, frag->dma, frag->length,
2409 PCI_DMA_TODEVICE);
2410 frag->dma = 0ULL;
2411 for (i = 1; i < buffer->frag_count; i++) {
2412 frag++;
2413 pci_unmap_page(pdev, frag->dma, frag->length,
2414 PCI_DMA_TODEVICE);
2415 frag->dma = 0ULL;
2416 }
2417
2418 adapter->stats.xmitfinished++;
2419 dev_kfree_skb_any(buffer->skb);
2420 buffer->skb = NULL;
2421 }
2422
2423 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
2424 if (++count >= MAX_STATUS_HANDLE)
2425 break;
2426 }
2427
2428 if (count && netif_running(netdev)) {
2429 tx_ring->sw_consumer = sw_consumer;
2430
2431 smp_mb();
2432
2433 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
af19b491
AKS
2434 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
2435 netif_wake_queue(netdev);
8bfe8b91 2436 adapter->stats.xmit_on++;
af19b491 2437 }
af19b491 2438 }
ef71ff83 2439 adapter->tx_timeo_cnt = 0;
af19b491
AKS
2440 }
2441 /*
2442	 * If everything up to the consumer has been freed, check whether the
2443	 * ring is full. If it is, check whether more needs to be freed and
2444	 * schedule the callback again.
2445	 *
2446	 * This matters when there are two CPUs: one could be freeing
2447	 * descriptors while the other is filling the ring. If the ring is full
2448	 * when we get out of here and the card has already interrupted the
2449	 * host, the host can miss that interrupt.
2450	 *
2451	 * There is still a possible race condition in which the host could
2452	 * miss an interrupt. The card has to take care of this.
2453 */
2454 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2455 done = (sw_consumer == hw_consumer);
2456 spin_unlock(&adapter->tx_clean_lock);
2457
2458 return done;
2459}
2460
2461static int qlcnic_poll(struct napi_struct *napi, int budget)
2462{
2463 struct qlcnic_host_sds_ring *sds_ring =
2464 container_of(napi, struct qlcnic_host_sds_ring, napi);
2465
2466 struct qlcnic_adapter *adapter = sds_ring->adapter;
2467
2468 int tx_complete;
2469 int work_done;
2470
2471 tx_complete = qlcnic_process_cmd_ring(adapter);
2472
2473 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2474
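	/*
	 * Re-arm the interrupt only when the Tx completion work is done, the
	 * Rx work stayed under budget and the device is still up; otherwise
	 * NAPI will poll again.
	 */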
2475 if ((work_done < budget) && tx_complete) {
2476 napi_complete(&sds_ring->napi);
2477 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2478 qlcnic_enable_int(sds_ring);
2479 }
2480
2481 return work_done;
2482}
2483
8f891387 2484static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2485{
2486 struct qlcnic_host_sds_ring *sds_ring =
2487 container_of(napi, struct qlcnic_host_sds_ring, napi);
2488
2489 struct qlcnic_adapter *adapter = sds_ring->adapter;
2490 int work_done;
2491
2492 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2493
2494 if (work_done < budget) {
2495 napi_complete(&sds_ring->napi);
2496 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2497 qlcnic_enable_int(sds_ring);
2498 }
2499
2500 return work_done;
2501}
2502
af19b491
AKS
2503#ifdef CONFIG_NET_POLL_CONTROLLER
2504static void qlcnic_poll_controller(struct net_device *netdev)
2505{
bf82791e
YL
2506 int ring;
2507 struct qlcnic_host_sds_ring *sds_ring;
af19b491 2508 struct qlcnic_adapter *adapter = netdev_priv(netdev);
bf82791e
YL
2509 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
2510
af19b491 2511 disable_irq(adapter->irq);
bf82791e
YL
2512 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
2513 sds_ring = &recv_ctx->sds_rings[ring];
2514 qlcnic_intr(adapter->irq, sds_ring);
2515 }
af19b491
AKS
2516 enable_irq(adapter->irq);
2517}
2518#endif
2519
6df900e9
SC
2520static void
2521qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
2522{
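	/*
	 * Pack a small debug footprint into the driver scratch register: the
	 * PCI function number in the low nibble, the caller-supplied encoding
	 * starting at bit 7, and the jiffies elapsed since the last device
	 * reset event from bit 8 upward (the fields can overlap for large
	 * values; this is debug-only data).
	 */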
2523 u32 val;
2524
2525 val = adapter->portnum & 0xf;
2526 val |= encoding << 7;
2527 val |= (jiffies - adapter->dev_rst_time) << 8;
2528
2529 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
2530 adapter->dev_rst_time = jiffies;
2531}
2532
ade91f8e
AKS
2533static int
2534qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
af19b491
AKS
2535{
2536 u32 val;
2537
2538 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
2539 state != QLCNIC_DEV_NEED_QUISCENT);
2540
2541 if (qlcnic_api_lock(adapter))
ade91f8e 2542 return -EIO;
af19b491
AKS
2543
2544 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2545
2546 if (state == QLCNIC_DEV_NEED_RESET)
6d2a4724 2547 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
af19b491 2548 else if (state == QLCNIC_DEV_NEED_QUISCENT)
6d2a4724 2549 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
af19b491
AKS
2550
2551 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2552
2553 qlcnic_api_unlock(adapter);
ade91f8e
AKS
2554
2555 return 0;
af19b491
AKS
2556}
2557
1b95a839
AKS
2558static int
2559qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2560{
2561 u32 val;
2562
2563 if (qlcnic_api_lock(adapter))
2564 return -EBUSY;
2565
2566 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2567 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
1b95a839
AKS
2568 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2569
2570 qlcnic_api_unlock(adapter);
2571
2572 return 0;
2573}
2574
af19b491 2575static void
21854f02 2576qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
af19b491
AKS
2577{
2578 u32 val;
2579
2580 if (qlcnic_api_lock(adapter))
2581 goto err;
2582
31018e06 2583 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724 2584 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
31018e06 2585 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491 2586
21854f02
AKS
2587 if (failed) {
2588 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
2589 dev_info(&adapter->pdev->dev,
2590			"Device state set to Failed. Please reboot\n");
2591 } else if (!(val & 0x11111111))
af19b491
AKS
2592 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2593
2594 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2595 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
af19b491
AKS
2596 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2597
2598 qlcnic_api_unlock(adapter);
2599err:
2600 adapter->fw_fail_cnt = 0;
2601 clear_bit(__QLCNIC_START_FW, &adapter->state);
2602 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2603}
2604
f73dfc50 2605/* Grab the API lock before checking the state */
af19b491
AKS
2606static int
2607qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2608{
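	/*
	 * CRB_DRV_STATE and CRB_DRV_ACTIVE appear to reserve one nibble per
	 * PCI function (hence the 0x11111111 masks).  The handshake is
	 * complete when every active function has acked, either with its
	 * reset-ready bit or with its quiescent-ready bit (one position up,
	 * hence the "state >> 1" comparison).
	 */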
2609 int act, state;
2610
2611 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
31018e06 2612 act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
af19b491
AKS
2613
2614 if (((state & 0x11111111) == (act & 0x11111111)) ||
2615 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2616 return 0;
2617 else
2618 return 1;
2619}
2620
96f8118c
SC
2621static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
2622{
2623 u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
2624
2625 if (val != QLCNIC_DRV_IDC_VER) {
2626 dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
2627 " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
2628 }
2629
2630 return 0;
2631}
2632
af19b491
AKS
2633static int
2634qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2635{
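	/*
	 * IDC start-up handshake, roughly: mark this function active in
	 * CRB_DRV_ACTIVE, then act on the device state - COLD means this
	 * function gets to load the firmware (return 1), READY means only the
	 * IDC version needs checking, NEED_RESET/NEED_QUISCENT mean ack the
	 * request and wait, FAILED means give up.  Finally poll until the
	 * device reaches READY or dev_init_timeo expires.
	 */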
2636 u32 val, prev_state;
aa5e18c0 2637 u8 dev_init_timeo = adapter->dev_init_timeo;
6d2a4724 2638 u8 portnum = adapter->portnum;
96f8118c 2639 u8 ret;
af19b491 2640
f73dfc50
AKS
2641 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2642 return 1;
2643
af19b491
AKS
2644 if (qlcnic_api_lock(adapter))
2645 return -1;
2646
31018e06 2647 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724
AKS
2648 if (!(val & (1 << (portnum * 4)))) {
2649 QLC_DEV_SET_REF_CNT(val, portnum);
31018e06 2650 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491
AKS
2651 }
2652
2653 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
65b5b420 2654 QLCDB(adapter, HW, "Device state = %u\n", prev_state);
af19b491
AKS
2655
2656 switch (prev_state) {
2657 case QLCNIC_DEV_COLD:
bbd8c6a4 2658 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
96f8118c 2659 QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
6df900e9 2660 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2661 qlcnic_api_unlock(adapter);
2662 return 1;
2663
2664 case QLCNIC_DEV_READY:
96f8118c 2665 ret = qlcnic_check_idc_ver(adapter);
af19b491 2666 qlcnic_api_unlock(adapter);
96f8118c 2667 return ret;
af19b491
AKS
2668
2669 case QLCNIC_DEV_NEED_RESET:
2670 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2671 QLC_DEV_SET_RST_RDY(val, portnum);
af19b491
AKS
2672 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2673 break;
2674
2675 case QLCNIC_DEV_NEED_QUISCENT:
2676 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2677 QLC_DEV_SET_QSCNT_RDY(val, portnum);
af19b491
AKS
2678 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2679 break;
2680
2681 case QLCNIC_DEV_FAILED:
a7fc948f 2682 dev_err(&adapter->pdev->dev, "Device in failed state.\n");
af19b491
AKS
2683 qlcnic_api_unlock(adapter);
2684 return -1;
bbd8c6a4
AKS
2685
2686 case QLCNIC_DEV_INITIALIZING:
2687 case QLCNIC_DEV_QUISCENT:
2688 break;
af19b491
AKS
2689 }
2690
2691 qlcnic_api_unlock(adapter);
aa5e18c0
SC
2692
2693 do {
af19b491 2694 msleep(1000);
a5e463d0
SC
2695 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2696
2697 if (prev_state == QLCNIC_DEV_QUISCENT)
2698 continue;
2699 } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
af19b491 2700
65b5b420
AKS
2701 if (!dev_init_timeo) {
2702 dev_err(&adapter->pdev->dev,
2703			"Timed out waiting for the device to initialize\n");
af19b491 2704 return -1;
65b5b420 2705 }
af19b491
AKS
2706
2707 if (qlcnic_api_lock(adapter))
2708 return -1;
2709
2710 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2711 QLC_DEV_CLR_RST_QSCNT(val, portnum);
af19b491
AKS
2712 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2713
96f8118c 2714 ret = qlcnic_check_idc_ver(adapter);
af19b491
AKS
2715 qlcnic_api_unlock(adapter);
2716
96f8118c 2717 return ret;
af19b491
AKS
2718}
2719
2720static void
2721qlcnic_fwinit_work(struct work_struct *work)
2722{
2723 struct qlcnic_adapter *adapter = container_of(work,
2724 struct qlcnic_adapter, fw_work.work);
3c4b23b1 2725 u32 dev_state = 0xf;
af19b491 2726
f73dfc50
AKS
2727 if (qlcnic_api_lock(adapter))
2728 goto err_ret;
af19b491 2729
a5e463d0 2730 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
b8c17620
AKS
2731 if (dev_state == QLCNIC_DEV_QUISCENT ||
2732 dev_state == QLCNIC_DEV_NEED_QUISCENT) {
a5e463d0
SC
2733 qlcnic_api_unlock(adapter);
2734 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2735 FW_POLL_DELAY * 2);
2736 return;
2737 }
2738
9f26f547 2739 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
3c4b23b1
AKS
2740 qlcnic_api_unlock(adapter);
2741 goto wait_npar;
9f26f547
AC
2742 }
2743
f73dfc50
AKS
2744 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
2745 dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
2746 adapter->reset_ack_timeo);
2747 goto skip_ack_check;
2748 }
2749
2750 if (!qlcnic_check_drv_state(adapter)) {
2751skip_ack_check:
2752 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
a5e463d0 2753
f73dfc50
AKS
2754 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2755 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2756 QLCNIC_DEV_INITIALIZING);
2757 set_bit(__QLCNIC_START_FW, &adapter->state);
2758 QLCDB(adapter, DRV, "Restarting fw\n");
6df900e9 2759 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2760 }
2761
f73dfc50
AKS
2762 qlcnic_api_unlock(adapter);
2763
9f26f547 2764 if (!adapter->nic_ops->start_firmware(adapter)) {
af19b491 2765 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2766 adapter->fw_wait_cnt = 0;
af19b491
AKS
2767 return;
2768 }
af19b491
AKS
2769 goto err_ret;
2770 }
2771
f73dfc50 2772 qlcnic_api_unlock(adapter);
aa5e18c0 2773
9f26f547 2774wait_npar:
af19b491 2775 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
f73dfc50 2776 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
65b5b420 2777
af19b491 2778 switch (dev_state) {
3c4b23b1 2779 case QLCNIC_DEV_READY:
9f26f547 2780 if (!adapter->nic_ops->start_firmware(adapter)) {
f73dfc50 2781 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2782 adapter->fw_wait_cnt = 0;
f73dfc50
AKS
2783 return;
2784 }
3c4b23b1
AKS
2785 case QLCNIC_DEV_FAILED:
2786 break;
2787 default:
2788 qlcnic_schedule_work(adapter,
2789 qlcnic_fwinit_work, FW_POLL_DELAY);
2790 return;
af19b491
AKS
2791 }
2792
2793err_ret:
f73dfc50
AKS
2794 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
2795 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
34ce3626 2796 netif_device_attach(adapter->netdev);
21854f02 2797 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
2798}
2799
2800static void
2801qlcnic_detach_work(struct work_struct *work)
2802{
2803 struct qlcnic_adapter *adapter = container_of(work,
2804 struct qlcnic_adapter, fw_work.work);
2805 struct net_device *netdev = adapter->netdev;
2806 u32 status;
2807
2808 netif_device_detach(netdev);
2809
b8c17620
AKS
2810	/* Don't grab the rtnl lock during quiescent mode */
2811 if (adapter->dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2812 if (netif_running(netdev))
2813 __qlcnic_down(adapter, netdev);
2814 } else
2815 qlcnic_down(adapter, netdev);
af19b491 2816
af19b491
AKS
2817 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2818
2819 if (status & QLCNIC_RCODE_FATAL_ERROR)
2820 goto err_ret;
2821
2822 if (adapter->temp == QLCNIC_TEMP_PANIC)
2823 goto err_ret;
2824
ade91f8e
AKS
2825 if (qlcnic_set_drv_state(adapter, adapter->dev_state))
2826 goto err_ret;
af19b491
AKS
2827
2828 adapter->fw_wait_cnt = 0;
2829
2830 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2831
2832 return;
2833
2834err_ret:
65b5b420
AKS
2835 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
2836 status, adapter->temp);
34ce3626 2837 netif_device_attach(netdev);
21854f02 2838 qlcnic_clr_all_drv_state(adapter, 1);
af19b491
AKS
2839}
2840
3c4b23b1
AKS
2841/* Transition the NPAR state to NON operational */
2842static void
2843qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
2844{
2845 u32 state;
2846
2847 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2848 if (state == QLCNIC_DEV_NPAR_NON_OPER)
2849 return;
2850
2851 if (qlcnic_api_lock(adapter))
2852 return;
2853 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2854 qlcnic_api_unlock(adapter);
2855}
2856
b8c17620
AKS
2857/* Caller should hold the RESETTING bit.
2858 * This should be called in sync with qlcnic_request_quiscent_mode().
2859 */
2860void qlcnic_clear_quiscent_mode(struct qlcnic_adapter *adapter)
2861{
2862 qlcnic_clr_drv_state(adapter);
2863 qlcnic_api_lock(adapter);
2864 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
2865 qlcnic_api_unlock(adapter);
2866}
2867
2868/* Caller should hold the RESETTING bit.
2869 */
2870int qlcnic_request_quiscent_mode(struct qlcnic_adapter *adapter)
2871{
2872 u8 timeo = adapter->dev_init_timeo / 2;
2873 u32 state;
2874
2875 if (qlcnic_api_lock(adapter))
2876 return -EIO;
2877
2878 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2879	if (state != QLCNIC_DEV_READY) {
		/* don't leak the API lock taken above on this early return */
		qlcnic_api_unlock(adapter);
2880		return -EIO;
	}
2881
2882 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_QUISCENT);
2883 qlcnic_api_unlock(adapter);
2884 QLCDB(adapter, DRV, "NEED QUISCENT state set\n");
2885 qlcnic_idc_debug_info(adapter, 0);
2886
2887 qlcnic_set_drv_state(adapter, QLCNIC_DEV_NEED_QUISCENT);
2888
2889 do {
2890 msleep(2000);
2891 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2892 if (state == QLCNIC_DEV_QUISCENT)
2893 return 0;
2894 if (!qlcnic_check_drv_state(adapter)) {
2895 if (qlcnic_api_lock(adapter))
2896 return -EIO;
2897 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2898 QLCNIC_DEV_QUISCENT);
2899 qlcnic_api_unlock(adapter);
2900 QLCDB(adapter, DRV, "QUISCENT mode set\n");
2901 return 0;
2902 }
2903 } while (--timeo);
2904
2905 dev_err(&adapter->pdev->dev, "Failed to quiesce device, DRV_STATE=%08x"
2906 " DRV_ACTIVE=%08x\n", QLCRD32(adapter, QLCNIC_CRB_DRV_STATE),
2907 QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE));
2908 qlcnic_clear_quiscent_mode(adapter);
2909 return -EIO;
2910}
2911
f73dfc50 2912/* Transition to RESET state from READY state only */
af19b491
AKS
2913static void
2914qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2915{
2916 u32 state;
2917
cea8975e 2918 adapter->need_fw_reset = 1;
af19b491
AKS
2919 if (qlcnic_api_lock(adapter))
2920 return;
2921
2922 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2923
f73dfc50 2924 if (state == QLCNIC_DEV_READY) {
af19b491 2925 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
65b5b420 2926 QLCDB(adapter, DRV, "NEED_RESET state set\n");
6df900e9 2927 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2928 }
2929
3c4b23b1 2930 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
af19b491
AKS
2931 qlcnic_api_unlock(adapter);
2932}
2933
9f26f547
AC
2934/* Transition to NPAR READY state from NPAR NOT READY state */
2935static void
2936qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
2937{
9f26f547
AC
2938 if (qlcnic_api_lock(adapter))
2939 return;
2940
3c4b23b1
AKS
2941 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
2942 QLCDB(adapter, DRV, "NPAR operational state set\n");
9f26f547
AC
2943
2944 qlcnic_api_unlock(adapter);
2945}
2946
af19b491
AKS
2947static void
2948qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2949 work_func_t func, int delay)
2950{
451724c8
SC
2951 if (test_bit(__QLCNIC_AER, &adapter->state))
2952 return;
2953
af19b491 2954 INIT_DELAYED_WORK(&adapter->fw_work, func);
f7ec804a
AKS
2955 queue_delayed_work(qlcnic_wq, &adapter->fw_work,
2956 round_jiffies_relative(delay));
af19b491
AKS
2957}
2958
2959static void
2960qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
2961{
2962 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2963 msleep(10);
2964
2965 cancel_delayed_work_sync(&adapter->fw_work);
2966}
2967
2968static void
2969qlcnic_attach_work(struct work_struct *work)
2970{
2971 struct qlcnic_adapter *adapter = container_of(work,
2972 struct qlcnic_adapter, fw_work.work);
2973 struct net_device *netdev = adapter->netdev;
b18971d1 2974 u32 npar_state;
af19b491 2975
b18971d1
AKS
2976 if (adapter->op_mode != QLCNIC_MGMT_FUNC) {
2977 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2978 if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
2979 qlcnic_clr_all_drv_state(adapter, 0);
2980 else if (npar_state != QLCNIC_DEV_NPAR_OPER)
2981 qlcnic_schedule_work(adapter, qlcnic_attach_work,
2982 FW_POLL_DELAY);
2983 else
2984 goto attach;
2985		QLCDB(adapter, DRV, "Waiting for NPAR state to become operational\n");
2986 return;
2987 }
2988attach:
af19b491 2989 if (netif_running(netdev)) {
52486a3a 2990 if (qlcnic_up(adapter, netdev))
af19b491 2991 goto done;
af19b491 2992
aec1e845 2993 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
af19b491
AKS
2994 }
2995
af19b491 2996done:
34ce3626 2997 netif_device_attach(netdev);
af19b491
AKS
2998 adapter->fw_fail_cnt = 0;
2999 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1b95a839
AKS
3000
3001 if (!qlcnic_clr_drv_state(adapter))
3002 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
3003 FW_POLL_DELAY);
af19b491
AKS
3004}
3005
3006static int
3007qlcnic_check_health(struct qlcnic_adapter *adapter)
3008{
4e70812b 3009 u32 state = 0, heartbeat;
af19b491
AKS
3010 struct net_device *netdev = adapter->netdev;
3011
3012 if (qlcnic_check_temp(adapter))
3013 goto detach;
3014
2372a5f1 3015 if (adapter->need_fw_reset)
af19b491 3016 qlcnic_dev_request_reset(adapter);
af19b491
AKS
3017
3018 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
b8c17620 3019 if (state == QLCNIC_DEV_NEED_RESET) {
3c4b23b1 3020 qlcnic_set_npar_non_operational(adapter);
af19b491 3021 adapter->need_fw_reset = 1;
b8c17620
AKS
3022 } else if (state == QLCNIC_DEV_NEED_QUISCENT)
3023 goto detach;
af19b491 3024
4e70812b
SC
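	/*
	 * Firmware heartbeat: QLCNIC_PEG_ALIVE_COUNTER is expected to change
	 * between polls.  A changed value clears the failure count; if it
	 * stays the same for FW_FAIL_THRESH consecutive polls the firmware is
	 * considered hung and a reset is requested.
	 */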
3025 heartbeat = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
3026 if (heartbeat != adapter->heartbeat) {
3027 adapter->heartbeat = heartbeat;
af19b491
AKS
3028 adapter->fw_fail_cnt = 0;
3029 if (adapter->need_fw_reset)
3030 goto detach;
68bf1c68 3031
0df170b6
AKS
3032 if (adapter->reset_context &&
3033 auto_fw_reset == AUTO_FW_RESET_ENABLED) {
68bf1c68
AKS
3034 qlcnic_reset_hw_context(adapter);
3035 adapter->netdev->trans_start = jiffies;
3036 }
3037
af19b491
AKS
3038 return 0;
3039 }
3040
3041 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
3042 return 0;
3043
3044 qlcnic_dev_request_reset(adapter);
3045
0df170b6
AKS
3046	if (auto_fw_reset == AUTO_FW_RESET_ENABLED)
3047 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
af19b491
AKS
3048
3049 dev_info(&netdev->dev, "firmware hang detected\n");
3050
3051detach:
3052 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
3053 QLCNIC_DEV_NEED_RESET;
3054
3055 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
65b5b420
AKS
3056 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
3057
af19b491 3058 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
65b5b420
AKS
3059 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
3060 }
af19b491
AKS
3061
3062 return 1;
3063}
3064
3065static void
3066qlcnic_fw_poll_work(struct work_struct *work)
3067{
3068 struct qlcnic_adapter *adapter = container_of(work,
3069 struct qlcnic_adapter, fw_work.work);
3070
3071 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
3072 goto reschedule;
3073
3074
3075 if (qlcnic_check_health(adapter))
3076 return;
3077
b5e5492c
AKS
3078 if (adapter->fhash.fnum)
3079 qlcnic_prune_lb_filters(adapter);
3080
af19b491
AKS
3081reschedule:
3082 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
3083}
3084
451724c8
SC
3085static int qlcnic_is_first_func(struct pci_dev *pdev)
3086{
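	/*
	 * Walk the lower-numbered PCI functions in the same slot; if any of
	 * them is in a power state other than D3cold, another function has
	 * already brought the device up, so this one is not "first".
	 */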
3087 struct pci_dev *oth_pdev;
3088 int val = pdev->devfn;
3089
3090 while (val-- > 0) {
3091 oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
3092 (pdev->bus), pdev->bus->number,
3093 PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
bfc978fa
AKS
3094 if (!oth_pdev)
3095 continue;
451724c8 3096
bfc978fa
AKS
3097 if (oth_pdev->current_state != PCI_D3cold) {
3098 pci_dev_put(oth_pdev);
451724c8 3099 return 0;
bfc978fa
AKS
3100 }
3101 pci_dev_put(oth_pdev);
451724c8
SC
3102 }
3103 return 1;
3104}
3105
3106static int qlcnic_attach_func(struct pci_dev *pdev)
3107{
3108 int err, first_func;
3109 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3110 struct net_device *netdev = adapter->netdev;
3111
3112 pdev->error_state = pci_channel_io_normal;
3113
3114 err = pci_enable_device(pdev);
3115 if (err)
3116 return err;
3117
3118 pci_set_power_state(pdev, PCI_D0);
3119 pci_set_master(pdev);
3120 pci_restore_state(pdev);
3121
3122 first_func = qlcnic_is_first_func(pdev);
3123
3124 if (qlcnic_api_lock(adapter))
3125 return -EINVAL;
3126
933fce12 3127 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
451724c8
SC
3128 adapter->need_fw_reset = 1;
3129 set_bit(__QLCNIC_START_FW, &adapter->state);
3130 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
3131 QLCDB(adapter, DRV, "Restarting fw\n");
3132 }
3133 qlcnic_api_unlock(adapter);
3134
3135 err = adapter->nic_ops->start_firmware(adapter);
3136 if (err)
3137 return err;
3138
3139 qlcnic_clr_drv_state(adapter);
3140 qlcnic_setup_intr(adapter);
3141
3142 if (netif_running(netdev)) {
3143 err = qlcnic_attach(adapter);
3144 if (err) {
21854f02 3145 qlcnic_clr_all_drv_state(adapter, 1);
451724c8
SC
3146 clear_bit(__QLCNIC_AER, &adapter->state);
3147 netif_device_attach(netdev);
3148 return err;
3149 }
3150
3151 err = qlcnic_up(adapter, netdev);
3152 if (err)
3153 goto done;
3154
aec1e845 3155 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
451724c8
SC
3156 }
3157 done:
3158 netif_device_attach(netdev);
3159 return err;
3160}
3161
3162static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
3163 pci_channel_state_t state)
3164{
3165 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3166 struct net_device *netdev = adapter->netdev;
3167
3168 if (state == pci_channel_io_perm_failure)
3169 return PCI_ERS_RESULT_DISCONNECT;
3170
3171 if (state == pci_channel_io_normal)
3172 return PCI_ERS_RESULT_RECOVERED;
3173
3174 set_bit(__QLCNIC_AER, &adapter->state);
3175 netif_device_detach(netdev);
3176
3177 cancel_delayed_work_sync(&adapter->fw_work);
3178
3179 if (netif_running(netdev))
3180 qlcnic_down(adapter, netdev);
3181
3182 qlcnic_detach(adapter);
3183 qlcnic_teardown_intr(adapter);
3184
3185 clear_bit(__QLCNIC_RESETTING, &adapter->state);
3186
3187 pci_save_state(pdev);
3188 pci_disable_device(pdev);
3189
3190 return PCI_ERS_RESULT_NEED_RESET;
3191}
3192
3193static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
3194{
3195 return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
3196 PCI_ERS_RESULT_RECOVERED;
3197}
3198
3199static void qlcnic_io_resume(struct pci_dev *pdev)
3200{
3201 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
3202
3203 pci_cleanup_aer_uncorrect_error_status(pdev);
3204
3205 if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
3206 test_and_clear_bit(__QLCNIC_AER, &adapter->state))
3207 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
3208 FW_POLL_DELAY);
3209}
3210
87eb743b
AC
3211static int
3212qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
3213{
3214 int err;
3215
3216 err = qlcnic_can_start_firmware(adapter);
3217 if (err)
3218 return err;
3219
78f84e1a
AKS
3220 err = qlcnic_check_npar_opertional(adapter);
3221 if (err)
3222 return err;
3c4b23b1 3223
174240a8
RB
3224 err = qlcnic_initialize_nic(adapter);
3225 if (err)
3226 return err;
3227
87eb743b
AC
3228 qlcnic_check_options(adapter);
3229
7373373d
RB
3230 err = qlcnic_set_eswitch_port_config(adapter);
3231 if (err)
3232 return err;
3233
87eb743b
AC
3234 adapter->need_fw_reset = 0;
3235
3236 return err;
3237}
3238
3239static int
3240qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
3241{
3242 return -EOPNOTSUPP;
3243}
3244
3245static int
3246qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
3247{
3248 return -EOPNOTSUPP;
3249}
3250
af19b491
AKS
3251static ssize_t
3252qlcnic_store_bridged_mode(struct device *dev,
3253 struct device_attribute *attr, const char *buf, size_t len)
3254{
3255 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3256 unsigned long new;
3257 int ret = -EINVAL;
3258
3259 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
3260 goto err_out;
3261
8a15ad1f 3262 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
3263 goto err_out;
3264
3265 if (strict_strtoul(buf, 2, &new))
3266 goto err_out;
3267
2e9d722d 3268 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
af19b491
AKS
3269 ret = len;
3270
3271err_out:
3272 return ret;
3273}
3274
3275static ssize_t
3276qlcnic_show_bridged_mode(struct device *dev,
3277 struct device_attribute *attr, char *buf)
3278{
3279 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3280 int bridged_mode = 0;
3281
3282 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3283 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
3284
3285 return sprintf(buf, "%d\n", bridged_mode);
3286}
3287
3288static struct device_attribute dev_attr_bridged_mode = {
3289 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
3290 .show = qlcnic_show_bridged_mode,
3291 .store = qlcnic_store_bridged_mode,
3292};
3293
3294static ssize_t
3295qlcnic_store_diag_mode(struct device *dev,
3296 struct device_attribute *attr, const char *buf, size_t len)
3297{
3298 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3299 unsigned long new;
3300
3301 if (strict_strtoul(buf, 2, &new))
3302 return -EINVAL;
3303
3304 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
3305 adapter->flags ^= QLCNIC_DIAG_ENABLED;
3306
3307 return len;
3308}
3309
3310static ssize_t
3311qlcnic_show_diag_mode(struct device *dev,
3312 struct device_attribute *attr, char *buf)
3313{
3314 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3315
3316 return sprintf(buf, "%d\n",
3317 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
3318}
3319
3320static struct device_attribute dev_attr_diag_mode = {
3321 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
3322 .show = qlcnic_show_diag_mode,
3323 .store = qlcnic_store_diag_mode,
3324};
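/*
 * Hypothetical usage of the two attributes above, assuming they are created
 * under the adapter's PCI device directory (registration happens elsewhere
 * in this driver) and that the BDF below is replaced with the real one:
 *
 *	echo 1 > /sys/bus/pci/devices/0000:03:00.0/diag_mode
 *	cat /sys/bus/pci/devices/0000:03:00.0/bridged_mode
 *
 * Both stores parse the buffer with strict_strtoul(buf, 2, ...), so only
 * base-2 input is accepted.
 */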
3325
3326static int
3327qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
3328 loff_t offset, size_t size)
3329{
897e8c7c
DP
3330 size_t crb_size = 4;
3331
af19b491
AKS
3332 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3333 return -EIO;
3334
897e8c7c
DP
3335 if (offset < QLCNIC_PCI_CRBSPACE) {
3336 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
3337 QLCNIC_PCI_CAMQM_END))
3338 crb_size = 8;
3339 else
3340 return -EINVAL;
3341 }
af19b491 3342
897e8c7c
DP
3343 if ((size != crb_size) || (offset & (crb_size-1)))
3344 return -EINVAL;
af19b491
AKS
3345
3346 return 0;
3347}
3348
3349static ssize_t
2c3c8bea
CW
3350qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
3351 struct bin_attribute *attr,
af19b491
AKS
3352 char *buf, loff_t offset, size_t size)
3353{
3354 struct device *dev = container_of(kobj, struct device, kobj);
3355 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3356 u32 data;
897e8c7c 3357 u64 qmdata;
af19b491
AKS
3358 int ret;
3359
3360 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3361 if (ret != 0)
3362 return ret;
3363
897e8c7c
DP
3364 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3365 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
3366 memcpy(buf, &qmdata, size);
3367 } else {
3368 data = QLCRD32(adapter, offset);
3369 memcpy(buf, &data, size);
3370 }
af19b491
AKS
3371 return size;
3372}
3373
3374static ssize_t
2c3c8bea
CW
3375qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
3376 struct bin_attribute *attr,
af19b491
AKS
3377 char *buf, loff_t offset, size_t size)
3378{
3379 struct device *dev = container_of(kobj, struct device, kobj);
3380 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3381 u32 data;
897e8c7c 3382 u64 qmdata;
af19b491
AKS
3383 int ret;
3384
3385 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3386 if (ret != 0)
3387 return ret;
3388
897e8c7c
DP
3389 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3390 memcpy(&qmdata, buf, size);
3391 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
3392 } else {
3393 memcpy(&data, buf, size);
3394 QLCWR32(adapter, offset, data);
3395 }
af19b491
AKS
3396 return size;
3397}
3398
3399static int
3400qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
3401 loff_t offset, size_t size)
3402{
3403 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3404 return -EIO;
3405
3406 if ((size != 8) || (offset & 0x7))
3407 return -EIO;
3408
3409 return 0;
3410}
3411
3412static ssize_t
2c3c8bea
CW
3413qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
3414 struct bin_attribute *attr,
af19b491
AKS
3415 char *buf, loff_t offset, size_t size)
3416{
3417 struct device *dev = container_of(kobj, struct device, kobj);
3418 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3419 u64 data;
3420 int ret;
3421
3422 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3423 if (ret != 0)
3424 return ret;
3425
3426 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
3427 return -EIO;
3428
3429 memcpy(buf, &data, size);
3430
3431 return size;
3432}
3433
3434static ssize_t
2c3c8bea
CW
3435qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
3436 struct bin_attribute *attr,
af19b491
AKS
3437 char *buf, loff_t offset, size_t size)
3438{
3439 struct device *dev = container_of(kobj, struct device, kobj);
3440 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3441 u64 data;
3442 int ret;
3443
3444 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3445 if (ret != 0)
3446 return ret;
3447
3448 memcpy(&data, buf, size);
3449
3450 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
3451 return -EIO;
3452
3453 return size;
3454}
3455
3456
3457static struct bin_attribute bin_attr_crb = {
3458 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
3459 .size = 0,
3460 .read = qlcnic_sysfs_read_crb,
3461 .write = qlcnic_sysfs_write_crb,
3462};
3463
3464static struct bin_attribute bin_attr_mem = {
3465 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
3466 .size = 0,
3467 .read = qlcnic_sysfs_read_mem,
3468 .write = qlcnic_sysfs_write_mem,
3469};
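/*
 * The "crb" and "mem" nodes are only accessible while diag_mode is enabled
 * (see the validate helpers above).  A hypothetical 4-byte CRB read, with
 * OFFSET standing in for a real register offset and the BDF for the real
 * device, assuming the nodes sit in the PCI device's sysfs directory:
 *
 *	dd if=/sys/bus/pci/devices/0000:03:00.0/crb bs=4 count=1 \
 *		skip=$((OFFSET / 4)) | xxd
 *
 * CRB accesses must be 4 bytes (8 bytes inside the CAMQM window) and
 * aligned to their size; "mem" accesses must be 8 bytes and 8-byte aligned.
 */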
3470
cea8975e 3471static int
346fe763
RB
3472validate_pm_config(struct qlcnic_adapter *adapter,
3473 struct qlcnic_pm_func_cfg *pm_cfg, int count)
3474{
3475
3476 u8 src_pci_func, s_esw_id, d_esw_id;
3477 u8 dest_pci_func;
3478 int i;
3479
3480 for (i = 0; i < count; i++) {
3481 src_pci_func = pm_cfg[i].pci_func;
3482 dest_pci_func = pm_cfg[i].dest_npar;
3483 if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
3484 || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
3485 return QL_STATUS_INVALID_PARAM;
3486
3487 if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
3488 return QL_STATUS_INVALID_PARAM;
3489
3490 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
3491 return QL_STATUS_INVALID_PARAM;
3492
346fe763
RB
3493 s_esw_id = adapter->npars[src_pci_func].phy_port;
3494 d_esw_id = adapter->npars[dest_pci_func].phy_port;
3495
3496 if (s_esw_id != d_esw_id)
3497 return QL_STATUS_INVALID_PARAM;
3498
3499 }
3500 return 0;
3501
3502}
3503
3504static ssize_t
3505qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3506 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3507{
3508 struct device *dev = container_of(kobj, struct device, kobj);
3509 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3510 struct qlcnic_pm_func_cfg *pm_cfg;
3511 u32 id, action, pci_func;
3512 int count, rem, i, ret;
3513
3514 count = size / sizeof(struct qlcnic_pm_func_cfg);
3515 rem = size % sizeof(struct qlcnic_pm_func_cfg);
3516 if (rem)
3517 return QL_STATUS_INVALID_PARAM;
3518
3519 pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
3520
3521 ret = validate_pm_config(adapter, pm_cfg, count);
3522 if (ret)
3523 return ret;
3524 for (i = 0; i < count; i++) {
3525 pci_func = pm_cfg[i].pci_func;
4e8acb01 3526 action = !!pm_cfg[i].action;
346fe763
RB
3527 id = adapter->npars[pci_func].phy_port;
3528 ret = qlcnic_config_port_mirroring(adapter, id,
3529 action, pci_func);
3530 if (ret)
3531 return ret;
3532 }
3533
3534 for (i = 0; i < count; i++) {
3535 pci_func = pm_cfg[i].pci_func;
3536 id = adapter->npars[pci_func].phy_port;
4e8acb01 3537 adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
346fe763
RB
3538 adapter->npars[pci_func].dest_npar = id;
3539 }
3540 return size;
3541}
3542
3543static ssize_t
3544qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
3545 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3546{
3547 struct device *dev = container_of(kobj, struct device, kobj);
3548 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3549 struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
3550 int i;
3551
3552 if (size != sizeof(pm_cfg))
3553 return QL_STATUS_INVALID_PARAM;
3554
3555 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3556 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3557 continue;
3558 pm_cfg[i].action = adapter->npars[i].enable_pm;
3559 pm_cfg[i].dest_npar = 0;
3560 pm_cfg[i].pci_func = i;
3561 }
3562 memcpy(buf, &pm_cfg, size);
3563
3564 return size;
3565}
3566
cea8975e 3567static int
346fe763 3568validate_esw_config(struct qlcnic_adapter *adapter,
4e8acb01 3569 struct qlcnic_esw_func_cfg *esw_cfg, int count)
346fe763 3570{
7613c87b 3571 u32 op_mode;
346fe763
RB
3572 u8 pci_func;
3573 int i;
7613c87b
RB
3574
3575 op_mode = readl(adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE);
3576
346fe763
RB
3577 for (i = 0; i < count; i++) {
3578 pci_func = esw_cfg[i].pci_func;
3579 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3580 return QL_STATUS_INVALID_PARAM;
3581
4e8acb01
RB
3582 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3583 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3584 return QL_STATUS_INVALID_PARAM;
346fe763 3585
4e8acb01
RB
3586 switch (esw_cfg[i].op_mode) {
3587 case QLCNIC_PORT_DEFAULTS:
7613c87b 3588 if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
7373373d 3589 QLCNIC_NON_PRIV_FUNC) {
7613c87b 3590 esw_cfg[i].mac_anti_spoof = 0;
7373373d 3591 esw_cfg[i].mac_override = 1;
ee07c1a7 3592 esw_cfg[i].promisc_mode = 1;
7373373d 3593 }
4e8acb01
RB
3594 break;
3595 case QLCNIC_ADD_VLAN:
346fe763
RB
3596 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3597 return QL_STATUS_INVALID_PARAM;
4e8acb01
RB
3598 if (!esw_cfg[i].op_type)
3599 return QL_STATUS_INVALID_PARAM;
3600 break;
3601 case QLCNIC_DEL_VLAN:
4e8acb01
RB
3602 if (!esw_cfg[i].op_type)
3603 return QL_STATUS_INVALID_PARAM;
3604 break;
3605 default:
346fe763 3606 return QL_STATUS_INVALID_PARAM;
4e8acb01 3607 }
346fe763 3608 }
346fe763
RB
3609 return 0;
3610}
3611
3612static ssize_t
3613qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3614 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3615{
3616 struct device *dev = container_of(kobj, struct device, kobj);
3617 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3618 struct qlcnic_esw_func_cfg *esw_cfg;
4e8acb01 3619 struct qlcnic_npar_info *npar;
346fe763 3620 int count, rem, i, ret;
0325d69b 3621 u8 pci_func, op_mode = 0;
346fe763
RB
3622
3623 count = size / sizeof(struct qlcnic_esw_func_cfg);
3624 rem = size % sizeof(struct qlcnic_esw_func_cfg);
3625 if (rem)
3626 return QL_STATUS_INVALID_PARAM;
3627
3628 esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
3629 ret = validate_esw_config(adapter, esw_cfg, count);
3630 if (ret)
3631 return ret;
3632
3633 for (i = 0; i < count; i++) {
0325d69b
RB
3634 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3635 if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
3636 return QL_STATUS_INVALID_PARAM;
e9a47700
RB
3637
3638 if (adapter->ahw.pci_func != esw_cfg[i].pci_func)
3639 continue;
3640
3641 op_mode = esw_cfg[i].op_mode;
3642 qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
3643 esw_cfg[i].op_mode = op_mode;
3644 esw_cfg[i].pci_func = adapter->ahw.pci_func;
3645
3646 switch (esw_cfg[i].op_mode) {
3647 case QLCNIC_PORT_DEFAULTS:
3648 qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
3649 break;
8cf61f89
AKS
3650 case QLCNIC_ADD_VLAN:
3651 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3652 break;
3653 case QLCNIC_DEL_VLAN:
3654 esw_cfg[i].vlan_id = 0;
3655 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3656 break;
0325d69b 3657 }
346fe763
RB
3658 }
3659
0325d69b
RB
3660 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3661 goto out;
e9a47700 3662
346fe763
RB
3663 for (i = 0; i < count; i++) {
3664 pci_func = esw_cfg[i].pci_func;
4e8acb01
RB
3665 npar = &adapter->npars[pci_func];
3666 switch (esw_cfg[i].op_mode) {
3667 case QLCNIC_PORT_DEFAULTS:
3668 npar->promisc_mode = esw_cfg[i].promisc_mode;
7373373d 3669 npar->mac_override = esw_cfg[i].mac_override;
4e8acb01
RB
3670 npar->offload_flags = esw_cfg[i].offload_flags;
3671 npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
3672 npar->discard_tagged = esw_cfg[i].discard_tagged;
3673 break;
3674 case QLCNIC_ADD_VLAN:
3675 npar->pvid = esw_cfg[i].vlan_id;
3676 break;
3677 case QLCNIC_DEL_VLAN:
3678 npar->pvid = 0;
3679 break;
3680 }
346fe763 3681 }
0325d69b 3682out:
346fe763
RB
3683 return size;
3684}
3685
3686static ssize_t
3687qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
3688 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3689{
3690 struct device *dev = container_of(kobj, struct device, kobj);
3691 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3692 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
4e8acb01 3693 u8 i;
346fe763
RB
3694
3695 if (size != sizeof(esw_cfg))
3696 return QL_STATUS_INVALID_PARAM;
3697
3698 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3699 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3700 continue;
4e8acb01
RB
3701 esw_cfg[i].pci_func = i;
3702 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
3703 return QL_STATUS_INVALID_PARAM;
346fe763
RB
3704 }
3705 memcpy(buf, &esw_cfg, size);
3706
3707 return size;
3708}
3709
cea8975e 3710static int
346fe763
RB
3711validate_npar_config(struct qlcnic_adapter *adapter,
3712 struct qlcnic_npar_func_cfg *np_cfg, int count)
3713{
3714 u8 pci_func, i;
3715
3716 for (i = 0; i < count; i++) {
3717 pci_func = np_cfg[i].pci_func;
3718 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3719 return QL_STATUS_INVALID_PARAM;
3720
3721 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3722 return QL_STATUS_INVALID_PARAM;
3723
3724 if (!IS_VALID_BW(np_cfg[i].min_bw)
3725 || !IS_VALID_BW(np_cfg[i].max_bw)
3726 || !IS_VALID_RX_QUEUES(np_cfg[i].max_rx_queues)
3727 || !IS_VALID_TX_QUEUES(np_cfg[i].max_tx_queues))
3728 return QL_STATUS_INVALID_PARAM;
3729 }
3730 return 0;
3731}
3732
3733static ssize_t
3734qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
3735 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3736{
3737 struct device *dev = container_of(kobj, struct device, kobj);
3738 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3739 struct qlcnic_info nic_info;
3740 struct qlcnic_npar_func_cfg *np_cfg;
3741 int i, count, rem, ret;
3742 u8 pci_func;
3743
3744 count = size / sizeof(struct qlcnic_npar_func_cfg);
3745 rem = size % sizeof(struct qlcnic_npar_func_cfg);
3746 if (rem)
3747 return QL_STATUS_INVALID_PARAM;
3748
3749 np_cfg = (struct qlcnic_npar_func_cfg *) buf;
3750 ret = validate_npar_config(adapter, np_cfg, count);
3751 if (ret)
3752 return ret;
3753
3754 for (i = 0; i < count ; i++) {
3755 pci_func = np_cfg[i].pci_func;
3756 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
3757 if (ret)
3758 return ret;
3759 nic_info.pci_func = pci_func;
3760 nic_info.min_tx_bw = np_cfg[i].min_bw;
3761 nic_info.max_tx_bw = np_cfg[i].max_bw;
3762 ret = qlcnic_set_nic_info(adapter, &nic_info);
3763 if (ret)
3764 return ret;
3765		adapter->npars[pci_func].min_bw = nic_info.min_tx_bw;
3766		adapter->npars[pci_func].max_bw = nic_info.max_tx_bw;
3767 }
3768
3769 return size;
3770}
3771
3772static ssize_t
3773qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
3774 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3775{
3776 struct device *dev = container_of(kobj, struct device, kobj);
3777 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3778 struct qlcnic_info nic_info;
3779 struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
3780 int i, ret;
3781
3782 if (size != sizeof(np_cfg))
3783 return QL_STATUS_INVALID_PARAM;
3784
3785	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3786 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3787 continue;
3788 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
3789 if (ret)
3790 return ret;
3791
3792 np_cfg[i].pci_func = i;
a1c0c459 3793 np_cfg[i].op_mode = (u8)nic_info.op_mode;
3794 np_cfg[i].port_num = nic_info.phys_port;
3795 np_cfg[i].fw_capab = nic_info.capabilities;
3796		np_cfg[i].min_bw = nic_info.min_tx_bw;
3797 np_cfg[i].max_bw = nic_info.max_tx_bw;
3798 np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
3799 np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
3800 }
3801 memcpy(buf, &np_cfg, size);
3802 return size;
3803}
3804
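/*
 * The next four handlers back the "port_stats" and "esw_stats" binary
 * attributes. Reads return a qlcnic_esw_statistics snapshot of the RX
 * and TX counters; writes clear those counters. The sysfs offset
 * selects the PCI function for port statistics and the physical port
 * for eSwitch statistics.
 */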
3805static ssize_t
3806qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
3807 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3808{
3809 struct device *dev = container_of(kobj, struct device, kobj);
3810 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3811 struct qlcnic_esw_statistics port_stats;
3812 int ret;
3813
3814 if (size != sizeof(struct qlcnic_esw_statistics))
3815 return QL_STATUS_INVALID_PARAM;
3816
3817 if (offset >= QLCNIC_MAX_PCI_FUNC)
3818 return QL_STATUS_INVALID_PARAM;
3819
3820 memset(&port_stats, 0, size);
3821 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3822 &port_stats.rx);
3823 if (ret)
3824 return ret;
3825
3826 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3827 &port_stats.tx);
3828 if (ret)
3829 return ret;
3830
3831 memcpy(buf, &port_stats, size);
3832 return size;
3833}
3834
3835static ssize_t
3836qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
3837 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3838{
3839 struct device *dev = container_of(kobj, struct device, kobj);
3840 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3841 struct qlcnic_esw_statistics esw_stats;
3842 int ret;
3843
3844 if (size != sizeof(struct qlcnic_esw_statistics))
3845 return QL_STATUS_INVALID_PARAM;
3846
3847 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3848 return QL_STATUS_INVALID_PARAM;
3849
3850 memset(&esw_stats, 0, size);
3851 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3852 &esw_stats.rx);
3853 if (ret)
3854 return ret;
3855
3856 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3857 &esw_stats.tx);
3858 if (ret)
3859 return ret;
3860
3861 memcpy(buf, &esw_stats, size);
3862 return size;
3863}
3864
3865static ssize_t
3866qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
3867 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3868{
3869 struct device *dev = container_of(kobj, struct device, kobj);
3870 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3871 int ret;
3872
3873 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3874 return QL_STATUS_INVALID_PARAM;
3875
3876 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3877 QLCNIC_QUERY_RX_COUNTER);
3878 if (ret)
3879 return ret;
3880
3881 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3882 QLCNIC_QUERY_TX_COUNTER);
3883 if (ret)
3884 return ret;
3885
3886 return size;
3887}
3888
3889static ssize_t
3890qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
3891 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3892{
3893
3894 struct device *dev = container_of(kobj, struct device, kobj);
3895 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3896 int ret;
3897
3898 if (offset >= QLCNIC_MAX_PCI_FUNC)
3899 return QL_STATUS_INVALID_PARAM;
3900
3901 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3902 QLCNIC_QUERY_RX_COUNTER);
3903 if (ret)
3904 return ret;
3905
3906 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3907 QLCNIC_QUERY_TX_COUNTER);
3908 if (ret)
3909 return ret;
3910
3911 return size;
3912}
3913
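/*
 * Read handler for the "pci_config" binary attribute: reports, for each
 * PCI function, its type, default port, TX bandwidth limits and default
 * MAC address as returned by qlcnic_get_pci_info().
 */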
3914static ssize_t
3915qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
3916 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3917{
3918 struct device *dev = container_of(kobj, struct device, kobj);
3919 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3920 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
e88db3bd 3921 struct qlcnic_pci_info *pci_info;
3922 int i, ret;
3923
3924 if (size != sizeof(pci_cfg))
3925 return QL_STATUS_INVALID_PARAM;
3926
3927 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
3928 if (!pci_info)
3929 return -ENOMEM;
3930
346fe763 3931 ret = qlcnic_get_pci_info(adapter, pci_info);
3932 if (ret) {
3933 kfree(pci_info);
346fe763 3934 return ret;
e88db3bd 3935 }
3936
3937	for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3938 pci_cfg[i].pci_func = pci_info[i].id;
3939 pci_cfg[i].func_type = pci_info[i].type;
3940 pci_cfg[i].port_num = pci_info[i].default_port;
3941 pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
3942 pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
3943 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
3944 }
3945 memcpy(buf, &pci_cfg, size);
e88db3bd 3946 kfree(pci_info);
346fe763 3947 return size;
3948}
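
/*
 * Binary sysfs attributes exported by the driver. A .size of 0 leaves
 * the attribute unsized as far as sysfs is concerned; each handler
 * validates the caller-supplied size itself.
 */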
3949static struct bin_attribute bin_attr_npar_config = {
3950 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
3951 .size = 0,
3952 .read = qlcnic_sysfs_read_npar_config,
3953 .write = qlcnic_sysfs_write_npar_config,
3954};
3955
3956static struct bin_attribute bin_attr_pci_config = {
3957 .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
3958 .size = 0,
3959 .read = qlcnic_sysfs_read_pci_config,
3960 .write = NULL,
3961};
3962
3963static struct bin_attribute bin_attr_port_stats = {
3964 .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
3965 .size = 0,
3966 .read = qlcnic_sysfs_get_port_stats,
3967 .write = qlcnic_sysfs_clear_port_stats,
3968};
3969
3970static struct bin_attribute bin_attr_esw_stats = {
3971 .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
3972 .size = 0,
3973 .read = qlcnic_sysfs_get_esw_stats,
3974 .write = qlcnic_sysfs_clear_esw_stats,
3975};
3976
3977static struct bin_attribute bin_attr_esw_config = {
3978 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
3979 .size = 0,
3980 .read = qlcnic_sysfs_read_esw_config,
3981 .write = qlcnic_sysfs_write_esw_config,
3982};
3983
3984static struct bin_attribute bin_attr_pm_config = {
3985 .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
3986 .size = 0,
3987 .read = qlcnic_sysfs_read_pm_config,
3988 .write = qlcnic_sysfs_write_pm_config,
3989};
3990
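/*
 * Plain sysfs entries: "bridged_mode" is exposed only when the firmware
 * advertises the bridging (QLCNIC_FW_CAPABILITY_BDG) capability.
 */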
3991static void
3992qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
3993{
3994 struct device *dev = &adapter->pdev->dev;
3995
3996 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3997 if (device_create_file(dev, &dev_attr_bridged_mode))
3998 dev_warn(dev,
3999 "failed to create bridged_mode sysfs entry\n");
4000}
4001
4002static void
4003qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
4004{
4005 struct device *dev = &adapter->pdev->dev;
4006
4007 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
4008 device_remove_file(dev, &dev_attr_bridged_mode);
4009}
4010
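/*
 * Diagnostic and management attributes are created according to the
 * function's privilege level: every function gets "port_stats";
 * privileged functions add "diag_mode", "crb" and "mem"; adapters with
 * the eSwitch enabled add "esw_config"; and only the management
 * function exposes "pci_config", "npar_config", "pm_config" and
 * "esw_stats".
 */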
4011static void
4012qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
4013{
4014 struct device *dev = &adapter->pdev->dev;
4015
4016 if (device_create_bin_file(dev, &bin_attr_port_stats))
4017 dev_info(dev, "failed to create port stats sysfs entry");
4018
4019 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
4020 return;
4021 if (device_create_file(dev, &dev_attr_diag_mode))
4022 dev_info(dev, "failed to create diag_mode sysfs entry\n");
4023 if (device_create_bin_file(dev, &bin_attr_crb))
4024 dev_info(dev, "failed to create crb sysfs entry\n");
4025 if (device_create_bin_file(dev, &bin_attr_mem))
4026 dev_info(dev, "failed to create mem sysfs entry\n");
4027 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4028 return;
4029 if (device_create_bin_file(dev, &bin_attr_esw_config))
4030 dev_info(dev, "failed to create esw config sysfs entry");
4031 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
4032 return;
4033 if (device_create_bin_file(dev, &bin_attr_pci_config))
4034 dev_info(dev, "failed to create pci config sysfs entry");
4035 if (device_create_bin_file(dev, &bin_attr_npar_config))
4036 dev_info(dev, "failed to create npar config sysfs entry");
4037 if (device_create_bin_file(dev, &bin_attr_pm_config))
4038 dev_info(dev, "failed to create pm config sysfs entry");
4039 if (device_create_bin_file(dev, &bin_attr_esw_stats))
4040 dev_info(dev, "failed to create eswitch stats sysfs entry");
4041}
4042
4043static void
4044qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
4045{
4046 struct device *dev = &adapter->pdev->dev;
4047
4048 device_remove_bin_file(dev, &bin_attr_port_stats);
4049
4050 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
4051 return;
4052 device_remove_file(dev, &dev_attr_diag_mode);
4053 device_remove_bin_file(dev, &bin_attr_crb);
4054 device_remove_bin_file(dev, &bin_attr_mem);
4055 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
4056 return;
4057 device_remove_bin_file(dev, &bin_attr_esw_config);
4058 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
4059 return;
4060 device_remove_bin_file(dev, &bin_attr_pci_config);
4061 device_remove_bin_file(dev, &bin_attr_npar_config);
346fe763 4062 device_remove_bin_file(dev, &bin_attr_pm_config);
b6021212 4063 device_remove_bin_file(dev, &bin_attr_esw_stats);
4064}
4065
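/*
 * With CONFIG_INET the driver tracks IPv4 address changes through
 * netdevice and inetaddr notifiers, so the firmware can be told via
 * qlcnic_config_ipaddr() which addresses are active on the interface
 * and on any VLAN devices stacked on top of it.
 */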
4066#ifdef CONFIG_INET
4067
4068#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
4069
af19b491 4070static void
4071qlcnic_config_indev_addr(struct qlcnic_adapter *adapter,
4072 struct net_device *dev, unsigned long event)
4073{
4074 struct in_device *indev;
af19b491 4075
4076 indev = in_dev_get(dev);
4077 if (!indev)
4078 return;
4079
4080 for_ifa(indev) {
4081 switch (event) {
4082 case NETDEV_UP:
4083 qlcnic_config_ipaddr(adapter,
4084 ifa->ifa_address, QLCNIC_IP_UP);
4085 break;
4086 case NETDEV_DOWN:
4087 qlcnic_config_ipaddr(adapter,
4088 ifa->ifa_address, QLCNIC_IP_DOWN);
4089 break;
4090 default:
4091 break;
4092 }
4093 } endfor_ifa(indev);
4094
4095 in_dev_put(indev);
4096}
4097
4098static void
4099qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
4100{
4101 struct qlcnic_adapter *adapter = netdev_priv(netdev);
4102 struct net_device *dev;
4103 u16 vid;
4104
4105 qlcnic_config_indev_addr(adapter, netdev, event);
4106
4107 if (!adapter->vlgrp)
4108 return;
4109
b738127d 4110 for (vid = 0; vid < VLAN_N_VID; vid++) {
4111 dev = vlan_group_get_device(adapter->vlgrp, vid);
4112 if (!dev)
4113 continue;
4114
4115 qlcnic_config_indev_addr(adapter, dev, event);
4116 }
4117}
4118
4119static int qlcnic_netdev_event(struct notifier_block *this,
4120 unsigned long event, void *ptr)
4121{
4122 struct qlcnic_adapter *adapter;
4123 struct net_device *dev = (struct net_device *)ptr;
4124
4125recheck:
4126 if (dev == NULL)
4127 goto done;
4128
4129 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4130 dev = vlan_dev_real_dev(dev);
4131 goto recheck;
4132 }
4133
4134 if (!is_qlcnic_netdev(dev))
4135 goto done;
4136
4137 adapter = netdev_priv(dev);
4138
4139 if (!adapter)
4140 goto done;
4141
8a15ad1f 4142 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
4143 goto done;
4144
aec1e845 4145 qlcnic_config_indev_addr(adapter, dev, event);
4146done:
4147 return NOTIFY_DONE;
4148}
4149
4150static int
4151qlcnic_inetaddr_event(struct notifier_block *this,
4152 unsigned long event, void *ptr)
4153{
4154 struct qlcnic_adapter *adapter;
4155 struct net_device *dev;
4156
4157 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
4158
4159 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
4160
4161recheck:
aec1e845 4162 if (dev == NULL)
4163 goto done;
4164
4165 if (dev->priv_flags & IFF_802_1Q_VLAN) {
4166 dev = vlan_dev_real_dev(dev);
4167 goto recheck;
4168 }
4169
4170 if (!is_qlcnic_netdev(dev))
4171 goto done;
4172
4173 adapter = netdev_priv(dev);
4174
251a84c9 4175 if (!adapter)
4176 goto done;
4177
8a15ad1f 4178 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
4179 goto done;
4180
4181 switch (event) {
4182 case NETDEV_UP:
4183 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
4184 break;
4185 case NETDEV_DOWN:
4186 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
4187 break;
4188 default:
4189 break;
4190 }
4191
4192done:
4193 return NOTIFY_DONE;
4194}
4195
4196static struct notifier_block qlcnic_netdev_cb = {
4197 .notifier_call = qlcnic_netdev_event,
4198};
4199
4200static struct notifier_block qlcnic_inetaddr_cb = {
4201 .notifier_call = qlcnic_inetaddr_event,
4202};
4203#else
4204static void
aec1e845 4205qlcnic_restore_indev_addr(struct net_device *dev, unsigned long event)
4206{ }
4207#endif
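
/* PCI error (AER) hooks used by the PCI core for error recovery. */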
4208static struct pci_error_handlers qlcnic_err_handler = {
4209 .error_detected = qlcnic_io_error_detected,
4210 .slot_reset = qlcnic_io_slot_reset,
4211 .resume = qlcnic_io_resume,
4212};
4213
4214static struct pci_driver qlcnic_driver = {
4215 .name = qlcnic_driver_name,
4216 .id_table = qlcnic_pci_tbl,
4217 .probe = qlcnic_probe,
4218 .remove = __devexit_p(qlcnic_remove),
4219#ifdef CONFIG_PM
4220 .suspend = qlcnic_suspend,
4221 .resume = qlcnic_resume,
4222#endif
4223 .shutdown = qlcnic_shutdown,
4224 .err_handler = &qlcnic_err_handler
4225
4226};
4227
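/*
 * Module init: create the driver workqueue, register the optional inet
 * notifiers and finally the PCI driver, unwinding the earlier steps if
 * PCI registration fails.
 */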
4228static int __init qlcnic_init_module(void)
4229{
0cf3a14c 4230 int ret;
4231
4232 printk(KERN_INFO "%s\n", qlcnic_driver_string);
4233
4234 qlcnic_wq = create_singlethread_workqueue("qlcnic");
4235 if (qlcnic_wq == NULL) {
4236 printk(KERN_ERR "qlcnic: cannot create workqueue\n");
4237 return -ENOMEM;
4238 }
4239
4240#ifdef CONFIG_INET
4241 register_netdevice_notifier(&qlcnic_netdev_cb);
4242 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
4243#endif
4244
4245 ret = pci_register_driver(&qlcnic_driver);
4246 if (ret) {
4247#ifdef CONFIG_INET
4248 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4249 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4250#endif
f7ec804a 4251 destroy_workqueue(qlcnic_wq);
0cf3a14c 4252 }
af19b491 4253
0cf3a14c 4254 return ret;
4255}
4256
4257module_init(qlcnic_init_module);
4258
4259static void __exit qlcnic_exit_module(void)
4260{
4261
4262 pci_unregister_driver(&qlcnic_driver);
4263
4264#ifdef CONFIG_INET
4265 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
4266 unregister_netdevice_notifier(&qlcnic_netdev_cb);
4267#endif
f7ec804a 4268 destroy_workqueue(qlcnic_wq);
4269}
4270
4271module_exit(qlcnic_exit_module);