drivers/net/qlcnic/qlcnic_main.c
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28
29#include "qlcnic.h"
30
31#include <linux/dma-mapping.h>
32#include <linux/if_vlan.h>
33#include <net/ip.h>
34#include <linux/ipv6.h>
35#include <linux/inetdevice.h>
36#include <linux/sysfs.h>
37#include <linux/aer.h>
38
39MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
40MODULE_LICENSE("GPL");
41MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
42MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
43
44char qlcnic_driver_name[] = "qlcnic";
45static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
46 "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
47
48static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
49
50/* Default to restricted 1G auto-neg mode */
51static int wol_port_mode = 5;
52
53static int use_msi = 1;
54module_param(use_msi, int, 0644);
55MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
56
57static int use_msi_x = 1;
58module_param(use_msi_x, int, 0644);
59MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
60
61static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
62module_param(auto_fw_reset, int, 0644);
63MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
64
65static int load_fw_file;
66module_param(load_fw_file, int, 0644);
67MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
68
69static int qlcnic_config_npars;
70module_param(qlcnic_config_npars, int, 0644);
71MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
72
73static int __devinit qlcnic_probe(struct pci_dev *pdev,
74 const struct pci_device_id *ent);
75static void __devexit qlcnic_remove(struct pci_dev *pdev);
76static int qlcnic_open(struct net_device *netdev);
77static int qlcnic_close(struct net_device *netdev);
78static void qlcnic_tx_timeout(struct net_device *netdev);
79static void qlcnic_attach_work(struct work_struct *work);
80static void qlcnic_fwinit_work(struct work_struct *work);
81static void qlcnic_fw_poll_work(struct work_struct *work);
82static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
83 work_func_t func, int delay);
84static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
85static int qlcnic_poll(struct napi_struct *napi, int budget);
86static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
87#ifdef CONFIG_NET_POLL_CONTROLLER
88static void qlcnic_poll_controller(struct net_device *netdev);
89#endif
90
91static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
92static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
93static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
94static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
95
96static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
97static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
98static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
99
100static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
101static irqreturn_t qlcnic_intr(int irq, void *data);
102static irqreturn_t qlcnic_msi_intr(int irq, void *data);
103static irqreturn_t qlcnic_msix_intr(int irq, void *data);
104
105static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
106static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
107static int qlcnic_start_firmware(struct qlcnic_adapter *);
108
109static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
110static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
111static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
112static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
113static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
114 struct qlcnic_esw_func_cfg *);
115/* PCI Device ID Table */
116#define ENTRY(device) \
117 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
118 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
119
120#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
121
122static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
123 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
124 {0,}
125};
126
127MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
128
129
130void
131qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
132 struct qlcnic_host_tx_ring *tx_ring)
133{
134 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
135}
136
137static const u32 msi_tgt_status[8] = {
138 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
139 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
140 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
141 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
142};
143
144static const
145struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
146
147static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
148{
149 writel(0, sds_ring->crb_intr_mask);
150}
151
152static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
153{
154 struct qlcnic_adapter *adapter = sds_ring->adapter;
155
156 writel(0x1, sds_ring->crb_intr_mask);
157
158 if (!QLCNIC_IS_MSI_FAMILY(adapter))
159 writel(0xfbff, adapter->tgt_mask_reg);
160}
161
162static int
163qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
164{
165 int size = sizeof(struct qlcnic_host_sds_ring) * count;
166
167 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
168
169 return (recv_ctx->sds_rings == NULL);
170}
171
172static void
173qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
174{
175 if (recv_ctx->sds_rings != NULL)
176 kfree(recv_ctx->sds_rings);
177
178 recv_ctx->sds_rings = NULL;
179}
180
181static int
182qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
183{
184 int ring;
185 struct qlcnic_host_sds_ring *sds_ring;
186 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
187
188 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
189 return -ENOMEM;
190
191 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
192 sds_ring = &recv_ctx->sds_rings[ring];
193
194 if (ring == adapter->max_sds_rings - 1)
195 netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
196 QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
197 else
198 netif_napi_add(netdev, &sds_ring->napi,
199 qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
200 }
201
202 return 0;
203}
204
205static void
206qlcnic_napi_del(struct qlcnic_adapter *adapter)
207{
208 int ring;
209 struct qlcnic_host_sds_ring *sds_ring;
210 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
211
212 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
213 sds_ring = &recv_ctx->sds_rings[ring];
214 netif_napi_del(&sds_ring->napi);
215 }
216
217 qlcnic_free_sds_rings(&adapter->recv_ctx);
218}
219
220static void
221qlcnic_napi_enable(struct qlcnic_adapter *adapter)
222{
223 int ring;
224 struct qlcnic_host_sds_ring *sds_ring;
225 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
226
227 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
228 return;
229
230 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
231 sds_ring = &recv_ctx->sds_rings[ring];
232 napi_enable(&sds_ring->napi);
233 qlcnic_enable_int(sds_ring);
234 }
235}
236
237static void
238qlcnic_napi_disable(struct qlcnic_adapter *adapter)
239{
240 int ring;
241 struct qlcnic_host_sds_ring *sds_ring;
242 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
243
244 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
245 return;
246
247 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
248 sds_ring = &recv_ctx->sds_rings[ring];
249 qlcnic_disable_int(sds_ring);
250 napi_synchronize(&sds_ring->napi);
251 napi_disable(&sds_ring->napi);
252 }
253}
254
255static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
256{
257 memset(&adapter->stats, 0, sizeof(adapter->stats));
258}
259
260static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
261{
262 u32 val, data;
263
264 val = adapter->ahw.board_type;
265 if ((val == QLCNIC_BRDTYPE_P3_HMEZ) ||
266 (val == QLCNIC_BRDTYPE_P3_XG_LOM)) {
267 if (port_mode == QLCNIC_PORT_MODE_802_3_AP) {
268 data = QLCNIC_PORT_MODE_802_3_AP;
269 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
270 } else if (port_mode == QLCNIC_PORT_MODE_XG) {
271 data = QLCNIC_PORT_MODE_XG;
272 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
273 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_1G) {
274 data = QLCNIC_PORT_MODE_AUTO_NEG_1G;
275 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
276 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_XG) {
277 data = QLCNIC_PORT_MODE_AUTO_NEG_XG;
278 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
279 } else {
280 data = QLCNIC_PORT_MODE_AUTO_NEG;
281 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
282 }
283
284 if ((wol_port_mode != QLCNIC_PORT_MODE_802_3_AP) &&
285 (wol_port_mode != QLCNIC_PORT_MODE_XG) &&
286 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_1G) &&
287 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_XG)) {
288 wol_port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
289 }
290 QLCWR32(adapter, QLCNIC_WOL_PORT_MODE, wol_port_mode);
291 }
292}
293
294static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
295{
296 u32 control;
297 int pos;
298
299 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
300 if (pos) {
301 pci_read_config_dword(pdev, pos, &control);
302 if (enable)
303 control |= PCI_MSIX_FLAGS_ENABLE;
304 else
305 control = 0;
306 pci_write_config_dword(pdev, pos, control);
307 }
308}
309
310static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
311{
312 int i;
313
314 for (i = 0; i < count; i++)
315 adapter->msix_entries[i].entry = i;
316}
317
318static int
319qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
320{
321 u8 mac_addr[ETH_ALEN];
322 struct net_device *netdev = adapter->netdev;
323 struct pci_dev *pdev = adapter->pdev;
324
325 if (adapter->nic_ops->get_mac_addr(adapter, mac_addr) != 0)
326 return -EIO;
327
328 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
329 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
330 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
331
332 /* set station address */
333
334 if (!is_valid_ether_addr(netdev->perm_addr))
335 dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
336 netdev->dev_addr);
337
338 return 0;
339}
340
341static int qlcnic_set_mac(struct net_device *netdev, void *p)
342{
343 struct qlcnic_adapter *adapter = netdev_priv(netdev);
344 struct sockaddr *addr = p;
345
346 if (!is_valid_ether_addr(addr->sa_data))
347 return -EINVAL;
348
349 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
350 netif_device_detach(netdev);
351 qlcnic_napi_disable(adapter);
352 }
353
354 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
355 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
356 qlcnic_set_multi(adapter->netdev);
357
358 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
359 netif_device_attach(netdev);
360 qlcnic_napi_enable(adapter);
361 }
362 return 0;
363}
364
365static const struct net_device_ops qlcnic_netdev_ops = {
366 .ndo_open = qlcnic_open,
367 .ndo_stop = qlcnic_close,
368 .ndo_start_xmit = qlcnic_xmit_frame,
369 .ndo_get_stats = qlcnic_get_stats,
370 .ndo_validate_addr = eth_validate_addr,
371 .ndo_set_multicast_list = qlcnic_set_multi,
372 .ndo_set_mac_address = qlcnic_set_mac,
373 .ndo_change_mtu = qlcnic_change_mtu,
374 .ndo_tx_timeout = qlcnic_tx_timeout,
375#ifdef CONFIG_NET_POLL_CONTROLLER
376 .ndo_poll_controller = qlcnic_poll_controller,
377#endif
378};
379
380static struct qlcnic_nic_template qlcnic_ops = {
381 .get_mac_addr = qlcnic_get_mac_address,
382 .config_bridged_mode = qlcnic_config_bridged_mode,
383 .config_led = qlcnic_config_led,
384 .start_firmware = qlcnic_start_firmware
385};
386
387static struct qlcnic_nic_template qlcnic_vf_ops = {
388 .get_mac_addr = qlcnic_get_mac_address,
389 .config_bridged_mode = qlcnicvf_config_bridged_mode,
390 .config_led = qlcnicvf_config_led,
391 .start_firmware = qlcnicvf_start_firmware
392};
393
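/* Interrupt setup: try MSI-X first (multiple SDS rings when RSS is
 * supported), then fall back to MSI, and finally to shared legacy INTx. */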
394static void
395qlcnic_setup_intr(struct qlcnic_adapter *adapter)
396{
397 const struct qlcnic_legacy_intr_set *legacy_intrp;
398 struct pci_dev *pdev = adapter->pdev;
399 int err, num_msix;
400
401 if (adapter->rss_supported) {
402 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
403 MSIX_ENTRIES_PER_ADAPTER : 2;
404 } else
405 num_msix = 1;
406
407 adapter->max_sds_rings = 1;
408
409 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
410
411 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
412
413 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
414 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
415 legacy_intrp->tgt_status_reg);
416 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
417 legacy_intrp->tgt_mask_reg);
418 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
419
420 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
421 ISR_INT_STATE_REG);
422
423 qlcnic_set_msix_bit(pdev, 0);
424
425 if (adapter->msix_supported) {
426
427 qlcnic_init_msix_entries(adapter, num_msix);
428 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
429 if (err == 0) {
430 adapter->flags |= QLCNIC_MSIX_ENABLED;
431 qlcnic_set_msix_bit(pdev, 1);
432
433 if (adapter->rss_supported)
434 adapter->max_sds_rings = num_msix;
435
436 dev_info(&pdev->dev, "using msi-x interrupts\n");
437 return;
438 }
439
440 if (err > 0)
441 pci_disable_msix(pdev);
442
443 /* fall through for msi */
444 }
445
446 if (use_msi && !pci_enable_msi(pdev)) {
447 adapter->flags |= QLCNIC_MSI_ENABLED;
448 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
449 msi_tgt_status[adapter->ahw.pci_func]);
450 dev_info(&pdev->dev, "using msi interrupts\n");
451 adapter->msix_entries[0].vector = pdev->irq;
452 return;
453 }
454
455 dev_info(&pdev->dev, "using legacy interrupts\n");
456 adapter->msix_entries[0].vector = pdev->irq;
457}
458
459static void
460qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
461{
462 if (adapter->flags & QLCNIC_MSIX_ENABLED)
463 pci_disable_msix(adapter->pdev);
464 if (adapter->flags & QLCNIC_MSI_ENABLED)
465 pci_disable_msi(adapter->pdev);
466}
467
468static void
469qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
470{
471 if (adapter->ahw.pci_base0 != NULL)
472 iounmap(adapter->ahw.pci_base0);
473}
474
475static int
476qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
477{
478 struct qlcnic_pci_info *pci_info;
479 int i, ret = 0;
480 u8 pfn;
481
482 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
483 if (!pci_info)
484 return -ENOMEM;
485
486 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
487 QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
488 if (!adapter->npars) {
489 ret = -ENOMEM;
490 goto err_pci_info;
491 }
492
493 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
494 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
495 if (!adapter->eswitch) {
496 ret = -ENOMEM;
497 goto err_npars;
498 }
499
500 ret = qlcnic_get_pci_info(adapter, pci_info);
501 if (ret)
502 goto err_eswitch;
503
504 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
505 pfn = pci_info[i].id;
506 if (pfn > QLCNIC_MAX_PCI_FUNC)
507 return QL_STATUS_INVALID_PARAM;
508 adapter->npars[pfn].active = pci_info[i].active;
509 adapter->npars[pfn].type = pci_info[i].type;
510 adapter->npars[pfn].phy_port = pci_info[i].default_port;
511 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
512 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
513 }
514
515 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
516 adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
517
518 kfree(pci_info);
519 return 0;
520
521err_eswitch:
522 kfree(adapter->eswitch);
523 adapter->eswitch = NULL;
524err_npars:
525 kfree(adapter->npars);
526 adapter->npars = NULL;
527err_pci_info:
528 kfree(pci_info);
529
530 return ret;
531}
532
533static int
534qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
535{
536 u8 id;
537 u32 ref_count;
538 int i, ret = 1;
539 u32 data = QLCNIC_MGMT_FUNC;
540 void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
541
542 /* If other drivers are not in use set their privilege level */
543 ref_count = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
544 ret = qlcnic_api_lock(adapter);
545 if (ret)
546 goto err_lock;
547
548 if (qlcnic_config_npars) {
549 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
550 id = i;
551 if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
552 id == adapter->ahw.pci_func)
553 continue;
554 data |= (qlcnic_config_npars &
555 QLC_DEV_SET_DRV(0xf, id));
556 }
557 } else {
558 data = readl(priv_op);
559 data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw.pci_func)) |
560 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
561 adapter->ahw.pci_func));
562 }
563 writel(data, priv_op);
564 qlcnic_api_unlock(adapter);
565err_lock:
566 return ret;
567}
568
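/* Determine the firmware HAL version and this PCI function's privilege
 * level, then select the management, privileged or VF operations table. */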
569static u32
570qlcnic_get_driver_mode(struct qlcnic_adapter *adapter)
571{
572 void __iomem *msix_base_addr;
573 void __iomem *priv_op;
574 struct qlcnic_info nic_info;
575 u32 func;
576 u32 msix_base;
577 u32 op_mode, priv_level;
578
579 /* Determine FW API version */
580 adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API);
581
582 /* Find PCI function number */
583 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
584 msix_base_addr = adapter->ahw.pci_base0 + QLCNIC_MSIX_BASE;
585 msix_base = readl(msix_base_addr);
586 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
587 adapter->ahw.pci_func = func;
588
589 if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
590 adapter->capabilities = nic_info.capabilities;
591
592 if (adapter->capabilities & BIT_6)
593 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
594 else
595 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
596 }
597
598 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
599 adapter->nic_ops = &qlcnic_ops;
600 return adapter->fw_hal_version;
601 }
602
603 /* Determine function privilege level */
604 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
605 op_mode = readl(priv_op);
606 if (op_mode == QLC_DEV_DRV_DEFAULT)
607 priv_level = QLCNIC_MGMT_FUNC;
608 else
609 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
610
611 switch (priv_level) {
612 case QLCNIC_MGMT_FUNC:
613 adapter->op_mode = QLCNIC_MGMT_FUNC;
614 adapter->nic_ops = &qlcnic_ops;
615 qlcnic_init_pci_info(adapter);
616 /* Set privilege level for other functions */
617 qlcnic_set_function_modes(adapter);
618 dev_info(&adapter->pdev->dev,
619 "HAL Version: %d, Management function\n",
620 adapter->fw_hal_version);
621 break;
622 case QLCNIC_PRIV_FUNC:
623 adapter->op_mode = QLCNIC_PRIV_FUNC;
624 dev_info(&adapter->pdev->dev,
625 "HAL Version: %d, Privileged function\n",
626 adapter->fw_hal_version);
627 adapter->nic_ops = &qlcnic_ops;
628 break;
629 case QLCNIC_NON_PRIV_FUNC:
630 adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
631 dev_info(&adapter->pdev->dev,
632 "HAL Version: %d Non Privileged function\n",
633 adapter->fw_hal_version);
634 adapter->nic_ops = &qlcnic_vf_ops;
635 break;
636 default:
637 dev_info(&adapter->pdev->dev, "Unknown function mode: %d\n",
638 priv_level);
639 return 0;
640 }
641 return adapter->fw_hal_version;
642}
643
644static int
645qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
646{
647 void __iomem *mem_ptr0 = NULL;
648 resource_size_t mem_base;
649 unsigned long mem_len, pci_len0 = 0;
650
651 struct pci_dev *pdev = adapter->pdev;
652
653 /* remap phys address */
654 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
655 mem_len = pci_resource_len(pdev, 0);
656
657 if (mem_len == QLCNIC_PCI_2MB_SIZE) {
658
659 mem_ptr0 = pci_ioremap_bar(pdev, 0);
660 if (mem_ptr0 == NULL) {
661 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
662 return -EIO;
663 }
664 pci_len0 = mem_len;
665 } else {
666 return -EIO;
667 }
668
669 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
670
671 adapter->ahw.pci_base0 = mem_ptr0;
672 adapter->ahw.pci_len0 = pci_len0;
673
674 if (!qlcnic_get_driver_mode(adapter)) {
675 iounmap(adapter->ahw.pci_base0);
676 return -EIO;
677 }
678
679 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
680 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func)));
681
682 return 0;
683}
684
685static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
686{
687 struct pci_dev *pdev = adapter->pdev;
688 int i, found = 0;
689
690 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
691 if (qlcnic_boards[i].vendor == pdev->vendor &&
692 qlcnic_boards[i].device == pdev->device &&
693 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
694 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
695 sprintf(name, "%pM: %s",
696 adapter->mac_addr,
697 qlcnic_boards[i].short_name);
698 found = 1;
699 break;
700 }
701
702 }
703
704 if (!found)
705 sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
706}
707
708static void
709qlcnic_check_options(struct qlcnic_adapter *adapter)
710{
711 u32 fw_major, fw_minor, fw_build;
712 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
713 struct pci_dev *pdev = adapter->pdev;
714 struct qlcnic_info nic_info;
715
716 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
717 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
718 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
719
720 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
721
722 if (adapter->portnum == 0) {
723 get_brd_name(adapter, brd_name);
724
725 pr_info("%s: %s Board Chip rev 0x%x\n",
726 module_name(THIS_MODULE),
727 brd_name, adapter->ahw.revision_id);
728 }
729
730 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
731 fw_major, fw_minor, fw_build);
732
733 adapter->flags &= ~QLCNIC_LRO_ENABLED;
734
735 if (adapter->ahw.port_type == QLCNIC_XGBE) {
736 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
737 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
738 } else if (adapter->ahw.port_type == QLCNIC_GBE) {
739 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
740 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
741 }
742
743 if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
744 adapter->physical_port = nic_info.phys_port;
745 adapter->switch_mode = nic_info.switch_mode;
746 adapter->max_tx_ques = nic_info.max_tx_ques;
747 adapter->max_rx_ques = nic_info.max_rx_ques;
748 adapter->capabilities = nic_info.capabilities;
749 adapter->max_mac_filters = nic_info.max_mac_filters;
750 adapter->max_mtu = nic_info.max_mtu;
751 }
752
753 adapter->msix_supported = !!use_msi_x;
754 adapter->rss_supported = !!use_msi_x;
755
756 adapter->num_txd = MAX_CMD_DESCRIPTORS;
757
758 adapter->max_rds_rings = MAX_RDS_RINGS;
759}
760
761static void
762qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
763 struct qlcnic_esw_func_cfg *esw_cfg)
764{
765 if (esw_cfg->discard_tagged)
766 adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
767 else
768 adapter->flags |= QLCNIC_TAGGING_ENABLED;
769
770 if (esw_cfg->vlan_id)
771 adapter->pvid = esw_cfg->vlan_id;
772 else
773 adapter->pvid = 0;
774}
775
776static void
777qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
778 struct qlcnic_esw_func_cfg *esw_cfg)
779{
780 adapter->flags &= ~QLCNIC_MACSPOOF;
781 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
782 if (esw_cfg->mac_anti_spoof)
783 adapter->flags |= QLCNIC_MACSPOOF;
784
785 qlcnic_set_netdev_features(adapter, esw_cfg);
786}
787
788static int
789qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
790{
791 struct qlcnic_esw_func_cfg esw_cfg;
792
793 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
794 return 0;
795
796 esw_cfg.pci_func = adapter->ahw.pci_func;
797 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
798 return -EIO;
799 qlcnic_set_vlan_config(adapter, &esw_cfg);
800 qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
801
802 return 0;
803}
804
805static void
806qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
807 struct qlcnic_esw_func_cfg *esw_cfg)
808{
809 struct net_device *netdev = adapter->netdev;
810 unsigned long features, vlan_features;
811
812 features = (NETIF_F_SG | NETIF_F_IP_CSUM |
813 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
814 vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
815 NETIF_F_IPV6_CSUM);
816
817 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
818 features |= (NETIF_F_TSO | NETIF_F_TSO6);
819 vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
820 }
821 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
822 features |= NETIF_F_LRO;
823
824 if (esw_cfg->offload_flags & BIT_0) {
825 netdev->features |= features;
826 adapter->rx_csum = 1;
827 if (!(esw_cfg->offload_flags & BIT_1))
828 netdev->features &= ~NETIF_F_TSO;
829 if (!(esw_cfg->offload_flags & BIT_2))
830 netdev->features &= ~NETIF_F_TSO6;
831 } else {
832 netdev->features &= ~features;
833 adapter->rx_csum = 0;
834 }
835
836 netdev->vlan_features = (features & vlan_features);
837}
838
839static int
840qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
841{
842 struct qlcnic_esw_func_cfg esw_cfg;
843 struct qlcnic_npar_info *npar;
844 u8 i;
845
846 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
847 adapter->need_fw_reset ||
848 adapter->op_mode != QLCNIC_MGMT_FUNC)
849 return 0;
850
851 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
852 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
853 continue;
854 memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
855 esw_cfg.pci_func = i;
856 esw_cfg.offload_flags = BIT_0;
857 esw_cfg.mac_learning = BIT_0;
858 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
859 esw_cfg.offload_flags |= (BIT_1 | BIT_2);
860 if (qlcnic_config_switch_port(adapter, &esw_cfg))
861 return -EIO;
862 npar = &adapter->npars[i];
863 npar->pvid = esw_cfg.vlan_id;
864 npar->mac_learning = esw_cfg.offload_flags;
865 npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
866 npar->discard_tagged = esw_cfg.discard_tagged;
867 npar->promisc_mode = esw_cfg.promisc_mode;
868 npar->offload_flags = esw_cfg.offload_flags;
869 }
870
871 return 0;
872}
873
874static int
875qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
876 struct qlcnic_npar_info *npar, int pci_func)
877{
878 struct qlcnic_esw_func_cfg esw_cfg;
879 esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
880 esw_cfg.pci_func = pci_func;
881 esw_cfg.vlan_id = npar->pvid;
882 esw_cfg.mac_learning = npar->mac_learning;
883 esw_cfg.discard_tagged = npar->discard_tagged;
884 esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
885 esw_cfg.offload_flags = npar->offload_flags;
886 esw_cfg.promisc_mode = npar->promisc_mode;
887 if (qlcnic_config_switch_port(adapter, &esw_cfg))
888 return -EIO;
889
890 esw_cfg.op_mode = QLCNIC_ADD_VLAN;
891 if (qlcnic_config_switch_port(adapter, &esw_cfg))
892 return -EIO;
893
894 return 0;
895}
896
897static int
898qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
899{
900 int i, err;
901 struct qlcnic_npar_info *npar;
902 struct qlcnic_info nic_info;
903
904 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
905 !adapter->need_fw_reset || adapter->op_mode != QLCNIC_MGMT_FUNC)
906 return 0;
907
908 /* Set the NPAR config data after FW reset */
909 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
910 npar = &adapter->npars[i];
911 if (npar->type != QLCNIC_TYPE_NIC)
912 continue;
913 err = qlcnic_get_nic_info(adapter, &nic_info, i);
914 if (err)
915 return err;
916 nic_info.min_tx_bw = npar->min_bw;
917 nic_info.max_tx_bw = npar->max_bw;
918 err = qlcnic_set_nic_info(adapter, &nic_info);
919 if (err)
920 return err;
921
922 if (npar->enable_pm) {
923 err = qlcnic_config_port_mirroring(adapter,
924 npar->dest_npar, 1, i);
925 if (err)
926 return err;
cea8975e 927 }
928 err = qlcnic_reset_eswitch_config(adapter, npar, i);
929 if (err)
930 return err;
931 }
932 return 0;
933}
934
935static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
936{
937 u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
938 u32 npar_state;
939
940 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
941 return 0;
942
943 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
944 while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
945 msleep(1000);
946 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
947 }
948 if (!npar_opt_timeo) {
949 dev_err(&adapter->pdev->dev,
950 "Waiting for NPAR state to opertional timeout\n");
951 return -EIO;
952 }
953 return 0;
954}
955
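/* Bring the firmware up: check whether this function may load it, pull the
 * image from flash or a firmware file, load/reset it if required, then mark
 * the device READY and restore default offload and NPAR settings. */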
956static int
957qlcnic_start_firmware(struct qlcnic_adapter *adapter)
958{
959 int err;
960
961 err = qlcnic_can_start_firmware(adapter);
962 if (err < 0)
963 return err;
964 else if (!err)
965 goto check_fw_status;
966
967 if (load_fw_file)
968 qlcnic_request_firmware(adapter);
969 else {
970 err = qlcnic_check_flash_fw_ver(adapter);
971 if (err)
972 goto err_out;
973
974 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
975 }
976
977 err = qlcnic_need_fw_reset(adapter);
af19b491 978 if (err == 0)
979 goto set_dev_ready;
980
981 err = qlcnic_pinit_from_rom(adapter);
982 if (err)
983 goto err_out;
984 qlcnic_set_port_mode(adapter);
985
986 err = qlcnic_load_firmware(adapter);
987 if (err)
988 goto err_out;
989
990 qlcnic_release_firmware(adapter);
991 QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);
992
993check_fw_status:
994 err = qlcnic_check_fw_status(adapter);
995 if (err)
996 goto err_out;
997
998set_dev_ready:
999 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
1000 qlcnic_idc_debug_info(adapter, 1);
1001
1002 err = qlcnic_set_default_offload_settings(adapter);
1003 if (err)
1004 goto err_out;
1005 err = qlcnic_reset_npar_config(adapter);
1006 if (err)
1007 goto err_out;
1008 qlcnic_dev_set_npar_ready(adapter);
1009 qlcnic_check_options(adapter);
1010 adapter->need_fw_reset = 0;
1011
1012 qlcnic_release_firmware(adapter);
1013 return 0;
1014
1015err_out:
1016 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
1017 dev_err(&adapter->pdev->dev, "Device state set to failed\n");
1018 qlcnic_release_firmware(adapter);
1019 return err;
1020}
1021
1022static int
1023qlcnic_request_irq(struct qlcnic_adapter *adapter)
1024{
1025 irq_handler_t handler;
1026 struct qlcnic_host_sds_ring *sds_ring;
1027 int err, ring;
1028
1029 unsigned long flags = 0;
1030 struct net_device *netdev = adapter->netdev;
1031 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1032
1033 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1034 handler = qlcnic_tmp_intr;
1035 if (!QLCNIC_IS_MSI_FAMILY(adapter))
1036 flags |= IRQF_SHARED;
1037
1038 } else {
1039 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1040 handler = qlcnic_msix_intr;
1041 else if (adapter->flags & QLCNIC_MSI_ENABLED)
1042 handler = qlcnic_msi_intr;
1043 else {
1044 flags |= IRQF_SHARED;
1045 handler = qlcnic_intr;
1046 }
1047 }
1048 adapter->irq = netdev->irq;
1049
1050 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1051 sds_ring = &recv_ctx->sds_rings[ring];
1052 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
1053 err = request_irq(sds_ring->irq, handler,
1054 flags, sds_ring->name, sds_ring);
1055 if (err)
1056 return err;
1057 }
1058
1059 return 0;
1060}
1061
1062static void
1063qlcnic_free_irq(struct qlcnic_adapter *adapter)
1064{
1065 int ring;
1066 struct qlcnic_host_sds_ring *sds_ring;
1067
1068 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1069
1070 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1071 sds_ring = &recv_ctx->sds_rings[ring];
1072 free_irq(sds_ring->irq, sds_ring);
1073 }
1074}
1075
1076static void
1077qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
1078{
1079 adapter->coal.flags = QLCNIC_INTR_DEFAULT;
1080 adapter->coal.normal.data.rx_time_us =
1081 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
1082 adapter->coal.normal.data.rx_packets =
1083 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
1084 adapter->coal.normal.data.tx_time_us =
1085 QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
1086 adapter->coal.normal.data.tx_packets =
1087 QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
1088}
1089
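/* Bring the data path up: create the firmware context, post receive
 * buffers, program RSS/coalescing and enable NAPI and interrupts. */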
1090static int
1091__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1092{
1093 int ring;
1094 struct qlcnic_host_rds_ring *rds_ring;
1095
1096 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1097 return -EIO;
1098
1099 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1100 return 0;
1101 if (qlcnic_set_eswitch_port_config(adapter))
1102 return -EIO;
1103
1104 if (qlcnic_fw_create_ctx(adapter))
1105 return -EIO;
1106
1107 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1108 rds_ring = &adapter->recv_ctx.rds_rings[ring];
1109 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
1110 }
1111
1112 qlcnic_set_multi(netdev);
1113 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
1114
1115 adapter->ahw.linkup = 0;
1116
1117 if (adapter->max_sds_rings > 1)
1118 qlcnic_config_rss(adapter, 1);
1119
1120 qlcnic_config_intr_coalesce(adapter);
1121
1122 if (netdev->features & NETIF_F_LRO)
1123 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
1124
1125 qlcnic_napi_enable(adapter);
1126
1127 qlcnic_linkevent_request(adapter, 1);
1128
1129 adapter->reset_context = 0;
1130 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1131 return 0;
1132}
1133
1134/* Usage: During resume and firmware recovery module.*/
1135
1136static int
1137qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1138{
1139 int err = 0;
1140
1141 rtnl_lock();
1142 if (netif_running(netdev))
1143 err = __qlcnic_up(adapter, netdev);
1144 rtnl_unlock();
1145
1146 return err;
1147}
1148
1149static void
1150__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1151{
1152 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1153 return;
1154
1155 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
1156 return;
1157
1158 smp_mb();
1159 spin_lock(&adapter->tx_clean_lock);
1160 netif_carrier_off(netdev);
1161 netif_tx_disable(netdev);
1162
1163 qlcnic_free_mac_list(adapter);
1164
1165 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
1166
1167 qlcnic_napi_disable(adapter);
1168
1169 qlcnic_fw_destroy_ctx(adapter);
1170
1171 qlcnic_reset_rx_buffers_list(adapter);
1172 qlcnic_release_tx_buffers(adapter);
1173 spin_unlock(&adapter->tx_clean_lock);
1174}
1175
1176/* Usage: During suspend and firmware recovery module */
1177
1178static void
1179qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1180{
1181 rtnl_lock();
1182 if (netif_running(netdev))
1183 __qlcnic_down(adapter, netdev);
1184 rtnl_unlock();
1185
1186}
1187
1188static int
1189qlcnic_attach(struct qlcnic_adapter *adapter)
1190{
1191 struct net_device *netdev = adapter->netdev;
1192 struct pci_dev *pdev = adapter->pdev;
1193 int err;
1194
1195 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
1196 return 0;
1197
1198 err = qlcnic_napi_add(adapter, netdev);
1199 if (err)
1200 return err;
1201
1202 err = qlcnic_alloc_sw_resources(adapter);
1203 if (err) {
1204 dev_err(&pdev->dev, "Error in setting sw resources\n");
1205 goto err_out_napi_del;
1206 }
1207
1208 err = qlcnic_alloc_hw_resources(adapter);
1209 if (err) {
1210 dev_err(&pdev->dev, "Error in setting hw resources\n");
1211 goto err_out_free_sw;
1212 }
1213
1214 err = qlcnic_request_irq(adapter);
1215 if (err) {
1216 dev_err(&pdev->dev, "failed to setup interrupt\n");
1217 goto err_out_free_hw;
1218 }
1219
1220 qlcnic_init_coalesce_defaults(adapter);
1221
1222 qlcnic_create_sysfs_entries(adapter);
1223
1224 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
1225 return 0;
1226
1227err_out_free_hw:
1228 qlcnic_free_hw_resources(adapter);
1229err_out_free_sw:
1230 qlcnic_free_sw_resources(adapter);
1231err_out_napi_del:
1232 qlcnic_napi_del(adapter);
1233 return err;
1234}
1235
1236static void
1237qlcnic_detach(struct qlcnic_adapter *adapter)
1238{
1239 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1240 return;
1241
1242 qlcnic_remove_sysfs_entries(adapter);
1243
1244 qlcnic_free_hw_resources(adapter);
1245 qlcnic_release_rx_buffers(adapter);
1246 qlcnic_free_irq(adapter);
1247 qlcnic_napi_del(adapter);
1248 qlcnic_free_sw_resources(adapter);
1249
1250 adapter->is_up = 0;
1251}
1252
1253void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
1254{
1255 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1256 struct qlcnic_host_sds_ring *sds_ring;
1257 int ring;
1258
1259 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
1260 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1261 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1262 sds_ring = &adapter->recv_ctx.sds_rings[ring];
1263 qlcnic_disable_int(sds_ring);
1264 }
1265 }
1266
1267 qlcnic_fw_destroy_ctx(adapter);
1268
1269 qlcnic_detach(adapter);
1270
1271 adapter->diag_test = 0;
1272 adapter->max_sds_rings = max_sds_rings;
1273
1274 if (qlcnic_attach(adapter))
1275 goto out;
1276
1277 if (netif_running(netdev))
1278 __qlcnic_up(adapter, netdev);
1279out:
1280 netif_device_attach(netdev);
1281}
1282
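/* Switch the interface into diagnostic mode: tear down the normal context
 * and re-attach with a single SDS ring for the requested self-test. */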
1283int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1284{
1285 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1286 struct qlcnic_host_sds_ring *sds_ring;
1287 struct qlcnic_host_rds_ring *rds_ring;
1288 int ring;
1289 int ret;
1290
1291 netif_device_detach(netdev);
1292
1293 if (netif_running(netdev))
1294 __qlcnic_down(adapter, netdev);
1295
1296 qlcnic_detach(adapter);
1297
1298 adapter->max_sds_rings = 1;
1299 adapter->diag_test = test;
1300
1301 ret = qlcnic_attach(adapter);
1302 if (ret) {
1303 netif_device_attach(netdev);
1304 return ret;
1305 }
1306
1307 ret = qlcnic_fw_create_ctx(adapter);
1308 if (ret) {
1309 qlcnic_detach(adapter);
1310 netif_device_attach(netdev);
1311 return ret;
1312 }
1313
1314 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1315 rds_ring = &adapter->recv_ctx.rds_rings[ring];
1316 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
1317 }
1318
1319 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1320 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1321 sds_ring = &adapter->recv_ctx.sds_rings[ring];
1322 qlcnic_enable_int(sds_ring);
1323 }
1324 }
1325 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1326
1327 return 0;
1328}
1329
1330/* Reset context in hardware only */
1331static int
1332qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
1333{
1334 struct net_device *netdev = adapter->netdev;
1335
1336 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1337 return -EBUSY;
1338
1339 netif_device_detach(netdev);
1340
1341 qlcnic_down(adapter, netdev);
1342
1343 qlcnic_up(adapter, netdev);
1344
1345 netif_device_attach(netdev);
1346
1347 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1348 return 0;
1349}
1350
1351int
1352qlcnic_reset_context(struct qlcnic_adapter *adapter)
1353{
1354 int err = 0;
1355 struct net_device *netdev = adapter->netdev;
1356
1357 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1358 return -EBUSY;
1359
1360 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
1361
1362 netif_device_detach(netdev);
1363
1364 if (netif_running(netdev))
1365 __qlcnic_down(adapter, netdev);
1366
1367 qlcnic_detach(adapter);
1368
1369 if (netif_running(netdev)) {
1370 err = qlcnic_attach(adapter);
1371 if (!err)
1372 __qlcnic_up(adapter, netdev);
1373 }
1374
1375 netif_device_attach(netdev);
1376 }
1377
1378 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1379 return err;
1380}
1381
1382static int
1383qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1384 struct net_device *netdev, u8 pci_using_dac)
1385{
1386 int err;
1387 struct pci_dev *pdev = adapter->pdev;
1388
1389 adapter->rx_csum = 1;
1390 adapter->mc_enabled = 0;
1391 adapter->max_mc_count = 38;
1392
1393 netdev->netdev_ops = &qlcnic_netdev_ops;
1394 netdev->watchdog_timeo = 5*HZ;
1395
1396 qlcnic_change_mtu(netdev, netdev->mtu);
1397
1398 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1399
1400 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1401 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
1402 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1403 NETIF_F_IPV6_CSUM);
1404
1405 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
1406 netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
1407 netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
1408 }
1409
1410 if (pci_using_dac) {
1411 netdev->features |= NETIF_F_HIGHDMA;
1412 netdev->vlan_features |= NETIF_F_HIGHDMA;
1413 }
1414
1415 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
1416 netdev->features |= (NETIF_F_HW_VLAN_TX);
1417
1418 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
1419 netdev->features |= NETIF_F_LRO;
1420 netdev->irq = adapter->msix_entries[0].vector;
1421
1422 if (qlcnic_read_mac_addr(adapter))
1423 dev_warn(&pdev->dev, "failed to read mac addr\n");
1424
1425 netif_carrier_off(netdev);
1426 netif_stop_queue(netdev);
1427
1428 err = register_netdev(netdev);
1429 if (err) {
1430 dev_err(&pdev->dev, "failed to register net device\n");
1431 return err;
1432 }
1433
1434 return 0;
1435}
1436
1437static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
1438{
1439 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1440 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1441 *pci_using_dac = 1;
1442 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
1443 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1444 *pci_using_dac = 0;
1445 else {
1446 dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
1447 return -EIO;
1448 }
1449
1450 return 0;
1451}
1452
1453static int __devinit
1454qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1455{
1456 struct net_device *netdev = NULL;
1457 struct qlcnic_adapter *adapter = NULL;
1458 int err;
1459 uint8_t revision_id;
1460 uint8_t pci_using_dac;
1461
1462 err = pci_enable_device(pdev);
1463 if (err)
1464 return err;
1465
1466 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1467 err = -ENODEV;
1468 goto err_out_disable_pdev;
1469 }
1470
1471 err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
1472 if (err)
1473 goto err_out_disable_pdev;
1474
1475 err = pci_request_regions(pdev, qlcnic_driver_name);
1476 if (err)
1477 goto err_out_disable_pdev;
1478
1479 pci_set_master(pdev);
1480 pci_enable_pcie_error_reporting(pdev);
1481
1482 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1483 if (!netdev) {
1484 dev_err(&pdev->dev, "failed to allocate net_device\n");
1485 err = -ENOMEM;
1486 goto err_out_free_res;
1487 }
1488
1489 SET_NETDEV_DEV(netdev, &pdev->dev);
1490
1491 adapter = netdev_priv(netdev);
1492 adapter->netdev = netdev;
1493 adapter->pdev = pdev;
1494 adapter->dev_rst_time = jiffies;
1495
1496 revision_id = pdev->revision;
1497 adapter->ahw.revision_id = revision_id;
1498
1499 rwlock_init(&adapter->ahw.crb_lock);
1500 mutex_init(&adapter->ahw.mem_lock);
1501
1502 spin_lock_init(&adapter->tx_clean_lock);
1503 INIT_LIST_HEAD(&adapter->mac_list);
1504
1505 err = qlcnic_setup_pci_map(adapter);
1506 if (err)
1507 goto err_out_free_netdev;
1508
1509 /* This will be reset for mezz cards */
1510 adapter->portnum = adapter->ahw.pci_func;
1511
1512 err = qlcnic_get_board_info(adapter);
1513 if (err) {
1514 dev_err(&pdev->dev, "Error getting board config info.\n");
1515 goto err_out_iounmap;
1516 }
1517
1518 if (qlcnic_read_mac_addr(adapter))
1519 dev_warn(&pdev->dev, "failed to read mac addr\n");
1520
1521 err = qlcnic_setup_idc_param(adapter);
1522 if (err)
1523 goto err_out_iounmap;
1524
1525 err = adapter->nic_ops->start_firmware(adapter);
1526 if (err) {
1527 dev_err(&pdev->dev, "Loading fw failed. Please reboot\n");
1528 goto err_out_decr_ref;
1529 }
1530
1531 qlcnic_clear_stats(adapter);
1532
1533 qlcnic_setup_intr(adapter);
1534
1535 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
1536 if (err)
1537 goto err_out_disable_msi;
1538
1539 pci_set_drvdata(pdev, adapter);
1540
1541 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1542
1543 switch (adapter->ahw.port_type) {
1544 case QLCNIC_GBE:
1545 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1546 adapter->netdev->name);
1547 break;
1548 case QLCNIC_XGBE:
1549 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1550 adapter->netdev->name);
1551 break;
1552 }
1553
1554 qlcnic_create_diag_entries(adapter);
1555
1556 return 0;
1557
1558err_out_disable_msi:
1559 qlcnic_teardown_intr(adapter);
1560
1561err_out_decr_ref:
1562 qlcnic_clr_all_drv_state(adapter, 0);
1563
1564err_out_iounmap:
1565 qlcnic_cleanup_pci_map(adapter);
1566
1567err_out_free_netdev:
1568 free_netdev(netdev);
1569
1570err_out_free_res:
1571 pci_release_regions(pdev);
1572
1573err_out_disable_pdev:
1574 pci_set_drvdata(pdev, NULL);
1575 pci_disable_device(pdev);
1576 return err;
1577}
1578
1579static void __devexit qlcnic_remove(struct pci_dev *pdev)
1580{
1581 struct qlcnic_adapter *adapter;
1582 struct net_device *netdev;
1583
1584 adapter = pci_get_drvdata(pdev);
1585 if (adapter == NULL)
1586 return;
1587
1588 netdev = adapter->netdev;
1589
1590 qlcnic_cancel_fw_work(adapter);
1591
1592 unregister_netdev(netdev);
1593
1594 qlcnic_detach(adapter);
1595
1596 if (adapter->npars != NULL)
1597 kfree(adapter->npars);
1598 if (adapter->eswitch != NULL)
1599 kfree(adapter->eswitch);
1600
1601 qlcnic_clr_all_drv_state(adapter, 0);
1602
1603 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1604
1605 qlcnic_teardown_intr(adapter);
1606
1607 qlcnic_remove_diag_entries(adapter);
1608
1609 qlcnic_cleanup_pci_map(adapter);
1610
1611 qlcnic_release_firmware(adapter);
1612
1613 pci_disable_pcie_error_reporting(pdev);
1614 pci_release_regions(pdev);
1615 pci_disable_device(pdev);
1616 pci_set_drvdata(pdev, NULL);
1617
1618 free_netdev(netdev);
1619}
1620static int __qlcnic_shutdown(struct pci_dev *pdev)
1621{
1622 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1623 struct net_device *netdev = adapter->netdev;
1624 int retval;
1625
1626 netif_device_detach(netdev);
1627
1628 qlcnic_cancel_fw_work(adapter);
1629
1630 if (netif_running(netdev))
1631 qlcnic_down(adapter, netdev);
1632
1633 qlcnic_clr_all_drv_state(adapter, 0);
1634
1635 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1636
1637 retval = pci_save_state(pdev);
1638 if (retval)
1639 return retval;
1640
1641 if (qlcnic_wol_supported(adapter)) {
1642 pci_enable_wake(pdev, PCI_D3cold, 1);
1643 pci_enable_wake(pdev, PCI_D3hot, 1);
1644 }
1645
1646 return 0;
1647}
1648
1649static void qlcnic_shutdown(struct pci_dev *pdev)
1650{
1651 if (__qlcnic_shutdown(pdev))
1652 return;
1653
1654 pci_disable_device(pdev);
1655}
1656
1657#ifdef CONFIG_PM
1658static int
1659qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1660{
1661 int retval;
1662
1663 retval = __qlcnic_shutdown(pdev);
1664 if (retval)
1665 return retval;
1666
1667 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1668 return 0;
1669}
1670
1671static int
1672qlcnic_resume(struct pci_dev *pdev)
1673{
1674 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1675 struct net_device *netdev = adapter->netdev;
1676 int err;
1677
1678 err = pci_enable_device(pdev);
1679 if (err)
1680 return err;
1681
1682 pci_set_power_state(pdev, PCI_D0);
1683 pci_set_master(pdev);
1684 pci_restore_state(pdev);
1685
1686 err = adapter->nic_ops->start_firmware(adapter);
1687 if (err) {
1688 dev_err(&pdev->dev, "failed to start firmware\n");
1689 return err;
1690 }
1691
1692 if (netif_running(netdev)) {
1693 err = qlcnic_up(adapter, netdev);
1694 if (err)
1695 goto done;
1696
1697 qlcnic_config_indev_addr(netdev, NETDEV_UP);
1698 }
1699done:
1700 netif_device_attach(netdev);
1701 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1702 return 0;
1703}
1704#endif
1705
1706static int qlcnic_open(struct net_device *netdev)
1707{
1708 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1709 int err;
1710
1711 err = qlcnic_attach(adapter);
1712 if (err)
1713 return err;
1714
1715 err = __qlcnic_up(adapter, netdev);
1716 if (err)
1717 goto err_out;
1718
1719 netif_start_queue(netdev);
1720
1721 return 0;
1722
1723err_out:
1724 qlcnic_detach(adapter);
1725 return err;
1726}
1727
1728/*
1729 * qlcnic_close - Disables a network interface entry point
1730 */
1731static int qlcnic_close(struct net_device *netdev)
1732{
1733 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1734
1735 __qlcnic_down(adapter, netdev);
1736 return 0;
1737}
1738
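/* Set the Tx descriptor opcode/flags for checksum offload or LSO; for LSO
 * the MAC/IP/TCP headers (plus any VLAN header) are copied into the
 * descriptor ring so firmware can replicate them for each segment. */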
1739static void
1740qlcnic_tso_check(struct net_device *netdev,
1741 struct qlcnic_host_tx_ring *tx_ring,
1742 struct cmd_desc_type0 *first_desc,
1743 struct sk_buff *skb)
1744{
1745 u8 opcode = TX_ETHER_PKT;
1746 __be16 protocol = skb->protocol;
1747 u16 flags = 0;
1748 int copied, offset, copy_len, hdr_len = 0, tso = 0;
1749 struct cmd_desc_type0 *hwdesc;
1750 struct vlan_ethhdr *vh;
1751 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1752 u32 producer = tx_ring->producer;
1753 int vlan_oob = first_desc->flags_opcode & cpu_to_le16(FLAGS_VLAN_OOB);
1754
1755 if (*(skb->data) & BIT_0) {
1756 flags |= BIT_0;
1757 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
1758 }
1759
1760 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1761 skb_shinfo(skb)->gso_size > 0) {
1762
1763 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1764
1765 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1766 first_desc->total_hdr_length = hdr_len;
1767 if (vlan_oob) {
1768 first_desc->total_hdr_length += VLAN_HLEN;
1769 first_desc->tcp_hdr_offset = VLAN_HLEN;
1770 first_desc->ip_hdr_offset = VLAN_HLEN;
1771 /* Only in case of TSO on vlan device */
1772 flags |= FLAGS_VLAN_TAGGED;
1773 }
1774
1775 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1776 TX_TCP_LSO6 : TX_TCP_LSO;
1777 tso = 1;
1778
1779 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1780 u8 l4proto;
1781
1782 if (protocol == cpu_to_be16(ETH_P_IP)) {
1783 l4proto = ip_hdr(skb)->protocol;
1784
1785 if (l4proto == IPPROTO_TCP)
1786 opcode = TX_TCP_PKT;
1787 else if (l4proto == IPPROTO_UDP)
1788 opcode = TX_UDP_PKT;
1789 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1790 l4proto = ipv6_hdr(skb)->nexthdr;
1791
1792 if (l4proto == IPPROTO_TCP)
1793 opcode = TX_TCPV6_PKT;
1794 else if (l4proto == IPPROTO_UDP)
1795 opcode = TX_UDPV6_PKT;
1796 }
1797 }
1798
1799 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
1800 first_desc->ip_hdr_offset += skb_network_offset(skb);
1801 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1802
1803 if (!tso)
1804 return;
1805
1806 /* For LSO, we need to copy the MAC/IP/TCP headers into
1807 * the descriptor ring
1808 */
1809 copied = 0;
1810 offset = 2;
1811
1812 if (vlan_oob) {
1813 /* Create a TSO vlan header template for firmware */
1814
1815 hwdesc = &tx_ring->desc_head[producer];
1816 tx_ring->cmd_buf_arr[producer].skb = NULL;
1817
1818 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1819 hdr_len + VLAN_HLEN);
1820
1821 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1822 skb_copy_from_linear_data(skb, vh, 12);
1823 vh->h_vlan_proto = htons(ETH_P_8021Q);
1824 vh->h_vlan_TCI = htons(first_desc->vlan_TCI);
1825 skb_copy_from_linear_data_offset(skb, 12,
1826 (char *)vh + 16, copy_len - 16);
1827
1828 copied = copy_len - VLAN_HLEN;
1829 offset = 0;
1830
1831 producer = get_next_index(producer, tx_ring->num_desc);
1832 }
1833
1834 while (copied < hdr_len) {
1835
1836 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1837 (hdr_len - copied));
1838
1839 hwdesc = &tx_ring->desc_head[producer];
1840 tx_ring->cmd_buf_arr[producer].skb = NULL;
1841
1842 skb_copy_from_linear_data_offset(skb, copied,
1843 (char *)hwdesc + offset, copy_len);
1844
1845 copied += copy_len;
1846 offset = 0;
1847
1848 producer = get_next_index(producer, tx_ring->num_desc);
1849 }
1850
1851 tx_ring->producer = producer;
1852 barrier();
1853 adapter->stats.lso_frames++;
1854}
1855
1856static int
1857qlcnic_map_tx_skb(struct pci_dev *pdev,
1858 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
1859{
1860 struct qlcnic_skb_frag *nf;
1861 struct skb_frag_struct *frag;
1862 int i, nr_frags;
1863 dma_addr_t map;
1864
1865 nr_frags = skb_shinfo(skb)->nr_frags;
1866 nf = &pbuf->frag_array[0];
1867
1868 map = pci_map_single(pdev, skb->data,
1869 skb_headlen(skb), PCI_DMA_TODEVICE);
1870 if (pci_dma_mapping_error(pdev, map))
1871 goto out_err;
1872
1873 nf->dma = map;
1874 nf->length = skb_headlen(skb);
1875
1876 for (i = 0; i < nr_frags; i++) {
1877 frag = &skb_shinfo(skb)->frags[i];
1878 nf = &pbuf->frag_array[i+1];
1879
1880 map = pci_map_page(pdev, frag->page, frag->page_offset,
1881 frag->size, PCI_DMA_TODEVICE);
1882 if (pci_dma_mapping_error(pdev, map))
1883 goto unwind;
1884
1885 nf->dma = map;
1886 nf->length = frag->size;
1887 }
1888
1889 return 0;
1890
1891unwind:
1892 while (--i >= 0) {
1893 nf = &pbuf->frag_array[i+1];
1894 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
1895 }
1896
1897 nf = &pbuf->frag_array[0];
1898 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
1899
1900out_err:
1901 return -ENOMEM;
1902}
1903
8cf61f89
AKS
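/*
 * Work out how the frame is VLAN tagged: an in-band 802.1Q header sets
 * FLAGS_VLAN_TAGGED, an out-of-band tag from the stack sets
 * FLAGS_VLAN_OOB, and a configured port default VLAN (pvid) is applied
 * when the frame carries no tag.  Tagged frames are rejected with -EIO
 * if a pvid is set but tagging is not enabled for this function.
 */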
1904static int
1905qlcnic_check_tx_tagging(struct qlcnic_adapter *adapter,
1906 struct sk_buff *skb,
1907 struct cmd_desc_type0 *first_desc)
1908{
1909 u8 opcode = 0;
1910 u16 flags = 0;
1911 __be16 protocol = skb->protocol;
1912 struct vlan_ethhdr *vh;
1913
1914 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1915 vh = (struct vlan_ethhdr *)skb->data;
1916 protocol = vh->h_vlan_encapsulated_proto;
1917 flags = FLAGS_VLAN_TAGGED;
1918 qlcnic_set_tx_vlan_tci(first_desc, ntohs(vh->h_vlan_TCI));
1919 } else if (vlan_tx_tag_present(skb)) {
1920 flags = FLAGS_VLAN_OOB;
1921 qlcnic_set_tx_vlan_tci(first_desc, vlan_tx_tag_get(skb));
1922 }
1923 if (unlikely(adapter->pvid)) {
1924 if (first_desc->vlan_TCI &&
1925 !(adapter->flags & QLCNIC_TAGGING_ENABLED))
1926 return -EIO;
1927 if (first_desc->vlan_TCI &&
1928 (adapter->flags & QLCNIC_TAGGING_ENABLED))
1929 goto set_flags;
1930
1931 flags = FLAGS_VLAN_OOB;
1932 qlcnic_set_tx_vlan_tci(first_desc, adapter->pvid);
1933 }
1934set_flags:
1935 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1936 return 0;
1937}
1938
af19b491
AKS
1939static inline void
1940qlcnic_clear_cmddesc(u64 *desc)
1941{
1942 desc[0] = 0ULL;
1943 desc[2] = 0ULL;
8cf61f89 1944 desc[7] = 0ULL;
af19b491
AKS
1945}
1946
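/*
 * Main transmit handler: reserve command descriptors (four buffers per
 * descriptor), DMA-map the skb, fill in buffer addresses and lengths,
 * let qlcnic_tso_check() handle TSO/checksum offload and finally
 * advance the producer index so the firmware picks up the work.
 */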
cdaff185 1947netdev_tx_t
af19b491
AKS
1948qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1949{
1950 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1951 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1952 struct qlcnic_cmd_buffer *pbuf;
1953 struct qlcnic_skb_frag *buffrag;
1954 struct cmd_desc_type0 *hwdesc, *first_desc;
1955 struct pci_dev *pdev;
1956 int i, k;
1957
1958 u32 producer;
1959 int frag_count, no_of_desc;
1960 u32 num_txd = tx_ring->num_desc;
1961
780ab790
AKS
1962 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
1963 netif_stop_queue(netdev);
1964 return NETDEV_TX_BUSY;
1965 }
1966
fe4d434d
SC
1967 if (adapter->flags & QLCNIC_MACSPOOF) {
1968 if (compare_ether_addr(eth_hdr(skb)->h_source,
1969 adapter->mac_addr))
1970 goto drop_packet;
1971 }
1972
af19b491
AKS
1973 frag_count = skb_shinfo(skb)->nr_frags + 1;
1974
1975	/* 4 fragments per cmd desc */
1976 no_of_desc = (frag_count + 3) >> 2;
1977
ef71ff83 1978 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
af19b491 1979 netif_stop_queue(netdev);
ef71ff83
RB
1980 smp_mb();
1981 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
1982 netif_start_queue(netdev);
1983 else {
1984 adapter->stats.xmit_off++;
1985 return NETDEV_TX_BUSY;
1986 }
af19b491
AKS
1987 }
1988
1989 producer = tx_ring->producer;
1990 pbuf = &tx_ring->cmd_buf_arr[producer];
1991
1992 pdev = adapter->pdev;
1993
8cf61f89
AKS
1994 first_desc = hwdesc = &tx_ring->desc_head[producer];
1995 qlcnic_clear_cmddesc((u64 *)hwdesc);
1996
1997 if (qlcnic_check_tx_tagging(adapter, skb, first_desc))
1998 goto drop_packet;
1999
8ae6df97
AKS
2000 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
2001 adapter->stats.tx_dma_map_error++;
af19b491 2002 goto drop_packet;
8ae6df97 2003 }
af19b491
AKS
2004
2005 pbuf->skb = skb;
2006 pbuf->frag_count = frag_count;
2007
af19b491
AKS
2008 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
2009 qlcnic_set_tx_port(first_desc, adapter->portnum);
2010
2011 for (i = 0; i < frag_count; i++) {
2012
2013 k = i % 4;
2014
2015 if ((k == 0) && (i > 0)) {
2016			/* move to next desc. */
2017 producer = get_next_index(producer, num_txd);
2018 hwdesc = &tx_ring->desc_head[producer];
2019 qlcnic_clear_cmddesc((u64 *)hwdesc);
2020 tx_ring->cmd_buf_arr[producer].skb = NULL;
2021 }
2022
2023 buffrag = &pbuf->frag_array[i];
2024
2025 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
2026 switch (k) {
2027 case 0:
2028 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
2029 break;
2030 case 1:
2031 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
2032 break;
2033 case 2:
2034 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
2035 break;
2036 case 3:
2037 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
2038 break;
2039 }
2040 }
2041
2042 tx_ring->producer = get_next_index(producer, num_txd);
2043
2044 qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
2045
2046 qlcnic_update_cmd_producer(adapter, tx_ring);
2047
2048 adapter->stats.txbytes += skb->len;
2049 adapter->stats.xmitcalled++;
2050
2051 return NETDEV_TX_OK;
2052
2053drop_packet:
2054 adapter->stats.txdropped++;
2055 dev_kfree_skb_any(skb);
2056 return NETDEV_TX_OK;
2057}
2058
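/*
 * Read the board temperature state from CRB_TEMP_STATE.  Returns 1 in
 * the panic state, which makes the health check detach the interface;
 * the warning state is only logged.
 */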
2059static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
2060{
2061 struct net_device *netdev = adapter->netdev;
2062 u32 temp, temp_state, temp_val;
2063 int rv = 0;
2064
2065 temp = QLCRD32(adapter, CRB_TEMP_STATE);
2066
2067 temp_state = qlcnic_get_temp_state(temp);
2068 temp_val = qlcnic_get_temp_val(temp);
2069
2070 if (temp_state == QLCNIC_TEMP_PANIC) {
2071 dev_err(&netdev->dev,
2072 "Device temperature %d degrees C exceeds"
2073 " maximum allowed. Hardware has been shut down.\n",
2074 temp_val);
2075 rv = 1;
2076 } else if (temp_state == QLCNIC_TEMP_WARN) {
2077 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
2078 dev_err(&netdev->dev,
2079 "Device temperature %d degrees C "
2080 "exceeds operating range."
2081 " Immediate action needed.\n",
2082 temp_val);
2083 }
2084 } else {
2085 if (adapter->temp == QLCNIC_TEMP_WARN) {
2086 dev_info(&netdev->dev,
2087 "Device temperature is now %d degrees C"
2088 " in normal range.\n", temp_val);
2089 }
2090 }
2091 adapter->temp = temp_state;
2092 return rv;
2093}
2094
2095void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
2096{
2097 struct net_device *netdev = adapter->netdev;
2098
2099 if (adapter->ahw.linkup && !linkup) {
69324275 2100 netdev_info(netdev, "NIC Link is down\n");
af19b491
AKS
2101 adapter->ahw.linkup = 0;
2102 if (netif_running(netdev)) {
2103 netif_carrier_off(netdev);
2104 netif_stop_queue(netdev);
2105 }
2106 } else if (!adapter->ahw.linkup && linkup) {
69324275 2107 netdev_info(netdev, "NIC Link is up\n");
af19b491
AKS
2108 adapter->ahw.linkup = 1;
2109 if (netif_running(netdev)) {
2110 netif_carrier_on(netdev);
2111 netif_wake_queue(netdev);
2112 }
2113 }
2114}
2115
2116static void qlcnic_tx_timeout(struct net_device *netdev)
2117{
2118 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2119
2120 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2121 return;
2122
2123 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
af19b491
AKS
2124
2125 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
68bf1c68
AKS
2126 adapter->need_fw_reset = 1;
2127 else
2128 adapter->reset_context = 1;
af19b491
AKS
2129}
2130
2131static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
2132{
2133 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2134 struct net_device_stats *stats = &netdev->stats;
2135
af19b491
AKS
2136 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
2137 stats->tx_packets = adapter->stats.xmitfinished;
7e382594 2138 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
af19b491
AKS
2139 stats->tx_bytes = adapter->stats.txbytes;
2140 stats->rx_dropped = adapter->stats.rxdropped;
2141 stats->tx_dropped = adapter->stats.txdropped;
2142
2143 return stats;
2144}
2145
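/*
 * Legacy (INTx) interrupt handling: verify that the interrupt belongs
 * to this function, ack it through the target status register and
 * flush the write with two reads before reporting IRQ_HANDLED.
 */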
7eb9855d 2146static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
af19b491 2147{
af19b491
AKS
2148 u32 status;
2149
2150 status = readl(adapter->isr_int_vec);
2151
2152 if (!(status & adapter->int_vec_bit))
2153 return IRQ_NONE;
2154
2155 /* check interrupt state machine, to be sure */
2156 status = readl(adapter->crb_int_state_reg);
2157 if (!ISR_LEGACY_INT_TRIGGERED(status))
2158 return IRQ_NONE;
2159
2160 writel(0xffffffff, adapter->tgt_status_reg);
2161 /* read twice to ensure write is flushed */
2162 readl(adapter->isr_int_vec);
2163 readl(adapter->isr_int_vec);
2164
7eb9855d
AKS
2165 return IRQ_HANDLED;
2166}
2167
2168static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
2169{
2170 struct qlcnic_host_sds_ring *sds_ring = data;
2171 struct qlcnic_adapter *adapter = sds_ring->adapter;
2172
2173 if (adapter->flags & QLCNIC_MSIX_ENABLED)
2174 goto done;
2175 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
2176 writel(0xffffffff, adapter->tgt_status_reg);
2177 goto done;
2178 }
2179
2180 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2181 return IRQ_NONE;
2182
2183done:
2184 adapter->diag_cnt++;
2185 qlcnic_enable_int(sds_ring);
2186 return IRQ_HANDLED;
2187}
2188
2189static irqreturn_t qlcnic_intr(int irq, void *data)
2190{
2191 struct qlcnic_host_sds_ring *sds_ring = data;
2192 struct qlcnic_adapter *adapter = sds_ring->adapter;
2193
2194 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2195 return IRQ_NONE;
2196
af19b491
AKS
2197 napi_schedule(&sds_ring->napi);
2198
2199 return IRQ_HANDLED;
2200}
2201
2202static irqreturn_t qlcnic_msi_intr(int irq, void *data)
2203{
2204 struct qlcnic_host_sds_ring *sds_ring = data;
2205 struct qlcnic_adapter *adapter = sds_ring->adapter;
2206
2207 /* clear interrupt */
2208 writel(0xffffffff, adapter->tgt_status_reg);
2209
2210 napi_schedule(&sds_ring->napi);
2211 return IRQ_HANDLED;
2212}
2213
2214static irqreturn_t qlcnic_msix_intr(int irq, void *data)
2215{
2216 struct qlcnic_host_sds_ring *sds_ring = data;
2217
2218 napi_schedule(&sds_ring->napi);
2219 return IRQ_HANDLED;
2220}
2221
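/*
 * Reclaim completed Tx descriptors: walk from the software consumer up
 * to the hardware consumer, unmap and free each skb, and wake the
 * queue once enough descriptors are free again.  Returns non-zero when
 * the ring has caught up with the hardware consumer (or when another
 * context already holds the tx clean lock).
 */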
2222static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
2223{
2224 u32 sw_consumer, hw_consumer;
2225 int count = 0, i;
2226 struct qlcnic_cmd_buffer *buffer;
2227 struct pci_dev *pdev = adapter->pdev;
2228 struct net_device *netdev = adapter->netdev;
2229 struct qlcnic_skb_frag *frag;
2230 int done;
2231 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2232
2233 if (!spin_trylock(&adapter->tx_clean_lock))
2234 return 1;
2235
2236 sw_consumer = tx_ring->sw_consumer;
2237 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2238
2239 while (sw_consumer != hw_consumer) {
2240 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
2241 if (buffer->skb) {
2242 frag = &buffer->frag_array[0];
2243 pci_unmap_single(pdev, frag->dma, frag->length,
2244 PCI_DMA_TODEVICE);
2245 frag->dma = 0ULL;
2246 for (i = 1; i < buffer->frag_count; i++) {
2247 frag++;
2248 pci_unmap_page(pdev, frag->dma, frag->length,
2249 PCI_DMA_TODEVICE);
2250 frag->dma = 0ULL;
2251 }
2252
2253 adapter->stats.xmitfinished++;
2254 dev_kfree_skb_any(buffer->skb);
2255 buffer->skb = NULL;
2256 }
2257
2258 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
2259 if (++count >= MAX_STATUS_HANDLE)
2260 break;
2261 }
2262
2263 if (count && netif_running(netdev)) {
2264 tx_ring->sw_consumer = sw_consumer;
2265
2266 smp_mb();
2267
2268 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
af19b491
AKS
2269 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
2270 netif_wake_queue(netdev);
8bfe8b91 2271 adapter->stats.xmit_on++;
af19b491 2272 }
af19b491 2273 }
ef71ff83 2274 adapter->tx_timeo_cnt = 0;
af19b491
AKS
2275 }
2276 /*
2277	 * If everything is freed up to consumer then check if the ring is full.
2278 * If the ring is full then check if more needs to be freed and
2279 * schedule the call back again.
2280 *
2281 * This happens when there are 2 CPUs. One could be freeing and the
2282 * other filling it. If the ring is full when we get out of here and
2283 * the card has already interrupted the host then the host can miss the
2284 * interrupt.
2285 *
2286 * There is still a possible race condition and the host could miss an
2287 * interrupt. The card has to take care of this.
2288 */
2289 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2290 done = (sw_consumer == hw_consumer);
2291 spin_unlock(&adapter->tx_clean_lock);
2292
2293 return done;
2294}
2295
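/*
 * NAPI poll: complete Tx reclaim first, then process up to 'budget'
 * receive descriptors.  Interrupts for this ring are re-enabled only
 * when the Rx work fit in the budget and Tx cleanup finished.
 */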
2296static int qlcnic_poll(struct napi_struct *napi, int budget)
2297{
2298 struct qlcnic_host_sds_ring *sds_ring =
2299 container_of(napi, struct qlcnic_host_sds_ring, napi);
2300
2301 struct qlcnic_adapter *adapter = sds_ring->adapter;
2302
2303 int tx_complete;
2304 int work_done;
2305
2306 tx_complete = qlcnic_process_cmd_ring(adapter);
2307
2308 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2309
2310 if ((work_done < budget) && tx_complete) {
2311 napi_complete(&sds_ring->napi);
2312 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2313 qlcnic_enable_int(sds_ring);
2314 }
2315
2316 return work_done;
2317}
2318
8f891387 2319static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2320{
2321 struct qlcnic_host_sds_ring *sds_ring =
2322 container_of(napi, struct qlcnic_host_sds_ring, napi);
2323
2324 struct qlcnic_adapter *adapter = sds_ring->adapter;
2325 int work_done;
2326
2327 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2328
2329 if (work_done < budget) {
2330 napi_complete(&sds_ring->napi);
2331 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2332 qlcnic_enable_int(sds_ring);
2333 }
2334
2335 return work_done;
2336}
2337
af19b491
AKS
2338#ifdef CONFIG_NET_POLL_CONTROLLER
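/*
 * Note: qlcnic_intr() treats its 'data' argument as a
 * qlcnic_host_sds_ring, while this netpoll hook passes the adapter
 * pointer; looping over the sds rings and passing each ring would
 * likely be the safer way to invoke the handler here.
 */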
2339static void qlcnic_poll_controller(struct net_device *netdev)
2340{
2341 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2342 disable_irq(adapter->irq);
2343 qlcnic_intr(adapter->irq, adapter);
2344 enable_irq(adapter->irq);
2345}
2346#endif
2347
6df900e9
SC
2348static void
2349qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
2350{
2351 u32 val;
2352
2353 val = adapter->portnum & 0xf;
2354 val |= encoding << 7;
2355 val |= (jiffies - adapter->dev_rst_time) << 8;
2356
2357 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
2358 adapter->dev_rst_time = jiffies;
2359}
2360
ade91f8e
AKS
2361static int
2362qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
af19b491
AKS
2363{
2364 u32 val;
2365
2366 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
2367 state != QLCNIC_DEV_NEED_QUISCENT);
2368
2369 if (qlcnic_api_lock(adapter))
ade91f8e 2370 return -EIO;
af19b491
AKS
2371
2372 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2373
2374 if (state == QLCNIC_DEV_NEED_RESET)
6d2a4724 2375 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
af19b491 2376 else if (state == QLCNIC_DEV_NEED_QUISCENT)
6d2a4724 2377 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
af19b491
AKS
2378
2379 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2380
2381 qlcnic_api_unlock(adapter);
ade91f8e
AKS
2382
2383 return 0;
af19b491
AKS
2384}
2385
1b95a839
AKS
2386static int
2387qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2388{
2389 u32 val;
2390
2391 if (qlcnic_api_lock(adapter))
2392 return -EBUSY;
2393
2394 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2395 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
1b95a839
AKS
2396 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2397
2398 qlcnic_api_unlock(adapter);
2399
2400 return 0;
2401}
2402
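/*
 * Drop this function's reference from DRV_ACTIVE and clear its
 * reset/quiescent ready bits.  With 'failed' set the device state is
 * forced to FAILED; otherwise, if no other function holds a reference,
 * the device is returned to COLD.
 */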
af19b491 2403static void
21854f02 2404qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
af19b491
AKS
2405{
2406 u32 val;
2407
2408 if (qlcnic_api_lock(adapter))
2409 goto err;
2410
31018e06 2411 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724 2412 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
31018e06 2413 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491 2414
21854f02
AKS
2415 if (failed) {
2416 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
2417 dev_info(&adapter->pdev->dev,
2418 "Device state set to Failed. Please Reboot\n");
2419 } else if (!(val & 0x11111111))
af19b491
AKS
2420 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2421
2422 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2423 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
af19b491
AKS
2424 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2425
2426 qlcnic_api_unlock(adapter);
2427err:
2428 adapter->fw_fail_cnt = 0;
2429 clear_bit(__QLCNIC_START_FW, &adapter->state);
2430 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2431}
2432
f73dfc50 2433/* Grab the api lock before checking the state */
af19b491
AKS
2434static int
2435qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2436{
2437 int act, state;
2438
2439 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
31018e06 2440 act = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
af19b491
AKS
2441
2442 if (((state & 0x11111111) == (act & 0x11111111)) ||
2443 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2444 return 0;
2445 else
2446 return 1;
2447}
2448
96f8118c
SC
2449static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
2450{
2451 u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
2452
2453 if (val != QLCNIC_DRV_IDC_VER) {
2454		dev_warn(&adapter->pdev->dev, "IDC version mismatch:"
2455			" driver = %x, device = %x\n", QLCNIC_DRV_IDC_VER, val);
2456 }
2457
2458 return 0;
2459}
2460
af19b491
AKS
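/*
 * IDC arbitration for firmware bring-up: register this function in
 * DRV_ACTIVE and act on the current device state.  Returns 1 when this
 * function should (re)load the firmware, 0 when the firmware is
 * already up, and a negative value on failure or timeout.
 */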
2461static int
2462qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2463{
2464 u32 val, prev_state;
aa5e18c0 2465 u8 dev_init_timeo = adapter->dev_init_timeo;
6d2a4724 2466 u8 portnum = adapter->portnum;
96f8118c 2467 u8 ret;
af19b491 2468
f73dfc50
AKS
2469 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2470 return 1;
2471
af19b491
AKS
2472 if (qlcnic_api_lock(adapter))
2473 return -1;
2474
31018e06 2475 val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
6d2a4724
AKS
2476 if (!(val & (1 << (portnum * 4)))) {
2477 QLC_DEV_SET_REF_CNT(val, portnum);
31018e06 2478 QLCWR32(adapter, QLCNIC_CRB_DRV_ACTIVE, val);
af19b491
AKS
2479 }
2480
2481 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
65b5b420 2482 QLCDB(adapter, HW, "Device state = %u\n", prev_state);
af19b491
AKS
2483
2484 switch (prev_state) {
2485 case QLCNIC_DEV_COLD:
bbd8c6a4 2486 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
96f8118c 2487 QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
6df900e9 2488 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2489 qlcnic_api_unlock(adapter);
2490 return 1;
2491
2492 case QLCNIC_DEV_READY:
96f8118c 2493 ret = qlcnic_check_idc_ver(adapter);
af19b491 2494 qlcnic_api_unlock(adapter);
96f8118c 2495 return ret;
af19b491
AKS
2496
2497 case QLCNIC_DEV_NEED_RESET:
2498 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2499 QLC_DEV_SET_RST_RDY(val, portnum);
af19b491
AKS
2500 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2501 break;
2502
2503 case QLCNIC_DEV_NEED_QUISCENT:
2504 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2505 QLC_DEV_SET_QSCNT_RDY(val, portnum);
af19b491
AKS
2506 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2507 break;
2508
2509 case QLCNIC_DEV_FAILED:
a7fc948f 2510 dev_err(&adapter->pdev->dev, "Device in failed state.\n");
af19b491
AKS
2511 qlcnic_api_unlock(adapter);
2512 return -1;
bbd8c6a4
AKS
2513
2514 case QLCNIC_DEV_INITIALIZING:
2515 case QLCNIC_DEV_QUISCENT:
2516 break;
af19b491
AKS
2517 }
2518
2519 qlcnic_api_unlock(adapter);
aa5e18c0
SC
2520
2521 do {
af19b491 2522 msleep(1000);
a5e463d0
SC
2523 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2524
2525 if (prev_state == QLCNIC_DEV_QUISCENT)
2526 continue;
2527 } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
af19b491 2528
65b5b420
AKS
2529 if (!dev_init_timeo) {
2530 dev_err(&adapter->pdev->dev,
2531 "Waiting for device to initialize timeout\n");
af19b491 2532 return -1;
65b5b420 2533 }
af19b491
AKS
2534
2535 if (qlcnic_api_lock(adapter))
2536 return -1;
2537
2538 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2539 QLC_DEV_CLR_RST_QSCNT(val, portnum);
af19b491
AKS
2540 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2541
96f8118c 2542 ret = qlcnic_check_idc_ver(adapter);
af19b491
AKS
2543 qlcnic_api_unlock(adapter);
2544
96f8118c 2545 return ret;
af19b491
AKS
2546}
2547
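/*
 * Delayed work run while the device is being reset.  Privileged
 * functions wait for the active functions to ack the reset, drive the
 * QUISCENT/INITIALIZING transitions and restart the firmware;
 * non-privileged functions just wait for the device to reach READY and
 * then re-attach.
 */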
2548static void
2549qlcnic_fwinit_work(struct work_struct *work)
2550{
2551 struct qlcnic_adapter *adapter = container_of(work,
2552 struct qlcnic_adapter, fw_work.work);
3c4b23b1 2553 u32 dev_state = 0xf;
af19b491 2554
f73dfc50
AKS
2555 if (qlcnic_api_lock(adapter))
2556 goto err_ret;
af19b491 2557
a5e463d0
SC
2558 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2559 if (dev_state == QLCNIC_DEV_QUISCENT) {
2560 qlcnic_api_unlock(adapter);
2561 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2562 FW_POLL_DELAY * 2);
2563 return;
2564 }
2565
9f26f547 2566 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
3c4b23b1
AKS
2567 qlcnic_api_unlock(adapter);
2568 goto wait_npar;
9f26f547
AC
2569 }
2570
f73dfc50
AKS
2571 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
2572		dev_err(&adapter->pdev->dev, "Reset: failed to get ack in %d sec\n",
2573 adapter->reset_ack_timeo);
2574 goto skip_ack_check;
2575 }
2576
2577 if (!qlcnic_check_drv_state(adapter)) {
2578skip_ack_check:
2579 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
a5e463d0
SC
2580
2581 if (dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2582 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2583 QLCNIC_DEV_QUISCENT);
2584 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2585 FW_POLL_DELAY * 2);
2586			QLCDB(adapter, DRV, "Quiescing the driver\n");
6df900e9
SC
2587 qlcnic_idc_debug_info(adapter, 0);
2588
a5e463d0
SC
2589 qlcnic_api_unlock(adapter);
2590 return;
2591 }
2592
f73dfc50
AKS
2593 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2594 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2595 QLCNIC_DEV_INITIALIZING);
2596 set_bit(__QLCNIC_START_FW, &adapter->state);
2597 QLCDB(adapter, DRV, "Restarting fw\n");
6df900e9 2598 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2599 }
2600
f73dfc50
AKS
2601 qlcnic_api_unlock(adapter);
2602
9f26f547 2603 if (!adapter->nic_ops->start_firmware(adapter)) {
af19b491 2604 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2605 adapter->fw_wait_cnt = 0;
af19b491
AKS
2606 return;
2607 }
af19b491
AKS
2608 goto err_ret;
2609 }
2610
f73dfc50 2611 qlcnic_api_unlock(adapter);
aa5e18c0 2612
9f26f547 2613wait_npar:
af19b491 2614 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
f73dfc50 2615 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
65b5b420 2616
af19b491 2617 switch (dev_state) {
3c4b23b1 2618 case QLCNIC_DEV_READY:
9f26f547 2619 if (!adapter->nic_ops->start_firmware(adapter)) {
f73dfc50 2620 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
b18971d1 2621 adapter->fw_wait_cnt = 0;
f73dfc50
AKS
2622 return;
2623 }
3c4b23b1
AKS
2624 case QLCNIC_DEV_FAILED:
2625 break;
2626 default:
2627 qlcnic_schedule_work(adapter,
2628 qlcnic_fwinit_work, FW_POLL_DELAY);
2629 return;
af19b491
AKS
2630 }
2631
2632err_ret:
f73dfc50
AKS
2633 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
2634 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
34ce3626 2635 netif_device_attach(adapter->netdev);
21854f02 2636 qlcnic_clr_all_drv_state(adapter, 0);
af19b491
AKS
2637}
2638
2639static void
2640qlcnic_detach_work(struct work_struct *work)
2641{
2642 struct qlcnic_adapter *adapter = container_of(work,
2643 struct qlcnic_adapter, fw_work.work);
2644 struct net_device *netdev = adapter->netdev;
2645 u32 status;
2646
2647 netif_device_detach(netdev);
2648
2649 qlcnic_down(adapter, netdev);
2650
af19b491
AKS
2651 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2652
2653 if (status & QLCNIC_RCODE_FATAL_ERROR)
2654 goto err_ret;
2655
2656 if (adapter->temp == QLCNIC_TEMP_PANIC)
2657 goto err_ret;
2658
ade91f8e
AKS
2659 if (qlcnic_set_drv_state(adapter, adapter->dev_state))
2660 goto err_ret;
af19b491
AKS
2661
2662 adapter->fw_wait_cnt = 0;
2663
2664 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2665
2666 return;
2667
2668err_ret:
65b5b420
AKS
2669 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
2670 status, adapter->temp);
34ce3626 2671 netif_device_attach(netdev);
21854f02 2672 qlcnic_clr_all_drv_state(adapter, 1);
af19b491
AKS
2673}
2674
3c4b23b1
AKS
2675/* Transition NPAR state to NON operational */
2676static void
2677qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
2678{
2679 u32 state;
2680
2681 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2682 if (state == QLCNIC_DEV_NPAR_NON_OPER)
2683 return;
2684
2685 if (qlcnic_api_lock(adapter))
2686 return;
2687 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2688 qlcnic_api_unlock(adapter);
2689}
2690
f73dfc50 2691/* Transition to RESET state from READY state only */
af19b491
AKS
2692static void
2693qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2694{
2695 u32 state;
2696
cea8975e 2697 adapter->need_fw_reset = 1;
af19b491
AKS
2698 if (qlcnic_api_lock(adapter))
2699 return;
2700
2701 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2702
f73dfc50 2703 if (state == QLCNIC_DEV_READY) {
af19b491 2704 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
65b5b420 2705 QLCDB(adapter, DRV, "NEED_RESET state set\n");
6df900e9 2706 qlcnic_idc_debug_info(adapter, 0);
af19b491
AKS
2707 }
2708
3c4b23b1 2709 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
af19b491
AKS
2710 qlcnic_api_unlock(adapter);
2711}
2712
9f26f547
AC
2713/* Transition to NPAR READY state from NPAR NOT READY state */
2714static void
2715qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
2716{
cea8975e 2717 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
3c4b23b1 2718 adapter->op_mode != QLCNIC_MGMT_FUNC)
cea8975e 2719 return;
9f26f547
AC
2720 if (qlcnic_api_lock(adapter))
2721 return;
2722
3c4b23b1
AKS
2723 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
2724 QLCDB(adapter, DRV, "NPAR operational state set\n");
9f26f547
AC
2725
2726 qlcnic_api_unlock(adapter);
2727}
2728
af19b491
AKS
2729static void
2730qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2731 work_func_t func, int delay)
2732{
451724c8
SC
2733 if (test_bit(__QLCNIC_AER, &adapter->state))
2734 return;
2735
af19b491
AKS
2736 INIT_DELAYED_WORK(&adapter->fw_work, func);
2737 schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay));
2738}
2739
2740static void
2741qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
2742{
2743 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2744 msleep(10);
2745
2746 cancel_delayed_work_sync(&adapter->fw_work);
2747}
2748
2749static void
2750qlcnic_attach_work(struct work_struct *work)
2751{
2752 struct qlcnic_adapter *adapter = container_of(work,
2753 struct qlcnic_adapter, fw_work.work);
2754 struct net_device *netdev = adapter->netdev;
b18971d1 2755 u32 npar_state;
af19b491 2756
b18971d1
AKS
2757 if (adapter->op_mode != QLCNIC_MGMT_FUNC) {
2758 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2759 if (adapter->fw_wait_cnt++ > QLCNIC_DEV_NPAR_OPER_TIMEO)
2760 qlcnic_clr_all_drv_state(adapter, 0);
2761 else if (npar_state != QLCNIC_DEV_NPAR_OPER)
2762 qlcnic_schedule_work(adapter, qlcnic_attach_work,
2763 FW_POLL_DELAY);
2764 else
2765 goto attach;
2766		QLCDB(adapter, DRV, "Waiting for NPAR state to become operational\n");
2767 return;
2768 }
2769attach:
af19b491 2770 if (netif_running(netdev)) {
52486a3a 2771 if (qlcnic_up(adapter, netdev))
af19b491 2772 goto done;
af19b491
AKS
2773
2774 qlcnic_config_indev_addr(netdev, NETDEV_UP);
2775 }
2776
af19b491 2777done:
34ce3626 2778 netif_device_attach(netdev);
af19b491
AKS
2779 adapter->fw_fail_cnt = 0;
2780 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1b95a839
AKS
2781
2782 if (!qlcnic_clr_drv_state(adapter))
2783 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2784 FW_POLL_DELAY);
af19b491
AKS
2785}
2786
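/*
 * Periodic health check run from the firmware poll work: monitors the
 * temperature state, the IDC device state and the firmware heartbeat
 * counter.  Returns 1 and, with auto reset enabled, schedules the
 * detach work when recovery is needed; returns 0 otherwise.
 */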
2787static int
2788qlcnic_check_health(struct qlcnic_adapter *adapter)
2789{
2790 u32 state = 0, heartbit;
2791 struct net_device *netdev = adapter->netdev;
2792
2793 if (qlcnic_check_temp(adapter))
2794 goto detach;
2795
2372a5f1 2796 if (adapter->need_fw_reset)
af19b491 2797 qlcnic_dev_request_reset(adapter);
af19b491
AKS
2798
2799 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
3c4b23b1
AKS
2800 if (state == QLCNIC_DEV_NEED_RESET ||
2801 state == QLCNIC_DEV_NEED_QUISCENT) {
2802 qlcnic_set_npar_non_operational(adapter);
af19b491 2803 adapter->need_fw_reset = 1;
3c4b23b1 2804 }
af19b491
AKS
2805
2806 heartbit = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
2807 if (heartbit != adapter->heartbit) {
2808 adapter->heartbit = heartbit;
2809 adapter->fw_fail_cnt = 0;
2810 if (adapter->need_fw_reset)
2811 goto detach;
68bf1c68 2812
0df170b6
AKS
2813 if (adapter->reset_context &&
2814 auto_fw_reset == AUTO_FW_RESET_ENABLED) {
68bf1c68
AKS
2815 qlcnic_reset_hw_context(adapter);
2816 adapter->netdev->trans_start = jiffies;
2817 }
2818
af19b491
AKS
2819 return 0;
2820 }
2821
2822 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
2823 return 0;
2824
2825 qlcnic_dev_request_reset(adapter);
2826
0df170b6
AKS
2827 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED))
2828 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
af19b491
AKS
2829
2830 dev_info(&netdev->dev, "firmware hang detected\n");
2831
2832detach:
2833 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
2834 QLCNIC_DEV_NEED_RESET;
2835
2836 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
65b5b420
AKS
2837 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
2838
af19b491 2839 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
65b5b420
AKS
2840 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
2841 }
af19b491
AKS
2842
2843 return 1;
2844}
2845
2846static void
2847qlcnic_fw_poll_work(struct work_struct *work)
2848{
2849 struct qlcnic_adapter *adapter = container_of(work,
2850 struct qlcnic_adapter, fw_work.work);
2851
2852 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2853 goto reschedule;
2854
2855
2856 if (qlcnic_check_health(adapter))
2857 return;
2858
2859reschedule:
2860 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
2861}
2862
451724c8
SC
2863static int qlcnic_is_first_func(struct pci_dev *pdev)
2864{
2865 struct pci_dev *oth_pdev;
2866 int val = pdev->devfn;
2867
2868 while (val-- > 0) {
2869 oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
2870 (pdev->bus), pdev->bus->number,
2871 PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
bfc978fa
AKS
2872 if (!oth_pdev)
2873 continue;
451724c8 2874
bfc978fa
AKS
2875 if (oth_pdev->current_state != PCI_D3cold) {
2876 pci_dev_put(oth_pdev);
451724c8 2877 return 0;
bfc978fa
AKS
2878 }
2879 pci_dev_put(oth_pdev);
451724c8
SC
2880 }
2881 return 1;
2882}
2883
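/*
 * AER recovery helper: re-enable the PCI device, have the firmware
 * restarted (the first powered-up, privileged function requests the
 * restart) and bring the interface back up if it was running.
 */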
2884static int qlcnic_attach_func(struct pci_dev *pdev)
2885{
2886 int err, first_func;
2887 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2888 struct net_device *netdev = adapter->netdev;
2889
2890 pdev->error_state = pci_channel_io_normal;
2891
2892 err = pci_enable_device(pdev);
2893 if (err)
2894 return err;
2895
2896 pci_set_power_state(pdev, PCI_D0);
2897 pci_set_master(pdev);
2898 pci_restore_state(pdev);
2899
2900 first_func = qlcnic_is_first_func(pdev);
2901
2902 if (qlcnic_api_lock(adapter))
2903 return -EINVAL;
2904
933fce12 2905 if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
451724c8
SC
2906 adapter->need_fw_reset = 1;
2907 set_bit(__QLCNIC_START_FW, &adapter->state);
2908 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
2909 QLCDB(adapter, DRV, "Restarting fw\n");
2910 }
2911 qlcnic_api_unlock(adapter);
2912
2913 err = adapter->nic_ops->start_firmware(adapter);
2914 if (err)
2915 return err;
2916
2917 qlcnic_clr_drv_state(adapter);
2918 qlcnic_setup_intr(adapter);
2919
2920 if (netif_running(netdev)) {
2921 err = qlcnic_attach(adapter);
2922 if (err) {
21854f02 2923 qlcnic_clr_all_drv_state(adapter, 1);
451724c8
SC
2924 clear_bit(__QLCNIC_AER, &adapter->state);
2925 netif_device_attach(netdev);
2926 return err;
2927 }
2928
2929 err = qlcnic_up(adapter, netdev);
2930 if (err)
2931 goto done;
2932
2933 qlcnic_config_indev_addr(netdev, NETDEV_UP);
2934 }
2935 done:
2936 netif_device_attach(netdev);
2937 return err;
2938}
2939
2940static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
2941 pci_channel_state_t state)
2942{
2943 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2944 struct net_device *netdev = adapter->netdev;
2945
2946 if (state == pci_channel_io_perm_failure)
2947 return PCI_ERS_RESULT_DISCONNECT;
2948
2949 if (state == pci_channel_io_normal)
2950 return PCI_ERS_RESULT_RECOVERED;
2951
2952 set_bit(__QLCNIC_AER, &adapter->state);
2953 netif_device_detach(netdev);
2954
2955 cancel_delayed_work_sync(&adapter->fw_work);
2956
2957 if (netif_running(netdev))
2958 qlcnic_down(adapter, netdev);
2959
2960 qlcnic_detach(adapter);
2961 qlcnic_teardown_intr(adapter);
2962
2963 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2964
2965 pci_save_state(pdev);
2966 pci_disable_device(pdev);
2967
2968 return PCI_ERS_RESULT_NEED_RESET;
2969}
2970
2971static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
2972{
2973 return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
2974 PCI_ERS_RESULT_RECOVERED;
2975}
2976
2977static void qlcnic_io_resume(struct pci_dev *pdev)
2978{
2979 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2980
2981 pci_cleanup_aer_uncorrect_error_status(pdev);
2982
2983 if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
2984 test_and_clear_bit(__QLCNIC_AER, &adapter->state))
2985 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2986 FW_POLL_DELAY);
2987}
2988
87eb743b
AC
2989static int
2990qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
2991{
2992 int err;
2993
2994 err = qlcnic_can_start_firmware(adapter);
2995 if (err)
2996 return err;
2997
78f84e1a
AKS
2998 err = qlcnic_check_npar_opertional(adapter);
2999 if (err)
3000 return err;
3c4b23b1 3001
87eb743b
AC
3002 qlcnic_check_options(adapter);
3003
3004 adapter->need_fw_reset = 0;
3005
3006 return err;
3007}
3008
3009static int
3010qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
3011{
3012 return -EOPNOTSUPP;
3013}
3014
3015static int
3016qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
3017{
3018 return -EOPNOTSUPP;
3019}
3020
af19b491
AKS
3021static ssize_t
3022qlcnic_store_bridged_mode(struct device *dev,
3023 struct device_attribute *attr, const char *buf, size_t len)
3024{
3025 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3026 unsigned long new;
3027 int ret = -EINVAL;
3028
3029 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
3030 goto err_out;
3031
8a15ad1f 3032 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
af19b491
AKS
3033 goto err_out;
3034
3035 if (strict_strtoul(buf, 2, &new))
3036 goto err_out;
3037
2e9d722d 3038 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
af19b491
AKS
3039 ret = len;
3040
3041err_out:
3042 return ret;
3043}
3044
3045static ssize_t
3046qlcnic_show_bridged_mode(struct device *dev,
3047 struct device_attribute *attr, char *buf)
3048{
3049 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3050 int bridged_mode = 0;
3051
3052 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3053 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
3054
3055 return sprintf(buf, "%d\n", bridged_mode);
3056}
3057
3058static struct device_attribute dev_attr_bridged_mode = {
3059 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
3060 .show = qlcnic_show_bridged_mode,
3061 .store = qlcnic_store_bridged_mode,
3062};
3063
3064static ssize_t
3065qlcnic_store_diag_mode(struct device *dev,
3066 struct device_attribute *attr, const char *buf, size_t len)
3067{
3068 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3069 unsigned long new;
3070
3071 if (strict_strtoul(buf, 2, &new))
3072 return -EINVAL;
3073
3074 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
3075 adapter->flags ^= QLCNIC_DIAG_ENABLED;
3076
3077 return len;
3078}
3079
3080static ssize_t
3081qlcnic_show_diag_mode(struct device *dev,
3082 struct device_attribute *attr, char *buf)
3083{
3084 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3085
3086 return sprintf(buf, "%d\n",
3087 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
3088}
3089
3090static struct device_attribute dev_attr_diag_mode = {
3091 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
3092 .show = qlcnic_show_diag_mode,
3093 .store = qlcnic_store_diag_mode,
3094};
3095
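/*
 * The "crb" and "mem" binary attributes below give diagnostic tools
 * raw access to CRB registers and on-card memory.  Accesses are only
 * honoured while diag mode is enabled and must match the exact
 * register/word size and alignment.
 */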
3096static int
3097qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
3098 loff_t offset, size_t size)
3099{
897e8c7c
DP
3100 size_t crb_size = 4;
3101
af19b491
AKS
3102 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3103 return -EIO;
3104
897e8c7c
DP
3105 if (offset < QLCNIC_PCI_CRBSPACE) {
3106 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
3107 QLCNIC_PCI_CAMQM_END))
3108 crb_size = 8;
3109 else
3110 return -EINVAL;
3111 }
af19b491 3112
897e8c7c
DP
3113 if ((size != crb_size) || (offset & (crb_size-1)))
3114 return -EINVAL;
af19b491
AKS
3115
3116 return 0;
3117}
3118
3119static ssize_t
2c3c8bea
CW
3120qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
3121 struct bin_attribute *attr,
af19b491
AKS
3122 char *buf, loff_t offset, size_t size)
3123{
3124 struct device *dev = container_of(kobj, struct device, kobj);
3125 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3126 u32 data;
897e8c7c 3127 u64 qmdata;
af19b491
AKS
3128 int ret;
3129
3130 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3131 if (ret != 0)
3132 return ret;
3133
897e8c7c
DP
3134 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3135 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
3136 memcpy(buf, &qmdata, size);
3137 } else {
3138 data = QLCRD32(adapter, offset);
3139 memcpy(buf, &data, size);
3140 }
af19b491
AKS
3141 return size;
3142}
3143
3144static ssize_t
2c3c8bea
CW
3145qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
3146 struct bin_attribute *attr,
af19b491
AKS
3147 char *buf, loff_t offset, size_t size)
3148{
3149 struct device *dev = container_of(kobj, struct device, kobj);
3150 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3151 u32 data;
897e8c7c 3152 u64 qmdata;
af19b491
AKS
3153 int ret;
3154
3155 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3156 if (ret != 0)
3157 return ret;
3158
897e8c7c
DP
3159 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3160 memcpy(&qmdata, buf, size);
3161 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
3162 } else {
3163 memcpy(&data, buf, size);
3164 QLCWR32(adapter, offset, data);
3165 }
af19b491
AKS
3166 return size;
3167}
3168
3169static int
3170qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
3171 loff_t offset, size_t size)
3172{
3173 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3174 return -EIO;
3175
3176 if ((size != 8) || (offset & 0x7))
3177 return -EIO;
3178
3179 return 0;
3180}
3181
3182static ssize_t
2c3c8bea
CW
3183qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
3184 struct bin_attribute *attr,
af19b491
AKS
3185 char *buf, loff_t offset, size_t size)
3186{
3187 struct device *dev = container_of(kobj, struct device, kobj);
3188 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3189 u64 data;
3190 int ret;
3191
3192 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3193 if (ret != 0)
3194 return ret;
3195
3196 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
3197 return -EIO;
3198
3199 memcpy(buf, &data, size);
3200
3201 return size;
3202}
3203
3204static ssize_t
2c3c8bea
CW
3205qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
3206 struct bin_attribute *attr,
af19b491
AKS
3207 char *buf, loff_t offset, size_t size)
3208{
3209 struct device *dev = container_of(kobj, struct device, kobj);
3210 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3211 u64 data;
3212 int ret;
3213
3214 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3215 if (ret != 0)
3216 return ret;
3217
3218 memcpy(&data, buf, size);
3219
3220 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
3221 return -EIO;
3222
3223 return size;
3224}
3225
3226
3227static struct bin_attribute bin_attr_crb = {
3228 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
3229 .size = 0,
3230 .read = qlcnic_sysfs_read_crb,
3231 .write = qlcnic_sysfs_write_crb,
3232};
3233
3234static struct bin_attribute bin_attr_mem = {
3235 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
3236 .size = 0,
3237 .read = qlcnic_sysfs_read_mem,
3238 .write = qlcnic_sysfs_write_mem,
3239};
3240
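/*
 * The binary attributes below expose per-function port-mirroring,
 * eswitch and NPAR configuration.  Each write handler validates the
 * user-supplied array of config records before applying it through the
 * corresponding qlcnic_config_*()/qlcnic_set_*() calls.
 */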
cea8975e 3241static int
346fe763
RB
3242validate_pm_config(struct qlcnic_adapter *adapter,
3243 struct qlcnic_pm_func_cfg *pm_cfg, int count)
3244{
3245
3246 u8 src_pci_func, s_esw_id, d_esw_id;
3247 u8 dest_pci_func;
3248 int i;
3249
3250 for (i = 0; i < count; i++) {
3251 src_pci_func = pm_cfg[i].pci_func;
3252 dest_pci_func = pm_cfg[i].dest_npar;
3253 if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
3254 || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
3255 return QL_STATUS_INVALID_PARAM;
3256
3257 if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
3258 return QL_STATUS_INVALID_PARAM;
3259
3260 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
3261 return QL_STATUS_INVALID_PARAM;
3262
346fe763
RB
3263 s_esw_id = adapter->npars[src_pci_func].phy_port;
3264 d_esw_id = adapter->npars[dest_pci_func].phy_port;
3265
3266 if (s_esw_id != d_esw_id)
3267 return QL_STATUS_INVALID_PARAM;
3268
3269 }
3270 return 0;
3271
3272}
3273
3274static ssize_t
3275qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3276 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3277{
3278 struct device *dev = container_of(kobj, struct device, kobj);
3279 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3280 struct qlcnic_pm_func_cfg *pm_cfg;
3281 u32 id, action, pci_func;
3282 int count, rem, i, ret;
3283
3284 count = size / sizeof(struct qlcnic_pm_func_cfg);
3285 rem = size % sizeof(struct qlcnic_pm_func_cfg);
3286 if (rem)
3287 return QL_STATUS_INVALID_PARAM;
3288
3289 pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
3290
3291 ret = validate_pm_config(adapter, pm_cfg, count);
3292 if (ret)
3293 return ret;
3294 for (i = 0; i < count; i++) {
3295 pci_func = pm_cfg[i].pci_func;
4e8acb01 3296 action = !!pm_cfg[i].action;
346fe763
RB
3297 id = adapter->npars[pci_func].phy_port;
3298 ret = qlcnic_config_port_mirroring(adapter, id,
3299 action, pci_func);
3300 if (ret)
3301 return ret;
3302 }
3303
3304 for (i = 0; i < count; i++) {
3305 pci_func = pm_cfg[i].pci_func;
3306 id = adapter->npars[pci_func].phy_port;
4e8acb01 3307 adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
346fe763
RB
3308 adapter->npars[pci_func].dest_npar = id;
3309 }
3310 return size;
3311}
3312
3313static ssize_t
3314qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
3315 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3316{
3317 struct device *dev = container_of(kobj, struct device, kobj);
3318 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3319 struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
3320 int i;
3321
3322 if (size != sizeof(pm_cfg))
3323 return QL_STATUS_INVALID_PARAM;
3324
3325 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3326 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3327 continue;
3328 pm_cfg[i].action = adapter->npars[i].enable_pm;
3329 pm_cfg[i].dest_npar = 0;
3330 pm_cfg[i].pci_func = i;
3331 }
3332 memcpy(buf, &pm_cfg, size);
3333
3334 return size;
3335}
3336
cea8975e 3337static int
346fe763 3338validate_esw_config(struct qlcnic_adapter *adapter,
4e8acb01 3339 struct qlcnic_esw_func_cfg *esw_cfg, int count)
346fe763
RB
3340{
3341 u8 pci_func;
3342 int i;
346fe763
RB
3343 for (i = 0; i < count; i++) {
3344 pci_func = esw_cfg[i].pci_func;
3345 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3346 return QL_STATUS_INVALID_PARAM;
3347
4e8acb01
RB
3348 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3349 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3350 return QL_STATUS_INVALID_PARAM;
346fe763 3351
4e8acb01
RB
3352 switch (esw_cfg[i].op_mode) {
3353 case QLCNIC_PORT_DEFAULTS:
3354 break;
3355 case QLCNIC_ADD_VLAN:
346fe763
RB
3356 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3357 return QL_STATUS_INVALID_PARAM;
4e8acb01
RB
3358 if (!esw_cfg[i].op_type)
3359 return QL_STATUS_INVALID_PARAM;
3360 break;
3361 case QLCNIC_DEL_VLAN:
4e8acb01
RB
3362 if (!esw_cfg[i].op_type)
3363 return QL_STATUS_INVALID_PARAM;
3364 break;
3365 default:
346fe763 3366 return QL_STATUS_INVALID_PARAM;
4e8acb01 3367 }
346fe763 3368 }
346fe763
RB
3369 return 0;
3370}
3371
3372static ssize_t
3373qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3374 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3375{
3376 struct device *dev = container_of(kobj, struct device, kobj);
3377 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3378 struct qlcnic_esw_func_cfg *esw_cfg;
4e8acb01 3379 struct qlcnic_npar_info *npar;
346fe763 3380 int count, rem, i, ret;
0325d69b 3381 u8 pci_func, op_mode = 0;
346fe763
RB
3382
3383 count = size / sizeof(struct qlcnic_esw_func_cfg);
3384 rem = size % sizeof(struct qlcnic_esw_func_cfg);
3385 if (rem)
3386 return QL_STATUS_INVALID_PARAM;
3387
3388 esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
3389 ret = validate_esw_config(adapter, esw_cfg, count);
3390 if (ret)
3391 return ret;
3392
3393 for (i = 0; i < count; i++) {
0325d69b
RB
3394 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3395 if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
3396 return QL_STATUS_INVALID_PARAM;
e9a47700
RB
3397
3398 if (adapter->ahw.pci_func != esw_cfg[i].pci_func)
3399 continue;
3400
3401 op_mode = esw_cfg[i].op_mode;
3402 qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
3403 esw_cfg[i].op_mode = op_mode;
3404 esw_cfg[i].pci_func = adapter->ahw.pci_func;
3405
3406 switch (esw_cfg[i].op_mode) {
3407 case QLCNIC_PORT_DEFAULTS:
3408 qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
3409 break;
8cf61f89
AKS
3410 case QLCNIC_ADD_VLAN:
3411 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3412 break;
3413 case QLCNIC_DEL_VLAN:
3414 esw_cfg[i].vlan_id = 0;
3415 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
3416 break;
0325d69b 3417 }
346fe763
RB
3418 }
3419
0325d69b
RB
3420 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3421 goto out;
e9a47700 3422
346fe763
RB
3423 for (i = 0; i < count; i++) {
3424 pci_func = esw_cfg[i].pci_func;
4e8acb01
RB
3425 npar = &adapter->npars[pci_func];
3426 switch (esw_cfg[i].op_mode) {
3427 case QLCNIC_PORT_DEFAULTS:
3428 npar->promisc_mode = esw_cfg[i].promisc_mode;
3429 npar->mac_learning = esw_cfg[i].mac_learning;
3430 npar->offload_flags = esw_cfg[i].offload_flags;
3431 npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
3432 npar->discard_tagged = esw_cfg[i].discard_tagged;
3433 break;
3434 case QLCNIC_ADD_VLAN:
3435 npar->pvid = esw_cfg[i].vlan_id;
3436 break;
3437 case QLCNIC_DEL_VLAN:
3438 npar->pvid = 0;
3439 break;
3440 }
346fe763 3441 }
0325d69b 3442out:
346fe763
RB
3443 return size;
3444}
3445
3446static ssize_t
3447qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
3448 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3449{
3450 struct device *dev = container_of(kobj, struct device, kobj);
3451 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3452 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
4e8acb01 3453 u8 i;
346fe763
RB
3454
3455 if (size != sizeof(esw_cfg))
3456 return QL_STATUS_INVALID_PARAM;
3457
3458 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3459 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3460 continue;
4e8acb01
RB
3461 esw_cfg[i].pci_func = i;
3462 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
3463 return QL_STATUS_INVALID_PARAM;
346fe763
RB
3464 }
3465 memcpy(buf, &esw_cfg, size);
3466
3467 return size;
3468}
3469
cea8975e 3470static int
346fe763
RB
3471validate_npar_config(struct qlcnic_adapter *adapter,
3472 struct qlcnic_npar_func_cfg *np_cfg, int count)
3473{
3474 u8 pci_func, i;
3475
3476 for (i = 0; i < count; i++) {
3477 pci_func = np_cfg[i].pci_func;
3478 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3479 return QL_STATUS_INVALID_PARAM;
3480
3481 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3482 return QL_STATUS_INVALID_PARAM;
3483
3484 if (!IS_VALID_BW(np_cfg[i].min_bw)
3485 || !IS_VALID_BW(np_cfg[i].max_bw)
3486 || !IS_VALID_RX_QUEUES(np_cfg[i].max_rx_queues)
3487 || !IS_VALID_TX_QUEUES(np_cfg[i].max_tx_queues))
3488 return QL_STATUS_INVALID_PARAM;
3489 }
3490 return 0;
3491}
3492
3493static ssize_t
3494qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
3495 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3496{
3497 struct device *dev = container_of(kobj, struct device, kobj);
3498 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3499 struct qlcnic_info nic_info;
3500 struct qlcnic_npar_func_cfg *np_cfg;
3501 int i, count, rem, ret;
3502 u8 pci_func;
3503
3504 count = size / sizeof(struct qlcnic_npar_func_cfg);
3505 rem = size % sizeof(struct qlcnic_npar_func_cfg);
3506 if (rem)
3507 return QL_STATUS_INVALID_PARAM;
3508
3509 np_cfg = (struct qlcnic_npar_func_cfg *) buf;
3510 ret = validate_npar_config(adapter, np_cfg, count);
3511 if (ret)
3512 return ret;
3513
3514 for (i = 0; i < count ; i++) {
3515 pci_func = np_cfg[i].pci_func;
3516 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
3517 if (ret)
3518 return ret;
3519 nic_info.pci_func = pci_func;
3520 nic_info.min_tx_bw = np_cfg[i].min_bw;
3521 nic_info.max_tx_bw = np_cfg[i].max_bw;
3522 ret = qlcnic_set_nic_info(adapter, &nic_info);
3523 if (ret)
3524 return ret;
cea8975e
AC
3525 adapter->npars[i].min_bw = nic_info.min_tx_bw;
3526 adapter->npars[i].max_bw = nic_info.max_tx_bw;
346fe763
RB
3527 }
3528
3529 return size;
3530
3531}
3532static ssize_t
3533qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
3534 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3535{
3536 struct device *dev = container_of(kobj, struct device, kobj);
3537 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3538 struct qlcnic_info nic_info;
3539 struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
3540 int i, ret;
3541
3542 if (size != sizeof(np_cfg))
3543 return QL_STATUS_INVALID_PARAM;
3544
3545 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3546 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3547 continue;
3548 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
3549 if (ret)
3550 return ret;
3551
3552 np_cfg[i].pci_func = i;
3553 np_cfg[i].op_mode = nic_info.op_mode;
3554 np_cfg[i].port_num = nic_info.phys_port;
3555 np_cfg[i].fw_capab = nic_info.capabilities;
3556		np_cfg[i].min_bw = nic_info.min_tx_bw;
3557 np_cfg[i].max_bw = nic_info.max_tx_bw;
3558 np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
3559 np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
3560 }
3561 memcpy(buf, &np_cfg, size);
3562 return size;
3563}
3564
b6021212
AKS
3565static ssize_t
3566qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
3567 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3568{
3569 struct device *dev = container_of(kobj, struct device, kobj);
3570 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3571 struct qlcnic_esw_statistics port_stats;
3572 int ret;
3573
3574 if (size != sizeof(struct qlcnic_esw_statistics))
3575 return QL_STATUS_INVALID_PARAM;
3576
3577 if (offset >= QLCNIC_MAX_PCI_FUNC)
3578 return QL_STATUS_INVALID_PARAM;
3579
3580 memset(&port_stats, 0, size);
3581 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3582 &port_stats.rx);
3583 if (ret)
3584 return ret;
3585
3586 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3587 &port_stats.tx);
3588 if (ret)
3589 return ret;
3590
3591 memcpy(buf, &port_stats, size);
3592 return size;
3593}
3594
3595static ssize_t
3596qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
3597 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3598{
3599 struct device *dev = container_of(kobj, struct device, kobj);
3600 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3601 struct qlcnic_esw_statistics esw_stats;
3602 int ret;
3603
3604 if (size != sizeof(struct qlcnic_esw_statistics))
3605 return QL_STATUS_INVALID_PARAM;
3606
3607 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3608 return QL_STATUS_INVALID_PARAM;
3609
3610 memset(&esw_stats, 0, size);
3611 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3612 &esw_stats.rx);
3613 if (ret)
3614 return ret;
3615
3616 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3617 &esw_stats.tx);
3618 if (ret)
3619 return ret;
3620
3621 memcpy(buf, &esw_stats, size);
3622 return size;
3623}
3624
3625static ssize_t
3626qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
3627 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3628{
3629 struct device *dev = container_of(kobj, struct device, kobj);
3630 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3631 int ret;
3632
3633 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3634 return QL_STATUS_INVALID_PARAM;
3635
3636 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3637 QLCNIC_QUERY_RX_COUNTER);
3638 if (ret)
3639 return ret;
3640
3641 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3642 QLCNIC_QUERY_TX_COUNTER);
3643 if (ret)
3644 return ret;
3645
3646 return size;
3647}
3648
3649static ssize_t
3650qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
3651 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3652{
3653
3654 struct device *dev = container_of(kobj, struct device, kobj);
3655 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3656 int ret;
3657
3658 if (offset >= QLCNIC_MAX_PCI_FUNC)
3659 return QL_STATUS_INVALID_PARAM;
3660
3661 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3662 QLCNIC_QUERY_RX_COUNTER);
3663 if (ret)
3664 return ret;
3665
3666 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3667 QLCNIC_QUERY_TX_COUNTER);
3668 if (ret)
3669 return ret;
3670
3671 return size;
3672}
3673
346fe763
RB
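/*
 * Read-only dump of the per-function PCI configuration (function type,
 * port, bandwidth limits and default MAC) obtained from the firmware
 * via qlcnic_get_pci_info().
 */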
3674static ssize_t
3675qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
3676 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3677{
3678 struct device *dev = container_of(kobj, struct device, kobj);
3679 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3680 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
e88db3bd 3681 struct qlcnic_pci_info *pci_info;
346fe763
RB
3682 int i, ret;
3683
3684 if (size != sizeof(pci_cfg))
3685 return QL_STATUS_INVALID_PARAM;
3686
e88db3bd
DC
3687 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
3688 if (!pci_info)
3689 return -ENOMEM;
3690
346fe763 3691 ret = qlcnic_get_pci_info(adapter, pci_info);
e88db3bd
DC
3692 if (ret) {
3693 kfree(pci_info);
346fe763 3694 return ret;
e88db3bd 3695 }
346fe763
RB
3696
3697 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3698 pci_cfg[i].pci_func = pci_info[i].id;
3699 pci_cfg[i].func_type = pci_info[i].type;
3700 pci_cfg[i].port_num = pci_info[i].default_port;
3701 pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
3702 pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
3703 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
3704 }
3705 memcpy(buf, &pci_cfg, size);
e88db3bd 3706 kfree(pci_info);
346fe763 3707 return size;
346fe763
RB
3708}
3709static struct bin_attribute bin_attr_npar_config = {
3710 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
3711 .size = 0,
3712 .read = qlcnic_sysfs_read_npar_config,
3713 .write = qlcnic_sysfs_write_npar_config,
3714};
3715
3716static struct bin_attribute bin_attr_pci_config = {
3717 .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
3718 .size = 0,
3719 .read = qlcnic_sysfs_read_pci_config,
3720 .write = NULL,
3721};
3722
b6021212
AKS
3723static struct bin_attribute bin_attr_port_stats = {
3724 .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
3725 .size = 0,
3726 .read = qlcnic_sysfs_get_port_stats,
3727 .write = qlcnic_sysfs_clear_port_stats,
3728};
3729
3730static struct bin_attribute bin_attr_esw_stats = {
3731 .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
3732 .size = 0,
3733 .read = qlcnic_sysfs_get_esw_stats,
3734 .write = qlcnic_sysfs_clear_esw_stats,
3735};
3736
346fe763
RB
3737static struct bin_attribute bin_attr_esw_config = {
3738 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
3739 .size = 0,
3740 .read = qlcnic_sysfs_read_esw_config,
3741 .write = qlcnic_sysfs_write_esw_config,
3742};
3743
3744static struct bin_attribute bin_attr_pm_config = {
3745 .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
3746 .size = 0,
3747 .read = qlcnic_sysfs_read_pm_config,
3748 .write = qlcnic_sysfs_write_pm_config,
3749};
3750
af19b491
AKS
3751static void
3752qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
3753{
3754 struct device *dev = &adapter->pdev->dev;
3755
3756 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3757 if (device_create_file(dev, &dev_attr_bridged_mode))
3758 dev_warn(dev,
3759 "failed to create bridged_mode sysfs entry\n");
3760}
3761
3762static void
3763qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
3764{
3765 struct device *dev = &adapter->pdev->dev;
3766
3767 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3768 device_remove_file(dev, &dev_attr_bridged_mode);
3769}
3770
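/*
 * Diagnostic/management sysfs nodes are created in tiers: port_stats for
 * every function, diag_mode/crb/mem only for privileged functions,
 * esw_config only when the eswitch is enabled, and pci/npar/pm config plus
 * esw_stats only for the management function.
 */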
3771static void
3772qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
3773{
3774 struct device *dev = &adapter->pdev->dev;
3775
3776 if (device_create_bin_file(dev, &bin_attr_port_stats))
3777 dev_info(dev, "failed to create port stats sysfs entry\n");
3778
3779 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3780 return;
3781 if (device_create_file(dev, &dev_attr_diag_mode))
3782 dev_info(dev, "failed to create diag_mode sysfs entry\n");
3783 if (device_create_bin_file(dev, &bin_attr_crb))
3784 dev_info(dev, "failed to create crb sysfs entry\n");
3785 if (device_create_bin_file(dev, &bin_attr_mem))
3786 dev_info(dev, "failed to create mem sysfs entry\n");
3787 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
3788 return;
3789 if (device_create_bin_file(dev, &bin_attr_esw_config))
3790 dev_info(dev, "failed to create esw config sysfs entry\n");
3791 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3792 return;
3793 if (device_create_bin_file(dev, &bin_attr_pci_config))
3794 dev_info(dev, "failed to create pci config sysfs entry\n");
3795 if (device_create_bin_file(dev, &bin_attr_npar_config))
3796 dev_info(dev, "failed to create npar config sysfs entry\n");
3797 if (device_create_bin_file(dev, &bin_attr_pm_config))
3798 dev_info(dev, "failed to create pm config sysfs entry\n");
3799 if (device_create_bin_file(dev, &bin_attr_esw_stats))
3800 dev_info(dev, "failed to create eswitch stats sysfs entry\n");
3801}
3802
3803static void
3804qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
3805{
3806 struct device *dev = &adapter->pdev->dev;
3807
3808 device_remove_bin_file(dev, &bin_attr_port_stats);
3809
3810 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3811 return;
3812 device_remove_file(dev, &dev_attr_diag_mode);
3813 device_remove_bin_file(dev, &bin_attr_crb);
3814 device_remove_bin_file(dev, &bin_attr_mem);
3815 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
3816 return;
3817 device_remove_bin_file(dev, &bin_attr_esw_config);
3818 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3819 return;
3820 device_remove_bin_file(dev, &bin_attr_pci_config);
3821 device_remove_bin_file(dev, &bin_attr_npar_config);
3822 device_remove_bin_file(dev, &bin_attr_pm_config);
3823 device_remove_bin_file(dev, &bin_attr_esw_stats);
3824}
3825
3826#ifdef CONFIG_INET
3827
3828#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
3829
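/* Walk every IPv4 address configured on the interface and notify the
 * adapter of each one (QLCNIC_IP_UP or QLCNIC_IP_DOWN).
 */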
3830static void
3831qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
3832{
3833 struct in_device *indev;
3834 struct qlcnic_adapter *adapter = netdev_priv(dev);
3835
3836 indev = in_dev_get(dev);
3837 if (!indev)
3838 return;
3839
3840 for_ifa(indev) {
3841 switch (event) {
3842 case NETDEV_UP:
3843 qlcnic_config_ipaddr(adapter,
3844 ifa->ifa_address, QLCNIC_IP_UP);
3845 break;
3846 case NETDEV_DOWN:
3847 qlcnic_config_ipaddr(adapter,
3848 ifa->ifa_address, QLCNIC_IP_DOWN);
3849 break;
3850 default:
3851 break;
3852 }
3853 } endfor_ifa(indev);
3854
3855 in_dev_put(indev);
3856}
3857
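/* netdevice notifier: resolve VLAN devices to the real device, ignore
 * netdevs not owned by this driver, and reprogram the interface's IPv4
 * addresses once the adapter is up.
 */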
3858static int qlcnic_netdev_event(struct notifier_block *this,
3859 unsigned long event, void *ptr)
3860{
3861 struct qlcnic_adapter *adapter;
3862 struct net_device *dev = (struct net_device *)ptr;
3863
3864recheck:
3865 if (dev == NULL)
3866 goto done;
3867
3868 if (dev->priv_flags & IFF_802_1Q_VLAN) {
3869 dev = vlan_dev_real_dev(dev);
3870 goto recheck;
3871 }
3872
3873 if (!is_qlcnic_netdev(dev))
3874 goto done;
3875
3876 adapter = netdev_priv(dev);
3877
3878 if (!adapter)
3879 goto done;
3880
3881 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
3882 goto done;
3883
3884 qlcnic_config_indev_addr(dev, event);
3885done:
3886 return NOTIFY_DONE;
3887}
3888
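/* inetaddr notifier: apply the same VLAN/ownership checks, then push the
 * single added or removed address to the adapter.
 */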
3889static int
3890qlcnic_inetaddr_event(struct notifier_block *this,
3891 unsigned long event, void *ptr)
3892{
3893 struct qlcnic_adapter *adapter;
3894 struct net_device *dev;
3895
3896 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
3897
3898 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
3899
3900recheck:
3901 if (dev == NULL || !netif_running(dev))
3902 goto done;
3903
3904 if (dev->priv_flags & IFF_802_1Q_VLAN) {
3905 dev = vlan_dev_real_dev(dev);
3906 goto recheck;
3907 }
3908
3909 if (!is_qlcnic_netdev(dev))
3910 goto done;
3911
3912 adapter = netdev_priv(dev);
3913
3914 if (!adapter)
3915 goto done;
3916
3917 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
3918 goto done;
3919
3920 switch (event) {
3921 case NETDEV_UP:
3922 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
3923 break;
3924 case NETDEV_DOWN:
3925 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
3926 break;
3927 default:
3928 break;
3929 }
3930
3931done:
3932 return NOTIFY_DONE;
3933}
3934
3935static struct notifier_block qlcnic_netdev_cb = {
3936 .notifier_call = qlcnic_netdev_event,
3937};
3938
3939static struct notifier_block qlcnic_inetaddr_cb = {
3940 .notifier_call = qlcnic_inetaddr_event,
3941};
3942#else
3943static void
3944qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
3945{ }
3946#endif
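/* PCI error-recovery (AER) callbacks, hooked into the PCI core through
 * qlcnic_driver below.
 */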
3947static struct pci_error_handlers qlcnic_err_handler = {
3948 .error_detected = qlcnic_io_error_detected,
3949 .slot_reset = qlcnic_io_slot_reset,
3950 .resume = qlcnic_io_resume,
3951};
3952
3953static struct pci_driver qlcnic_driver = {
3954 .name = qlcnic_driver_name,
3955 .id_table = qlcnic_pci_tbl,
3956 .probe = qlcnic_probe,
3957 .remove = __devexit_p(qlcnic_remove),
3958#ifdef CONFIG_PM
3959 .suspend = qlcnic_suspend,
3960 .resume = qlcnic_resume,
3961#endif
3962 .shutdown = qlcnic_shutdown,
3963 .err_handler = &qlcnic_err_handler
3964
3965};
3966
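/* Register the netdevice/inetaddr notifiers (CONFIG_INET) before the PCI
 * driver, and unregister them again if pci_register_driver() fails.
 */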
3967static int __init qlcnic_init_module(void)
3968{
3969 int ret;
3970
3971 printk(KERN_INFO "%s\n", qlcnic_driver_string);
3972
3973#ifdef CONFIG_INET
3974 register_netdevice_notifier(&qlcnic_netdev_cb);
3975 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
3976#endif
3977
3978 ret = pci_register_driver(&qlcnic_driver);
3979 if (ret) {
3980#ifdef CONFIG_INET
3981 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
3982 unregister_netdevice_notifier(&qlcnic_netdev_cb);
3983#endif
3984 }
3985
3986 return ret;
3987}
3988
3989module_init(qlcnic_init_module);
3990
3991static void __exit qlcnic_exit_module(void)
3992{
3993
3994 pci_unregister_driver(&qlcnic_driver);
3995
3996#ifdef CONFIG_INET
3997 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
3998 unregister_netdevice_notifier(&qlcnic_netdev_cb);
3999#endif
4000}
4001
4002module_exit(qlcnic_exit_module);