drivers/net/qlcnic/qlcnic_main.c
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
 25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28
29#include "qlcnic.h"
30
31#include <linux/dma-mapping.h>
32#include <linux/if_vlan.h>
33#include <net/ip.h>
34#include <linux/ipv6.h>
35#include <linux/inetdevice.h>
36#include <linux/sysfs.h>
37
38MODULE_DESCRIPTION("QLogic 10 GbE Converged Ethernet Driver");
39MODULE_LICENSE("GPL");
40MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
41MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
42
43char qlcnic_driver_name[] = "qlcnic";
44static const char qlcnic_driver_string[] = "QLogic Converged Ethernet Driver v"
45 QLCNIC_LINUX_VERSIONID;
46
47static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
48
49/* Default to restricted 1G auto-neg mode */
50static int wol_port_mode = 5;
51
52static int use_msi = 1;
53module_param(use_msi, int, 0644);
 54MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
55
56static int use_msi_x = 1;
57module_param(use_msi_x, int, 0644);
 58MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
59
60static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
61module_param(auto_fw_reset, int, 0644);
 62MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
63
64static int __devinit qlcnic_probe(struct pci_dev *pdev,
65 const struct pci_device_id *ent);
66static void __devexit qlcnic_remove(struct pci_dev *pdev);
67static int qlcnic_open(struct net_device *netdev);
68static int qlcnic_close(struct net_device *netdev);
69static void qlcnic_tx_timeout(struct net_device *netdev);
70static void qlcnic_tx_timeout_task(struct work_struct *work);
71static void qlcnic_attach_work(struct work_struct *work);
72static void qlcnic_fwinit_work(struct work_struct *work);
73static void qlcnic_fw_poll_work(struct work_struct *work);
74static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
75 work_func_t func, int delay);
76static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
77static int qlcnic_poll(struct napi_struct *napi, int budget);
78#ifdef CONFIG_NET_POLL_CONTROLLER
79static void qlcnic_poll_controller(struct net_device *netdev);
80#endif
81
82static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
83static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
84static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
85static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
86
87static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter);
88static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
89
 90static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
91static irqreturn_t qlcnic_intr(int irq, void *data);
92static irqreturn_t qlcnic_msi_intr(int irq, void *data);
93static irqreturn_t qlcnic_msix_intr(int irq, void *data);
94
95static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
96static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
97
98/* PCI Device ID Table */
99#define ENTRY(device) \
100 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
101 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
102
103#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
104
105static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
106 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
107 {0,}
108};
109
110MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
111
112
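/* Ring the Tx doorbell with the new producer index; if free descriptors drop to the stop threshold, pause the Tx queue and count the stall. */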
113void
114qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
115 struct qlcnic_host_tx_ring *tx_ring)
116{
117 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
118
119 if (qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH) {
120 netif_stop_queue(adapter->netdev);
121 smp_mb();
122		adapter->stats.xmit_off++;
123 }
124}
125
126static const u32 msi_tgt_status[8] = {
127 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
128 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
129 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
130 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
131};
132
133static const
134struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
135
136static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
137{
138 writel(0, sds_ring->crb_intr_mask);
139}
140
141static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
142{
143 struct qlcnic_adapter *adapter = sds_ring->adapter;
144
145 writel(0x1, sds_ring->crb_intr_mask);
146
147 if (!QLCNIC_IS_MSI_FAMILY(adapter))
148 writel(0xfbff, adapter->tgt_mask_reg);
149}
150
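/* Allocate the per-adapter array of status-descriptor (SDS) ring structures; returns non-zero if the allocation fails. */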
151static int
152qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
153{
154 int size = sizeof(struct qlcnic_host_sds_ring) * count;
155
156 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
157
158 return (recv_ctx->sds_rings == NULL);
159}
160
161static void
162qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
163{
164 if (recv_ctx->sds_rings != NULL)
165 kfree(recv_ctx->sds_rings);
166
167 recv_ctx->sds_rings = NULL;
168}
169
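/* Register one NAPI context per SDS ring so receive processing can be polled independently for each ring. */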
170static int
171qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
172{
173 int ring;
174 struct qlcnic_host_sds_ring *sds_ring;
175 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
176
177 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
178 return -ENOMEM;
179
180 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
181 sds_ring = &recv_ctx->sds_rings[ring];
182 netif_napi_add(netdev, &sds_ring->napi,
183 qlcnic_poll, QLCNIC_NETDEV_WEIGHT);
184 }
185
186 return 0;
187}
188
189static void
190qlcnic_napi_del(struct qlcnic_adapter *adapter)
191{
192 int ring;
193 struct qlcnic_host_sds_ring *sds_ring;
194 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
195
196 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
197 sds_ring = &recv_ctx->sds_rings[ring];
198 netif_napi_del(&sds_ring->napi);
199 }
200
201 qlcnic_free_sds_rings(&adapter->recv_ctx);
202}
203
204static void
205qlcnic_napi_enable(struct qlcnic_adapter *adapter)
206{
207 int ring;
208 struct qlcnic_host_sds_ring *sds_ring;
209 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
210
211 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
212 return;
213
214 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
215 sds_ring = &recv_ctx->sds_rings[ring];
216 napi_enable(&sds_ring->napi);
217 qlcnic_enable_int(sds_ring);
218 }
219}
220
221static void
222qlcnic_napi_disable(struct qlcnic_adapter *adapter)
223{
224 int ring;
225 struct qlcnic_host_sds_ring *sds_ring;
226 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
227
228 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
229 return;
230
231 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
232 sds_ring = &recv_ctx->sds_rings[ring];
233 qlcnic_disable_int(sds_ring);
234 napi_synchronize(&sds_ring->napi);
235 napi_disable(&sds_ring->napi);
236 }
237}
238
239static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
240{
241 memset(&adapter->stats, 0, sizeof(adapter->stats));
242 return;
243}
244
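/* The initial DMA mask is 39 bits; qlcnic_update_dma_mask() below may widen it to (32 + CRB_DMA_SHIFT) bits once the firmware reports the supported shift, e.g. a shift of 20 gives a 52-bit mask. */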
245static int qlcnic_set_dma_mask(struct qlcnic_adapter *adapter)
246{
247 struct pci_dev *pdev = adapter->pdev;
248 u64 mask, cmask;
249
250 adapter->pci_using_dac = 0;
251
252 mask = DMA_BIT_MASK(39);
253 cmask = mask;
254
255 if (pci_set_dma_mask(pdev, mask) == 0 &&
256 pci_set_consistent_dma_mask(pdev, cmask) == 0) {
257 adapter->pci_using_dac = 1;
258 return 0;
259 }
260
261 return -EIO;
262}
263
264/* Update addressable range if firmware supports it */
265static int
266qlcnic_update_dma_mask(struct qlcnic_adapter *adapter)
267{
268 int change, shift, err;
269 u64 mask, old_mask, old_cmask;
270 struct pci_dev *pdev = adapter->pdev;
271
272 change = 0;
273
274 shift = QLCRD32(adapter, CRB_DMA_SHIFT);
275 if (shift > 32)
276 return 0;
277
278 if (shift > 9)
279 change = 1;
280
281 if (change) {
282 old_mask = pdev->dma_mask;
283 old_cmask = pdev->dev.coherent_dma_mask;
284
285 mask = DMA_BIT_MASK(32+shift);
286
287 err = pci_set_dma_mask(pdev, mask);
288 if (err)
289 goto err_out;
290
291 err = pci_set_consistent_dma_mask(pdev, mask);
292 if (err)
293 goto err_out;
294 dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift);
295 }
296
297 return 0;
298
299err_out:
300 pci_set_dma_mask(pdev, old_mask);
301 pci_set_consistent_dma_mask(pdev, old_cmask);
302 return err;
303}
304
305static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
306{
307 u32 val, data;
308
309 val = adapter->ahw.board_type;
310 if ((val == QLCNIC_BRDTYPE_P3_HMEZ) ||
311 (val == QLCNIC_BRDTYPE_P3_XG_LOM)) {
312 if (port_mode == QLCNIC_PORT_MODE_802_3_AP) {
313 data = QLCNIC_PORT_MODE_802_3_AP;
314 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
315 } else if (port_mode == QLCNIC_PORT_MODE_XG) {
316 data = QLCNIC_PORT_MODE_XG;
317 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
318 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_1G) {
319 data = QLCNIC_PORT_MODE_AUTO_NEG_1G;
320 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
321 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_XG) {
322 data = QLCNIC_PORT_MODE_AUTO_NEG_XG;
323 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
324 } else {
325 data = QLCNIC_PORT_MODE_AUTO_NEG;
326 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
327 }
328
329 if ((wol_port_mode != QLCNIC_PORT_MODE_802_3_AP) &&
330 (wol_port_mode != QLCNIC_PORT_MODE_XG) &&
331 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_1G) &&
332 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_XG)) {
333 wol_port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
334 }
335 QLCWR32(adapter, QLCNIC_WOL_PORT_MODE, wol_port_mode);
336 }
337}
338
339static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
340{
341 u32 control;
342 int pos;
343
344 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
345 if (pos) {
346 pci_read_config_dword(pdev, pos, &control);
347 if (enable)
348 control |= PCI_MSIX_FLAGS_ENABLE;
349 else
350 control = 0;
351 pci_write_config_dword(pdev, pos, control);
352 }
353}
354
355static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
356{
357 int i;
358
359 for (i = 0; i < count; i++)
360 adapter->msix_entries[i].entry = i;
361}
362
363static int
364qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
365{
366 int i;
367 unsigned char *p;
368 u64 mac_addr;
369 struct net_device *netdev = adapter->netdev;
370 struct pci_dev *pdev = adapter->pdev;
371
372 if (qlcnic_get_mac_addr(adapter, &mac_addr) != 0)
373 return -EIO;
374
375 p = (unsigned char *)&mac_addr;
376 for (i = 0; i < 6; i++)
377 netdev->dev_addr[i] = *(p + 5 - i);
378
379 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
380 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
381
382 /* set station address */
383
384 if (!is_valid_ether_addr(netdev->perm_addr))
385 dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
386 netdev->dev_addr);
387
388 return 0;
389}
390
391static int qlcnic_set_mac(struct net_device *netdev, void *p)
392{
393 struct qlcnic_adapter *adapter = netdev_priv(netdev);
394 struct sockaddr *addr = p;
395
396 if (!is_valid_ether_addr(addr->sa_data))
397 return -EINVAL;
398
399 if (netif_running(netdev)) {
400 netif_device_detach(netdev);
401 qlcnic_napi_disable(adapter);
402 }
403
404 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
405 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
406 qlcnic_set_multi(adapter->netdev);
407
408 if (netif_running(netdev)) {
409 netif_device_attach(netdev);
410 qlcnic_napi_enable(adapter);
411 }
412 return 0;
413}
414
415static const struct net_device_ops qlcnic_netdev_ops = {
416 .ndo_open = qlcnic_open,
417 .ndo_stop = qlcnic_close,
418 .ndo_start_xmit = qlcnic_xmit_frame,
419 .ndo_get_stats = qlcnic_get_stats,
420 .ndo_validate_addr = eth_validate_addr,
421 .ndo_set_multicast_list = qlcnic_set_multi,
422 .ndo_set_mac_address = qlcnic_set_mac,
423 .ndo_change_mtu = qlcnic_change_mtu,
424 .ndo_tx_timeout = qlcnic_tx_timeout,
425#ifdef CONFIG_NET_POLL_CONTROLLER
426 .ndo_poll_controller = qlcnic_poll_controller,
427#endif
428};
429
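/* Interrupt setup tries MSI-X first (matching the SDS ring count to the vector count when RSS is supported), then falls back to MSI, then to legacy INTx. */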
430static void
431qlcnic_setup_intr(struct qlcnic_adapter *adapter)
432{
433 const struct qlcnic_legacy_intr_set *legacy_intrp;
434 struct pci_dev *pdev = adapter->pdev;
435 int err, num_msix;
436
437 if (adapter->rss_supported) {
438 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
439 MSIX_ENTRIES_PER_ADAPTER : 2;
440 } else
441 num_msix = 1;
442
443 adapter->max_sds_rings = 1;
444
445 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
446
447 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
448
449 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
450 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
451 legacy_intrp->tgt_status_reg);
452 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
453 legacy_intrp->tgt_mask_reg);
454 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
455
456 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
457 ISR_INT_STATE_REG);
458
459 qlcnic_set_msix_bit(pdev, 0);
460
461 if (adapter->msix_supported) {
462
463 qlcnic_init_msix_entries(adapter, num_msix);
464 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
465 if (err == 0) {
466 adapter->flags |= QLCNIC_MSIX_ENABLED;
467 qlcnic_set_msix_bit(pdev, 1);
468
469 if (adapter->rss_supported)
470 adapter->max_sds_rings = num_msix;
471
472 dev_info(&pdev->dev, "using msi-x interrupts\n");
473 return;
474 }
475
476 if (err > 0)
477 pci_disable_msix(pdev);
478
479 /* fall through for msi */
480 }
481
482 if (use_msi && !pci_enable_msi(pdev)) {
483 adapter->flags |= QLCNIC_MSI_ENABLED;
484 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
485 msi_tgt_status[adapter->ahw.pci_func]);
486 dev_info(&pdev->dev, "using msi interrupts\n");
487 adapter->msix_entries[0].vector = pdev->irq;
488 return;
489 }
490
491 dev_info(&pdev->dev, "using legacy interrupts\n");
492 adapter->msix_entries[0].vector = pdev->irq;
493}
494
495static void
496qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
497{
498 if (adapter->flags & QLCNIC_MSIX_ENABLED)
499 pci_disable_msix(adapter->pdev);
500 if (adapter->flags & QLCNIC_MSI_ENABLED)
501 pci_disable_msi(adapter->pdev);
502}
503
504static void
505qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
506{
507 if (adapter->ahw.pci_base0 != NULL)
508 iounmap(adapter->ahw.pci_base0);
509}
510
511static int
512qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
513{
514 void __iomem *mem_ptr0 = NULL;
515 resource_size_t mem_base;
516 unsigned long mem_len, pci_len0 = 0;
517
518 struct pci_dev *pdev = adapter->pdev;
519 int pci_func = adapter->ahw.pci_func;
520
521 /*
522 * Set the CRB window to invalid. If any register in window 0 is
523 * accessed it should set the window to 0 and then reset it to 1.
524 */
525 adapter->ahw.crb_win = -1;
526 adapter->ahw.ocm_win = -1;
527
528 /* remap phys address */
529 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
530 mem_len = pci_resource_len(pdev, 0);
531
532 if (mem_len == QLCNIC_PCI_2MB_SIZE) {
533
534 mem_ptr0 = pci_ioremap_bar(pdev, 0);
535 if (mem_ptr0 == NULL) {
536 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
537 return -EIO;
538 }
539 pci_len0 = mem_len;
540 } else {
541 return -EIO;
542 }
543
544 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
545
546 adapter->ahw.pci_base0 = mem_ptr0;
547 adapter->ahw.pci_len0 = pci_len0;
548
549 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
550 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
551
552 return 0;
553}
554
555static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
556{
557 struct pci_dev *pdev = adapter->pdev;
558 int i, found = 0;
559
560 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
561 if (qlcnic_boards[i].vendor == pdev->vendor &&
562 qlcnic_boards[i].device == pdev->device &&
563 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
564 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
565 strcpy(name, qlcnic_boards[i].short_name);
566 found = 1;
567 break;
568 }
569
570 }
571
572 if (!found)
573		strcpy(name, "Unknown");
574}
575
576static void
577qlcnic_check_options(struct qlcnic_adapter *adapter)
578{
579 u32 fw_major, fw_minor, fw_build;
580 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
581 char serial_num[32];
582 int i, offset, val;
583 int *ptr32;
584 struct pci_dev *pdev = adapter->pdev;
585
586 adapter->driver_mismatch = 0;
587
588 ptr32 = (int *)&serial_num;
589 offset = QLCNIC_FW_SERIAL_NUM_OFFSET;
590 for (i = 0; i < 8; i++) {
591 if (qlcnic_rom_fast_read(adapter, offset, &val) == -1) {
592 dev_err(&pdev->dev, "error reading board info\n");
593 adapter->driver_mismatch = 1;
594 return;
595 }
596 ptr32[i] = cpu_to_le32(val);
597 offset += sizeof(u32);
598 }
599
600 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
601 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
602 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
603
604 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
605
606 if (adapter->portnum == 0) {
607 get_brd_name(adapter, brd_name);
608
609 pr_info("%s: %s Board Chip rev 0x%x\n",
610 module_name(THIS_MODULE),
611 brd_name, adapter->ahw.revision_id);
612 }
613
614 if (adapter->fw_version < QLCNIC_VERSION_CODE(3, 4, 216)) {
615 adapter->driver_mismatch = 1;
616 dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n",
617 fw_major, fw_minor, fw_build);
618 return;
619 }
620
621 i = QLCRD32(adapter, QLCNIC_SRE_MISC);
622 adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;
623
624 dev_info(&pdev->dev, "firmware v%d.%d.%d [%s]\n",
625 fw_major, fw_minor, fw_build,
626 adapter->ahw.cut_through ? "cut-through" : "legacy");
627
628 if (adapter->fw_version >= QLCNIC_VERSION_CODE(4, 0, 222))
629 adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
630
631 adapter->flags &= ~QLCNIC_LRO_ENABLED;
632
633 if (adapter->ahw.port_type == QLCNIC_XGBE) {
634 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
635 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
636 } else if (adapter->ahw.port_type == QLCNIC_GBE) {
637 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
638 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
639 }
640
641 adapter->msix_supported = !!use_msi_x;
642 adapter->rss_supported = !!use_msi_x;
643
644 adapter->num_txd = MAX_CMD_DESCRIPTORS;
645
646 adapter->num_lro_rxd = 0;
647 adapter->max_rds_rings = 2;
648}
649
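/* Bring up the firmware: qlcnic_can_start_firmware() decides whether this function performs the reset/load sequence or simply waits for another function to bring the device to the READY state; both paths finish with the handshake in qlcnic_phantom_init(). */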
650static int
651qlcnic_start_firmware(struct qlcnic_adapter *adapter)
652{
653 int val, err, first_boot;
654
655 err = qlcnic_set_dma_mask(adapter);
656 if (err)
657 return err;
658
659 err = qlcnic_can_start_firmware(adapter);
660 if (err < 0)
661 return err;
662 else if (!err)
663 goto wait_init;
664
665 first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
666 if (first_boot == 0x55555555)
667 /* This is the first boot after power up */
668 QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
669
670 qlcnic_request_firmware(adapter);
671
672 err = qlcnic_need_fw_reset(adapter);
673 if (err < 0)
674 goto err_out;
675 if (err == 0)
676 goto wait_init;
677
678 if (first_boot != 0x55555555) {
679 QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
680 qlcnic_pinit_from_rom(adapter);
681 msleep(1);
682 }
683
684 QLCWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
685 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
686 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
687
688 qlcnic_set_port_mode(adapter);
689
690 err = qlcnic_load_firmware(adapter);
691 if (err)
692 goto err_out;
693
694 qlcnic_release_firmware(adapter);
695
696 val = (_QLCNIC_LINUX_MAJOR << 16)
697 | ((_QLCNIC_LINUX_MINOR << 8))
698 | (_QLCNIC_LINUX_SUBVERSION);
699 QLCWR32(adapter, CRB_DRIVER_VERSION, val);
700
701wait_init:
702 /* Handshake with the card before we register the devices. */
703 err = qlcnic_phantom_init(adapter);
704 if (err)
705 goto err_out;
706
707 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
708
709 qlcnic_update_dma_mask(adapter);
710
711 qlcnic_check_options(adapter);
712
713 adapter->need_fw_reset = 0;
714
715 /* fall through and release firmware */
716
717err_out:
718 qlcnic_release_firmware(adapter);
719 return err;
720}
721
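/* Request one IRQ per SDS ring; legacy INTx (and the interrupt self-test path) registers the handler as shared. */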
722static int
723qlcnic_request_irq(struct qlcnic_adapter *adapter)
724{
725 irq_handler_t handler;
726 struct qlcnic_host_sds_ring *sds_ring;
727 int err, ring;
728
729 unsigned long flags = 0;
730 struct net_device *netdev = adapter->netdev;
731 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
732
733 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
734 handler = qlcnic_tmp_intr;
735 if (!QLCNIC_IS_MSI_FAMILY(adapter))
736 flags |= IRQF_SHARED;
737
738 } else {
739 if (adapter->flags & QLCNIC_MSIX_ENABLED)
740 handler = qlcnic_msix_intr;
741 else if (adapter->flags & QLCNIC_MSI_ENABLED)
742 handler = qlcnic_msi_intr;
743 else {
744 flags |= IRQF_SHARED;
745 handler = qlcnic_intr;
746 }
747 }
748 adapter->irq = netdev->irq;
749
750 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
751 sds_ring = &recv_ctx->sds_rings[ring];
752 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
753 err = request_irq(sds_ring->irq, handler,
754 flags, sds_ring->name, sds_ring);
755 if (err)
756 return err;
757 }
758
759 return 0;
760}
761
762static void
763qlcnic_free_irq(struct qlcnic_adapter *adapter)
764{
765 int ring;
766 struct qlcnic_host_sds_ring *sds_ring;
767
768 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
769
770 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
771 sds_ring = &recv_ctx->sds_rings[ring];
772 free_irq(sds_ring->irq, sds_ring);
773 }
774}
775
776static void
777qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
778{
779 adapter->coal.flags = QLCNIC_INTR_DEFAULT;
780 adapter->coal.normal.data.rx_time_us =
781 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
782 adapter->coal.normal.data.rx_packets =
783 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
784 adapter->coal.normal.data.tx_time_us =
785 QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
786 adapter->coal.normal.data.tx_packets =
787 QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
788}
789
790static int
791__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
792{
793 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
794 return -EIO;
795
796 qlcnic_set_multi(netdev);
797 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
798
799 adapter->ahw.linkup = 0;
800
801 if (adapter->max_sds_rings > 1)
802 qlcnic_config_rss(adapter, 1);
803
804 qlcnic_config_intr_coalesce(adapter);
805
806 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
807 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
808
809 qlcnic_napi_enable(adapter);
810
811 qlcnic_linkevent_request(adapter, 1);
812
813 set_bit(__QLCNIC_DEV_UP, &adapter->state);
814 return 0;
815}
816
817/* Usage: during resume and firmware recovery */
818
819static int
820qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
821{
822 int err = 0;
823
824 rtnl_lock();
825 if (netif_running(netdev))
826 err = __qlcnic_up(adapter, netdev);
827 rtnl_unlock();
828
829 return err;
830}
831
832static void
833__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
834{
835 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
836 return;
837
838 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
839 return;
840
841 smp_mb();
842 spin_lock(&adapter->tx_clean_lock);
843 netif_carrier_off(netdev);
844 netif_tx_disable(netdev);
845
846 qlcnic_free_mac_list(adapter);
847
848 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
849
850 qlcnic_napi_disable(adapter);
851
852 qlcnic_release_tx_buffers(adapter);
853 spin_unlock(&adapter->tx_clean_lock);
854}
855
856/* Usage: during suspend and firmware recovery */
857
858static void
859qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
860{
861 rtnl_lock();
862 if (netif_running(netdev))
863 __qlcnic_down(adapter, netdev);
864 rtnl_unlock();
865
866}
867
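/* qlcnic_attach() allocates software and hardware resources, posts receive buffers, and requests IRQs; qlcnic_detach() below releases them again. */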
868static int
869qlcnic_attach(struct qlcnic_adapter *adapter)
870{
871 struct net_device *netdev = adapter->netdev;
872 struct pci_dev *pdev = adapter->pdev;
873 int err, ring;
874 struct qlcnic_host_rds_ring *rds_ring;
875
876 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
877 return 0;
878
879 err = qlcnic_init_firmware(adapter);
880 if (err)
881 return err;
882
883 err = qlcnic_napi_add(adapter, netdev);
884 if (err)
885 return err;
886
887 err = qlcnic_alloc_sw_resources(adapter);
888 if (err) {
889 dev_err(&pdev->dev, "Error in setting sw resources\n");
890 return err;
891 }
892
893 err = qlcnic_alloc_hw_resources(adapter);
894 if (err) {
895 dev_err(&pdev->dev, "Error in setting hw resources\n");
896 goto err_out_free_sw;
897 }
898
899
900 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
901 rds_ring = &adapter->recv_ctx.rds_rings[ring];
902 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
903 }
904
905 err = qlcnic_request_irq(adapter);
906 if (err) {
907 dev_err(&pdev->dev, "failed to setup interrupt\n");
908 goto err_out_free_rxbuf;
909 }
910
911 qlcnic_init_coalesce_defaults(adapter);
912
913 qlcnic_create_sysfs_entries(adapter);
914
915 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
916 return 0;
917
918err_out_free_rxbuf:
919 qlcnic_release_rx_buffers(adapter);
920 qlcnic_free_hw_resources(adapter);
921err_out_free_sw:
922 qlcnic_free_sw_resources(adapter);
923 return err;
924}
925
926static void
927qlcnic_detach(struct qlcnic_adapter *adapter)
928{
929 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
930 return;
931
932 qlcnic_remove_sysfs_entries(adapter);
933
934 qlcnic_free_hw_resources(adapter);
935 qlcnic_release_rx_buffers(adapter);
936 qlcnic_free_irq(adapter);
937 qlcnic_napi_del(adapter);
938 qlcnic_free_sw_resources(adapter);
939
940 adapter->is_up = 0;
941}
942
943void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
944{
945 struct qlcnic_adapter *adapter = netdev_priv(netdev);
946 struct qlcnic_host_sds_ring *sds_ring;
947 int ring;
948
949 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
950 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
951 sds_ring = &adapter->recv_ctx.sds_rings[ring];
952 qlcnic_disable_int(sds_ring);
953 }
954 }
955
956 qlcnic_detach(adapter);
957
958 adapter->diag_test = 0;
959 adapter->max_sds_rings = max_sds_rings;
960
961 if (qlcnic_attach(adapter))
962		goto out;
963
964 if (netif_running(netdev))
965 __qlcnic_up(adapter, netdev);
966out:
967 netif_device_attach(netdev);
968}
969
970int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
971{
972 struct qlcnic_adapter *adapter = netdev_priv(netdev);
973 struct qlcnic_host_sds_ring *sds_ring;
974 int ring;
975 int ret;
976
977 netif_device_detach(netdev);
978
979 if (netif_running(netdev))
980 __qlcnic_down(adapter, netdev);
981
982 qlcnic_detach(adapter);
983
984 adapter->max_sds_rings = 1;
985 adapter->diag_test = test;
986
987 ret = qlcnic_attach(adapter);
988 if (ret) {
989 netif_device_attach(netdev);
990		return ret;
991	}
992
993 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
994 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
995 sds_ring = &adapter->recv_ctx.sds_rings[ring];
996 qlcnic_enable_int(sds_ring);
997 }
998 }
999
1000 return 0;
1001}
1002
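/* Tear down and rebuild the adapter context without a full firmware reload; the Tx timeout handler uses this as its first level of recovery. */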
1003int
1004qlcnic_reset_context(struct qlcnic_adapter *adapter)
1005{
1006 int err = 0;
1007 struct net_device *netdev = adapter->netdev;
1008
1009 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1010 return -EBUSY;
1011
1012 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
1013
1014 netif_device_detach(netdev);
1015
1016 if (netif_running(netdev))
1017 __qlcnic_down(adapter, netdev);
1018
1019 qlcnic_detach(adapter);
1020
1021 if (netif_running(netdev)) {
1022 err = qlcnic_attach(adapter);
1023 if (!err)
1024				__qlcnic_up(adapter, netdev);
1025 }
1026
1027 netif_device_attach(netdev);
1028 }
1029
1030 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1031 return err;
1032}
1033
1034static int
1035qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1036 struct net_device *netdev)
1037{
1038 int err;
1039 struct pci_dev *pdev = adapter->pdev;
1040
1041 adapter->rx_csum = 1;
1042 adapter->mc_enabled = 0;
1043 adapter->max_mc_count = 38;
1044
1045 netdev->netdev_ops = &qlcnic_netdev_ops;
1046 netdev->watchdog_timeo = 2*HZ;
1047
1048 qlcnic_change_mtu(netdev, netdev->mtu);
1049
1050 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1051
1052 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
1053 netdev->features |= (NETIF_F_GRO);
1054 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
1055
1056 netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
1057 netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
1058
1059 if (adapter->pci_using_dac) {
1060 netdev->features |= NETIF_F_HIGHDMA;
1061 netdev->vlan_features |= NETIF_F_HIGHDMA;
1062 }
1063
1064 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
1065 netdev->features |= (NETIF_F_HW_VLAN_TX);
1066
1067 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
1068 netdev->features |= NETIF_F_LRO;
1069
1070 netdev->irq = adapter->msix_entries[0].vector;
1071
1072 INIT_WORK(&adapter->tx_timeout_task, qlcnic_tx_timeout_task);
1073
1074 if (qlcnic_read_mac_addr(adapter))
1075 dev_warn(&pdev->dev, "failed to read mac addr\n");
1076
1077 netif_carrier_off(netdev);
1078 netif_stop_queue(netdev);
1079
1080 err = register_netdev(netdev);
1081 if (err) {
1082 dev_err(&pdev->dev, "failed to register net device\n");
1083 return err;
1084 }
1085
1086 return 0;
1087}
1088
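/* PCI probe: map BAR 0, bring up the firmware, configure interrupts, register the net_device, and schedule the periodic firmware poll work. */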
1089static int __devinit
1090qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1091{
1092 struct net_device *netdev = NULL;
1093 struct qlcnic_adapter *adapter = NULL;
1094 int err;
1095 int pci_func_id = PCI_FUNC(pdev->devfn);
1096 uint8_t revision_id;
1097
1098 err = pci_enable_device(pdev);
1099 if (err)
1100 return err;
1101
1102 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1103 err = -ENODEV;
1104 goto err_out_disable_pdev;
1105 }
1106
1107 err = pci_request_regions(pdev, qlcnic_driver_name);
1108 if (err)
1109 goto err_out_disable_pdev;
1110
1111 pci_set_master(pdev);
1112
1113 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1114 if (!netdev) {
1115 dev_err(&pdev->dev, "failed to allocate net_device\n");
1116 err = -ENOMEM;
1117 goto err_out_free_res;
1118 }
1119
1120 SET_NETDEV_DEV(netdev, &pdev->dev);
1121
1122 adapter = netdev_priv(netdev);
1123 adapter->netdev = netdev;
1124 adapter->pdev = pdev;
1125 adapter->ahw.pci_func = pci_func_id;
1126
1127 revision_id = pdev->revision;
1128 adapter->ahw.revision_id = revision_id;
1129
1130 rwlock_init(&adapter->ahw.crb_lock);
1131 mutex_init(&adapter->ahw.mem_lock);
1132
1133 spin_lock_init(&adapter->tx_clean_lock);
1134 INIT_LIST_HEAD(&adapter->mac_list);
1135
1136 err = qlcnic_setup_pci_map(adapter);
1137 if (err)
1138 goto err_out_free_netdev;
1139
1140 /* This will be reset for mezz cards */
1141 adapter->portnum = pci_func_id;
1142
1143 err = qlcnic_get_board_info(adapter);
1144 if (err) {
1145 dev_err(&pdev->dev, "Error getting board config info.\n");
1146 goto err_out_iounmap;
1147 }
1148
1149	qlcnic_setup_idc_param(adapter);
1150
1151 err = qlcnic_start_firmware(adapter);
1152 if (err)
1153 goto err_out_decr_ref;
1154
1155 /*
1156 * See if the firmware gave us a virtual-physical port mapping.
1157 */
1158 adapter->physical_port = adapter->portnum;
1159
1160 qlcnic_clear_stats(adapter);
1161
1162 qlcnic_setup_intr(adapter);
1163
1164 err = qlcnic_setup_netdev(adapter, netdev);
1165 if (err)
1166 goto err_out_disable_msi;
1167
1168 pci_set_drvdata(pdev, adapter);
1169
1170 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1171
1172 switch (adapter->ahw.port_type) {
1173 case QLCNIC_GBE:
1174 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1175 adapter->netdev->name);
1176 break;
1177 case QLCNIC_XGBE:
1178 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1179 adapter->netdev->name);
1180 break;
1181 }
1182
1183 qlcnic_create_diag_entries(adapter);
1184
1185 return 0;
1186
1187err_out_disable_msi:
1188 qlcnic_teardown_intr(adapter);
1189
1190err_out_decr_ref:
1191 qlcnic_clr_all_drv_state(adapter);
1192
1193err_out_iounmap:
1194 qlcnic_cleanup_pci_map(adapter);
1195
1196err_out_free_netdev:
1197 free_netdev(netdev);
1198
1199err_out_free_res:
1200 pci_release_regions(pdev);
1201
1202err_out_disable_pdev:
1203 pci_set_drvdata(pdev, NULL);
1204 pci_disable_device(pdev);
1205 return err;
1206}
1207
1208static void __devexit qlcnic_remove(struct pci_dev *pdev)
1209{
1210 struct qlcnic_adapter *adapter;
1211 struct net_device *netdev;
1212
1213 adapter = pci_get_drvdata(pdev);
1214 if (adapter == NULL)
1215 return;
1216
1217 netdev = adapter->netdev;
1218
1219 qlcnic_cancel_fw_work(adapter);
1220
1221 unregister_netdev(netdev);
1222
1223 cancel_work_sync(&adapter->tx_timeout_task);
1224
1225 qlcnic_detach(adapter);
1226
1227 qlcnic_clr_all_drv_state(adapter);
1228
1229 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1230
1231 qlcnic_teardown_intr(adapter);
1232
1233 qlcnic_remove_diag_entries(adapter);
1234
1235 qlcnic_cleanup_pci_map(adapter);
1236
1237 qlcnic_release_firmware(adapter);
1238
1239 pci_release_regions(pdev);
1240 pci_disable_device(pdev);
1241 pci_set_drvdata(pdev, NULL);
1242
1243 free_netdev(netdev);
1244}
1245static int __qlcnic_shutdown(struct pci_dev *pdev)
1246{
1247 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1248 struct net_device *netdev = adapter->netdev;
1249 int retval;
1250
1251 netif_device_detach(netdev);
1252
1253 qlcnic_cancel_fw_work(adapter);
1254
1255 if (netif_running(netdev))
1256 qlcnic_down(adapter, netdev);
1257
1258 cancel_work_sync(&adapter->tx_timeout_task);
1259
1260 qlcnic_detach(adapter);
1261
1262 qlcnic_clr_all_drv_state(adapter);
1263
1264 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1265
1266 retval = pci_save_state(pdev);
1267 if (retval)
1268 return retval;
1269
1270 if (qlcnic_wol_supported(adapter)) {
1271 pci_enable_wake(pdev, PCI_D3cold, 1);
1272 pci_enable_wake(pdev, PCI_D3hot, 1);
1273 }
1274
1275 return 0;
1276}
1277
1278static void qlcnic_shutdown(struct pci_dev *pdev)
1279{
1280 if (__qlcnic_shutdown(pdev))
1281 return;
1282
1283 pci_disable_device(pdev);
1284}
1285
1286#ifdef CONFIG_PM
1287static int
1288qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1289{
1290 int retval;
1291
1292 retval = __qlcnic_shutdown(pdev);
1293 if (retval)
1294 return retval;
1295
1296 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1297 return 0;
1298}
1299
1300static int
1301qlcnic_resume(struct pci_dev *pdev)
1302{
1303 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1304 struct net_device *netdev = adapter->netdev;
1305 int err;
1306
1307 err = pci_enable_device(pdev);
1308 if (err)
1309 return err;
1310
1311 pci_set_power_state(pdev, PCI_D0);
1312 pci_set_master(pdev);
1313 pci_restore_state(pdev);
1314
1315 adapter->ahw.crb_win = -1;
1316 adapter->ahw.ocm_win = -1;
1317
1318 err = qlcnic_start_firmware(adapter);
1319 if (err) {
1320 dev_err(&pdev->dev, "failed to start firmware\n");
1321 return err;
1322 }
1323
1324 if (netif_running(netdev)) {
1325 err = qlcnic_attach(adapter);
1326 if (err)
1327 goto err_out;
1328
1329 err = qlcnic_up(adapter, netdev);
1330 if (err)
1331 goto err_out_detach;
1332
1333
1334 qlcnic_config_indev_addr(netdev, NETDEV_UP);
1335 }
1336
1337 netif_device_attach(netdev);
1338 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1339 return 0;
1340
1341err_out_detach:
1342 qlcnic_detach(adapter);
1343err_out:
1344 qlcnic_clr_all_drv_state(adapter);
1345	netif_device_attach(netdev);
1346 return err;
1347}
1348#endif
1349
1350static int qlcnic_open(struct net_device *netdev)
1351{
1352 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1353 int err;
1354
1355 if (adapter->driver_mismatch)
1356 return -EIO;
1357
1358 err = qlcnic_attach(adapter);
1359 if (err)
1360 return err;
1361
1362 err = __qlcnic_up(adapter, netdev);
1363 if (err)
1364 goto err_out;
1365
1366 netif_start_queue(netdev);
1367
1368 return 0;
1369
1370err_out:
1371 qlcnic_detach(adapter);
1372 return err;
1373}
1374
1375/*
1376 * qlcnic_close - Disables a network interface entry point
1377 */
1378static int qlcnic_close(struct net_device *netdev)
1379{
1380 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1381
1382 __qlcnic_down(adapter, netdev);
1383 return 0;
1384}
1385
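/* Pick the Tx opcode and offload flags (checksum or LSO). For LSO the MAC/IP/TCP headers, plus a rebuilt VLAN header when a tag is carried out-of-band, are copied into the descriptor ring for the firmware. */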
1386static void
1387qlcnic_tso_check(struct net_device *netdev,
1388 struct qlcnic_host_tx_ring *tx_ring,
1389 struct cmd_desc_type0 *first_desc,
1390 struct sk_buff *skb)
1391{
1392 u8 opcode = TX_ETHER_PKT;
1393 __be16 protocol = skb->protocol;
1394 u16 flags = 0, vid = 0;
1395 u32 producer;
1396 int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
1397 struct cmd_desc_type0 *hwdesc;
1398 struct vlan_ethhdr *vh;
1399	struct qlcnic_adapter *adapter = netdev_priv(netdev);
1400
1401 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1402
1403 vh = (struct vlan_ethhdr *)skb->data;
1404 protocol = vh->h_vlan_encapsulated_proto;
1405 flags = FLAGS_VLAN_TAGGED;
1406
1407 } else if (vlan_tx_tag_present(skb)) {
1408
1409 flags = FLAGS_VLAN_OOB;
1410 vid = vlan_tx_tag_get(skb);
1411 qlcnic_set_tx_vlan_tci(first_desc, vid);
1412 vlan_oob = 1;
1413 }
1414
1415 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1416 skb_shinfo(skb)->gso_size > 0) {
1417
1418 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1419
1420 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1421 first_desc->total_hdr_length = hdr_len;
1422 if (vlan_oob) {
1423 first_desc->total_hdr_length += VLAN_HLEN;
1424 first_desc->tcp_hdr_offset = VLAN_HLEN;
1425 first_desc->ip_hdr_offset = VLAN_HLEN;
1426 /* Only in case of TSO on vlan device */
1427 flags |= FLAGS_VLAN_TAGGED;
1428 }
1429
1430 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1431 TX_TCP_LSO6 : TX_TCP_LSO;
1432 tso = 1;
1433
1434 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1435 u8 l4proto;
1436
1437 if (protocol == cpu_to_be16(ETH_P_IP)) {
1438 l4proto = ip_hdr(skb)->protocol;
1439
1440 if (l4proto == IPPROTO_TCP)
1441 opcode = TX_TCP_PKT;
1442 else if (l4proto == IPPROTO_UDP)
1443 opcode = TX_UDP_PKT;
1444 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1445 l4proto = ipv6_hdr(skb)->nexthdr;
1446
1447 if (l4proto == IPPROTO_TCP)
1448 opcode = TX_TCPV6_PKT;
1449 else if (l4proto == IPPROTO_UDP)
1450 opcode = TX_UDPV6_PKT;
1451 }
1452 }
1453
1454 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
1455 first_desc->ip_hdr_offset += skb_network_offset(skb);
1456 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1457
1458 if (!tso)
1459 return;
1460
1461 /* For LSO, we need to copy the MAC/IP/TCP headers into
1462 * the descriptor ring
1463 */
1464 producer = tx_ring->producer;
1465 copied = 0;
1466 offset = 2;
1467
1468 if (vlan_oob) {
1469 /* Create a TSO vlan header template for firmware */
1470
1471 hwdesc = &tx_ring->desc_head[producer];
1472 tx_ring->cmd_buf_arr[producer].skb = NULL;
1473
1474 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1475 hdr_len + VLAN_HLEN);
1476
1477 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1478 skb_copy_from_linear_data(skb, vh, 12);
1479 vh->h_vlan_proto = htons(ETH_P_8021Q);
1480 vh->h_vlan_TCI = htons(vid);
1481 skb_copy_from_linear_data_offset(skb, 12,
1482 (char *)vh + 16, copy_len - 16);
1483
1484 copied = copy_len - VLAN_HLEN;
1485 offset = 0;
1486
1487 producer = get_next_index(producer, tx_ring->num_desc);
1488 }
1489
1490 while (copied < hdr_len) {
1491
1492 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1493 (hdr_len - copied));
1494
1495 hwdesc = &tx_ring->desc_head[producer];
1496 tx_ring->cmd_buf_arr[producer].skb = NULL;
1497
1498 skb_copy_from_linear_data_offset(skb, copied,
1499 (char *)hwdesc + offset, copy_len);
1500
1501 copied += copy_len;
1502 offset = 0;
1503
1504 producer = get_next_index(producer, tx_ring->num_desc);
1505 }
1506
1507 tx_ring->producer = producer;
1508 barrier();
1509	adapter->stats.lso_frames++;
1510}
1511
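/* DMA-map the skb head and each page fragment; on a mapping failure, unwind whatever was already mapped. */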
1512static int
1513qlcnic_map_tx_skb(struct pci_dev *pdev,
1514 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
1515{
1516 struct qlcnic_skb_frag *nf;
1517 struct skb_frag_struct *frag;
1518 int i, nr_frags;
1519 dma_addr_t map;
1520
1521 nr_frags = skb_shinfo(skb)->nr_frags;
1522 nf = &pbuf->frag_array[0];
1523
1524 map = pci_map_single(pdev, skb->data,
1525 skb_headlen(skb), PCI_DMA_TODEVICE);
1526 if (pci_dma_mapping_error(pdev, map))
1527 goto out_err;
1528
1529 nf->dma = map;
1530 nf->length = skb_headlen(skb);
1531
1532 for (i = 0; i < nr_frags; i++) {
1533 frag = &skb_shinfo(skb)->frags[i];
1534 nf = &pbuf->frag_array[i+1];
1535
1536 map = pci_map_page(pdev, frag->page, frag->page_offset,
1537 frag->size, PCI_DMA_TODEVICE);
1538 if (pci_dma_mapping_error(pdev, map))
1539 goto unwind;
1540
1541 nf->dma = map;
1542 nf->length = frag->size;
1543 }
1544
1545 return 0;
1546
1547unwind:
1548 while (--i >= 0) {
1549 nf = &pbuf->frag_array[i+1];
1550 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
1551 }
1552
1553 nf = &pbuf->frag_array[0];
1554 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
1555
1556out_err:
1557 return -ENOMEM;
1558}
1559
1560static inline void
1561qlcnic_clear_cmddesc(u64 *desc)
1562{
1563 desc[0] = 0ULL;
1564 desc[2] = 0ULL;
1565}
1566
1567netdev_tx_t
1568qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1569{
1570 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1571 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1572 struct qlcnic_cmd_buffer *pbuf;
1573 struct qlcnic_skb_frag *buffrag;
1574 struct cmd_desc_type0 *hwdesc, *first_desc;
1575 struct pci_dev *pdev;
1576 int i, k;
1577
1578 u32 producer;
1579 int frag_count, no_of_desc;
1580 u32 num_txd = tx_ring->num_desc;
1581
1582 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
1583 netif_stop_queue(netdev);
1584 return NETDEV_TX_BUSY;
1585 }
1586
1587 frag_count = skb_shinfo(skb)->nr_frags + 1;
1588
1589	/* 4 fragments per cmd desc */
1590 no_of_desc = (frag_count + 3) >> 2;
1591
1592 if (unlikely(no_of_desc + 2 > qlcnic_tx_avail(tx_ring))) {
1593 netif_stop_queue(netdev);
1594		adapter->stats.xmit_off++;
1595 return NETDEV_TX_BUSY;
1596 }
1597
1598 producer = tx_ring->producer;
1599 pbuf = &tx_ring->cmd_buf_arr[producer];
1600
1601 pdev = adapter->pdev;
1602
1603 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
1604 adapter->stats.tx_dma_map_error++;
1605		goto drop_packet;
1606	}
1607
1608 pbuf->skb = skb;
1609 pbuf->frag_count = frag_count;
1610
1611 first_desc = hwdesc = &tx_ring->desc_head[producer];
1612 qlcnic_clear_cmddesc((u64 *)hwdesc);
1613
1614 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
1615 qlcnic_set_tx_port(first_desc, adapter->portnum);
1616
1617 for (i = 0; i < frag_count; i++) {
1618
1619 k = i % 4;
1620
1621 if ((k == 0) && (i > 0)) {
1622 /* move to next desc.*/
1623 producer = get_next_index(producer, num_txd);
1624 hwdesc = &tx_ring->desc_head[producer];
1625 qlcnic_clear_cmddesc((u64 *)hwdesc);
1626 tx_ring->cmd_buf_arr[producer].skb = NULL;
1627 }
1628
1629 buffrag = &pbuf->frag_array[i];
1630
1631 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
1632 switch (k) {
1633 case 0:
1634 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
1635 break;
1636 case 1:
1637 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
1638 break;
1639 case 2:
1640 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
1641 break;
1642 case 3:
1643 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
1644 break;
1645 }
1646 }
1647
1648 tx_ring->producer = get_next_index(producer, num_txd);
1649
1650 qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
1651
1652 qlcnic_update_cmd_producer(adapter, tx_ring);
1653
1654 adapter->stats.txbytes += skb->len;
1655 adapter->stats.xmitcalled++;
1656
1657 return NETDEV_TX_OK;
1658
1659drop_packet:
1660 adapter->stats.txdropped++;
1661 dev_kfree_skb_any(skb);
1662 return NETDEV_TX_OK;
1663}
1664
1665static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
1666{
1667 struct net_device *netdev = adapter->netdev;
1668 u32 temp, temp_state, temp_val;
1669 int rv = 0;
1670
1671 temp = QLCRD32(adapter, CRB_TEMP_STATE);
1672
1673 temp_state = qlcnic_get_temp_state(temp);
1674 temp_val = qlcnic_get_temp_val(temp);
1675
1676 if (temp_state == QLCNIC_TEMP_PANIC) {
1677 dev_err(&netdev->dev,
1678 "Device temperature %d degrees C exceeds"
1679 " maximum allowed. Hardware has been shut down.\n",
1680 temp_val);
1681 rv = 1;
1682 } else if (temp_state == QLCNIC_TEMP_WARN) {
1683 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
1684 dev_err(&netdev->dev,
1685 "Device temperature %d degrees C "
1686 "exceeds operating range."
1687 " Immediate action needed.\n",
1688 temp_val);
1689 }
1690 } else {
1691 if (adapter->temp == QLCNIC_TEMP_WARN) {
1692 dev_info(&netdev->dev,
1693 "Device temperature is now %d degrees C"
1694 " in normal range.\n", temp_val);
1695 }
1696 }
1697 adapter->temp = temp_state;
1698 return rv;
1699}
1700
1701void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
1702{
1703 struct net_device *netdev = adapter->netdev;
1704
1705 if (adapter->ahw.linkup && !linkup) {
1706 dev_info(&netdev->dev, "NIC Link is down\n");
1707 adapter->ahw.linkup = 0;
1708 if (netif_running(netdev)) {
1709 netif_carrier_off(netdev);
1710 netif_stop_queue(netdev);
1711 }
1712 } else if (!adapter->ahw.linkup && linkup) {
1713 dev_info(&netdev->dev, "NIC Link is up\n");
1714 adapter->ahw.linkup = 1;
1715 if (netif_running(netdev)) {
1716 netif_carrier_on(netdev);
1717 netif_wake_queue(netdev);
1718 }
1719 }
1720}
1721
1722static void qlcnic_tx_timeout(struct net_device *netdev)
1723{
1724 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1725
1726 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
1727 return;
1728
1729 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
1730 schedule_work(&adapter->tx_timeout_task);
1731}
1732
1733static void qlcnic_tx_timeout_task(struct work_struct *work)
1734{
1735 struct qlcnic_adapter *adapter =
1736 container_of(work, struct qlcnic_adapter, tx_timeout_task);
1737
1738 if (!netif_running(adapter->netdev))
1739 return;
1740
1741 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1742 return;
1743
1744 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
1745 goto request_reset;
1746
1747 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1748 if (!qlcnic_reset_context(adapter)) {
1749 adapter->netdev->trans_start = jiffies;
1750 return;
1751
1752 /* context reset failed, fall through for fw reset */
1753 }
1754
1755request_reset:
1756 adapter->need_fw_reset = 1;
1757 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1758	QLCDB(adapter, DRV, "Resetting adapter\n");
1759}
1760
1761static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
1762{
1763 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1764 struct net_device_stats *stats = &netdev->stats;
1765
1766 memset(stats, 0, sizeof(*stats));
1767
1768 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
1769 stats->tx_packets = adapter->stats.xmitfinished;
1770 stats->rx_bytes = adapter->stats.rxbytes;
1771 stats->tx_bytes = adapter->stats.txbytes;
1772 stats->rx_dropped = adapter->stats.rxdropped;
1773 stats->tx_dropped = adapter->stats.txdropped;
1774
1775 return stats;
1776}
1777
1778static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
1779{
1780 u32 status;
1781
1782 status = readl(adapter->isr_int_vec);
1783
1784 if (!(status & adapter->int_vec_bit))
1785 return IRQ_NONE;
1786
1787 /* check interrupt state machine, to be sure */
1788 status = readl(adapter->crb_int_state_reg);
1789 if (!ISR_LEGACY_INT_TRIGGERED(status))
1790 return IRQ_NONE;
1791
1792 writel(0xffffffff, adapter->tgt_status_reg);
1793 /* read twice to ensure write is flushed */
1794 readl(adapter->isr_int_vec);
1795 readl(adapter->isr_int_vec);
1796
1797 return IRQ_HANDLED;
1798}
1799
1800static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
1801{
1802 struct qlcnic_host_sds_ring *sds_ring = data;
1803 struct qlcnic_adapter *adapter = sds_ring->adapter;
1804
1805 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1806 goto done;
1807 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
1808 writel(0xffffffff, adapter->tgt_status_reg);
1809 goto done;
1810 }
1811
1812 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
1813 return IRQ_NONE;
1814
1815done:
1816 adapter->diag_cnt++;
1817 qlcnic_enable_int(sds_ring);
1818 return IRQ_HANDLED;
1819}
1820
1821static irqreturn_t qlcnic_intr(int irq, void *data)
1822{
1823 struct qlcnic_host_sds_ring *sds_ring = data;
1824 struct qlcnic_adapter *adapter = sds_ring->adapter;
1825
1826 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
1827 return IRQ_NONE;
1828
1829 napi_schedule(&sds_ring->napi);
1830
1831 return IRQ_HANDLED;
1832}
1833
1834static irqreturn_t qlcnic_msi_intr(int irq, void *data)
1835{
1836 struct qlcnic_host_sds_ring *sds_ring = data;
1837 struct qlcnic_adapter *adapter = sds_ring->adapter;
1838
1839 /* clear interrupt */
1840 writel(0xffffffff, adapter->tgt_status_reg);
1841
1842 napi_schedule(&sds_ring->napi);
1843 return IRQ_HANDLED;
1844}
1845
1846static irqreturn_t qlcnic_msix_intr(int irq, void *data)
1847{
1848 struct qlcnic_host_sds_ring *sds_ring = data;
1849
1850 napi_schedule(&sds_ring->napi);
1851 return IRQ_HANDLED;
1852}
1853
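/* Reclaim completed Tx descriptors: unmap the buffers, free the skbs, and wake the queue once enough descriptors are available again. */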
1854static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
1855{
1856 u32 sw_consumer, hw_consumer;
1857 int count = 0, i;
1858 struct qlcnic_cmd_buffer *buffer;
1859 struct pci_dev *pdev = adapter->pdev;
1860 struct net_device *netdev = adapter->netdev;
1861 struct qlcnic_skb_frag *frag;
1862 int done;
1863 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1864
1865 if (!spin_trylock(&adapter->tx_clean_lock))
1866 return 1;
1867
1868 sw_consumer = tx_ring->sw_consumer;
1869 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
1870
1871 while (sw_consumer != hw_consumer) {
1872 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
1873 if (buffer->skb) {
1874 frag = &buffer->frag_array[0];
1875 pci_unmap_single(pdev, frag->dma, frag->length,
1876 PCI_DMA_TODEVICE);
1877 frag->dma = 0ULL;
1878 for (i = 1; i < buffer->frag_count; i++) {
1879 frag++;
1880 pci_unmap_page(pdev, frag->dma, frag->length,
1881 PCI_DMA_TODEVICE);
1882 frag->dma = 0ULL;
1883 }
1884
1885 adapter->stats.xmitfinished++;
1886 dev_kfree_skb_any(buffer->skb);
1887 buffer->skb = NULL;
1888 }
1889
1890 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
1891 if (++count >= MAX_STATUS_HANDLE)
1892 break;
1893 }
1894
1895 if (count && netif_running(netdev)) {
1896 tx_ring->sw_consumer = sw_consumer;
1897
1898 smp_mb();
1899
1900 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
1901 __netif_tx_lock(tx_ring->txq, smp_processor_id());
1902 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
1903 netif_wake_queue(netdev);
1904 adapter->tx_timeo_cnt = 0;
1905				adapter->stats.xmit_on++;
1906 }
1907 __netif_tx_unlock(tx_ring->txq);
1908 }
1909 }
1910 /*
1911 * If everything is freed up to consumer then check if the ring is full
1912 * If the ring is full then check if more needs to be freed and
1913 * schedule the call back again.
1914 *
1915 * This happens when there are 2 CPUs. One could be freeing and the
1916 * other filling it. If the ring is full when we get out of here and
1917 * the card has already interrupted the host then the host can miss the
1918 * interrupt.
1919 *
1920 * There is still a possible race condition and the host could miss an
1921 * interrupt. The card has to take care of this.
1922 */
1923 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
1924 done = (sw_consumer == hw_consumer);
1925 spin_unlock(&adapter->tx_clean_lock);
1926
1927 return done;
1928}
1929
1930static int qlcnic_poll(struct napi_struct *napi, int budget)
1931{
1932 struct qlcnic_host_sds_ring *sds_ring =
1933 container_of(napi, struct qlcnic_host_sds_ring, napi);
1934
1935 struct qlcnic_adapter *adapter = sds_ring->adapter;
1936
1937 int tx_complete;
1938 int work_done;
1939
1940 tx_complete = qlcnic_process_cmd_ring(adapter);
1941
1942 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
1943
1944 if ((work_done < budget) && tx_complete) {
1945 napi_complete(&sds_ring->napi);
1946 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1947 qlcnic_enable_int(sds_ring);
1948 }
1949
1950 return work_done;
1951}
1952
1953#ifdef CONFIG_NET_POLL_CONTROLLER
1954static void qlcnic_poll_controller(struct net_device *netdev)
1955{
1956 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1957 disable_irq(adapter->irq);
1958 qlcnic_intr(adapter->irq, adapter);
1959 enable_irq(adapter->irq);
1960}
1961#endif
1962
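/* The driver-state and reference-count CRB registers hold a 4-bit field per PCI function; the helpers below update only this function's nibble while holding the firmware API lock. */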
1963static int
1964qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
1965{
1966 u32 val;
1967
1968 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
1969 state != QLCNIC_DEV_NEED_QUISCENT);
1970
1971 if (qlcnic_api_lock(adapter))
1972		return -EIO;
1973
1974 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1975
1976 if (state == QLCNIC_DEV_NEED_RESET)
1977		QLC_DEV_SET_RST_RDY(val, adapter->portnum);
1978	else if (state == QLCNIC_DEV_NEED_QUISCENT)
1979		QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
1980
1981 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1982
1983 qlcnic_api_unlock(adapter);
1984
1985 return 0;
1986}
1987
1988static int
1989qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
1990{
1991 u32 val;
1992
1993 if (qlcnic_api_lock(adapter))
1994 return -EBUSY;
1995
1996 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1997	QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
1998 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1999
2000 qlcnic_api_unlock(adapter);
2001
2002 return 0;
2003}
2004
2005static void
2006qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter)
2007{
2008 u32 val;
2009
2010 if (qlcnic_api_lock(adapter))
2011 goto err;
2012
2013 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
2014	QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
2015 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
2016
2017 if (!(val & 0x11111111))
2018 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2019
2020 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2021	QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
2022 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2023
2024 qlcnic_api_unlock(adapter);
2025err:
2026 adapter->fw_fail_cnt = 0;
2027 clear_bit(__QLCNIC_START_FW, &adapter->state);
2028 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2029}
2030
2031/* Grab the api lock before checking state */
2032static int
2033qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2034{
2035 int act, state;
2036
2037 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2038 act = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
2039
2040 if (((state & 0x11111111) == (act & 0x11111111)) ||
2041 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2042 return 0;
2043 else
2044 return 1;
2045}
2046
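/*
 * Decide whether this function should (re)load the firmware.  Registers
 * the function in DEV_REF_COUNT, then acts on the current device state:
 * returns 1 if we own the firmware load, 0 if the firmware is already
 * ready (or becomes ready within dev_init_timeo seconds), -1 on failure.
 */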
static int
qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
{
	u32 val, prev_state;
	u8 dev_init_timeo = adapter->dev_init_timeo;
	u8 portnum = adapter->portnum;

	if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
		return 1;

	if (qlcnic_api_lock(adapter))
		return -1;

	val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
	if (!(val & (1 << (portnum * 4)))) {
		QLC_DEV_SET_REF_CNT(val, portnum);
		QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
	}

	prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
	QLCDB(adapter, HW, "Device state = %u\n", prev_state);

	switch (prev_state) {
	case QLCNIC_DEV_COLD:
		QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
		qlcnic_api_unlock(adapter);
		return 1;

	case QLCNIC_DEV_READY:
		qlcnic_api_unlock(adapter);
		return 0;

	case QLCNIC_DEV_NEED_RESET:
		val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
		QLC_DEV_SET_RST_RDY(val, portnum);
		QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
		break;

	case QLCNIC_DEV_NEED_QUISCENT:
		val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
		QLC_DEV_SET_QSCNT_RDY(val, portnum);
		QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
		break;

	case QLCNIC_DEV_FAILED:
		qlcnic_api_unlock(adapter);
		return -1;

	case QLCNIC_DEV_INITIALIZING:
	case QLCNIC_DEV_QUISCENT:
		break;
	}

	qlcnic_api_unlock(adapter);

	do {
		msleep(1000);
	} while ((QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) != QLCNIC_DEV_READY)
			&& --dev_init_timeo);

	if (!dev_init_timeo) {
		dev_err(&adapter->pdev->dev,
			"Timed out waiting for the device to initialize\n");
		return -1;
	}

	if (qlcnic_api_lock(adapter))
		return -1;

	val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
	QLC_DEV_CLR_RST_QSCNT(val, portnum);
	QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);

	qlcnic_api_unlock(adapter);

	return 0;
}

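/*
 * Delayed work run while a reset/quiescent request is pending.  Waits (up
 * to reset_ack_timeo seconds) for every function to ack; once acked, or on
 * ack timeout, the device is moved to INITIALIZING and the firmware is
 * restarted, after which qlcnic_attach_work re-attaches the interface.
 */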
static void
qlcnic_fwinit_work(struct work_struct *work)
{
	struct qlcnic_adapter *adapter = container_of(work,
			struct qlcnic_adapter, fw_work.work);
	u32 dev_state = 0xf;

	if (qlcnic_api_lock(adapter))
		goto err_ret;

	if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
		dev_err(&adapter->pdev->dev,
			"Reset: failed to get ack in %d sec\n",
			adapter->reset_ack_timeo);
		goto skip_ack_check;
	}

	if (!qlcnic_check_drv_state(adapter)) {
skip_ack_check:
		dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
		if (dev_state == QLCNIC_DEV_NEED_RESET) {
			QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
						QLCNIC_DEV_INITIALIZING);
			set_bit(__QLCNIC_START_FW, &adapter->state);
			QLCDB(adapter, DRV, "Restarting fw\n");
		}

		qlcnic_api_unlock(adapter);

		if (!qlcnic_start_firmware(adapter)) {
			qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
			return;
		}
		goto err_ret;
	}

	qlcnic_api_unlock(adapter);

	dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
	QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);

	switch (dev_state) {
	case QLCNIC_DEV_NEED_RESET:
		qlcnic_schedule_work(adapter,
			qlcnic_fwinit_work, FW_POLL_DELAY);
		return;
	case QLCNIC_DEV_FAILED:
		break;

	default:
		if (!qlcnic_start_firmware(adapter)) {
			qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
			return;
		}
	}

err_ret:
	dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
		"fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
	netif_device_attach(adapter->netdev);
	qlcnic_clr_all_drv_state(adapter);
}

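/*
 * Delayed work scheduled when a firmware hang or an over-temperature
 * condition is detected: detach the netdev, tear down the hardware
 * context and, unless the failure is fatal, advertise our reset ack and
 * hand off to qlcnic_fwinit_work to drive the recovery.
 */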
static void
qlcnic_detach_work(struct work_struct *work)
{
	struct qlcnic_adapter *adapter = container_of(work,
			struct qlcnic_adapter, fw_work.work);
	struct net_device *netdev = adapter->netdev;
	u32 status;

	netif_device_detach(netdev);

	qlcnic_down(adapter, netdev);

	rtnl_lock();
	qlcnic_detach(adapter);
	rtnl_unlock();

	status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);

	if (status & QLCNIC_RCODE_FATAL_ERROR)
		goto err_ret;

	if (adapter->temp == QLCNIC_TEMP_PANIC)
		goto err_ret;

	if (qlcnic_set_drv_state(adapter, adapter->dev_state))
		goto err_ret;

	adapter->fw_wait_cnt = 0;

	qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);

	return;

err_ret:
	dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
			status, adapter->temp);
	netif_device_attach(netdev);
	qlcnic_clr_all_drv_state(adapter);
}

/* Transition to the NEED_RESET state is allowed from the READY state only */
static void
qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
{
	u32 state;

	if (qlcnic_api_lock(adapter))
		return;

	state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);

	if (state == QLCNIC_DEV_READY) {
		QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
		QLCDB(adapter, DRV, "NEED_RESET state set\n");
	}

	qlcnic_api_unlock(adapter);
}

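/*
 * There is a single delayed-work item per adapter (fw_work), so only one of
 * the attach/detach/fwinit/poll handlers is queued at a time; scheduling a
 * new handler re-initializes the same work struct with the new function.
 */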
static void
qlcnic_schedule_work(struct qlcnic_adapter *adapter,
		work_func_t func, int delay)
{
	INIT_DELAYED_WORK(&adapter->fw_work, func);
	schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay));
}

static void
qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
{
	while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
		msleep(10);

	cancel_delayed_work_sync(&adapter->fw_work);
}

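/*
 * Final stage of recovery: re-create the hardware context and bring the
 * interface back up if it was running, then resume health polling once this
 * function's ready bits have been cleared from the shared driver state.
 */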
static void
qlcnic_attach_work(struct work_struct *work)
{
	struct qlcnic_adapter *adapter = container_of(work,
				struct qlcnic_adapter, fw_work.work);
	struct net_device *netdev = adapter->netdev;
	int err;

	if (netif_running(netdev)) {
		err = qlcnic_attach(adapter);
		if (err)
			goto done;

		err = qlcnic_up(adapter, netdev);
		if (err) {
			qlcnic_detach(adapter);
			goto done;
		}

		qlcnic_config_indev_addr(netdev, NETDEV_UP);
	}

done:
	netif_device_attach(netdev);
	adapter->fw_fail_cnt = 0;
	clear_bit(__QLCNIC_RESETTING, &adapter->state);

	if (!qlcnic_clr_drv_state(adapter))
		qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
							FW_POLL_DELAY);
}

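/*
 * Periodic health check: watches the temperature, any queued reset
 * requests and the firmware heartbeat counter.  Returns non-zero (and
 * schedules qlcnic_detach_work when auto firmware reset is enabled) once
 * recovery is needed, so the caller stops the normal polling loop.
 */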
static int
qlcnic_check_health(struct qlcnic_adapter *adapter)
{
	u32 state = 0, heartbit;
	struct net_device *netdev = adapter->netdev;

	if (qlcnic_check_temp(adapter))
		goto detach;

	if (adapter->need_fw_reset) {
		qlcnic_dev_request_reset(adapter);
		goto detach;
	}

	state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
	if (state == QLCNIC_DEV_NEED_RESET || state == QLCNIC_DEV_NEED_QUISCENT)
		adapter->need_fw_reset = 1;

	heartbit = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
	if (heartbit != adapter->heartbit) {
		adapter->heartbit = heartbit;
		adapter->fw_fail_cnt = 0;
		if (adapter->need_fw_reset)
			goto detach;
		return 0;
	}

	if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
		return 0;

	qlcnic_dev_request_reset(adapter);

	clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);

	dev_info(&netdev->dev, "firmware hang detected\n");

detach:
	adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
		QLCNIC_DEV_NEED_RESET;

	if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
	    !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {

		qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
		QLCDB(adapter, DRV, "fw recovery scheduled.\n");
	}

	return 1;
}

static void
qlcnic_fw_poll_work(struct work_struct *work)
{
	struct qlcnic_adapter *adapter = container_of(work,
				struct qlcnic_adapter, fw_work.work);

	if (test_bit(__QLCNIC_RESETTING, &adapter->state))
		goto reschedule;

	if (qlcnic_check_health(adapter))
		return;

reschedule:
	qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
}

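/*
 * sysfs attribute "bridged_mode": writing 0/1 enables or disables firmware
 * bridging on adapters that advertise QLCNIC_FW_CAPABILITY_BDG; reading
 * reports the current setting.
 */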
static ssize_t
qlcnic_store_bridged_mode(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
	unsigned long new;
	int ret = -EINVAL;

	if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
		goto err_out;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		goto err_out;

	if (strict_strtoul(buf, 2, &new))
		goto err_out;

	if (!qlcnic_config_bridged_mode(adapter, !!new))
		ret = len;

err_out:
	return ret;
}

static ssize_t
qlcnic_show_bridged_mode(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
	int bridged_mode = 0;

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
		bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);

	return sprintf(buf, "%d\n", bridged_mode);
}

static struct device_attribute dev_attr_bridged_mode = {
	.attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
	.show = qlcnic_show_bridged_mode,
	.store = qlcnic_store_bridged_mode,
};

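/*
 * sysfs attribute "diag_mode": gates access to the "crb" and "mem" binary
 * attributes below; direct register/memory access is refused with -EIO
 * unless QLCNIC_DIAG_ENABLED has been set through this file.
 */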
static ssize_t
qlcnic_store_diag_mode(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
	unsigned long new;

	if (strict_strtoul(buf, 2, &new))
		return -EINVAL;

	if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
		adapter->flags ^= QLCNIC_DIAG_ENABLED;

	return len;
}

static ssize_t
qlcnic_show_diag_mode(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n",
			!!(adapter->flags & QLCNIC_DIAG_ENABLED));
}

static struct device_attribute dev_attr_diag_mode = {
	.attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
	.show = qlcnic_show_diag_mode,
	.store = qlcnic_store_diag_mode,
};

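/*
 * CRB accesses must be naturally aligned: 4 bytes inside CRB space, 8
 * bytes for the CAM/QM window that sits below QLCNIC_PCI_CRBSPACE.
 */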
static int
qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
		loff_t offset, size_t size)
{
	size_t crb_size = 4;

	if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
		return -EIO;

	if (offset < QLCNIC_PCI_CRBSPACE) {
		if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
					QLCNIC_PCI_CAMQM_END))
			crb_size = 8;
		else
			return -EINVAL;
	}

	if ((size != crb_size) || (offset & (crb_size-1)))
		return -EINVAL;

	return 0;
}

static ssize_t
qlcnic_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t offset, size_t size)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
	u32 data;
	u64 qmdata;
	int ret;

	ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
	if (ret != 0)
		return ret;

	if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
		qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
		memcpy(buf, &qmdata, size);
	} else {
		data = QLCRD32(adapter, offset);
		memcpy(buf, &data, size);
	}

	return size;
}

static ssize_t
qlcnic_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t offset, size_t size)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
	u32 data;
	u64 qmdata;
	int ret;

	ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
	if (ret != 0)
		return ret;

	if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
		memcpy(&qmdata, buf, size);
		qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
	} else {
		memcpy(&data, buf, size);
		QLCWR32(adapter, offset, data);
	}

	return size;
}

static int
qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
		loff_t offset, size_t size)
{
	if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
		return -EIO;

	if ((size != 8) || (offset & 0x7))
		return -EIO;

	return 0;
}

static ssize_t
qlcnic_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t offset, size_t size)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
	u64 data;
	int ret;

	ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
	if (ret != 0)
		return ret;

	if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
		return -EIO;

	memcpy(buf, &data, size);

	return size;
}

static ssize_t
qlcnic_sysfs_write_mem(struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t offset, size_t size)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
	u64 data;
	int ret;

	ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
	if (ret != 0)
		return ret;

	memcpy(&data, buf, size);

	if (qlcnic_pci_mem_write_2M(adapter, offset, data))
		return -EIO;

	return size;
}

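/*
 * Userspace usage sketch (illustrative only: the sysfs path, BDF and
 * register offset below are assumptions, not defined by this driver).
 * With "diag_mode" set to 1, a 4-byte CRB register can be read through
 * the "crb" binary attribute under the PCI device directory:
 *
 *	int fd = open("/sys/bus/pci/devices/0000:01:00.0/crb", O_RDWR);
 *	uint32_t val;
 *	pread(fd, &val, sizeof(val), offset);	// offset must be 4-byte aligned
 *
 * Without diag_mode enabled the access fails with -EIO.
 */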
static struct bin_attribute bin_attr_crb = {
	.attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
	.size = 0,
	.read = qlcnic_sysfs_read_crb,
	.write = qlcnic_sysfs_write_crb,
};

static struct bin_attribute bin_attr_mem = {
	.attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
	.size = 0,
	.read = qlcnic_sysfs_read_mem,
	.write = qlcnic_sysfs_write_mem,
};

static void
qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
		if (device_create_file(dev, &dev_attr_bridged_mode))
			dev_warn(dev,
				"failed to create bridged_mode sysfs entry\n");
}

static void
qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
		device_remove_file(dev, &dev_attr_bridged_mode);
}

static void
qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;

	if (device_create_file(dev, &dev_attr_diag_mode))
		dev_info(dev, "failed to create diag_mode sysfs entry\n");
	if (device_create_bin_file(dev, &bin_attr_crb))
		dev_info(dev, "failed to create crb sysfs entry\n");
	if (device_create_bin_file(dev, &bin_attr_mem))
		dev_info(dev, "failed to create mem sysfs entry\n");
}

static void
qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;

	device_remove_file(dev, &dev_attr_diag_mode);
	device_remove_bin_file(dev, &bin_attr_crb);
	device_remove_bin_file(dev, &bin_attr_mem);
}

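/*
 * With CONFIG_INET the driver registers netdevice/inetaddr notifiers so
 * that every IPv4 address configured on a qlcnic interface (or a VLAN on
 * top of one) is programmed into the firmware via qlcnic_config_ipaddr().
 * Not supported on cut-through adapters.
 */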
#ifdef CONFIG_INET

#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)

static int
qlcnic_destip_supported(struct qlcnic_adapter *adapter)
{
	if (adapter->ahw.cut_through)
		return 0;

	return 1;
}

static void
qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
{
	struct in_device *indev;
	struct qlcnic_adapter *adapter = netdev_priv(dev);

	if (!qlcnic_destip_supported(adapter))
		return;

	indev = in_dev_get(dev);
	if (!indev)
		return;

	for_ifa(indev) {
		switch (event) {
		case NETDEV_UP:
			qlcnic_config_ipaddr(adapter,
					ifa->ifa_address, QLCNIC_IP_UP);
			break;
		case NETDEV_DOWN:
			qlcnic_config_ipaddr(adapter,
					ifa->ifa_address, QLCNIC_IP_DOWN);
			break;
		default:
			break;
		}
	} endfor_ifa(indev);

	in_dev_put(indev);
	return;
}

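/*
 * Notifier callbacks: resolve VLAN devices to their real device, ignore
 * anything that is not an up-and-running qlcnic netdev, and mirror IPv4
 * address changes into the firmware.
 */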
static int qlcnic_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct qlcnic_adapter *adapter;
	struct net_device *dev = (struct net_device *)ptr;

recheck:
	if (dev == NULL)
		goto done;

	if (dev->priv_flags & IFF_802_1Q_VLAN) {
		dev = vlan_dev_real_dev(dev);
		goto recheck;
	}

	if (!is_qlcnic_netdev(dev))
		goto done;

	adapter = netdev_priv(dev);

	if (!adapter)
		goto done;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		goto done;

	qlcnic_config_indev_addr(dev, event);
done:
	return NOTIFY_DONE;
}

static int
qlcnic_inetaddr_event(struct notifier_block *this,
		unsigned long event, void *ptr)
{
	struct qlcnic_adapter *adapter;
	struct net_device *dev;

	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;

	dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;

recheck:
	if (dev == NULL || !netif_running(dev))
		goto done;

	if (dev->priv_flags & IFF_802_1Q_VLAN) {
		dev = vlan_dev_real_dev(dev);
		goto recheck;
	}

	if (!is_qlcnic_netdev(dev))
		goto done;

	adapter = netdev_priv(dev);

	if (!adapter || !qlcnic_destip_supported(adapter))
		goto done;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		goto done;

	switch (event) {
	case NETDEV_UP:
		qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
		break;
	case NETDEV_DOWN:
		qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
		break;
	default:
		break;
	}

done:
	return NOTIFY_DONE;
}

static struct notifier_block qlcnic_netdev_cb = {
	.notifier_call = qlcnic_netdev_event,
};

static struct notifier_block qlcnic_inetaddr_cb = {
	.notifier_call = qlcnic_inetaddr_event,
};
#else
static void
qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
{ }
#endif

static struct pci_driver qlcnic_driver = {
	.name = qlcnic_driver_name,
	.id_table = qlcnic_pci_tbl,
	.probe = qlcnic_probe,
	.remove = __devexit_p(qlcnic_remove),
#ifdef CONFIG_PM
	.suspend = qlcnic_suspend,
	.resume = qlcnic_resume,
#endif
	.shutdown = qlcnic_shutdown
};

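/*
 * The inet/netdev notifiers are registered before the PCI driver and
 * unregistered after it, so they cover the whole lifetime of any qlcnic
 * interface.
 */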
static int __init qlcnic_init_module(void)
{
	printk(KERN_INFO "%s\n", qlcnic_driver_string);

#ifdef CONFIG_INET
	register_netdevice_notifier(&qlcnic_netdev_cb);
	register_inetaddr_notifier(&qlcnic_inetaddr_cb);
#endif

	return pci_register_driver(&qlcnic_driver);
}

module_init(qlcnic_init_module);

static void __exit qlcnic_exit_module(void)
{
	pci_unregister_driver(&qlcnic_driver);

#ifdef CONFIG_INET
	unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
	unregister_netdevice_notifier(&qlcnic_netdev_cb);
#endif
}

module_exit(qlcnic_exit_module);