1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
 25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28
29#include "qlcnic.h"
30
31#include <linux/dma-mapping.h>
32#include <linux/if_vlan.h>
33#include <net/ip.h>
34#include <linux/ipv6.h>
35#include <linux/inetdevice.h>
36#include <linux/sysfs.h>
37
38MODULE_DESCRIPTION("QLogic 10 GbE Converged Ethernet Driver");
39MODULE_LICENSE("GPL");
40MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
41MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
42
43char qlcnic_driver_name[] = "qlcnic";
44static const char qlcnic_driver_string[] = "QLogic Converged Ethernet Driver v"
45 QLCNIC_LINUX_VERSIONID;
46
47static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
48
49/* Default to restricted 1G auto-neg mode */
50static int wol_port_mode = 5;
51
52static int use_msi = 1;
53module_param(use_msi, int, 0644);
 54MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
55
56static int use_msi_x = 1;
57module_param(use_msi_x, int, 0644);
 58MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
59
60static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
61module_param(auto_fw_reset, int, 0644);
 62MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
63
64static int load_fw_file;
65module_param(load_fw_file, int, 0644);
 66MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
67
68static int __devinit qlcnic_probe(struct pci_dev *pdev,
69 const struct pci_device_id *ent);
70static void __devexit qlcnic_remove(struct pci_dev *pdev);
71static int qlcnic_open(struct net_device *netdev);
72static int qlcnic_close(struct net_device *netdev);
73static void qlcnic_tx_timeout(struct net_device *netdev);
74static void qlcnic_tx_timeout_task(struct work_struct *work);
75static void qlcnic_attach_work(struct work_struct *work);
76static void qlcnic_fwinit_work(struct work_struct *work);
77static void qlcnic_fw_poll_work(struct work_struct *work);
78static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
79 work_func_t func, int delay);
80static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
81static int qlcnic_poll(struct napi_struct *napi, int budget);
82#ifdef CONFIG_NET_POLL_CONTROLLER
83static void qlcnic_poll_controller(struct net_device *netdev);
84#endif
85
86static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
87static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
88static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
89static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
90
 91static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
92static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter);
93static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
94
 95static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
96static irqreturn_t qlcnic_intr(int irq, void *data);
97static irqreturn_t qlcnic_msi_intr(int irq, void *data);
98static irqreturn_t qlcnic_msix_intr(int irq, void *data);
99
100static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
101static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
102
103/* PCI Device ID Table */
104#define ENTRY(device) \
105 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
106 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
107
108#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
109
 110static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
111 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
112 {0,}
113};
114
115MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
116
117
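/* Ring the Tx doorbell with the new producer index; stop the queue when free descriptors fall to the threshold. */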
118void
119qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
120 struct qlcnic_host_tx_ring *tx_ring)
121{
122 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
123
124 if (qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH) {
125 netif_stop_queue(adapter->netdev);
126 smp_mb();
 127 adapter->stats.xmit_off++;
128 }
129}
130
131static const u32 msi_tgt_status[8] = {
132 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
133 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
134 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
135 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
136};
137
138static const
139struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
140
141static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
142{
143 writel(0, sds_ring->crb_intr_mask);
144}
145
146static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
147{
148 struct qlcnic_adapter *adapter = sds_ring->adapter;
149
150 writel(0x1, sds_ring->crb_intr_mask);
151
152 if (!QLCNIC_IS_MSI_FAMILY(adapter))
153 writel(0xfbff, adapter->tgt_mask_reg);
154}
155
156static int
157qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
158{
159 int size = sizeof(struct qlcnic_host_sds_ring) * count;
160
161 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
162
163 return (recv_ctx->sds_rings == NULL);
164}
165
166static void
167qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
168{
169 if (recv_ctx->sds_rings != NULL)
170 kfree(recv_ctx->sds_rings);
171
172 recv_ctx->sds_rings = NULL;
173}
174
175static int
176qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
177{
178 int ring;
179 struct qlcnic_host_sds_ring *sds_ring;
180 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
181
182 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
183 return -ENOMEM;
184
185 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
186 sds_ring = &recv_ctx->sds_rings[ring];
187 netif_napi_add(netdev, &sds_ring->napi,
188 qlcnic_poll, QLCNIC_NETDEV_WEIGHT);
189 }
190
191 return 0;
192}
193
194static void
195qlcnic_napi_del(struct qlcnic_adapter *adapter)
196{
197 int ring;
198 struct qlcnic_host_sds_ring *sds_ring;
199 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
200
201 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
202 sds_ring = &recv_ctx->sds_rings[ring];
203 netif_napi_del(&sds_ring->napi);
204 }
205
206 qlcnic_free_sds_rings(&adapter->recv_ctx);
207}
208
209static void
210qlcnic_napi_enable(struct qlcnic_adapter *adapter)
211{
212 int ring;
213 struct qlcnic_host_sds_ring *sds_ring;
214 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
215
216 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
217 return;
218
219 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
220 sds_ring = &recv_ctx->sds_rings[ring];
221 napi_enable(&sds_ring->napi);
222 qlcnic_enable_int(sds_ring);
223 }
224}
225
226static void
227qlcnic_napi_disable(struct qlcnic_adapter *adapter)
228{
229 int ring;
230 struct qlcnic_host_sds_ring *sds_ring;
231 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
232
233 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
234 return;
235
236 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
237 sds_ring = &recv_ctx->sds_rings[ring];
238 qlcnic_disable_int(sds_ring);
239 napi_synchronize(&sds_ring->napi);
240 napi_disable(&sds_ring->napi);
241 }
242}
243
244static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
245{
246 memset(&adapter->stats, 0, sizeof(adapter->stats));
247}
248
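/* Program the port mode and wake-on-LAN mode registers for XG mezz/LOM boards, based on the port_mode and wol_port_mode module parameters. */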
249static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
250{
251 u32 val, data;
252
253 val = adapter->ahw.board_type;
254 if ((val == QLCNIC_BRDTYPE_P3_HMEZ) ||
255 (val == QLCNIC_BRDTYPE_P3_XG_LOM)) {
256 if (port_mode == QLCNIC_PORT_MODE_802_3_AP) {
257 data = QLCNIC_PORT_MODE_802_3_AP;
258 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
259 } else if (port_mode == QLCNIC_PORT_MODE_XG) {
260 data = QLCNIC_PORT_MODE_XG;
261 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
262 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_1G) {
263 data = QLCNIC_PORT_MODE_AUTO_NEG_1G;
264 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
265 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_XG) {
266 data = QLCNIC_PORT_MODE_AUTO_NEG_XG;
267 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
268 } else {
269 data = QLCNIC_PORT_MODE_AUTO_NEG;
270 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
271 }
272
273 if ((wol_port_mode != QLCNIC_PORT_MODE_802_3_AP) &&
274 (wol_port_mode != QLCNIC_PORT_MODE_XG) &&
275 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_1G) &&
276 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_XG)) {
277 wol_port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
278 }
279 QLCWR32(adapter, QLCNIC_WOL_PORT_MODE, wol_port_mode);
280 }
281}
282
283static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
284{
285 u32 control;
286 int pos;
287
288 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
289 if (pos) {
290 pci_read_config_dword(pdev, pos, &control);
291 if (enable)
292 control |= PCI_MSIX_FLAGS_ENABLE;
293 else
294 control = 0;
295 pci_write_config_dword(pdev, pos, control);
296 }
297}
298
299static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
300{
301 int i;
302
303 for (i = 0; i < count; i++)
304 adapter->msix_entries[i].entry = i;
305}
306
307static int
308qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
309{
310 int i;
311 unsigned char *p;
312 u64 mac_addr;
313 struct net_device *netdev = adapter->netdev;
314 struct pci_dev *pdev = adapter->pdev;
315
316 if (qlcnic_get_mac_addr(adapter, &mac_addr) != 0)
317 return -EIO;
318
319 p = (unsigned char *)&mac_addr;
320 for (i = 0; i < 6; i++)
321 netdev->dev_addr[i] = *(p + 5 - i);
322
323 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
324 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
325
326 /* set station address */
327
328 if (!is_valid_ether_addr(netdev->perm_addr))
329 dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
330 netdev->dev_addr);
331
332 return 0;
333}
334
335static int qlcnic_set_mac(struct net_device *netdev, void *p)
336{
337 struct qlcnic_adapter *adapter = netdev_priv(netdev);
338 struct sockaddr *addr = p;
339
340 if (!is_valid_ether_addr(addr->sa_data))
341 return -EINVAL;
342
343 if (netif_running(netdev)) {
344 netif_device_detach(netdev);
345 qlcnic_napi_disable(adapter);
346 }
347
348 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
349 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
350 qlcnic_set_multi(adapter->netdev);
351
352 if (netif_running(netdev)) {
353 netif_device_attach(netdev);
354 qlcnic_napi_enable(adapter);
355 }
356 return 0;
357}
358
359static const struct net_device_ops qlcnic_netdev_ops = {
360 .ndo_open = qlcnic_open,
361 .ndo_stop = qlcnic_close,
362 .ndo_start_xmit = qlcnic_xmit_frame,
363 .ndo_get_stats = qlcnic_get_stats,
364 .ndo_validate_addr = eth_validate_addr,
365 .ndo_set_multicast_list = qlcnic_set_multi,
366 .ndo_set_mac_address = qlcnic_set_mac,
367 .ndo_change_mtu = qlcnic_change_mtu,
368 .ndo_tx_timeout = qlcnic_tx_timeout,
369#ifdef CONFIG_NET_POLL_CONTROLLER
370 .ndo_poll_controller = qlcnic_poll_controller,
371#endif
372};
373
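/* Choose the interrupt scheme: MSI-X (with multiple SDS rings when RSS is supported), falling back to MSI and then legacy INTx. */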
374static void
375qlcnic_setup_intr(struct qlcnic_adapter *adapter)
376{
377 const struct qlcnic_legacy_intr_set *legacy_intrp;
378 struct pci_dev *pdev = adapter->pdev;
379 int err, num_msix;
380
381 if (adapter->rss_supported) {
382 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
383 MSIX_ENTRIES_PER_ADAPTER : 2;
384 } else
385 num_msix = 1;
386
387 adapter->max_sds_rings = 1;
388
389 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
390
391 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
392
393 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
394 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
395 legacy_intrp->tgt_status_reg);
396 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
397 legacy_intrp->tgt_mask_reg);
398 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
399
400 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
401 ISR_INT_STATE_REG);
402
403 qlcnic_set_msix_bit(pdev, 0);
404
405 if (adapter->msix_supported) {
406
407 qlcnic_init_msix_entries(adapter, num_msix);
408 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
409 if (err == 0) {
410 adapter->flags |= QLCNIC_MSIX_ENABLED;
411 qlcnic_set_msix_bit(pdev, 1);
412
413 if (adapter->rss_supported)
414 adapter->max_sds_rings = num_msix;
415
416 dev_info(&pdev->dev, "using msi-x interrupts\n");
417 return;
418 }
419
420 if (err > 0)
421 pci_disable_msix(pdev);
422
423 /* fall through for msi */
424 }
425
426 if (use_msi && !pci_enable_msi(pdev)) {
427 adapter->flags |= QLCNIC_MSI_ENABLED;
428 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
429 msi_tgt_status[adapter->ahw.pci_func]);
430 dev_info(&pdev->dev, "using msi interrupts\n");
431 adapter->msix_entries[0].vector = pdev->irq;
432 return;
433 }
434
435 dev_info(&pdev->dev, "using legacy interrupts\n");
436 adapter->msix_entries[0].vector = pdev->irq;
437}
438
439static void
440qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
441{
442 if (adapter->flags & QLCNIC_MSIX_ENABLED)
443 pci_disable_msix(adapter->pdev);
444 if (adapter->flags & QLCNIC_MSI_ENABLED)
445 pci_disable_msi(adapter->pdev);
446}
447
448static void
449qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
450{
451 if (adapter->ahw.pci_base0 != NULL)
452 iounmap(adapter->ahw.pci_base0);
453}
454
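/* Map the 2MB BAR0 register window and record the per-function OCM window CRB address. */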
455static int
456qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
457{
458 void __iomem *mem_ptr0 = NULL;
459 resource_size_t mem_base;
460 unsigned long mem_len, pci_len0 = 0;
461
462 struct pci_dev *pdev = adapter->pdev;
463 int pci_func = adapter->ahw.pci_func;
464
465 /* remap phys address */
466 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
467 mem_len = pci_resource_len(pdev, 0);
468
469 if (mem_len == QLCNIC_PCI_2MB_SIZE) {
470
471 mem_ptr0 = pci_ioremap_bar(pdev, 0);
472 if (mem_ptr0 == NULL) {
473 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
474 return -EIO;
475 }
476 pci_len0 = mem_len;
477 } else {
478 return -EIO;
479 }
480
481 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
482
483 adapter->ahw.pci_base0 = mem_ptr0;
484 adapter->ahw.pci_len0 = pci_len0;
485
486 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
487 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
488
489 return 0;
490}
491
492static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
493{
494 struct pci_dev *pdev = adapter->pdev;
495 int i, found = 0;
496
497 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
498 if (qlcnic_boards[i].vendor == pdev->vendor &&
499 qlcnic_boards[i].device == pdev->device &&
500 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
501 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
 502 sprintf(name, "%pM: %s",
503 adapter->mac_addr,
504 qlcnic_boards[i].short_name);
505 found = 1;
506 break;
507 }
508
509 }
510
511 if (!found)
 512 sprintf(name, "%s", "Unknown");
513}
514
515static void
516qlcnic_check_options(struct qlcnic_adapter *adapter)
517{
518 u32 fw_major, fw_minor, fw_build;
519 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
520 char serial_num[32];
521 int i, offset, val;
522 int *ptr32;
523 struct pci_dev *pdev = adapter->pdev;
524
525 adapter->driver_mismatch = 0;
526
527 ptr32 = (int *)&serial_num;
528 offset = QLCNIC_FW_SERIAL_NUM_OFFSET;
529 for (i = 0; i < 8; i++) {
530 if (qlcnic_rom_fast_read(adapter, offset, &val) == -1) {
531 dev_err(&pdev->dev, "error reading board info\n");
532 adapter->driver_mismatch = 1;
533 return;
534 }
535 ptr32[i] = cpu_to_le32(val);
536 offset += sizeof(u32);
537 }
538
539 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
540 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
541 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
542
543 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
544
545 if (adapter->portnum == 0) {
546 get_brd_name(adapter, brd_name);
547
548 pr_info("%s: %s Board Chip rev 0x%x\n",
549 module_name(THIS_MODULE),
550 brd_name, adapter->ahw.revision_id);
551 }
552
553 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
554 fw_major, fw_minor, fw_build);
 555
 556 adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
557
558 adapter->flags &= ~QLCNIC_LRO_ENABLED;
559
560 if (adapter->ahw.port_type == QLCNIC_XGBE) {
561 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
562 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
563 } else if (adapter->ahw.port_type == QLCNIC_GBE) {
564 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
565 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
566 }
567
568 adapter->msix_supported = !!use_msi_x;
569 adapter->rss_supported = !!use_msi_x;
570
571 adapter->num_txd = MAX_CMD_DESCRIPTORS;
572
573 adapter->max_rds_rings = 2;
574}
575
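/* Bring up firmware: arbitrate via the IDC device state, load the image from flash or file if needed, handshake with the card and mark the device ready. */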
576static int
577qlcnic_start_firmware(struct qlcnic_adapter *adapter)
578{
579 int val, err, first_boot;
580
581 err = qlcnic_can_start_firmware(adapter);
582 if (err < 0)
583 return err;
584 else if (!err)
585 goto wait_init;
586
587 first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
588 if (first_boot == 0x55555555)
589 /* This is the first boot after power up */
590 QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
591
592 if (load_fw_file)
593 qlcnic_request_firmware(adapter);
594 else
595 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
596
597 err = qlcnic_need_fw_reset(adapter);
598 if (err < 0)
599 goto err_out;
600 if (err == 0)
601 goto wait_init;
602
603 if (first_boot != 0x55555555) {
604 QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
605 qlcnic_pinit_from_rom(adapter);
606 msleep(1);
607 }
608
609 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
610 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
611
612 qlcnic_set_port_mode(adapter);
613
614 err = qlcnic_load_firmware(adapter);
615 if (err)
616 goto err_out;
617
618 qlcnic_release_firmware(adapter);
619
620 val = (_QLCNIC_LINUX_MAJOR << 16)
621 | ((_QLCNIC_LINUX_MINOR << 8))
622 | (_QLCNIC_LINUX_SUBVERSION);
623 QLCWR32(adapter, CRB_DRIVER_VERSION, val);
624
625wait_init:
626 /* Handshake with the card before we register the devices. */
627 err = qlcnic_phantom_init(adapter);
628 if (err)
629 goto err_out;
630
631 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
 632 qlcnic_idc_debug_info(adapter, 1);
 633
634 qlcnic_check_options(adapter);
635
636 adapter->need_fw_reset = 0;
637
638 qlcnic_release_firmware(adapter);
639 return 0;
640
641err_out:
642 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
643 dev_err(&adapter->pdev->dev, "Device state set to failed\n");
644 qlcnic_release_firmware(adapter);
645 return err;
646}
647
648static int
649qlcnic_request_irq(struct qlcnic_adapter *adapter)
650{
651 irq_handler_t handler;
652 struct qlcnic_host_sds_ring *sds_ring;
653 int err, ring;
654
655 unsigned long flags = 0;
656 struct net_device *netdev = adapter->netdev;
657 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
658
659 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
660 handler = qlcnic_tmp_intr;
661 if (!QLCNIC_IS_MSI_FAMILY(adapter))
662 flags |= IRQF_SHARED;
663
664 } else {
665 if (adapter->flags & QLCNIC_MSIX_ENABLED)
666 handler = qlcnic_msix_intr;
667 else if (adapter->flags & QLCNIC_MSI_ENABLED)
668 handler = qlcnic_msi_intr;
669 else {
670 flags |= IRQF_SHARED;
671 handler = qlcnic_intr;
672 }
673 }
674 adapter->irq = netdev->irq;
675
676 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
677 sds_ring = &recv_ctx->sds_rings[ring];
678 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
679 err = request_irq(sds_ring->irq, handler,
680 flags, sds_ring->name, sds_ring);
681 if (err)
682 return err;
683 }
684
685 return 0;
686}
687
688static void
689qlcnic_free_irq(struct qlcnic_adapter *adapter)
690{
691 int ring;
692 struct qlcnic_host_sds_ring *sds_ring;
693
694 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
695
696 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
697 sds_ring = &recv_ctx->sds_rings[ring];
698 free_irq(sds_ring->irq, sds_ring);
699 }
700}
701
702static void
703qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
704{
705 adapter->coal.flags = QLCNIC_INTR_DEFAULT;
706 adapter->coal.normal.data.rx_time_us =
707 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
708 adapter->coal.normal.data.rx_packets =
709 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
710 adapter->coal.normal.data.tx_time_us =
711 QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
712 adapter->coal.normal.data.tx_packets =
713 QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
714}
715
716static int
717__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
718{
719 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
720 return -EIO;
721
722 qlcnic_set_multi(netdev);
723 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
724
725 adapter->ahw.linkup = 0;
726
727 if (adapter->max_sds_rings > 1)
728 qlcnic_config_rss(adapter, 1);
729
730 qlcnic_config_intr_coalesce(adapter);
731
732 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
733 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
734
735 qlcnic_napi_enable(adapter);
736
737 qlcnic_linkevent_request(adapter, 1);
738
739 set_bit(__QLCNIC_DEV_UP, &adapter->state);
740 return 0;
741}
742
 743/* Usage: during resume and firmware recovery. */
744
745static int
746qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
747{
748 int err = 0;
749
750 rtnl_lock();
751 if (netif_running(netdev))
752 err = __qlcnic_up(adapter, netdev);
753 rtnl_unlock();
754
755 return err;
756}
757
758static void
759__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
760{
761 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
762 return;
763
764 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
765 return;
766
767 smp_mb();
768 spin_lock(&adapter->tx_clean_lock);
769 netif_carrier_off(netdev);
770 netif_tx_disable(netdev);
771
772 qlcnic_free_mac_list(adapter);
773
774 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
775
776 qlcnic_napi_disable(adapter);
777
778 qlcnic_release_tx_buffers(adapter);
779 spin_unlock(&adapter->tx_clean_lock);
780}
781
 782/* Usage: during suspend and firmware recovery. */
783
784static void
785qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
786{
787 rtnl_lock();
788 if (netif_running(netdev))
789 __qlcnic_down(adapter, netdev);
790 rtnl_unlock();
791
792}
793
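/* Allocate software and hardware resources, post Rx buffers and request IRQs; paired with qlcnic_detach(). */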
794static int
795qlcnic_attach(struct qlcnic_adapter *adapter)
796{
797 struct net_device *netdev = adapter->netdev;
798 struct pci_dev *pdev = adapter->pdev;
799 int err, ring;
800 struct qlcnic_host_rds_ring *rds_ring;
801
802 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
803 return 0;
804
805 err = qlcnic_init_firmware(adapter);
806 if (err)
807 return err;
808
809 err = qlcnic_napi_add(adapter, netdev);
810 if (err)
811 return err;
812
813 err = qlcnic_alloc_sw_resources(adapter);
814 if (err) {
815 dev_err(&pdev->dev, "Error in setting sw resources\n");
816 return err;
817 }
818
819 err = qlcnic_alloc_hw_resources(adapter);
820 if (err) {
821 dev_err(&pdev->dev, "Error in setting hw resources\n");
822 goto err_out_free_sw;
823 }
824
825
826 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
827 rds_ring = &adapter->recv_ctx.rds_rings[ring];
828 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
829 }
830
831 err = qlcnic_request_irq(adapter);
832 if (err) {
833 dev_err(&pdev->dev, "failed to setup interrupt\n");
834 goto err_out_free_rxbuf;
835 }
836
837 qlcnic_init_coalesce_defaults(adapter);
838
839 qlcnic_create_sysfs_entries(adapter);
840
841 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
842 return 0;
843
844err_out_free_rxbuf:
845 qlcnic_release_rx_buffers(adapter);
846 qlcnic_free_hw_resources(adapter);
847err_out_free_sw:
848 qlcnic_free_sw_resources(adapter);
849 return err;
850}
851
852static void
853qlcnic_detach(struct qlcnic_adapter *adapter)
854{
855 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
856 return;
857
858 qlcnic_remove_sysfs_entries(adapter);
859
860 qlcnic_free_hw_resources(adapter);
861 qlcnic_release_rx_buffers(adapter);
862 qlcnic_free_irq(adapter);
863 qlcnic_napi_del(adapter);
864 qlcnic_free_sw_resources(adapter);
865
866 adapter->is_up = 0;
867}
868
869void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
870{
871 struct qlcnic_adapter *adapter = netdev_priv(netdev);
872 struct qlcnic_host_sds_ring *sds_ring;
873 int ring;
874
 875 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
876 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
877 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
878 sds_ring = &adapter->recv_ctx.sds_rings[ring];
879 qlcnic_disable_int(sds_ring);
880 }
881 }
882
883 qlcnic_detach(adapter);
884
885 adapter->diag_test = 0;
886 adapter->max_sds_rings = max_sds_rings;
887
888 if (qlcnic_attach(adapter))
 889 goto out;
890
891 if (netif_running(netdev))
892 __qlcnic_up(adapter, netdev);
 893out:
894 netif_device_attach(netdev);
895}
896
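/* Switch the interface into single-ring diagnostic mode for self-tests; qlcnic_diag_free_res() restores normal operation. */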
897int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
898{
899 struct qlcnic_adapter *adapter = netdev_priv(netdev);
900 struct qlcnic_host_sds_ring *sds_ring;
901 int ring;
902 int ret;
903
904 netif_device_detach(netdev);
905
906 if (netif_running(netdev))
907 __qlcnic_down(adapter, netdev);
908
909 qlcnic_detach(adapter);
910
911 adapter->max_sds_rings = 1;
912 adapter->diag_test = test;
913
914 ret = qlcnic_attach(adapter);
915 if (ret) {
916 netif_device_attach(netdev);
 917 return ret;
 918 }
 919
920 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
921 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
922 sds_ring = &adapter->recv_ctx.sds_rings[ring];
923 qlcnic_enable_int(sds_ring);
924 }
 925 }
 926 set_bit(__QLCNIC_DEV_UP, &adapter->state);
927
928 return 0;
929}
930
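/* Full detach/attach cycle, used to recover the context (e.g. after a Tx timeout). */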
931int
932qlcnic_reset_context(struct qlcnic_adapter *adapter)
933{
934 int err = 0;
935 struct net_device *netdev = adapter->netdev;
936
937 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
938 return -EBUSY;
939
940 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
941
942 netif_device_detach(netdev);
943
944 if (netif_running(netdev))
945 __qlcnic_down(adapter, netdev);
946
947 qlcnic_detach(adapter);
948
949 if (netif_running(netdev)) {
950 err = qlcnic_attach(adapter);
951 if (!err)
 952 __qlcnic_up(adapter, netdev);
953 }
954
955 netif_device_attach(netdev);
956 }
957
958 clear_bit(__QLCNIC_RESETTING, &adapter->state);
959 return err;
960}
961
962static int
963qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
 964 struct net_device *netdev, u8 pci_using_dac)
965{
966 int err;
967 struct pci_dev *pdev = adapter->pdev;
968
969 adapter->rx_csum = 1;
970 adapter->mc_enabled = 0;
971 adapter->max_mc_count = 38;
972
973 netdev->netdev_ops = &qlcnic_netdev_ops;
974 netdev->watchdog_timeo = 2*HZ;
975
976 qlcnic_change_mtu(netdev, netdev->mtu);
977
978 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
979
980 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
981 netdev->features |= (NETIF_F_GRO);
982 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
983
984 netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
985 netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
986
 987 if (pci_using_dac) {
988 netdev->features |= NETIF_F_HIGHDMA;
989 netdev->vlan_features |= NETIF_F_HIGHDMA;
990 }
991
992 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
993 netdev->features |= (NETIF_F_HW_VLAN_TX);
994
995 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
996 netdev->features |= NETIF_F_LRO;
997
998 netdev->irq = adapter->msix_entries[0].vector;
999
1000 INIT_WORK(&adapter->tx_timeout_task, qlcnic_tx_timeout_task);
1001
1002 if (qlcnic_read_mac_addr(adapter))
1003 dev_warn(&pdev->dev, "failed to read mac addr\n");
1004
1005 netif_carrier_off(netdev);
1006 netif_stop_queue(netdev);
1007
1008 err = register_netdev(netdev);
1009 if (err) {
1010 dev_err(&pdev->dev, "failed to register net device\n");
1011 return err;
1012 }
1013
1014 return 0;
1015}
1016
1017static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
1018{
1019 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1020 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1021 *pci_using_dac = 1;
1022 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
1023 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1024 *pci_using_dac = 0;
1025 else {
1026 dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
1027 return -EIO;
1028 }
1029
1030 return 0;
1031}
1032
1033static int __devinit
1034qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1035{
1036 struct net_device *netdev = NULL;
1037 struct qlcnic_adapter *adapter = NULL;
1038 int err;
1039 int pci_func_id = PCI_FUNC(pdev->devfn);
1040 uint8_t revision_id;
 1041 uint8_t pci_using_dac;
1042
1043 err = pci_enable_device(pdev);
1044 if (err)
1045 return err;
1046
1047 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1048 err = -ENODEV;
1049 goto err_out_disable_pdev;
1050 }
1051
1052 err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
1053 if (err)
1054 goto err_out_disable_pdev;
1055
1056 err = pci_request_regions(pdev, qlcnic_driver_name);
1057 if (err)
1058 goto err_out_disable_pdev;
1059
1060 pci_set_master(pdev);
1061
1062 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1063 if (!netdev) {
1064 dev_err(&pdev->dev, "failed to allocate net_device\n");
1065 err = -ENOMEM;
1066 goto err_out_free_res;
1067 }
1068
1069 SET_NETDEV_DEV(netdev, &pdev->dev);
1070
1071 adapter = netdev_priv(netdev);
1072 adapter->netdev = netdev;
1073 adapter->pdev = pdev;
 1074 adapter->dev_rst_time = jiffies;
1075 adapter->ahw.pci_func = pci_func_id;
1076
1077 revision_id = pdev->revision;
1078 adapter->ahw.revision_id = revision_id;
1079
1080 rwlock_init(&adapter->ahw.crb_lock);
1081 mutex_init(&adapter->ahw.mem_lock);
1082
1083 spin_lock_init(&adapter->tx_clean_lock);
1084 INIT_LIST_HEAD(&adapter->mac_list);
1085
1086 err = qlcnic_setup_pci_map(adapter);
1087 if (err)
1088 goto err_out_free_netdev;
1089
1090 /* This will be reset for mezz cards */
1091 adapter->portnum = pci_func_id;
1092
1093 err = qlcnic_get_board_info(adapter);
1094 if (err) {
1095 dev_err(&pdev->dev, "Error getting board config info.\n");
1096 goto err_out_iounmap;
1097 }
1098
1099 if (qlcnic_read_mac_addr(adapter))
1100 dev_warn(&pdev->dev, "failed to read mac addr\n");
1101
1102 if (qlcnic_setup_idc_param(adapter))
1103 goto err_out_iounmap;
1104
1105 err = qlcnic_start_firmware(adapter);
1106 if (err) {
 1107 dev_err(&pdev->dev, "Loading fw failed. Please reboot\n");
 1108 goto err_out_decr_ref;
 1109 }
 1110
1111 qlcnic_clear_stats(adapter);
1112
1113 qlcnic_setup_intr(adapter);
1114
 1115 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
1116 if (err)
1117 goto err_out_disable_msi;
1118
1119 pci_set_drvdata(pdev, adapter);
1120
1121 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1122
1123 switch (adapter->ahw.port_type) {
1124 case QLCNIC_GBE:
1125 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1126 adapter->netdev->name);
1127 break;
1128 case QLCNIC_XGBE:
1129 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1130 adapter->netdev->name);
1131 break;
1132 }
1133
1134 qlcnic_create_diag_entries(adapter);
1135
1136 return 0;
1137
1138err_out_disable_msi:
1139 qlcnic_teardown_intr(adapter);
1140
1141err_out_decr_ref:
1142 qlcnic_clr_all_drv_state(adapter);
1143
1144err_out_iounmap:
1145 qlcnic_cleanup_pci_map(adapter);
1146
1147err_out_free_netdev:
1148 free_netdev(netdev);
1149
1150err_out_free_res:
1151 pci_release_regions(pdev);
1152
1153err_out_disable_pdev:
1154 pci_set_drvdata(pdev, NULL);
1155 pci_disable_device(pdev);
1156 return err;
1157}
1158
1159static void __devexit qlcnic_remove(struct pci_dev *pdev)
1160{
1161 struct qlcnic_adapter *adapter;
1162 struct net_device *netdev;
1163
1164 adapter = pci_get_drvdata(pdev);
1165 if (adapter == NULL)
1166 return;
1167
1168 netdev = adapter->netdev;
1169
1170 qlcnic_cancel_fw_work(adapter);
1171
1172 unregister_netdev(netdev);
1173
1174 cancel_work_sync(&adapter->tx_timeout_task);
1175
1176 qlcnic_detach(adapter);
1177
1178 qlcnic_clr_all_drv_state(adapter);
1179
1180 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1181
1182 qlcnic_teardown_intr(adapter);
1183
1184 qlcnic_remove_diag_entries(adapter);
1185
1186 qlcnic_cleanup_pci_map(adapter);
1187
1188 qlcnic_release_firmware(adapter);
1189
1190 pci_release_regions(pdev);
1191 pci_disable_device(pdev);
1192 pci_set_drvdata(pdev, NULL);
1193
1194 free_netdev(netdev);
1195}
1196static int __qlcnic_shutdown(struct pci_dev *pdev)
1197{
1198 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1199 struct net_device *netdev = adapter->netdev;
1200 int retval;
1201
1202 netif_device_detach(netdev);
1203
1204 qlcnic_cancel_fw_work(adapter);
1205
1206 if (netif_running(netdev))
1207 qlcnic_down(adapter, netdev);
1208
1209 cancel_work_sync(&adapter->tx_timeout_task);
1210
1211 qlcnic_detach(adapter);
1212
1213 qlcnic_clr_all_drv_state(adapter);
1214
1215 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1216
1217 retval = pci_save_state(pdev);
1218 if (retval)
1219 return retval;
1220
1221 if (qlcnic_wol_supported(adapter)) {
1222 pci_enable_wake(pdev, PCI_D3cold, 1);
1223 pci_enable_wake(pdev, PCI_D3hot, 1);
1224 }
1225
1226 return 0;
1227}
1228
1229static void qlcnic_shutdown(struct pci_dev *pdev)
1230{
1231 if (__qlcnic_shutdown(pdev))
1232 return;
1233
1234 pci_disable_device(pdev);
1235}
1236
1237#ifdef CONFIG_PM
1238static int
1239qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1240{
1241 int retval;
1242
1243 retval = __qlcnic_shutdown(pdev);
1244 if (retval)
1245 return retval;
1246
1247 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1248 return 0;
1249}
1250
1251static int
1252qlcnic_resume(struct pci_dev *pdev)
1253{
1254 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1255 struct net_device *netdev = adapter->netdev;
1256 int err;
1257
1258 err = pci_enable_device(pdev);
1259 if (err)
1260 return err;
1261
1262 pci_set_power_state(pdev, PCI_D0);
1263 pci_set_master(pdev);
1264 pci_restore_state(pdev);
1265
1266 err = qlcnic_start_firmware(adapter);
1267 if (err) {
1268 dev_err(&pdev->dev, "failed to start firmware\n");
1269 return err;
1270 }
1271
1272 if (netif_running(netdev)) {
1273 err = qlcnic_attach(adapter);
1274 if (err)
1275 goto err_out;
1276
1277 err = qlcnic_up(adapter, netdev);
1278 if (err)
1279 goto err_out_detach;
1280
1281
1282 qlcnic_config_indev_addr(netdev, NETDEV_UP);
1283 }
1284
1285 netif_device_attach(netdev);
1286 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1287 return 0;
1288
1289err_out_detach:
1290 qlcnic_detach(adapter);
1291err_out:
1292 qlcnic_clr_all_drv_state(adapter);
 1293 netif_device_attach(netdev);
1294 return err;
1295}
1296#endif
1297
1298static int qlcnic_open(struct net_device *netdev)
1299{
1300 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1301 int err;
1302
1303 if (adapter->driver_mismatch)
1304 return -EIO;
1305
1306 err = qlcnic_attach(adapter);
1307 if (err)
1308 return err;
1309
1310 err = __qlcnic_up(adapter, netdev);
1311 if (err)
1312 goto err_out;
1313
1314 netif_start_queue(netdev);
1315
1316 return 0;
1317
1318err_out:
1319 qlcnic_detach(adapter);
1320 return err;
1321}
1322
1323/*
1324 * qlcnic_close - Disables a network interface entry point
1325 */
1326static int qlcnic_close(struct net_device *netdev)
1327{
1328 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1329
1330 __qlcnic_down(adapter, netdev);
1331 return 0;
1332}
1333
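/* Set the checksum/LSO opcode and flags in the first descriptor; for LSO, copy the MAC/IP/TCP headers (and a VLAN header template, if needed) into spare descriptors for the firmware. */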
1334static void
1335qlcnic_tso_check(struct net_device *netdev,
1336 struct qlcnic_host_tx_ring *tx_ring,
1337 struct cmd_desc_type0 *first_desc,
1338 struct sk_buff *skb)
1339{
1340 u8 opcode = TX_ETHER_PKT;
1341 __be16 protocol = skb->protocol;
1342 u16 flags = 0, vid = 0;
1343 u32 producer;
1344 int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
1345 struct cmd_desc_type0 *hwdesc;
1346 struct vlan_ethhdr *vh;
 1347 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1348
1349 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1350
1351 vh = (struct vlan_ethhdr *)skb->data;
1352 protocol = vh->h_vlan_encapsulated_proto;
1353 flags = FLAGS_VLAN_TAGGED;
1354
1355 } else if (vlan_tx_tag_present(skb)) {
1356
1357 flags = FLAGS_VLAN_OOB;
1358 vid = vlan_tx_tag_get(skb);
1359 qlcnic_set_tx_vlan_tci(first_desc, vid);
1360 vlan_oob = 1;
1361 }
1362
1363 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1364 skb_shinfo(skb)->gso_size > 0) {
1365
1366 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1367
1368 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1369 first_desc->total_hdr_length = hdr_len;
1370 if (vlan_oob) {
1371 first_desc->total_hdr_length += VLAN_HLEN;
1372 first_desc->tcp_hdr_offset = VLAN_HLEN;
1373 first_desc->ip_hdr_offset = VLAN_HLEN;
1374 /* Only in case of TSO on vlan device */
1375 flags |= FLAGS_VLAN_TAGGED;
1376 }
1377
1378 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1379 TX_TCP_LSO6 : TX_TCP_LSO;
1380 tso = 1;
1381
1382 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1383 u8 l4proto;
1384
1385 if (protocol == cpu_to_be16(ETH_P_IP)) {
1386 l4proto = ip_hdr(skb)->protocol;
1387
1388 if (l4proto == IPPROTO_TCP)
1389 opcode = TX_TCP_PKT;
1390 else if (l4proto == IPPROTO_UDP)
1391 opcode = TX_UDP_PKT;
1392 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1393 l4proto = ipv6_hdr(skb)->nexthdr;
1394
1395 if (l4proto == IPPROTO_TCP)
1396 opcode = TX_TCPV6_PKT;
1397 else if (l4proto == IPPROTO_UDP)
1398 opcode = TX_UDPV6_PKT;
1399 }
1400 }
1401
1402 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
1403 first_desc->ip_hdr_offset += skb_network_offset(skb);
1404 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1405
1406 if (!tso)
1407 return;
1408
1409 /* For LSO, we need to copy the MAC/IP/TCP headers into
1410 * the descriptor ring
1411 */
1412 producer = tx_ring->producer;
1413 copied = 0;
1414 offset = 2;
1415
1416 if (vlan_oob) {
1417 /* Create a TSO vlan header template for firmware */
1418
1419 hwdesc = &tx_ring->desc_head[producer];
1420 tx_ring->cmd_buf_arr[producer].skb = NULL;
1421
1422 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1423 hdr_len + VLAN_HLEN);
1424
1425 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1426 skb_copy_from_linear_data(skb, vh, 12);
1427 vh->h_vlan_proto = htons(ETH_P_8021Q);
1428 vh->h_vlan_TCI = htons(vid);
1429 skb_copy_from_linear_data_offset(skb, 12,
1430 (char *)vh + 16, copy_len - 16);
1431
1432 copied = copy_len - VLAN_HLEN;
1433 offset = 0;
1434
1435 producer = get_next_index(producer, tx_ring->num_desc);
1436 }
1437
1438 while (copied < hdr_len) {
1439
1440 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1441 (hdr_len - copied));
1442
1443 hwdesc = &tx_ring->desc_head[producer];
1444 tx_ring->cmd_buf_arr[producer].skb = NULL;
1445
1446 skb_copy_from_linear_data_offset(skb, copied,
1447 (char *)hwdesc + offset, copy_len);
1448
1449 copied += copy_len;
1450 offset = 0;
1451
1452 producer = get_next_index(producer, tx_ring->num_desc);
1453 }
1454
1455 tx_ring->producer = producer;
1456 barrier();
 1457 adapter->stats.lso_frames++;
1458}
1459
1460static int
1461qlcnic_map_tx_skb(struct pci_dev *pdev,
1462 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
1463{
1464 struct qlcnic_skb_frag *nf;
1465 struct skb_frag_struct *frag;
1466 int i, nr_frags;
1467 dma_addr_t map;
1468
1469 nr_frags = skb_shinfo(skb)->nr_frags;
1470 nf = &pbuf->frag_array[0];
1471
1472 map = pci_map_single(pdev, skb->data,
1473 skb_headlen(skb), PCI_DMA_TODEVICE);
1474 if (pci_dma_mapping_error(pdev, map))
1475 goto out_err;
1476
1477 nf->dma = map;
1478 nf->length = skb_headlen(skb);
1479
1480 for (i = 0; i < nr_frags; i++) {
1481 frag = &skb_shinfo(skb)->frags[i];
1482 nf = &pbuf->frag_array[i+1];
1483
1484 map = pci_map_page(pdev, frag->page, frag->page_offset,
1485 frag->size, PCI_DMA_TODEVICE);
1486 if (pci_dma_mapping_error(pdev, map))
1487 goto unwind;
1488
1489 nf->dma = map;
1490 nf->length = frag->size;
1491 }
1492
1493 return 0;
1494
1495unwind:
1496 while (--i >= 0) {
1497 nf = &pbuf->frag_array[i+1];
1498 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
1499 }
1500
1501 nf = &pbuf->frag_array[0];
1502 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
1503
1504out_err:
1505 return -ENOMEM;
1506}
1507
1508static inline void
1509qlcnic_clear_cmddesc(u64 *desc)
1510{
1511 desc[0] = 0ULL;
1512 desc[2] = 0ULL;
1513}
1514
 1515netdev_tx_t
1516qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1517{
1518 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1519 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1520 struct qlcnic_cmd_buffer *pbuf;
1521 struct qlcnic_skb_frag *buffrag;
1522 struct cmd_desc_type0 *hwdesc, *first_desc;
1523 struct pci_dev *pdev;
1524 int i, k;
1525
1526 u32 producer;
1527 int frag_count, no_of_desc;
1528 u32 num_txd = tx_ring->num_desc;
1529
1530 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
1531 netif_stop_queue(netdev);
1532 return NETDEV_TX_BUSY;
1533 }
1534
1535 frag_count = skb_shinfo(skb)->nr_frags + 1;
1536
 1537 /* 4 fragments per cmd desc */
1538 no_of_desc = (frag_count + 3) >> 2;
1539
1540 if (unlikely(no_of_desc + 2 > qlcnic_tx_avail(tx_ring))) {
1541 netif_stop_queue(netdev);
 1542 adapter->stats.xmit_off++;
1543 return NETDEV_TX_BUSY;
1544 }
1545
1546 producer = tx_ring->producer;
1547 pbuf = &tx_ring->cmd_buf_arr[producer];
1548
1549 pdev = adapter->pdev;
1550
1551 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
1552 adapter->stats.tx_dma_map_error++;
 1553 goto drop_packet;
 1554 }
1555
1556 pbuf->skb = skb;
1557 pbuf->frag_count = frag_count;
1558
1559 first_desc = hwdesc = &tx_ring->desc_head[producer];
1560 qlcnic_clear_cmddesc((u64 *)hwdesc);
1561
1562 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
1563 qlcnic_set_tx_port(first_desc, adapter->portnum);
1564
1565 for (i = 0; i < frag_count; i++) {
1566
1567 k = i % 4;
1568
1569 if ((k == 0) && (i > 0)) {
1570 /* move to next desc.*/
1571 producer = get_next_index(producer, num_txd);
1572 hwdesc = &tx_ring->desc_head[producer];
1573 qlcnic_clear_cmddesc((u64 *)hwdesc);
1574 tx_ring->cmd_buf_arr[producer].skb = NULL;
1575 }
1576
1577 buffrag = &pbuf->frag_array[i];
1578
1579 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
1580 switch (k) {
1581 case 0:
1582 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
1583 break;
1584 case 1:
1585 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
1586 break;
1587 case 2:
1588 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
1589 break;
1590 case 3:
1591 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
1592 break;
1593 }
1594 }
1595
1596 tx_ring->producer = get_next_index(producer, num_txd);
1597
1598 qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
1599
1600 qlcnic_update_cmd_producer(adapter, tx_ring);
1601
1602 adapter->stats.txbytes += skb->len;
1603 adapter->stats.xmitcalled++;
1604
1605 return NETDEV_TX_OK;
1606
1607drop_packet:
1608 adapter->stats.txdropped++;
1609 dev_kfree_skb_any(skb);
1610 return NETDEV_TX_OK;
1611}
1612
1613static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
1614{
1615 struct net_device *netdev = adapter->netdev;
1616 u32 temp, temp_state, temp_val;
1617 int rv = 0;
1618
1619 temp = QLCRD32(adapter, CRB_TEMP_STATE);
1620
1621 temp_state = qlcnic_get_temp_state(temp);
1622 temp_val = qlcnic_get_temp_val(temp);
1623
1624 if (temp_state == QLCNIC_TEMP_PANIC) {
1625 dev_err(&netdev->dev,
1626 "Device temperature %d degrees C exceeds"
1627 " maximum allowed. Hardware has been shut down.\n",
1628 temp_val);
1629 rv = 1;
1630 } else if (temp_state == QLCNIC_TEMP_WARN) {
1631 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
1632 dev_err(&netdev->dev,
1633 "Device temperature %d degrees C "
1634 "exceeds operating range."
1635 " Immediate action needed.\n",
1636 temp_val);
1637 }
1638 } else {
1639 if (adapter->temp == QLCNIC_TEMP_WARN) {
1640 dev_info(&netdev->dev,
1641 "Device temperature is now %d degrees C"
1642 " in normal range.\n", temp_val);
1643 }
1644 }
1645 adapter->temp = temp_state;
1646 return rv;
1647}
1648
1649void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
1650{
1651 struct net_device *netdev = adapter->netdev;
1652
1653 if (adapter->ahw.linkup && !linkup) {
1654 dev_info(&netdev->dev, "NIC Link is down\n");
1655 adapter->ahw.linkup = 0;
1656 if (netif_running(netdev)) {
1657 netif_carrier_off(netdev);
1658 netif_stop_queue(netdev);
1659 }
1660 } else if (!adapter->ahw.linkup && linkup) {
1661 dev_info(&netdev->dev, "NIC Link is up\n");
1662 adapter->ahw.linkup = 1;
1663 if (netif_running(netdev)) {
1664 netif_carrier_on(netdev);
1665 netif_wake_queue(netdev);
1666 }
1667 }
1668}
1669
1670static void qlcnic_tx_timeout(struct net_device *netdev)
1671{
1672 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1673
1674 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
1675 return;
1676
1677 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
1678 schedule_work(&adapter->tx_timeout_task);
1679}
1680
1681static void qlcnic_tx_timeout_task(struct work_struct *work)
1682{
1683 struct qlcnic_adapter *adapter =
1684 container_of(work, struct qlcnic_adapter, tx_timeout_task);
1685
1686 if (!netif_running(adapter->netdev))
1687 return;
1688
1689 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1690 return;
1691
1692 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
1693 goto request_reset;
1694
1695 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1696 if (!qlcnic_reset_context(adapter)) {
1697 adapter->netdev->trans_start = jiffies;
1698 return;
1699
1700 /* context reset failed, fall through for fw reset */
1701 }
1702
1703request_reset:
1704 adapter->need_fw_reset = 1;
1705 clear_bit(__QLCNIC_RESETTING, &adapter->state);
 1706 QLCDB(adapter, DRV, "Resetting adapter\n");
1707}
1708
1709static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
1710{
1711 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1712 struct net_device_stats *stats = &netdev->stats;
1713
1714 memset(stats, 0, sizeof(*stats));
1715
1716 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
1717 stats->tx_packets = adapter->stats.xmitfinished;
 1718 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
1719 stats->tx_bytes = adapter->stats.txbytes;
1720 stats->rx_dropped = adapter->stats.rxdropped;
1721 stats->tx_dropped = adapter->stats.txdropped;
1722
1723 return stats;
1724}
1725
 1726static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
 1727{
1728 u32 status;
1729
1730 status = readl(adapter->isr_int_vec);
1731
1732 if (!(status & adapter->int_vec_bit))
1733 return IRQ_NONE;
1734
1735 /* check interrupt state machine, to be sure */
1736 status = readl(adapter->crb_int_state_reg);
1737 if (!ISR_LEGACY_INT_TRIGGERED(status))
1738 return IRQ_NONE;
1739
1740 writel(0xffffffff, adapter->tgt_status_reg);
1741 /* read twice to ensure write is flushed */
1742 readl(adapter->isr_int_vec);
1743 readl(adapter->isr_int_vec);
1744
1745 return IRQ_HANDLED;
1746}
1747
1748static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
1749{
1750 struct qlcnic_host_sds_ring *sds_ring = data;
1751 struct qlcnic_adapter *adapter = sds_ring->adapter;
1752
1753 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1754 goto done;
1755 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
1756 writel(0xffffffff, adapter->tgt_status_reg);
1757 goto done;
1758 }
1759
1760 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
1761 return IRQ_NONE;
1762
1763done:
1764 adapter->diag_cnt++;
1765 qlcnic_enable_int(sds_ring);
1766 return IRQ_HANDLED;
1767}
1768
1769static irqreturn_t qlcnic_intr(int irq, void *data)
1770{
1771 struct qlcnic_host_sds_ring *sds_ring = data;
1772 struct qlcnic_adapter *adapter = sds_ring->adapter;
1773
1774 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
1775 return IRQ_NONE;
1776
1777 napi_schedule(&sds_ring->napi);
1778
1779 return IRQ_HANDLED;
1780}
1781
1782static irqreturn_t qlcnic_msi_intr(int irq, void *data)
1783{
1784 struct qlcnic_host_sds_ring *sds_ring = data;
1785 struct qlcnic_adapter *adapter = sds_ring->adapter;
1786
1787 /* clear interrupt */
1788 writel(0xffffffff, adapter->tgt_status_reg);
1789
1790 napi_schedule(&sds_ring->napi);
1791 return IRQ_HANDLED;
1792}
1793
1794static irqreturn_t qlcnic_msix_intr(int irq, void *data)
1795{
1796 struct qlcnic_host_sds_ring *sds_ring = data;
1797
1798 napi_schedule(&sds_ring->napi);
1799 return IRQ_HANDLED;
1800}
1801
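/* Reclaim completed Tx buffers up to the hardware consumer index and wake the queue once enough descriptors are free. */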
1802static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
1803{
1804 u32 sw_consumer, hw_consumer;
1805 int count = 0, i;
1806 struct qlcnic_cmd_buffer *buffer;
1807 struct pci_dev *pdev = adapter->pdev;
1808 struct net_device *netdev = adapter->netdev;
1809 struct qlcnic_skb_frag *frag;
1810 int done;
1811 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1812
1813 if (!spin_trylock(&adapter->tx_clean_lock))
1814 return 1;
1815
1816 sw_consumer = tx_ring->sw_consumer;
1817 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
1818
1819 while (sw_consumer != hw_consumer) {
1820 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
1821 if (buffer->skb) {
1822 frag = &buffer->frag_array[0];
1823 pci_unmap_single(pdev, frag->dma, frag->length,
1824 PCI_DMA_TODEVICE);
1825 frag->dma = 0ULL;
1826 for (i = 1; i < buffer->frag_count; i++) {
1827 frag++;
1828 pci_unmap_page(pdev, frag->dma, frag->length,
1829 PCI_DMA_TODEVICE);
1830 frag->dma = 0ULL;
1831 }
1832
1833 adapter->stats.xmitfinished++;
1834 dev_kfree_skb_any(buffer->skb);
1835 buffer->skb = NULL;
1836 }
1837
1838 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
1839 if (++count >= MAX_STATUS_HANDLE)
1840 break;
1841 }
1842
1843 if (count && netif_running(netdev)) {
1844 tx_ring->sw_consumer = sw_consumer;
1845
1846 smp_mb();
1847
1848 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
1849 __netif_tx_lock(tx_ring->txq, smp_processor_id());
1850 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
1851 netif_wake_queue(netdev);
1852 adapter->tx_timeo_cnt = 0;
 1853 adapter->stats.xmit_on++;
1854 }
1855 __netif_tx_unlock(tx_ring->txq);
1856 }
1857 }
1858 /*
1859 * If everything is freed up to consumer then check if the ring is full
1860 * If the ring is full then check if more needs to be freed and
1861 * schedule the call back again.
1862 *
1863 * This happens when there are 2 CPUs. One could be freeing and the
1864 * other filling it. If the ring is full when we get out of here and
1865 * the card has already interrupted the host then the host can miss the
1866 * interrupt.
1867 *
1868 * There is still a possible race condition and the host could miss an
1869 * interrupt. The card has to take care of this.
1870 */
1871 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
1872 done = (sw_consumer == hw_consumer);
1873 spin_unlock(&adapter->tx_clean_lock);
1874
1875 return done;
1876}
1877
1878static int qlcnic_poll(struct napi_struct *napi, int budget)
1879{
1880 struct qlcnic_host_sds_ring *sds_ring =
1881 container_of(napi, struct qlcnic_host_sds_ring, napi);
1882
1883 struct qlcnic_adapter *adapter = sds_ring->adapter;
1884
1885 int tx_complete;
1886 int work_done;
1887
1888 tx_complete = qlcnic_process_cmd_ring(adapter);
1889
1890 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
1891
1892 if ((work_done < budget) && tx_complete) {
1893 napi_complete(&sds_ring->napi);
1894 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1895 qlcnic_enable_int(sds_ring);
1896 }
1897
1898 return work_done;
1899}
1900
1901#ifdef CONFIG_NET_POLL_CONTROLLER
1902static void qlcnic_poll_controller(struct net_device *netdev)
1903{
1904 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1905 disable_irq(adapter->irq);
1906 qlcnic_intr(adapter->irq, adapter);
1907 enable_irq(adapter->irq);
1908}
1909#endif
1910
1911static void
1912qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
1913{
1914 u32 val;
1915
1916 val = adapter->portnum & 0xf;
1917 val |= encoding << 7;
1918 val |= (jiffies - adapter->dev_rst_time) << 8;
1919
1920 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
1921 adapter->dev_rst_time = jiffies;
1922}
1923
1924static int
1925qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
1926{
1927 u32 val;
1928
1929 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
1930 state != QLCNIC_DEV_NEED_QUISCENT);
1931
1932 if (qlcnic_api_lock(adapter))
 1933 return -EIO;
1934
1935 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1936
1937 if (state == QLCNIC_DEV_NEED_RESET)
 1938 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
 1939 else if (state == QLCNIC_DEV_NEED_QUISCENT)
 1940 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
1941
1942 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1943
1944 qlcnic_api_unlock(adapter);
1945
1946 return 0;
1947}
1948
1949static int
1950qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
1951{
1952 u32 val;
1953
1954 if (qlcnic_api_lock(adapter))
1955 return -EBUSY;
1956
1957 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
 1958 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
1959 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1960
1961 qlcnic_api_unlock(adapter);
1962
1963 return 0;
1964}
1965
1966static void
1967qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter)
1968{
1969 u32 val;
1970
1971 if (qlcnic_api_lock(adapter))
1972 goto err;
1973
1974 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
 1975 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
1976 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
1977
1978 if (!(val & 0x11111111))
1979 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
1980
1981 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
 1982 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
1983 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
1984
1985 qlcnic_api_unlock(adapter);
1986err:
1987 adapter->fw_fail_cnt = 0;
1988 clear_bit(__QLCNIC_START_FW, &adapter->state);
1989 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1990}
1991
 1992/* Grab api lock before checking state */
1993static int
1994qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
1995{
1996 int act, state;
1997
1998 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
1999 act = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
2000
2001 if (((state & 0x11111111) == (act & 0x11111111)) ||
2002 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2003 return 0;
2004 else
2005 return 1;
2006}
2007
static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
{
	u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);

	if (val != QLCNIC_DRV_IDC_VER) {
		dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
			" idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
	}

	return 0;
}

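/*
 * Decide whether this function should (re)load the firmware. Returns 1 if
 * this function must start the firmware, 0 if another function has already
 * brought the device to READY, and a negative value on failure or timeout.
 */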
static int
qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
{
	u32 val, prev_state;
	u8 dev_init_timeo = adapter->dev_init_timeo;
	u8 portnum = adapter->portnum;
	u8 ret;

	if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
		return 1;

	if (qlcnic_api_lock(adapter))
		return -1;

	val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
	if (!(val & (1 << (portnum * 4)))) {
		QLC_DEV_SET_REF_CNT(val, portnum);
		QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
	}

	prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
	QLCDB(adapter, HW, "Device state = %u\n", prev_state);

	switch (prev_state) {
	case QLCNIC_DEV_COLD:
		QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
		QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
		qlcnic_idc_debug_info(adapter, 0);
		qlcnic_api_unlock(adapter);
		return 1;

	case QLCNIC_DEV_READY:
		ret = qlcnic_check_idc_ver(adapter);
		qlcnic_api_unlock(adapter);
		return ret;

	case QLCNIC_DEV_NEED_RESET:
		val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
		QLC_DEV_SET_RST_RDY(val, portnum);
		QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
		break;

	case QLCNIC_DEV_NEED_QUISCENT:
		val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
		QLC_DEV_SET_QSCNT_RDY(val, portnum);
		QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
		break;

	case QLCNIC_DEV_FAILED:
		dev_err(&adapter->pdev->dev, "Device in failed state.\n");
		qlcnic_api_unlock(adapter);
		return -1;

	case QLCNIC_DEV_INITIALIZING:
	case QLCNIC_DEV_QUISCENT:
		break;
	}

	qlcnic_api_unlock(adapter);

	do {
		msleep(1000);
		prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);

		if (prev_state == QLCNIC_DEV_QUISCENT)
			continue;
	} while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);

	if (!dev_init_timeo) {
		dev_err(&adapter->pdev->dev,
			"Waiting for device to initialize timeout\n");
		return -1;
	}

	if (qlcnic_api_lock(adapter))
		return -1;

	val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
	QLC_DEV_CLR_RST_QSCNT(val, portnum);
	QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);

	ret = qlcnic_check_idc_ver(adapter);
	qlcnic_api_unlock(adapter);

	return ret;
}

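/*
 * Delayed work that drives the firmware reset handshake: wait for all
 * functions to ack (or for the ack timeout), transition the device state and
 * restart the firmware, then schedule qlcnic_attach_work.
 */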
static void
qlcnic_fwinit_work(struct work_struct *work)
{
	struct qlcnic_adapter *adapter = container_of(work,
			struct qlcnic_adapter, fw_work.work);
	u32 dev_state = 0xf;

	if (qlcnic_api_lock(adapter))
		goto err_ret;

	dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
	if (dev_state == QLCNIC_DEV_QUISCENT) {
		qlcnic_api_unlock(adapter);
		qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
						FW_POLL_DELAY * 2);
		return;
	}

	if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
		dev_err(&adapter->pdev->dev, "Reset: failed to get ack in %d sec\n",
					adapter->reset_ack_timeo);
		goto skip_ack_check;
	}

	if (!qlcnic_check_drv_state(adapter)) {
skip_ack_check:
		dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);

		if (dev_state == QLCNIC_DEV_NEED_QUISCENT) {
			QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
						QLCNIC_DEV_QUISCENT);
			qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
						FW_POLL_DELAY * 2);
			QLCDB(adapter, DRV, "Quiescing the driver\n");
			qlcnic_idc_debug_info(adapter, 0);

			qlcnic_api_unlock(adapter);
			return;
		}

		if (dev_state == QLCNIC_DEV_NEED_RESET) {
			QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
						QLCNIC_DEV_INITIALIZING);
			set_bit(__QLCNIC_START_FW, &adapter->state);
			QLCDB(adapter, DRV, "Restarting fw\n");
			qlcnic_idc_debug_info(adapter, 0);
		}

		qlcnic_api_unlock(adapter);

		if (!qlcnic_start_firmware(adapter)) {
			qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
			return;
		}
		goto err_ret;
	}

	qlcnic_api_unlock(adapter);

	dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
	QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);

	switch (dev_state) {
	case QLCNIC_DEV_QUISCENT:
	case QLCNIC_DEV_NEED_QUISCENT:
	case QLCNIC_DEV_NEED_RESET:
		qlcnic_schedule_work(adapter,
			qlcnic_fwinit_work, FW_POLL_DELAY);
		return;
	case QLCNIC_DEV_FAILED:
		break;

	default:
		if (!qlcnic_start_firmware(adapter)) {
			qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
			return;
		}
	}

err_ret:
	dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
		"fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
	netif_device_attach(adapter->netdev);
	qlcnic_clr_all_drv_state(adapter);
}

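/*
 * Delayed work that tears the interface down in response to a reset or
 * quiescent request, acks the new device state, and then kicks off
 * qlcnic_fwinit_work to wait for the firmware to come back.
 */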
static void
qlcnic_detach_work(struct work_struct *work)
{
	struct qlcnic_adapter *adapter = container_of(work,
			struct qlcnic_adapter, fw_work.work);
	struct net_device *netdev = adapter->netdev;
	u32 status;

	netif_device_detach(netdev);

	qlcnic_down(adapter, netdev);

	rtnl_lock();
	qlcnic_detach(adapter);
	rtnl_unlock();

	status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);

	if (status & QLCNIC_RCODE_FATAL_ERROR)
		goto err_ret;

	if (adapter->temp == QLCNIC_TEMP_PANIC)
		goto err_ret;

	if (qlcnic_set_drv_state(adapter, adapter->dev_state))
		goto err_ret;

	adapter->fw_wait_cnt = 0;

	qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);

	return;

err_ret:
	dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
			status, adapter->temp);
	netif_device_attach(netdev);
	qlcnic_clr_all_drv_state(adapter);
}

/* Transition to RESET state from READY state only */
static void
qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
{
	u32 state;

	if (qlcnic_api_lock(adapter))
		return;

	state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);

	if (state == QLCNIC_DEV_READY) {
		QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
		QLCDB(adapter, DRV, "NEED_RESET state set\n");
		qlcnic_idc_debug_info(adapter, 0);
	}

	qlcnic_api_unlock(adapter);
}

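/*
 * All recovery work (fwinit/detach/attach/poll) shares the single
 * adapter->fw_work delayed work; it is re-initialized with the requested
 * handler each time it is scheduled.
 */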
static void
qlcnic_schedule_work(struct qlcnic_adapter *adapter,
		work_func_t func, int delay)
{
	INIT_DELAYED_WORK(&adapter->fw_work, func);
	schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay));
}

static void
qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
{
	while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
		msleep(10);

	cancel_delayed_work_sync(&adapter->fw_work);
}

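/*
 * Delayed work that re-attaches and brings the interface back up after a
 * firmware reset, then resumes the periodic health poll.
 */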
static void
qlcnic_attach_work(struct work_struct *work)
{
	struct qlcnic_adapter *adapter = container_of(work,
				struct qlcnic_adapter, fw_work.work);
	struct net_device *netdev = adapter->netdev;
	int err;

	if (netif_running(netdev)) {
		err = qlcnic_attach(adapter);
		if (err)
			goto done;

		err = qlcnic_up(adapter, netdev);
		if (err) {
			qlcnic_detach(adapter);
			goto done;
		}

		qlcnic_config_indev_addr(netdev, NETDEV_UP);
	}

done:
	netif_device_attach(netdev);
	adapter->fw_fail_cnt = 0;
	clear_bit(__QLCNIC_RESETTING, &adapter->state);

	if (!qlcnic_clr_drv_state(adapter))
		qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
							FW_POLL_DELAY);
}

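/*
 * Periodic health check: monitor temperature, pending reset/quiescent
 * requests and the firmware heartbeat counter. Returns 1 (and schedules
 * qlcnic_detach_work when auto reset is enabled) if recovery is needed,
 * 0 otherwise.
 */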
static int
qlcnic_check_health(struct qlcnic_adapter *adapter)
{
	u32 state = 0, heartbit;
	struct net_device *netdev = adapter->netdev;

	if (qlcnic_check_temp(adapter))
		goto detach;

	if (adapter->need_fw_reset)
		qlcnic_dev_request_reset(adapter);

	state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
	if (state == QLCNIC_DEV_NEED_RESET || state == QLCNIC_DEV_NEED_QUISCENT)
		adapter->need_fw_reset = 1;

	heartbit = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
	if (heartbit != adapter->heartbit) {
		adapter->heartbit = heartbit;
		adapter->fw_fail_cnt = 0;
		if (adapter->need_fw_reset)
			goto detach;
		return 0;
	}

	if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
		return 0;

	qlcnic_dev_request_reset(adapter);

	clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);

	dev_info(&netdev->dev, "firmware hang detected\n");

detach:
	adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
		QLCNIC_DEV_NEED_RESET;

	if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
	    !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {

		qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
		QLCDB(adapter, DRV, "fw recovery scheduled.\n");
	}

	return 1;
}

static void
qlcnic_fw_poll_work(struct work_struct *work)
{
	struct qlcnic_adapter *adapter = container_of(work,
				struct qlcnic_adapter, fw_work.work);

	if (test_bit(__QLCNIC_RESETTING, &adapter->state))
		goto reschedule;

	if (qlcnic_check_health(adapter))
		return;

reschedule:
	qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
}

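/*
 * sysfs attributes: "bridged_mode" accepts 0/1 and toggles firmware bridged
 * mode when the firmware advertises QLCNIC_FW_CAPABILITY_BDG and the adapter
 * is up.
 */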
static ssize_t
qlcnic_store_bridged_mode(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
	unsigned long new;
	int ret = -EINVAL;

	if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
		goto err_out;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		goto err_out;

	if (strict_strtoul(buf, 2, &new))
		goto err_out;

	if (!qlcnic_config_bridged_mode(adapter, !!new))
		ret = len;

err_out:
	return ret;
}

static ssize_t
qlcnic_show_bridged_mode(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
	int bridged_mode = 0;

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
		bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);

	return sprintf(buf, "%d\n", bridged_mode);
}

static struct device_attribute dev_attr_bridged_mode = {
	.attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
	.show = qlcnic_show_bridged_mode,
	.store = qlcnic_store_bridged_mode,
};

static ssize_t
qlcnic_store_diag_mode(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
	unsigned long new;

	if (strict_strtoul(buf, 2, &new))
		return -EINVAL;

	if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
		adapter->flags ^= QLCNIC_DIAG_ENABLED;

	return len;
}

static ssize_t
qlcnic_show_diag_mode(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n",
			!!(adapter->flags & QLCNIC_DIAG_ENABLED));
}

static struct device_attribute dev_attr_diag_mode = {
	.attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
	.show = qlcnic_show_diag_mode,
	.store = qlcnic_store_diag_mode,
};

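/*
 * "crb" binary attribute: diagnostic access to CRB registers (4-byte,
 * aligned) and the CAM/QM window (8-byte), gated on diag_mode being enabled.
 */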
static int
qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
		loff_t offset, size_t size)
{
	size_t crb_size = 4;

	if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
		return -EIO;

	if (offset < QLCNIC_PCI_CRBSPACE) {
		if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
					QLCNIC_PCI_CAMQM_END))
			crb_size = 8;
		else
			return -EINVAL;
	}

	if ((size != crb_size) || (offset & (crb_size-1)))
		return -EINVAL;

	return 0;
}

static ssize_t
qlcnic_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t offset, size_t size)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
	u32 data;
	u64 qmdata;
	int ret;

	ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
	if (ret != 0)
		return ret;

	if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
		qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
		memcpy(buf, &qmdata, size);
	} else {
		data = QLCRD32(adapter, offset);
		memcpy(buf, &data, size);
	}
	return size;
}

static ssize_t
qlcnic_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t offset, size_t size)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
	u32 data;
	u64 qmdata;
	int ret;

	ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
	if (ret != 0)
		return ret;

	if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
		memcpy(&qmdata, buf, size);
		qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
	} else {
		memcpy(&data, buf, size);
		QLCWR32(adapter, offset, data);
	}
	return size;
}

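/*
 * "mem" binary attribute: diagnostic 8-byte, 8-byte-aligned access to adapter
 * memory through the qlcnic_pci_mem_{read,write}_2M helpers, also gated on
 * diag_mode.
 */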
static int
qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
		loff_t offset, size_t size)
{
	if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
		return -EIO;

	if ((size != 8) || (offset & 0x7))
		return -EIO;

	return 0;
}

static ssize_t
qlcnic_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t offset, size_t size)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
	u64 data;
	int ret;

	ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
	if (ret != 0)
		return ret;

	if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
		return -EIO;

	memcpy(buf, &data, size);

	return size;
}

static ssize_t
qlcnic_sysfs_write_mem(struct kobject *kobj, struct bin_attribute *attr,
		char *buf, loff_t offset, size_t size)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
	u64 data;
	int ret;

	ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
	if (ret != 0)
		return ret;

	memcpy(&data, buf, size);

	if (qlcnic_pci_mem_write_2M(adapter, offset, data))
		return -EIO;

	return size;
}

static struct bin_attribute bin_attr_crb = {
	.attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
	.size = 0,
	.read = qlcnic_sysfs_read_crb,
	.write = qlcnic_sysfs_write_crb,
};

static struct bin_attribute bin_attr_mem = {
	.attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
	.size = 0,
	.read = qlcnic_sysfs_read_mem,
	.write = qlcnic_sysfs_write_mem,
};

static void
qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
		if (device_create_file(dev, &dev_attr_bridged_mode))
			dev_warn(dev,
				"failed to create bridged_mode sysfs entry\n");
}

static void
qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;

	if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
		device_remove_file(dev, &dev_attr_bridged_mode);
}

static void
qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;

	if (device_create_file(dev, &dev_attr_diag_mode))
		dev_info(dev, "failed to create diag_mode sysfs entry\n");
	if (device_create_bin_file(dev, &bin_attr_crb))
		dev_info(dev, "failed to create crb sysfs entry\n");
	if (device_create_bin_file(dev, &bin_attr_mem))
		dev_info(dev, "failed to create mem sysfs entry\n");
}

static void
qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
{
	struct device *dev = &adapter->pdev->dev;

	device_remove_file(dev, &dev_attr_diag_mode);
	device_remove_bin_file(dev, &bin_attr_crb);
	device_remove_bin_file(dev, &bin_attr_mem);
}

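/*
 * CONFIG_INET notifier support: propagate interface IP addresses through
 * qlcnic_config_ipaddr() on NETDEV_UP/NETDEV_DOWN events, resolving VLAN
 * devices to the underlying qlcnic netdev first.
 */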
#ifdef CONFIG_INET

#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)

static void
qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
{
	struct in_device *indev;
	struct qlcnic_adapter *adapter = netdev_priv(dev);

	indev = in_dev_get(dev);
	if (!indev)
		return;

	for_ifa(indev) {
		switch (event) {
		case NETDEV_UP:
			qlcnic_config_ipaddr(adapter,
					ifa->ifa_address, QLCNIC_IP_UP);
			break;
		case NETDEV_DOWN:
			qlcnic_config_ipaddr(adapter,
					ifa->ifa_address, QLCNIC_IP_DOWN);
			break;
		default:
			break;
		}
	} endfor_ifa(indev);

	in_dev_put(indev);
}

static int qlcnic_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct qlcnic_adapter *adapter;
	struct net_device *dev = (struct net_device *)ptr;

recheck:
	if (dev == NULL)
		goto done;

	if (dev->priv_flags & IFF_802_1Q_VLAN) {
		dev = vlan_dev_real_dev(dev);
		goto recheck;
	}

	if (!is_qlcnic_netdev(dev))
		goto done;

	adapter = netdev_priv(dev);

	if (!adapter)
		goto done;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		goto done;

	qlcnic_config_indev_addr(dev, event);
done:
	return NOTIFY_DONE;
}

static int
qlcnic_inetaddr_event(struct notifier_block *this,
		unsigned long event, void *ptr)
{
	struct qlcnic_adapter *adapter;
	struct net_device *dev;

	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;

	dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;

recheck:
	if (dev == NULL || !netif_running(dev))
		goto done;

	if (dev->priv_flags & IFF_802_1Q_VLAN) {
		dev = vlan_dev_real_dev(dev);
		goto recheck;
	}

	if (!is_qlcnic_netdev(dev))
		goto done;

	adapter = netdev_priv(dev);

	if (!adapter)
		goto done;

	if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
		goto done;

	switch (event) {
	case NETDEV_UP:
		qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
		break;
	case NETDEV_DOWN:
		qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
		break;
	default:
		break;
	}

done:
	return NOTIFY_DONE;
}

static struct notifier_block qlcnic_netdev_cb = {
	.notifier_call = qlcnic_netdev_event,
};

static struct notifier_block qlcnic_inetaddr_cb = {
	.notifier_call = qlcnic_inetaddr_event,
};
#else
static void
qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
{ }
#endif

static struct pci_driver qlcnic_driver = {
	.name = qlcnic_driver_name,
	.id_table = qlcnic_pci_tbl,
	.probe = qlcnic_probe,
	.remove = __devexit_p(qlcnic_remove),
#ifdef CONFIG_PM
	.suspend = qlcnic_suspend,
	.resume = qlcnic_resume,
#endif
	.shutdown = qlcnic_shutdown
};

static int __init qlcnic_init_module(void)
{
	printk(KERN_INFO "%s\n", qlcnic_driver_string);

#ifdef CONFIG_INET
	register_netdevice_notifier(&qlcnic_netdev_cb);
	register_inetaddr_notifier(&qlcnic_inetaddr_cb);
#endif

	return pci_register_driver(&qlcnic_driver);
}

module_init(qlcnic_init_module);

static void __exit qlcnic_exit_module(void)
{
	pci_unregister_driver(&qlcnic_driver);

#ifdef CONFIG_INET
	unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
	unregister_netdevice_notifier(&qlcnic_netdev_cb);
#endif
}

module_exit(qlcnic_exit_module);