1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28
29#include "qlcnic.h"
30
31#include <linux/dma-mapping.h>
32#include <linux/if_vlan.h>
33#include <net/ip.h>
34#include <linux/ipv6.h>
35#include <linux/inetdevice.h>
36#include <linux/sysfs.h>
37#include <linux/aer.h>
38
39MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
40MODULE_LICENSE("GPL");
41MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
42MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
43
44char qlcnic_driver_name[] = "qlcnic";
45static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
46 "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
47
48static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
49
50/* Default to restricted 1G auto-neg mode */
51static int wol_port_mode = 5;
52
53static int use_msi = 1;
54module_param(use_msi, int, 0644);
55MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
56
57static int use_msi_x = 1;
58module_param(use_msi_x, int, 0644);
59MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
60
61static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
62module_param(auto_fw_reset, int, 0644);
63MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
64
65static int load_fw_file;
66module_param(load_fw_file, int, 0644);
67MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
68
69static int qlcnic_config_npars;
70module_param(qlcnic_config_npars, int, 0644);
71MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
72
73static int __devinit qlcnic_probe(struct pci_dev *pdev,
74 const struct pci_device_id *ent);
75static void __devexit qlcnic_remove(struct pci_dev *pdev);
76static int qlcnic_open(struct net_device *netdev);
77static int qlcnic_close(struct net_device *netdev);
78static void qlcnic_tx_timeout(struct net_device *netdev);
79static void qlcnic_attach_work(struct work_struct *work);
80static void qlcnic_fwinit_work(struct work_struct *work);
81static void qlcnic_fw_poll_work(struct work_struct *work);
82static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
83 work_func_t func, int delay);
84static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
85static int qlcnic_poll(struct napi_struct *napi, int budget);
86static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
87#ifdef CONFIG_NET_POLL_CONTROLLER
88static void qlcnic_poll_controller(struct net_device *netdev);
89#endif
90
91static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
92static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
93static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
94static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
95
96static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
97static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8);
98static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
99
100static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
101static irqreturn_t qlcnic_intr(int irq, void *data);
102static irqreturn_t qlcnic_msi_intr(int irq, void *data);
103static irqreturn_t qlcnic_msix_intr(int irq, void *data);
104
105static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
106static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
107static int qlcnic_start_firmware(struct qlcnic_adapter *);
108
109static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
110static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
111static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
112static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
113static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
114 struct qlcnic_esw_func_cfg *);
115/* PCI Device ID Table */
116#define ENTRY(device) \
117 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
118 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
119
120#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
121
122static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
123 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
124 {0,}
125};
126
127MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
128
129
130void
131qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
132 struct qlcnic_host_tx_ring *tx_ring)
133{
134 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
135}
136
137static const u32 msi_tgt_status[8] = {
138 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
139 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
140 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
141 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
142};
143
144static const
145struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
146
147static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
148{
149 writel(0, sds_ring->crb_intr_mask);
150}
151
152static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
153{
154 struct qlcnic_adapter *adapter = sds_ring->adapter;
155
156 writel(0x1, sds_ring->crb_intr_mask);
157
158 if (!QLCNIC_IS_MSI_FAMILY(adapter))
159 writel(0xfbff, adapter->tgt_mask_reg);
160}
161
162static int
163qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
164{
165 int size = sizeof(struct qlcnic_host_sds_ring) * count;
166
167 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
168
169 return (recv_ctx->sds_rings == NULL);
170}
171
172static void
173qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
174{
175 if (recv_ctx->sds_rings != NULL)
176 kfree(recv_ctx->sds_rings);
177
178 recv_ctx->sds_rings = NULL;
179}
180
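/* One NAPI context is registered per SDS ring: the last ring is
 * serviced by qlcnic_poll, the remaining rings by qlcnic_rx_poll. */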
181static int
182qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
183{
184 int ring;
185 struct qlcnic_host_sds_ring *sds_ring;
186 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
187
188 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
189 return -ENOMEM;
190
191 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
192 sds_ring = &recv_ctx->sds_rings[ring];
193
194 if (ring == adapter->max_sds_rings - 1)
195 netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
196 QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
197 else
198 netif_napi_add(netdev, &sds_ring->napi,
199 qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
200 }
201
202 return 0;
203}
204
205static void
206qlcnic_napi_del(struct qlcnic_adapter *adapter)
207{
208 int ring;
209 struct qlcnic_host_sds_ring *sds_ring;
210 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
211
212 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
213 sds_ring = &recv_ctx->sds_rings[ring];
214 netif_napi_del(&sds_ring->napi);
215 }
216
217 qlcnic_free_sds_rings(&adapter->recv_ctx);
218}
219
220static void
221qlcnic_napi_enable(struct qlcnic_adapter *adapter)
222{
223 int ring;
224 struct qlcnic_host_sds_ring *sds_ring;
225 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
226
227 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
228 return;
229
230 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
231 sds_ring = &recv_ctx->sds_rings[ring];
232 napi_enable(&sds_ring->napi);
233 qlcnic_enable_int(sds_ring);
234 }
235}
236
237static void
238qlcnic_napi_disable(struct qlcnic_adapter *adapter)
239{
240 int ring;
241 struct qlcnic_host_sds_ring *sds_ring;
242 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
243
244 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
245 return;
246
247 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
248 sds_ring = &recv_ctx->sds_rings[ring];
249 qlcnic_disable_int(sds_ring);
250 napi_synchronize(&sds_ring->napi);
251 napi_disable(&sds_ring->napi);
252 }
253}
254
255static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
256{
257 memset(&adapter->stats, 0, sizeof(adapter->stats));
258}
259
260static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
261{
262 u32 val, data;
263
264 val = adapter->ahw.board_type;
265 if ((val == QLCNIC_BRDTYPE_P3_HMEZ) ||
266 (val == QLCNIC_BRDTYPE_P3_XG_LOM)) {
267 if (port_mode == QLCNIC_PORT_MODE_802_3_AP) {
268 data = QLCNIC_PORT_MODE_802_3_AP;
269 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
270 } else if (port_mode == QLCNIC_PORT_MODE_XG) {
271 data = QLCNIC_PORT_MODE_XG;
272 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
273 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_1G) {
274 data = QLCNIC_PORT_MODE_AUTO_NEG_1G;
275 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
276 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_XG) {
277 data = QLCNIC_PORT_MODE_AUTO_NEG_XG;
278 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
279 } else {
280 data = QLCNIC_PORT_MODE_AUTO_NEG;
281 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
282 }
283
284 if ((wol_port_mode != QLCNIC_PORT_MODE_802_3_AP) &&
285 (wol_port_mode != QLCNIC_PORT_MODE_XG) &&
286 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_1G) &&
287 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_XG)) {
288 wol_port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
289 }
290 QLCWR32(adapter, QLCNIC_WOL_PORT_MODE, wol_port_mode);
291 }
292}
293
294static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
295{
296 u32 control;
297 int pos;
298
299 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
300 if (pos) {
301 pci_read_config_dword(pdev, pos, &control);
302 if (enable)
303 control |= PCI_MSIX_FLAGS_ENABLE;
304 else
305 control = 0;
306 pci_write_config_dword(pdev, pos, control);
307 }
308}
309
310static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
311{
312 int i;
313
314 for (i = 0; i < count; i++)
315 adapter->msix_entries[i].entry = i;
316}
317
318static int
319qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
320{
321 u8 mac_addr[ETH_ALEN];
322 struct net_device *netdev = adapter->netdev;
323 struct pci_dev *pdev = adapter->pdev;
324
325 if (adapter->nic_ops->get_mac_addr(adapter, mac_addr) != 0)
326 return -EIO;
327
328 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
329 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
330 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
331
332 /* set station address */
333
334 if (!is_valid_ether_addr(netdev->perm_addr))
335 dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
336 netdev->dev_addr);
337
338 return 0;
339}
340
341static int qlcnic_set_mac(struct net_device *netdev, void *p)
342{
343 struct qlcnic_adapter *adapter = netdev_priv(netdev);
344 struct sockaddr *addr = p;
345
346 if (!is_valid_ether_addr(addr->sa_data))
347 return -EINVAL;
348
349 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
350 netif_device_detach(netdev);
351 qlcnic_napi_disable(adapter);
352 }
353
354 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
355 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
356 qlcnic_set_multi(adapter->netdev);
357
358 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
359 netif_device_attach(netdev);
360 qlcnic_napi_enable(adapter);
361 }
362 return 0;
363}
364
365static const struct net_device_ops qlcnic_netdev_ops = {
366 .ndo_open = qlcnic_open,
367 .ndo_stop = qlcnic_close,
368 .ndo_start_xmit = qlcnic_xmit_frame,
369 .ndo_get_stats = qlcnic_get_stats,
370 .ndo_validate_addr = eth_validate_addr,
371 .ndo_set_multicast_list = qlcnic_set_multi,
372 .ndo_set_mac_address = qlcnic_set_mac,
373 .ndo_change_mtu = qlcnic_change_mtu,
374 .ndo_tx_timeout = qlcnic_tx_timeout,
375#ifdef CONFIG_NET_POLL_CONTROLLER
376 .ndo_poll_controller = qlcnic_poll_controller,
377#endif
378};
379
380static struct qlcnic_nic_template qlcnic_ops = {
381 .get_mac_addr = qlcnic_get_mac_address,
382 .config_bridged_mode = qlcnic_config_bridged_mode,
383 .config_led = qlcnic_config_led,
384 .start_firmware = qlcnic_start_firmware
385};
386
387static struct qlcnic_nic_template qlcnic_vf_ops = {
388 .get_mac_addr = qlcnic_get_mac_address,
389 .config_bridged_mode = qlcnicvf_config_bridged_mode,
390 .config_led = qlcnicvf_config_led,
391 .start_firmware = qlcnicvf_start_firmware
392};
393
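/* Pick the interrupt mode: try MSI-X first, fall back to MSI and
 * finally to legacy INTx if neither can be enabled. */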
394static void
395qlcnic_setup_intr(struct qlcnic_adapter *adapter)
396{
397 const struct qlcnic_legacy_intr_set *legacy_intrp;
398 struct pci_dev *pdev = adapter->pdev;
399 int err, num_msix;
400
401 if (adapter->rss_supported) {
402 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
403 MSIX_ENTRIES_PER_ADAPTER : 2;
404 } else
405 num_msix = 1;
406
407 adapter->max_sds_rings = 1;
408
409 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
410
411 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
412
413 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
414 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
415 legacy_intrp->tgt_status_reg);
416 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
417 legacy_intrp->tgt_mask_reg);
418 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
419
420 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
421 ISR_INT_STATE_REG);
422
423 qlcnic_set_msix_bit(pdev, 0);
424
425 if (adapter->msix_supported) {
426
427 qlcnic_init_msix_entries(adapter, num_msix);
428 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
429 if (err == 0) {
430 adapter->flags |= QLCNIC_MSIX_ENABLED;
431 qlcnic_set_msix_bit(pdev, 1);
432
433 if (adapter->rss_supported)
434 adapter->max_sds_rings = num_msix;
435
436 dev_info(&pdev->dev, "using msi-x interrupts\n");
437 return;
438 }
439
440 if (err > 0)
441 pci_disable_msix(pdev);
442
443 /* fall through for msi */
444 }
445
446 if (use_msi && !pci_enable_msi(pdev)) {
447 adapter->flags |= QLCNIC_MSI_ENABLED;
448 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
449 msi_tgt_status[adapter->ahw.pci_func]);
450 dev_info(&pdev->dev, "using msi interrupts\n");
451 adapter->msix_entries[0].vector = pdev->irq;
452 return;
453 }
454
455 dev_info(&pdev->dev, "using legacy interrupts\n");
456 adapter->msix_entries[0].vector = pdev->irq;
457}
458
459static void
460qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
461{
462 if (adapter->flags & QLCNIC_MSIX_ENABLED)
463 pci_disable_msix(adapter->pdev);
464 if (adapter->flags & QLCNIC_MSI_ENABLED)
465 pci_disable_msi(adapter->pdev);
466}
467
468static void
469qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
470{
471 if (adapter->ahw.pci_base0 != NULL)
472 iounmap(adapter->ahw.pci_base0);
473}
474
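/* Query per-function PCI info from firmware and cache the NPAR and
 * eswitch state used later for configuration. */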
475static int
476qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
477{
478 struct qlcnic_pci_info *pci_info;
479 int i, ret = 0;
480 u8 pfn;
481
482 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
483 if (!pci_info)
484 return -ENOMEM;
485
486 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
487 QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
488 if (!adapter->npars) {
489 ret = -ENOMEM;
490 goto err_pci_info;
491 }
492
493 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
494 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
495 if (!adapter->eswitch) {
496 ret = -ENOMEM;
497 goto err_npars;
498 }
499
500 ret = qlcnic_get_pci_info(adapter, pci_info);
501 if (ret)
502 goto err_eswitch;
503
504 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
505 pfn = pci_info[i].id;
506 if (pfn > QLCNIC_MAX_PCI_FUNC)
507 return QL_STATUS_INVALID_PARAM;
508 adapter->npars[pfn].active = pci_info[i].active;
509 adapter->npars[pfn].type = pci_info[i].type;
510 adapter->npars[pfn].phy_port = pci_info[i].default_port;
511 adapter->npars[pfn].min_bw = pci_info[i].tx_min_bw;
512 adapter->npars[pfn].max_bw = pci_info[i].tx_max_bw;
513 }
514
515 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
516 adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
517
518 kfree(pci_info);
519 return 0;
520
521err_eswitch:
522 kfree(adapter->eswitch);
523 adapter->eswitch = NULL;
524err_npars:
525 kfree(adapter->npars);
526 adapter->npars = NULL;
527err_pci_info:
528 kfree(pci_info);
529
530 return ret;
531}
532
533static int
534qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
535{
536 u8 id;
537 u32 ref_count;
538 int i, ret = 1;
539 u32 data = QLCNIC_MGMT_FUNC;
540 void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
541
542 /* If other drivers are not in use set their privilege level */
543 ref_count = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
544 ret = qlcnic_api_lock(adapter);
545 if (ret)
546 goto err_lock;
547
548 if (qlcnic_config_npars) {
549 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
550 id = i;
551 if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
552 id == adapter->ahw.pci_func)
553 continue;
554 data |= (qlcnic_config_npars &
555 QLC_DEV_SET_DRV(0xf, id));
556 }
557 } else {
558 data = readl(priv_op);
559 data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw.pci_func)) |
560 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
561 adapter->ahw.pci_func));
562 }
563 writel(data, priv_op);
564 qlcnic_api_unlock(adapter);
565err_lock:
566 return ret;
567}
568
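/* Read the firmware HAL version and this function's privilege level,
 * then bind the management, privileged or non-privileged (VF) ops. */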
569static u32
570qlcnic_get_driver_mode(struct qlcnic_adapter *adapter)
571{
572 void __iomem *msix_base_addr;
573 void __iomem *priv_op;
574 struct qlcnic_info nic_info;
575 u32 func;
576 u32 msix_base;
577 u32 op_mode, priv_level;
578
579 /* Determine FW API version */
580 adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API);
581
582 /* Find PCI function number */
583 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
584 msix_base_addr = adapter->ahw.pci_base0 + QLCNIC_MSIX_BASE;
585 msix_base = readl(msix_base_addr);
586 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
587 adapter->ahw.pci_func = func;
588
589 if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
590 adapter->capabilities = nic_info.capabilities;
591
592 if (adapter->capabilities & BIT_6)
593 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
594 else
595 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
596 }
597
598 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
599 adapter->nic_ops = &qlcnic_ops;
600 return adapter->fw_hal_version;
601 }
602
603 /* Determine function privilege level */
604 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
605 op_mode = readl(priv_op);
606 if (op_mode == QLC_DEV_DRV_DEFAULT)
607 priv_level = QLCNIC_MGMT_FUNC;
608 else
609 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
610
611 switch (priv_level) {
612 case QLCNIC_MGMT_FUNC:
613 adapter->op_mode = QLCNIC_MGMT_FUNC;
614 adapter->nic_ops = &qlcnic_ops;
615 qlcnic_init_pci_info(adapter);
616 /* Set privilege level for other functions */
617 qlcnic_set_function_modes(adapter);
618 dev_info(&adapter->pdev->dev,
619 "HAL Version: %d, Management function\n",
620 adapter->fw_hal_version);
621 break;
622 case QLCNIC_PRIV_FUNC:
623 adapter->op_mode = QLCNIC_PRIV_FUNC;
624 dev_info(&adapter->pdev->dev,
625 "HAL Version: %d, Privileged function\n",
626 adapter->fw_hal_version);
627 adapter->nic_ops = &qlcnic_ops;
628 break;
629 case QLCNIC_NON_PRIV_FUNC:
630 adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
631 dev_info(&adapter->pdev->dev,
632 "HAL Version: %d Non Privileged function\n",
633 adapter->fw_hal_version);
634 adapter->nic_ops = &qlcnic_vf_ops;
635 break;
636 default:
637 dev_info(&adapter->pdev->dev, "Unknown function mode: %d\n",
638 priv_level);
639 return 0;
640 }
641 return adapter->fw_hal_version;
642}
643
644static int
645qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
646{
647 void __iomem *mem_ptr0 = NULL;
648 resource_size_t mem_base;
649 unsigned long mem_len, pci_len0 = 0;
650
651 struct pci_dev *pdev = adapter->pdev;
652
653 /* remap phys address */
654 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
655 mem_len = pci_resource_len(pdev, 0);
656
657 if (mem_len == QLCNIC_PCI_2MB_SIZE) {
658
659 mem_ptr0 = pci_ioremap_bar(pdev, 0);
660 if (mem_ptr0 == NULL) {
661 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
662 return -EIO;
663 }
664 pci_len0 = mem_len;
665 } else {
666 return -EIO;
667 }
668
669 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
670
671 adapter->ahw.pci_base0 = mem_ptr0;
672 adapter->ahw.pci_len0 = pci_len0;
673
674 if (!qlcnic_get_driver_mode(adapter)) {
675 iounmap(adapter->ahw.pci_base0);
676 return -EIO;
677 }
678
679 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
680 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func)));
681
682 return 0;
683}
684
685static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
686{
687 struct pci_dev *pdev = adapter->pdev;
688 int i, found = 0;
689
690 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
691 if (qlcnic_boards[i].vendor == pdev->vendor &&
692 qlcnic_boards[i].device == pdev->device &&
693 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
694 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
695 sprintf(name, "%pM: %s" ,
696 adapter->mac_addr,
697 qlcnic_boards[i].short_name);
698 found = 1;
699 break;
700 }
701
702 }
703
704 if (!found)
705 sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
706}
707
708static void
709qlcnic_check_options(struct qlcnic_adapter *adapter)
710{
711 u32 fw_major, fw_minor, fw_build;
712 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
713 struct pci_dev *pdev = adapter->pdev;
714 struct qlcnic_info nic_info;
715
716 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
717 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
718 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
719
720 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
721
722 if (adapter->portnum == 0) {
723 get_brd_name(adapter, brd_name);
724
725 pr_info("%s: %s Board Chip rev 0x%x\n",
726 module_name(THIS_MODULE),
727 brd_name, adapter->ahw.revision_id);
728 }
729
730 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
731 fw_major, fw_minor, fw_build);
732
733 adapter->flags &= ~QLCNIC_LRO_ENABLED;
734
735 if (adapter->ahw.port_type == QLCNIC_XGBE) {
736 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
737 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
738 } else if (adapter->ahw.port_type == QLCNIC_GBE) {
739 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
740 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
741 }
742
743 if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
744 adapter->physical_port = nic_info.phys_port;
745 adapter->switch_mode = nic_info.switch_mode;
746 adapter->max_tx_ques = nic_info.max_tx_ques;
747 adapter->max_rx_ques = nic_info.max_rx_ques;
748 adapter->capabilities = nic_info.capabilities;
749 adapter->max_mac_filters = nic_info.max_mac_filters;
750 adapter->max_mtu = nic_info.max_mtu;
751 }
752
753 adapter->msix_supported = !!use_msi_x;
754 adapter->rss_supported = !!use_msi_x;
755
756 adapter->num_txd = MAX_CMD_DESCRIPTORS;
757
758 adapter->max_rds_rings = MAX_RDS_RINGS;
759}
760
761static void
762qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
763 struct qlcnic_esw_func_cfg *esw_cfg)
764{
765 adapter->flags &= ~QLCNIC_MACSPOOF;
766 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
767 if (esw_cfg->mac_anti_spoof)
768 adapter->flags |= QLCNIC_MACSPOOF;
769
770 qlcnic_set_netdev_features(adapter, esw_cfg);
771}
772
773static int
774qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
775{
776 struct qlcnic_esw_func_cfg esw_cfg;
777
778 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
779 return 0;
780
781 esw_cfg.pci_func = adapter->ahw.pci_func;
782 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg))
783 return -EIO;
784 qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
785
786 return 0;
787}
788
789static void
790qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
791 struct qlcnic_esw_func_cfg *esw_cfg)
792{
793 struct net_device *netdev = adapter->netdev;
794 unsigned long features, vlan_features;
795
796 features = (NETIF_F_SG | NETIF_F_IP_CSUM |
797 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
798 vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM |
799 NETIF_F_IPV6_CSUM);
800
801 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
802 features |= (NETIF_F_TSO | NETIF_F_TSO6);
803 vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
804 }
805 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
806 features |= NETIF_F_LRO;
807
808 if (esw_cfg->offload_flags & BIT_0) {
809 netdev->features |= features;
810 adapter->rx_csum = 1;
811 if (!(esw_cfg->offload_flags & BIT_1))
812 netdev->features &= ~NETIF_F_TSO;
813 if (!(esw_cfg->offload_flags & BIT_2))
814 netdev->features &= ~NETIF_F_TSO6;
815 } else {
816 netdev->features &= ~features;
817 adapter->rx_csum = 0;
818 }
819
820 netdev->vlan_features = (features & vlan_features);
821}
822
823static int
824qlcnic_set_default_offload_settings(struct qlcnic_adapter *adapter)
825{
826 struct qlcnic_esw_func_cfg esw_cfg;
827 struct qlcnic_npar_info *npar;
828 u8 i;
829
830 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
831 adapter->need_fw_reset ||
832 adapter->op_mode != QLCNIC_MGMT_FUNC)
833 return 0;
834
835 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
836 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
837 continue;
838 memset(&esw_cfg, 0, sizeof(struct qlcnic_esw_func_cfg));
839 esw_cfg.pci_func = i;
840 esw_cfg.offload_flags = BIT_0;
841 esw_cfg.mac_learning = BIT_0;
842 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO)
843 esw_cfg.offload_flags |= (BIT_1 | BIT_2);
844 if (qlcnic_config_switch_port(adapter, &esw_cfg))
845 return -EIO;
846 npar = &adapter->npars[i];
847 npar->pvid = esw_cfg.vlan_id;
848 npar->mac_learning = esw_cfg.offload_flags;
849 npar->mac_anti_spoof = esw_cfg.mac_anti_spoof;
850 npar->discard_tagged = esw_cfg.discard_tagged;
851 npar->promisc_mode = esw_cfg.promisc_mode;
852 npar->offload_flags = esw_cfg.offload_flags;
853 }
854
855 return 0;
856}
857
858static int
859qlcnic_reset_eswitch_config(struct qlcnic_adapter *adapter,
860 struct qlcnic_npar_info *npar, int pci_func)
861{
862 struct qlcnic_esw_func_cfg esw_cfg;
863 esw_cfg.op_mode = QLCNIC_PORT_DEFAULTS;
864 esw_cfg.pci_func = pci_func;
865 esw_cfg.vlan_id = npar->pvid;
866 esw_cfg.mac_learning = npar->mac_learning;
867 esw_cfg.discard_tagged = npar->discard_tagged;
868 esw_cfg.mac_anti_spoof = npar->mac_anti_spoof;
869 esw_cfg.offload_flags = npar->offload_flags;
870 esw_cfg.promisc_mode = npar->promisc_mode;
871 if (qlcnic_config_switch_port(adapter, &esw_cfg))
872 return -EIO;
873
874 esw_cfg.op_mode = QLCNIC_ADD_VLAN;
875 if (qlcnic_config_switch_port(adapter, &esw_cfg))
876 return -EIO;
877
878 return 0;
879}
880
881static int
882qlcnic_reset_npar_config(struct qlcnic_adapter *adapter)
883{
884 int i, err;
885 struct qlcnic_npar_info *npar;
886 struct qlcnic_info nic_info;
887
888 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
889 !adapter->need_fw_reset || adapter->op_mode != QLCNIC_MGMT_FUNC)
890 return 0;
891
892 /* Set the NPAR config data after FW reset */
893 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
894 npar = &adapter->npars[i];
895 if (npar->type != QLCNIC_TYPE_NIC)
896 continue;
897 err = qlcnic_get_nic_info(adapter, &nic_info, i);
898 if (err)
899 return err;
900 nic_info.min_tx_bw = npar->min_bw;
901 nic_info.max_tx_bw = npar->max_bw;
902 err = qlcnic_set_nic_info(adapter, &nic_info);
903 if (err)
904 return err;
905
906 if (npar->enable_pm) {
907 err = qlcnic_config_port_mirroring(adapter,
908 npar->dest_npar, 1, i);
909 if (err)
910 return err;
cea8975e 911 }
912 err = qlcnic_reset_eswitch_config(adapter, npar, i);
913 if (err)
914 return err;
915 }
916 return 0;
917}
918
919static int qlcnic_check_npar_opertional(struct qlcnic_adapter *adapter)
920{
921 u8 npar_opt_timeo = QLCNIC_DEV_NPAR_OPER_TIMEO;
922 u32 npar_state;
923
924 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
925 return 0;
926
927 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
928 while (npar_state != QLCNIC_DEV_NPAR_OPER && --npar_opt_timeo) {
929 msleep(1000);
930 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
931 }
932 if (!npar_opt_timeo) {
933 dev_err(&adapter->pdev->dev,
934 "Waiting for NPAR state to opertional timeout\n");
935 return -EIO;
936 }
937 return 0;
938}
939
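/* Bring the firmware up: load it from flash or from a file when a
 * reset is needed, wait for it to become ready and restore the
 * default NPAR/eswitch settings. */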
940static int
941qlcnic_start_firmware(struct qlcnic_adapter *adapter)
942{
943 int err;
944
945 err = qlcnic_can_start_firmware(adapter);
946 if (err < 0)
947 return err;
948 else if (!err)
949 goto check_fw_status;
950
951 if (load_fw_file)
952 qlcnic_request_firmware(adapter);
953 else {
954 if (qlcnic_check_flash_fw_ver(adapter))
955 goto err_out;
956
957 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
958 }
959
960 err = qlcnic_need_fw_reset(adapter);
961 if (err == 0)
962 goto set_dev_ready;
963
964 err = qlcnic_pinit_from_rom(adapter);
965 if (err)
966 goto err_out;
967 qlcnic_set_port_mode(adapter);
968
969 err = qlcnic_load_firmware(adapter);
970 if (err)
971 goto err_out;
972
973 qlcnic_release_firmware(adapter);
974 QLCWR32(adapter, CRB_DRIVER_VERSION, QLCNIC_DRIVER_VERSION);
975
976check_fw_status:
977 err = qlcnic_check_fw_status(adapter);
978 if (err)
979 goto err_out;
980
981set_dev_ready:
982 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
983 qlcnic_idc_debug_info(adapter, 1);
984 err = qlcnic_check_npar_opertional(adapter);
985 if (err) {
986 qlcnic_release_firmware(adapter);
987 return err;
988 }
989 if (qlcnic_set_default_offload_settings(adapter))
990 goto err_out;
991 if (qlcnic_reset_npar_config(adapter))
992 goto err_out;
993 qlcnic_dev_set_npar_ready(adapter);
994 qlcnic_check_options(adapter);
995 adapter->need_fw_reset = 0;
996
997 qlcnic_release_firmware(adapter);
998 return 0;
999
1000err_out:
1001 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
1002 dev_err(&adapter->pdev->dev, "Device state set to failed\n");
1003 qlcnic_release_firmware(adapter);
1004 return err;
1005}
1006
1007static int
1008qlcnic_request_irq(struct qlcnic_adapter *adapter)
1009{
1010 irq_handler_t handler;
1011 struct qlcnic_host_sds_ring *sds_ring;
1012 int err, ring;
1013
1014 unsigned long flags = 0;
1015 struct net_device *netdev = adapter->netdev;
1016 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1017
1018 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1019 handler = qlcnic_tmp_intr;
1020 if (!QLCNIC_IS_MSI_FAMILY(adapter))
1021 flags |= IRQF_SHARED;
1022
1023 } else {
1024 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1025 handler = qlcnic_msix_intr;
1026 else if (adapter->flags & QLCNIC_MSI_ENABLED)
1027 handler = qlcnic_msi_intr;
1028 else {
1029 flags |= IRQF_SHARED;
1030 handler = qlcnic_intr;
1031 }
1032 }
1033 adapter->irq = netdev->irq;
1034
1035 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1036 sds_ring = &recv_ctx->sds_rings[ring];
1037 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
1038 err = request_irq(sds_ring->irq, handler,
1039 flags, sds_ring->name, sds_ring);
1040 if (err)
1041 return err;
1042 }
1043
1044 return 0;
1045}
1046
1047static void
1048qlcnic_free_irq(struct qlcnic_adapter *adapter)
1049{
1050 int ring;
1051 struct qlcnic_host_sds_ring *sds_ring;
1052
1053 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1054
1055 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1056 sds_ring = &recv_ctx->sds_rings[ring];
1057 free_irq(sds_ring->irq, sds_ring);
1058 }
1059}
1060
1061static void
1062qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
1063{
1064 adapter->coal.flags = QLCNIC_INTR_DEFAULT;
1065 adapter->coal.normal.data.rx_time_us =
1066 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
1067 adapter->coal.normal.data.rx_packets =
1068 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
1069 adapter->coal.normal.data.tx_time_us =
1070 QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
1071 adapter->coal.normal.data.tx_packets =
1072 QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
1073}
1074
1075static int
1076__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1077{
1078 int ring;
1079 struct qlcnic_host_rds_ring *rds_ring;
1080
1081 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1082 return -EIO;
1083
1084 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
1085 return 0;
1086 if (qlcnic_set_eswitch_port_config(adapter))
1087 return -EIO;
1088
1089 if (qlcnic_fw_create_ctx(adapter))
1090 return -EIO;
1091
1092 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1093 rds_ring = &adapter->recv_ctx.rds_rings[ring];
1094 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
1095 }
1096
1097 qlcnic_set_multi(netdev);
1098 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
1099
1100 adapter->ahw.linkup = 0;
1101
1102 if (adapter->max_sds_rings > 1)
1103 qlcnic_config_rss(adapter, 1);
1104
1105 qlcnic_config_intr_coalesce(adapter);
1106
1107 if (netdev->features & NETIF_F_LRO)
1108 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
1109
1110 qlcnic_napi_enable(adapter);
1111
1112 qlcnic_linkevent_request(adapter, 1);
1113
1114 adapter->reset_context = 0;
1115 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1116 return 0;
1117}
1118
1119/* Used during resume and firmware recovery. */
1120
1121static int
1122qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
1123{
1124 int err = 0;
1125
1126 rtnl_lock();
1127 if (netif_running(netdev))
1128 err = __qlcnic_up(adapter, netdev);
1129 rtnl_unlock();
1130
1131 return err;
1132}
1133
1134static void
1135__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1136{
1137 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1138 return;
1139
1140 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
1141 return;
1142
1143 smp_mb();
1144 spin_lock(&adapter->tx_clean_lock);
1145 netif_carrier_off(netdev);
1146 netif_tx_disable(netdev);
1147
1148 qlcnic_free_mac_list(adapter);
1149
1150 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
1151
1152 qlcnic_napi_disable(adapter);
1153
1154 qlcnic_fw_destroy_ctx(adapter);
1155
1156 qlcnic_reset_rx_buffers_list(adapter);
1157 qlcnic_release_tx_buffers(adapter);
1158 spin_unlock(&adapter->tx_clean_lock);
1159}
1160
1161/* Used during suspend and firmware recovery. */
1162
1163static void
1164qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1165{
1166 rtnl_lock();
1167 if (netif_running(netdev))
1168 __qlcnic_down(adapter, netdev);
1169 rtnl_unlock();
1170
1171}
1172
1173static int
1174qlcnic_attach(struct qlcnic_adapter *adapter)
1175{
1176 struct net_device *netdev = adapter->netdev;
1177 struct pci_dev *pdev = adapter->pdev;
1178 int err;
1179
1180 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
1181 return 0;
1182
1183 err = qlcnic_napi_add(adapter, netdev);
1184 if (err)
1185 return err;
1186
1187 err = qlcnic_alloc_sw_resources(adapter);
1188 if (err) {
1189 dev_err(&pdev->dev, "Error in setting sw resources\n");
1190 goto err_out_napi_del;
1191 }
1192
1193 err = qlcnic_alloc_hw_resources(adapter);
1194 if (err) {
1195 dev_err(&pdev->dev, "Error in setting hw resources\n");
1196 goto err_out_free_sw;
1197 }
1198
1199 err = qlcnic_request_irq(adapter);
1200 if (err) {
1201 dev_err(&pdev->dev, "failed to setup interrupt\n");
1202 goto err_out_free_hw;
1203 }
1204
1205 qlcnic_init_coalesce_defaults(adapter);
1206
1207 qlcnic_create_sysfs_entries(adapter);
1208
1209 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
1210 return 0;
1211
1212err_out_free_hw:
1213 qlcnic_free_hw_resources(adapter);
1214err_out_free_sw:
1215 qlcnic_free_sw_resources(adapter);
1216err_out_napi_del:
1217 qlcnic_napi_del(adapter);
1218 return err;
1219}
1220
1221static void
1222qlcnic_detach(struct qlcnic_adapter *adapter)
1223{
1224 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1225 return;
1226
1227 qlcnic_remove_sysfs_entries(adapter);
1228
1229 qlcnic_free_hw_resources(adapter);
1230 qlcnic_release_rx_buffers(adapter);
1231 qlcnic_free_irq(adapter);
1232 qlcnic_napi_del(adapter);
1233 qlcnic_free_sw_resources(adapter);
1234
1235 adapter->is_up = 0;
1236}
1237
1238void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
1239{
1240 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1241 struct qlcnic_host_sds_ring *sds_ring;
1242 int ring;
1243
1244 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
1245 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1246 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1247 sds_ring = &adapter->recv_ctx.sds_rings[ring];
1248 qlcnic_disable_int(sds_ring);
1249 }
1250 }
1251
1252 qlcnic_fw_destroy_ctx(adapter);
1253
1254 qlcnic_detach(adapter);
1255
1256 adapter->diag_test = 0;
1257 adapter->max_sds_rings = max_sds_rings;
1258
1259 if (qlcnic_attach(adapter))
1260 goto out;
1261
1262 if (netif_running(netdev))
1263 __qlcnic_up(adapter, netdev);
1264out:
1265 netif_device_attach(netdev);
1266}
1267
1268int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1269{
1270 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1271 struct qlcnic_host_sds_ring *sds_ring;
1272 struct qlcnic_host_rds_ring *rds_ring;
1273 int ring;
1274 int ret;
1275
1276 netif_device_detach(netdev);
1277
1278 if (netif_running(netdev))
1279 __qlcnic_down(adapter, netdev);
1280
1281 qlcnic_detach(adapter);
1282
1283 adapter->max_sds_rings = 1;
1284 adapter->diag_test = test;
1285
1286 ret = qlcnic_attach(adapter);
1287 if (ret) {
1288 netif_device_attach(netdev);
1289 return ret;
1290 }
1291
1292 ret = qlcnic_fw_create_ctx(adapter);
1293 if (ret) {
1294 qlcnic_detach(adapter);
1295 netif_device_attach(netdev);
1296 return ret;
1297 }
1298
1299 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1300 rds_ring = &adapter->recv_ctx.rds_rings[ring];
1301 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
1302 }
1303
1304 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1305 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1306 sds_ring = &adapter->recv_ctx.sds_rings[ring];
1307 qlcnic_enable_int(sds_ring);
1308 }
1309 }
1310 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1311
1312 return 0;
1313}
1314
1315/* Reset context in hardware only */
1316static int
1317qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
1318{
1319 struct net_device *netdev = adapter->netdev;
1320
1321 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1322 return -EBUSY;
1323
1324 netif_device_detach(netdev);
1325
1326 qlcnic_down(adapter, netdev);
1327
1328 qlcnic_up(adapter, netdev);
1329
1330 netif_device_attach(netdev);
1331
1332 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1333 return 0;
1334}
1335
1336int
1337qlcnic_reset_context(struct qlcnic_adapter *adapter)
1338{
1339 int err = 0;
1340 struct net_device *netdev = adapter->netdev;
1341
1342 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1343 return -EBUSY;
1344
1345 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
1346
1347 netif_device_detach(netdev);
1348
1349 if (netif_running(netdev))
1350 __qlcnic_down(adapter, netdev);
1351
1352 qlcnic_detach(adapter);
1353
1354 if (netif_running(netdev)) {
1355 err = qlcnic_attach(adapter);
1356 if (!err)
1357 __qlcnic_up(adapter, netdev);
1358 }
1359
1360 netif_device_attach(netdev);
1361 }
1362
1363 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1364 return err;
1365}
1366
1367static int
1368qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1369 struct net_device *netdev, u8 pci_using_dac)
1370{
1371 int err;
1372 struct pci_dev *pdev = adapter->pdev;
1373
1374 adapter->rx_csum = 1;
1375 adapter->mc_enabled = 0;
1376 adapter->max_mc_count = 38;
1377
1378 netdev->netdev_ops = &qlcnic_netdev_ops;
1379 netdev->watchdog_timeo = 5*HZ;
1380
1381 qlcnic_change_mtu(netdev, netdev->mtu);
1382
1383 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1384
1385 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1386 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
1387 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1388 NETIF_F_IPV6_CSUM);
1389
1390 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
1391 netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
1392 netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
1393 }
1394
1395 if (pci_using_dac) {
1396 netdev->features |= NETIF_F_HIGHDMA;
1397 netdev->vlan_features |= NETIF_F_HIGHDMA;
1398 }
1399
1400 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
1401 netdev->features |= (NETIF_F_HW_VLAN_TX);
1402
1403 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
1404 netdev->features |= NETIF_F_LRO;
1405 netdev->irq = adapter->msix_entries[0].vector;
1406
1407 if (qlcnic_read_mac_addr(adapter))
1408 dev_warn(&pdev->dev, "failed to read mac addr\n");
1409
1410 netif_carrier_off(netdev);
1411 netif_stop_queue(netdev);
1412
1413 err = register_netdev(netdev);
1414 if (err) {
1415 dev_err(&pdev->dev, "failed to register net device\n");
1416 return err;
1417 }
1418
1419 return 0;
1420}
1421
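/* Prefer a 64-bit DMA mask and fall back to 32-bit if the platform
 * cannot support it. */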
1422static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
1423{
1424 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1425 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1426 *pci_using_dac = 1;
1427 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
1428 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1429 *pci_using_dac = 0;
1430 else {
1431 dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
1432 return -EIO;
1433 }
1434
1435 return 0;
1436}
1437
1438static int __devinit
1439qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1440{
1441 struct net_device *netdev = NULL;
1442 struct qlcnic_adapter *adapter = NULL;
1443 int err;
1444 uint8_t revision_id;
1445 uint8_t pci_using_dac;
1446
1447 err = pci_enable_device(pdev);
1448 if (err)
1449 return err;
1450
1451 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1452 err = -ENODEV;
1453 goto err_out_disable_pdev;
1454 }
1455
1456 err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
1457 if (err)
1458 goto err_out_disable_pdev;
1459
1460 err = pci_request_regions(pdev, qlcnic_driver_name);
1461 if (err)
1462 goto err_out_disable_pdev;
1463
1464 pci_set_master(pdev);
1465 pci_enable_pcie_error_reporting(pdev);
1466
1467 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1468 if (!netdev) {
1469 dev_err(&pdev->dev, "failed to allocate net_device\n");
1470 err = -ENOMEM;
1471 goto err_out_free_res;
1472 }
1473
1474 SET_NETDEV_DEV(netdev, &pdev->dev);
1475
1476 adapter = netdev_priv(netdev);
1477 adapter->netdev = netdev;
1478 adapter->pdev = pdev;
1479 adapter->dev_rst_time = jiffies;
1480
1481 revision_id = pdev->revision;
1482 adapter->ahw.revision_id = revision_id;
1483
1484 rwlock_init(&adapter->ahw.crb_lock);
1485 mutex_init(&adapter->ahw.mem_lock);
1486
1487 spin_lock_init(&adapter->tx_clean_lock);
1488 INIT_LIST_HEAD(&adapter->mac_list);
1489
1490 err = qlcnic_setup_pci_map(adapter);
1491 if (err)
1492 goto err_out_free_netdev;
1493
1494 /* This will be reset for mezz cards */
1495 adapter->portnum = adapter->ahw.pci_func;
1496
1497 err = qlcnic_get_board_info(adapter);
1498 if (err) {
1499 dev_err(&pdev->dev, "Error getting board config info.\n");
1500 goto err_out_iounmap;
1501 }
1502
1503 if (qlcnic_read_mac_addr(adapter))
1504 dev_warn(&pdev->dev, "failed to read mac addr\n");
1505
1506 if (qlcnic_setup_idc_param(adapter))
1507 goto err_out_iounmap;
1508
1509 err = adapter->nic_ops->start_firmware(adapter);
1510 if (err) {
1511 dev_err(&pdev->dev, "Loading fw failed. Please reboot\n");
1512 goto err_out_decr_ref;
1513 }
1514
1515 qlcnic_clear_stats(adapter);
1516
1517 qlcnic_setup_intr(adapter);
1518
1519 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
1520 if (err)
1521 goto err_out_disable_msi;
1522
1523 pci_set_drvdata(pdev, adapter);
1524
1525 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1526
1527 switch (adapter->ahw.port_type) {
1528 case QLCNIC_GBE:
1529 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1530 adapter->netdev->name);
1531 break;
1532 case QLCNIC_XGBE:
1533 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1534 adapter->netdev->name);
1535 break;
1536 }
1537
1538 qlcnic_create_diag_entries(adapter);
1539
1540 return 0;
1541
1542err_out_disable_msi:
1543 qlcnic_teardown_intr(adapter);
1544
1545err_out_decr_ref:
1546 qlcnic_clr_all_drv_state(adapter, 0);
1547
1548err_out_iounmap:
1549 qlcnic_cleanup_pci_map(adapter);
1550
1551err_out_free_netdev:
1552 free_netdev(netdev);
1553
1554err_out_free_res:
1555 pci_release_regions(pdev);
1556
1557err_out_disable_pdev:
1558 pci_set_drvdata(pdev, NULL);
1559 pci_disable_device(pdev);
1560 return err;
1561}
1562
1563static void __devexit qlcnic_remove(struct pci_dev *pdev)
1564{
1565 struct qlcnic_adapter *adapter;
1566 struct net_device *netdev;
1567
1568 adapter = pci_get_drvdata(pdev);
1569 if (adapter == NULL)
1570 return;
1571
1572 netdev = adapter->netdev;
1573
1574 qlcnic_cancel_fw_work(adapter);
1575
1576 unregister_netdev(netdev);
1577
1578 qlcnic_detach(adapter);
1579
1580 if (adapter->npars != NULL)
1581 kfree(adapter->npars);
1582 if (adapter->eswitch != NULL)
1583 kfree(adapter->eswitch);
1584
1585 qlcnic_clr_all_drv_state(adapter, 0);
1586
1587 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1588
1589 qlcnic_teardown_intr(adapter);
1590
1591 qlcnic_remove_diag_entries(adapter);
1592
1593 qlcnic_cleanup_pci_map(adapter);
1594
1595 qlcnic_release_firmware(adapter);
1596
1597 pci_disable_pcie_error_reporting(pdev);
1598 pci_release_regions(pdev);
1599 pci_disable_device(pdev);
1600 pci_set_drvdata(pdev, NULL);
1601
1602 free_netdev(netdev);
1603}
1604static int __qlcnic_shutdown(struct pci_dev *pdev)
1605{
1606 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1607 struct net_device *netdev = adapter->netdev;
1608 int retval;
1609
1610 netif_device_detach(netdev);
1611
1612 qlcnic_cancel_fw_work(adapter);
1613
1614 if (netif_running(netdev))
1615 qlcnic_down(adapter, netdev);
1616
1617 qlcnic_clr_all_drv_state(adapter, 0);
1618
1619 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1620
1621 retval = pci_save_state(pdev);
1622 if (retval)
1623 return retval;
1624
1625 if (qlcnic_wol_supported(adapter)) {
1626 pci_enable_wake(pdev, PCI_D3cold, 1);
1627 pci_enable_wake(pdev, PCI_D3hot, 1);
1628 }
1629
1630 return 0;
1631}
1632
1633static void qlcnic_shutdown(struct pci_dev *pdev)
1634{
1635 if (__qlcnic_shutdown(pdev))
1636 return;
1637
1638 pci_disable_device(pdev);
1639}
1640
1641#ifdef CONFIG_PM
1642static int
1643qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1644{
1645 int retval;
1646
1647 retval = __qlcnic_shutdown(pdev);
1648 if (retval)
1649 return retval;
1650
1651 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1652 return 0;
1653}
1654
1655static int
1656qlcnic_resume(struct pci_dev *pdev)
1657{
1658 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1659 struct net_device *netdev = adapter->netdev;
1660 int err;
1661
1662 err = pci_enable_device(pdev);
1663 if (err)
1664 return err;
1665
1666 pci_set_power_state(pdev, PCI_D0);
1667 pci_set_master(pdev);
1668 pci_restore_state(pdev);
1669
1670 err = adapter->nic_ops->start_firmware(adapter);
1671 if (err) {
1672 dev_err(&pdev->dev, "failed to start firmware\n");
1673 return err;
1674 }
1675
1676 if (netif_running(netdev)) {
1677 err = qlcnic_up(adapter, netdev);
1678 if (err)
1679 goto done;
1680
1681 qlcnic_config_indev_addr(netdev, NETDEV_UP);
1682 }
1683done:
1684 netif_device_attach(netdev);
1685 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1686 return 0;
1687}
1688#endif
1689
1690static int qlcnic_open(struct net_device *netdev)
1691{
1692 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1693 int err;
1694
1695 err = qlcnic_attach(adapter);
1696 if (err)
1697 return err;
1698
1699 err = __qlcnic_up(adapter, netdev);
1700 if (err)
1701 goto err_out;
1702
1703 netif_start_queue(netdev);
1704
1705 return 0;
1706
1707err_out:
1708 qlcnic_detach(adapter);
1709 return err;
1710}
1711
1712/*
1713 * qlcnic_close - Disables a network interface entry point
1714 */
1715static int qlcnic_close(struct net_device *netdev)
1716{
1717 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1718
1719 __qlcnic_down(adapter, netdev);
1720 return 0;
1721}
1722
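/* Fill the VLAN, checksum and LSO fields of the first TX descriptor;
 * for LSO the MAC/IP/TCP headers are also copied into the ring. */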
1723static void
1724qlcnic_tso_check(struct net_device *netdev,
1725 struct qlcnic_host_tx_ring *tx_ring,
1726 struct cmd_desc_type0 *first_desc,
1727 struct sk_buff *skb)
1728{
1729 u8 opcode = TX_ETHER_PKT;
1730 __be16 protocol = skb->protocol;
1731 u16 flags = 0, vid = 0;
1732 int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
1733 struct cmd_desc_type0 *hwdesc;
1734 struct vlan_ethhdr *vh;
1735 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1736 u32 producer = tx_ring->producer;
1737
1738 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1739
1740 vh = (struct vlan_ethhdr *)skb->data;
1741 protocol = vh->h_vlan_encapsulated_proto;
1742 flags = FLAGS_VLAN_TAGGED;
1743
1744 } else if (vlan_tx_tag_present(skb)) {
1745
1746 flags = FLAGS_VLAN_OOB;
1747 vid = vlan_tx_tag_get(skb);
1748 qlcnic_set_tx_vlan_tci(first_desc, vid);
1749 vlan_oob = 1;
1750 }
1751
1752 if (*(skb->data) & BIT_0) {
1753 flags |= BIT_0;
1754 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
1755 }
1756
1757 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1758 skb_shinfo(skb)->gso_size > 0) {
1759
1760 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1761
1762 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1763 first_desc->total_hdr_length = hdr_len;
1764 if (vlan_oob) {
1765 first_desc->total_hdr_length += VLAN_HLEN;
1766 first_desc->tcp_hdr_offset = VLAN_HLEN;
1767 first_desc->ip_hdr_offset = VLAN_HLEN;
1768 /* Only in case of TSO on vlan device */
1769 flags |= FLAGS_VLAN_TAGGED;
1770 }
1771
1772 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1773 TX_TCP_LSO6 : TX_TCP_LSO;
1774 tso = 1;
1775
1776 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1777 u8 l4proto;
1778
1779 if (protocol == cpu_to_be16(ETH_P_IP)) {
1780 l4proto = ip_hdr(skb)->protocol;
1781
1782 if (l4proto == IPPROTO_TCP)
1783 opcode = TX_TCP_PKT;
1784 else if (l4proto == IPPROTO_UDP)
1785 opcode = TX_UDP_PKT;
1786 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1787 l4proto = ipv6_hdr(skb)->nexthdr;
1788
1789 if (l4proto == IPPROTO_TCP)
1790 opcode = TX_TCPV6_PKT;
1791 else if (l4proto == IPPROTO_UDP)
1792 opcode = TX_UDPV6_PKT;
1793 }
1794 }
1795
1796 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
1797 first_desc->ip_hdr_offset += skb_network_offset(skb);
1798 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1799
1800 if (!tso)
1801 return;
1802
1803 /* For LSO, we need to copy the MAC/IP/TCP headers into
1804 * the descriptor ring
1805 */
1806 copied = 0;
1807 offset = 2;
1808
1809 if (vlan_oob) {
1810 /* Create a TSO vlan header template for firmware */
1811
1812 hwdesc = &tx_ring->desc_head[producer];
1813 tx_ring->cmd_buf_arr[producer].skb = NULL;
1814
1815 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1816 hdr_len + VLAN_HLEN);
1817
1818 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1819 skb_copy_from_linear_data(skb, vh, 12);
1820 vh->h_vlan_proto = htons(ETH_P_8021Q);
1821 vh->h_vlan_TCI = htons(vid);
1822 skb_copy_from_linear_data_offset(skb, 12,
1823 (char *)vh + 16, copy_len - 16);
1824
1825 copied = copy_len - VLAN_HLEN;
1826 offset = 0;
1827
1828 producer = get_next_index(producer, tx_ring->num_desc);
1829 }
1830
1831 while (copied < hdr_len) {
1832
1833 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1834 (hdr_len - copied));
1835
1836 hwdesc = &tx_ring->desc_head[producer];
1837 tx_ring->cmd_buf_arr[producer].skb = NULL;
1838
1839 skb_copy_from_linear_data_offset(skb, copied,
1840 (char *)hwdesc + offset, copy_len);
1841
1842 copied += copy_len;
1843 offset = 0;
1844
1845 producer = get_next_index(producer, tx_ring->num_desc);
1846 }
1847
1848 tx_ring->producer = producer;
1849 barrier();
1850 adapter->stats.lso_frames++;
1851}
1852
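/* DMA-map the skb head and all fragments; on failure unmap whatever
 * was already mapped and return -ENOMEM. */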
1853static int
1854qlcnic_map_tx_skb(struct pci_dev *pdev,
1855 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
1856{
1857 struct qlcnic_skb_frag *nf;
1858 struct skb_frag_struct *frag;
1859 int i, nr_frags;
1860 dma_addr_t map;
1861
1862 nr_frags = skb_shinfo(skb)->nr_frags;
1863 nf = &pbuf->frag_array[0];
1864
1865 map = pci_map_single(pdev, skb->data,
1866 skb_headlen(skb), PCI_DMA_TODEVICE);
1867 if (pci_dma_mapping_error(pdev, map))
1868 goto out_err;
1869
1870 nf->dma = map;
1871 nf->length = skb_headlen(skb);
1872
1873 for (i = 0; i < nr_frags; i++) {
1874 frag = &skb_shinfo(skb)->frags[i];
1875 nf = &pbuf->frag_array[i+1];
1876
1877 map = pci_map_page(pdev, frag->page, frag->page_offset,
1878 frag->size, PCI_DMA_TODEVICE);
1879 if (pci_dma_mapping_error(pdev, map))
1880 goto unwind;
1881
1882 nf->dma = map;
1883 nf->length = frag->size;
1884 }
1885
1886 return 0;
1887
1888unwind:
1889 while (--i >= 0) {
1890 nf = &pbuf->frag_array[i+1];
1891 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
1892 }
1893
1894 nf = &pbuf->frag_array[0];
1895 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
1896
1897out_err:
1898 return -ENOMEM;
1899}
1900
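/* Zero the first and third 64-bit words of a Tx command descriptor */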
1901static inline void
1902qlcnic_clear_cmddesc(u64 *desc)
1903{
1904 desc[0] = 0ULL;
1905 desc[2] = 0ULL;
1906}
1907
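/*
 * Main transmit entry point: map the skb for DMA, fill the Tx command
 * descriptors, apply TSO/checksum offload flags and advance the producer.
 */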
1908 netdev_tx_t
1909qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1910{
1911 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1912 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1913 struct qlcnic_cmd_buffer *pbuf;
1914 struct qlcnic_skb_frag *buffrag;
1915 struct cmd_desc_type0 *hwdesc, *first_desc;
1916 struct pci_dev *pdev;
1917 int i, k;
1918
1919 u32 producer;
1920 int frag_count, no_of_desc;
1921 u32 num_txd = tx_ring->num_desc;
1922
1923 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
1924 netif_stop_queue(netdev);
1925 return NETDEV_TX_BUSY;
1926 }
1927
1928 if (adapter->flags & QLCNIC_MACSPOOF) {
1929 if (compare_ether_addr(eth_hdr(skb)->h_source,
1930 adapter->mac_addr))
1931 goto drop_packet;
1932 }
1933
1934 frag_count = skb_shinfo(skb)->nr_frags + 1;
1935
1936	/* 4 fragments per cmd desc */
1937 no_of_desc = (frag_count + 3) >> 2;
1938
1939	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
1940		netif_stop_queue(netdev);
1941 smp_mb();
1942 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
1943 netif_start_queue(netdev);
1944 else {
1945 adapter->stats.xmit_off++;
1946 return NETDEV_TX_BUSY;
1947 }
1948 }
1949
1950 producer = tx_ring->producer;
1951 pbuf = &tx_ring->cmd_buf_arr[producer];
1952
1953 pdev = adapter->pdev;
1954
1955 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
1956 adapter->stats.tx_dma_map_error++;
1957		goto drop_packet;
1958	}
1959
1960 pbuf->skb = skb;
1961 pbuf->frag_count = frag_count;
1962
1963 first_desc = hwdesc = &tx_ring->desc_head[producer];
1964 qlcnic_clear_cmddesc((u64 *)hwdesc);
1965
1966 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
1967 qlcnic_set_tx_port(first_desc, adapter->portnum);
1968
1969 for (i = 0; i < frag_count; i++) {
1970
1971 k = i % 4;
1972
1973 if ((k == 0) && (i > 0)) {
1974 /* move to next desc.*/
1975 producer = get_next_index(producer, num_txd);
1976 hwdesc = &tx_ring->desc_head[producer];
1977 qlcnic_clear_cmddesc((u64 *)hwdesc);
1978 tx_ring->cmd_buf_arr[producer].skb = NULL;
1979 }
1980
1981 buffrag = &pbuf->frag_array[i];
1982
1983 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
1984 switch (k) {
1985 case 0:
1986 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
1987 break;
1988 case 1:
1989 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
1990 break;
1991 case 2:
1992 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
1993 break;
1994 case 3:
1995 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
1996 break;
1997 }
1998 }
1999
2000 tx_ring->producer = get_next_index(producer, num_txd);
2001
2002 qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
2003
2004 qlcnic_update_cmd_producer(adapter, tx_ring);
2005
2006 adapter->stats.txbytes += skb->len;
2007 adapter->stats.xmitcalled++;
2008
2009 return NETDEV_TX_OK;
2010
2011drop_packet:
2012 adapter->stats.txdropped++;
2013 dev_kfree_skb_any(skb);
2014 return NETDEV_TX_OK;
2015}
2016
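/*
 * Read the board temperature state: warn when it leaves the normal
 * operating range and return 1 if the device had to be shut down.
 */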
2017static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
2018{
2019 struct net_device *netdev = adapter->netdev;
2020 u32 temp, temp_state, temp_val;
2021 int rv = 0;
2022
2023 temp = QLCRD32(adapter, CRB_TEMP_STATE);
2024
2025 temp_state = qlcnic_get_temp_state(temp);
2026 temp_val = qlcnic_get_temp_val(temp);
2027
2028 if (temp_state == QLCNIC_TEMP_PANIC) {
2029 dev_err(&netdev->dev,
2030 "Device temperature %d degrees C exceeds"
2031 " maximum allowed. Hardware has been shut down.\n",
2032 temp_val);
2033 rv = 1;
2034 } else if (temp_state == QLCNIC_TEMP_WARN) {
2035 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
2036 dev_err(&netdev->dev,
2037 "Device temperature %d degrees C "
2038 "exceeds operating range."
2039 " Immediate action needed.\n",
2040 temp_val);
2041 }
2042 } else {
2043 if (adapter->temp == QLCNIC_TEMP_WARN) {
2044 dev_info(&netdev->dev,
2045 "Device temperature is now %d degrees C"
2046 " in normal range.\n", temp_val);
2047 }
2048 }
2049 adapter->temp = temp_state;
2050 return rv;
2051}
2052
2053void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
2054{
2055 struct net_device *netdev = adapter->netdev;
2056
2057 if (adapter->ahw.linkup && !linkup) {
2058		netdev_info(netdev, "NIC Link is down\n");
2059 adapter->ahw.linkup = 0;
2060 if (netif_running(netdev)) {
2061 netif_carrier_off(netdev);
2062 netif_stop_queue(netdev);
2063 }
2064 } else if (!adapter->ahw.linkup && linkup) {
2065		netdev_info(netdev, "NIC Link is up\n");
2066 adapter->ahw.linkup = 1;
2067 if (netif_running(netdev)) {
2068 netif_carrier_on(netdev);
2069 netif_wake_queue(netdev);
2070 }
2071 }
2072}
2073
2074static void qlcnic_tx_timeout(struct net_device *netdev)
2075{
2076 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2077
2078 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2079 return;
2080
2081 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
2082
2083 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
2084 adapter->need_fw_reset = 1;
2085 else
2086 adapter->reset_context = 1;
2087}
2088
2089static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
2090{
2091 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2092 struct net_device_stats *stats = &netdev->stats;
2093
2094 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
2095 stats->tx_packets = adapter->stats.xmitfinished;
2096	stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
2097 stats->tx_bytes = adapter->stats.txbytes;
2098 stats->rx_dropped = adapter->stats.rxdropped;
2099 stats->tx_dropped = adapter->stats.txdropped;
2100
2101 return stats;
2102}
2103
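/*
 * Acknowledge a legacy (INTx) interrupt; returns IRQ_NONE when the
 * interrupt was not raised by this adapter.
 */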
2104 static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
2105 {
2106 u32 status;
2107
2108 status = readl(adapter->isr_int_vec);
2109
2110 if (!(status & adapter->int_vec_bit))
2111 return IRQ_NONE;
2112
2113 /* check interrupt state machine, to be sure */
2114 status = readl(adapter->crb_int_state_reg);
2115 if (!ISR_LEGACY_INT_TRIGGERED(status))
2116 return IRQ_NONE;
2117
2118 writel(0xffffffff, adapter->tgt_status_reg);
2119 /* read twice to ensure write is flushed */
2120 readl(adapter->isr_int_vec);
2121 readl(adapter->isr_int_vec);
2122
2123 return IRQ_HANDLED;
2124}
2125
2126static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
2127{
2128 struct qlcnic_host_sds_ring *sds_ring = data;
2129 struct qlcnic_adapter *adapter = sds_ring->adapter;
2130
2131 if (adapter->flags & QLCNIC_MSIX_ENABLED)
2132 goto done;
2133 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
2134 writel(0xffffffff, adapter->tgt_status_reg);
2135 goto done;
2136 }
2137
2138 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2139 return IRQ_NONE;
2140
2141done:
2142 adapter->diag_cnt++;
2143 qlcnic_enable_int(sds_ring);
2144 return IRQ_HANDLED;
2145}
2146
2147static irqreturn_t qlcnic_intr(int irq, void *data)
2148{
2149 struct qlcnic_host_sds_ring *sds_ring = data;
2150 struct qlcnic_adapter *adapter = sds_ring->adapter;
2151
2152 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
2153 return IRQ_NONE;
2154
2155 napi_schedule(&sds_ring->napi);
2156
2157 return IRQ_HANDLED;
2158}
2159
2160static irqreturn_t qlcnic_msi_intr(int irq, void *data)
2161{
2162 struct qlcnic_host_sds_ring *sds_ring = data;
2163 struct qlcnic_adapter *adapter = sds_ring->adapter;
2164
2165 /* clear interrupt */
2166 writel(0xffffffff, adapter->tgt_status_reg);
2167
2168 napi_schedule(&sds_ring->napi);
2169 return IRQ_HANDLED;
2170}
2171
2172static irqreturn_t qlcnic_msix_intr(int irq, void *data)
2173{
2174 struct qlcnic_host_sds_ring *sds_ring = data;
2175
2176 napi_schedule(&sds_ring->napi);
2177 return IRQ_HANDLED;
2178}
2179
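/*
 * Reclaim Tx descriptors consumed by the firmware: unmap the DMA buffers,
 * free the skbs and wake the Tx queue if it was stopped.
 */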
2180static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
2181{
2182 u32 sw_consumer, hw_consumer;
2183 int count = 0, i;
2184 struct qlcnic_cmd_buffer *buffer;
2185 struct pci_dev *pdev = adapter->pdev;
2186 struct net_device *netdev = adapter->netdev;
2187 struct qlcnic_skb_frag *frag;
2188 int done;
2189 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2190
2191 if (!spin_trylock(&adapter->tx_clean_lock))
2192 return 1;
2193
2194 sw_consumer = tx_ring->sw_consumer;
2195 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2196
2197 while (sw_consumer != hw_consumer) {
2198 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
2199 if (buffer->skb) {
2200 frag = &buffer->frag_array[0];
2201 pci_unmap_single(pdev, frag->dma, frag->length,
2202 PCI_DMA_TODEVICE);
2203 frag->dma = 0ULL;
2204 for (i = 1; i < buffer->frag_count; i++) {
2205 frag++;
2206 pci_unmap_page(pdev, frag->dma, frag->length,
2207 PCI_DMA_TODEVICE);
2208 frag->dma = 0ULL;
2209 }
2210
2211 adapter->stats.xmitfinished++;
2212 dev_kfree_skb_any(buffer->skb);
2213 buffer->skb = NULL;
2214 }
2215
2216 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
2217 if (++count >= MAX_STATUS_HANDLE)
2218 break;
2219 }
2220
2221 if (count && netif_running(netdev)) {
2222 tx_ring->sw_consumer = sw_consumer;
2223
2224 smp_mb();
2225
2226 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
2227 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
2228 netif_wake_queue(netdev);
2229				adapter->stats.xmit_on++;
2230			}
2231		}
2232		adapter->tx_timeo_cnt = 0;
2233 }
2234 /*
2235 * If everything is freed up to consumer then check if the ring is full
2236 * If the ring is full then check if more needs to be freed and
2237 * schedule the call back again.
2238 *
2239 * This happens when there are 2 CPUs. One could be freeing and the
2240 * other filling it. If the ring is full when we get out of here and
2241 * the card has already interrupted the host then the host can miss the
2242 * interrupt.
2243 *
2244 * There is still a possible race condition and the host could miss an
2245 * interrupt. The card has to take care of this.
2246 */
2247 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2248 done = (sw_consumer == hw_consumer);
2249 spin_unlock(&adapter->tx_clean_lock);
2250
2251 return done;
2252}
2253
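/*
 * NAPI poll handler: clean the Tx command ring, process the receive ring,
 * and re-enable interrupts once the work done is below the budget.
 */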
2254static int qlcnic_poll(struct napi_struct *napi, int budget)
2255{
2256 struct qlcnic_host_sds_ring *sds_ring =
2257 container_of(napi, struct qlcnic_host_sds_ring, napi);
2258
2259 struct qlcnic_adapter *adapter = sds_ring->adapter;
2260
2261 int tx_complete;
2262 int work_done;
2263
2264 tx_complete = qlcnic_process_cmd_ring(adapter);
2265
2266 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2267
2268 if ((work_done < budget) && tx_complete) {
2269 napi_complete(&sds_ring->napi);
2270 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2271 qlcnic_enable_int(sds_ring);
2272 }
2273
2274 return work_done;
2275}
2276
2277 static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2278{
2279 struct qlcnic_host_sds_ring *sds_ring =
2280 container_of(napi, struct qlcnic_host_sds_ring, napi);
2281
2282 struct qlcnic_adapter *adapter = sds_ring->adapter;
2283 int work_done;
2284
2285 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2286
2287 if (work_done < budget) {
2288 napi_complete(&sds_ring->napi);
2289 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2290 qlcnic_enable_int(sds_ring);
2291 }
2292
2293 return work_done;
2294}
2295
2296#ifdef CONFIG_NET_POLL_CONTROLLER
2297static void qlcnic_poll_controller(struct net_device *netdev)
2298{
2299 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2300 disable_irq(adapter->irq);
2301 qlcnic_intr(adapter->irq, adapter);
2302 enable_irq(adapter->irq);
2303}
2304#endif
2305
2306static void
2307qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
2308{
2309 u32 val;
2310
2311 val = adapter->portnum & 0xf;
2312 val |= encoding << 7;
2313 val |= (jiffies - adapter->dev_rst_time) << 8;
2314
2315 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
2316 adapter->dev_rst_time = jiffies;
2317}
2318
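/* Advertise this function's reset/quiescent readiness in DRV_STATE */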
2319static int
2320qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
2321{
2322 u32 val;
2323
2324 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
2325 state != QLCNIC_DEV_NEED_QUISCENT);
2326
2327 if (qlcnic_api_lock(adapter))
2328		return -EIO;
2329
2330 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2331
2332 if (state == QLCNIC_DEV_NEED_RESET)
2333		QLC_DEV_SET_RST_RDY(val, adapter->portnum);
2334	else if (state == QLCNIC_DEV_NEED_QUISCENT)
2335		QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
2336
2337 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2338
2339 qlcnic_api_unlock(adapter);
2340
2341 return 0;
2342}
2343
2344static int
2345qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2346{
2347 u32 val;
2348
2349 if (qlcnic_api_lock(adapter))
2350 return -EBUSY;
2351
2352 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2353	QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
2354 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2355
2356 qlcnic_api_unlock(adapter);
2357
2358 return 0;
2359}
2360
2361 static void
2362 qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed)
2363{
2364 u32 val;
2365
2366 if (qlcnic_api_lock(adapter))
2367 goto err;
2368
2369 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
2370	QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
2371 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
2372
2373 if (failed) {
2374 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
2375 dev_info(&adapter->pdev->dev,
2376 "Device state set to Failed. Please Reboot\n");
2377 } else if (!(val & 0x11111111))
2378 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2379
2380 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2381	QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
2382 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2383
2384 qlcnic_api_unlock(adapter);
2385err:
2386 adapter->fw_fail_cnt = 0;
2387 clear_bit(__QLCNIC_START_FW, &adapter->state);
2388 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2389}
2390
2391 /* Grab the api lock before checking state */
2392static int
2393qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2394{
2395 int act, state;
2396
2397 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2398 act = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
2399
2400 if (((state & 0x11111111) == (act & 0x11111111)) ||
2401 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2402 return 0;
2403 else
2404 return 1;
2405}
2406
2407static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
2408{
2409 u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
2410
2411 if (val != QLCNIC_DRV_IDC_VER) {
2412 dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
2413 " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
2414 }
2415
2416 return 0;
2417}
2418
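/*
 * Take a device reference and, based on the IDC device state, decide
 * whether this function should start the firmware (1), proceed without
 * loading it (0), or fail (-1).
 */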
2419static int
2420qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2421{
2422 u32 val, prev_state;
2423	u8 dev_init_timeo = adapter->dev_init_timeo;
2424	u8 portnum = adapter->portnum;
2425	u8 ret;
2426
2427 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2428 return 1;
2429
2430 if (qlcnic_api_lock(adapter))
2431 return -1;
2432
2433 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
2434 if (!(val & (1 << (portnum * 4)))) {
2435 QLC_DEV_SET_REF_CNT(val, portnum);
2436		QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
2437 }
2438
2439 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2440	QLCDB(adapter, HW, "Device state = %u\n", prev_state);
2441
2442 switch (prev_state) {
2443 case QLCNIC_DEV_COLD:
2444		QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
2445		QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
2446		qlcnic_idc_debug_info(adapter, 0);
2447 qlcnic_api_unlock(adapter);
2448 return 1;
2449
2450 case QLCNIC_DEV_READY:
2451		ret = qlcnic_check_idc_ver(adapter);
2452		qlcnic_api_unlock(adapter);
2453		return ret;
2454
2455 case QLCNIC_DEV_NEED_RESET:
2456 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2457		QLC_DEV_SET_RST_RDY(val, portnum);
2458 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2459 break;
2460
2461 case QLCNIC_DEV_NEED_QUISCENT:
2462 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2463		QLC_DEV_SET_QSCNT_RDY(val, portnum);
2464 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2465 break;
2466
2467 case QLCNIC_DEV_FAILED:
2468		dev_err(&adapter->pdev->dev, "Device in failed state.\n");
2469 qlcnic_api_unlock(adapter);
2470 return -1;
2471
2472 case QLCNIC_DEV_INITIALIZING:
2473 case QLCNIC_DEV_QUISCENT:
2474 break;
2475 }
2476
2477 qlcnic_api_unlock(adapter);
2478
2479 do {
2480		msleep(1000);
2481 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2482
2483 if (prev_state == QLCNIC_DEV_QUISCENT)
2484 continue;
2485 } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
2486
2487 if (!dev_init_timeo) {
2488 dev_err(&adapter->pdev->dev,
2489			"Timed out waiting for device to initialize\n");
2490		return -1;
2491	}
2492
2493 if (qlcnic_api_lock(adapter))
2494 return -1;
2495
2496 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2497	QLC_DEV_CLR_RST_QSCNT(val, portnum);
2498 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2499
2500	ret = qlcnic_check_idc_ver(adapter);
2501 qlcnic_api_unlock(adapter);
2502
2503	return ret;
2504}
2505
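/*
 * Delayed work: drive the IDC state machine after a reset request; restart
 * the firmware when this function owns recovery, otherwise wait for the
 * device to become ready and then reattach.
 */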
2506static void
2507qlcnic_fwinit_work(struct work_struct *work)
2508{
2509 struct qlcnic_adapter *adapter = container_of(work,
2510 struct qlcnic_adapter, fw_work.work);
2511	u32 dev_state = 0xf;
2512
2513 if (qlcnic_api_lock(adapter))
2514 goto err_ret;
2515
2516 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2517 if (dev_state == QLCNIC_DEV_QUISCENT) {
2518 qlcnic_api_unlock(adapter);
2519 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2520 FW_POLL_DELAY * 2);
2521 return;
2522 }
2523
2524	if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
2525 qlcnic_api_unlock(adapter);
2526 goto wait_npar;
2527 }
2528
2529 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
2530 dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
2531 adapter->reset_ack_timeo);
2532 goto skip_ack_check;
2533 }
2534
2535 if (!qlcnic_check_drv_state(adapter)) {
2536skip_ack_check:
2537 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2538
2539 if (dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2540 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2541 QLCNIC_DEV_QUISCENT);
2542 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2543 FW_POLL_DELAY * 2);
2544			QLCDB(adapter, DRV, "Quiescing the driver\n");
2545 qlcnic_idc_debug_info(adapter, 0);
2546
2547 qlcnic_api_unlock(adapter);
2548 return;
2549 }
2550
2551 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2552 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2553 QLCNIC_DEV_INITIALIZING);
2554 set_bit(__QLCNIC_START_FW, &adapter->state);
2555 QLCDB(adapter, DRV, "Restarting fw\n");
2556			qlcnic_idc_debug_info(adapter, 0);
2557 }
2558
2559 qlcnic_api_unlock(adapter);
2560
2561		if (!adapter->nic_ops->start_firmware(adapter)) {
2562 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2563 return;
2564 }
2565 goto err_ret;
2566 }
2567
2568	qlcnic_api_unlock(adapter);
2569
2570 wait_npar:
2571	dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2572	QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
2573
2574	switch (dev_state) {
2575	case QLCNIC_DEV_READY:
2576		if (!adapter->nic_ops->start_firmware(adapter)) {
2577 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2578 return;
2579 }
2580 case QLCNIC_DEV_FAILED:
2581 break;
2582 default:
2583 qlcnic_schedule_work(adapter,
2584 qlcnic_fwinit_work, FW_POLL_DELAY);
2585 return;
2586 }
2587
2588err_ret:
2589 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
2590 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
2591	netif_device_attach(adapter->netdev);
2592	qlcnic_clr_all_drv_state(adapter, 0);
2593}
2594
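/*
 * Delayed work: detach the netdev and bring the interface down, then ack
 * the reset/quiescent request and hand control to the fwinit worker.
 */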
2595static void
2596qlcnic_detach_work(struct work_struct *work)
2597{
2598 struct qlcnic_adapter *adapter = container_of(work,
2599 struct qlcnic_adapter, fw_work.work);
2600 struct net_device *netdev = adapter->netdev;
2601 u32 status;
2602
2603 netif_device_detach(netdev);
2604
2605 qlcnic_down(adapter, netdev);
2606
2607 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2608
2609 if (status & QLCNIC_RCODE_FATAL_ERROR)
2610 goto err_ret;
2611
2612 if (adapter->temp == QLCNIC_TEMP_PANIC)
2613 goto err_ret;
2614
2615 if (qlcnic_set_drv_state(adapter, adapter->dev_state))
2616 goto err_ret;
2617
2618 adapter->fw_wait_cnt = 0;
2619
2620 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2621
2622 return;
2623
2624err_ret:
2625 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
2626 status, adapter->temp);
2627	netif_device_attach(netdev);
2628	qlcnic_clr_all_drv_state(adapter, 1);
2629}
2630
2631 /* Transition NPAR state to non-operational */
2632static void
2633qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
2634{
2635 u32 state;
2636
2637 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2638 if (state == QLCNIC_DEV_NPAR_NON_OPER)
2639 return;
2640
2641 if (qlcnic_api_lock(adapter))
2642 return;
2643 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2644 qlcnic_api_unlock(adapter);
2645}
2646
2647 /* Transition to RESET state from READY state only */
2648static void
2649qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2650{
2651 u32 state;
2652
2653	adapter->need_fw_reset = 1;
2654 if (qlcnic_api_lock(adapter))
2655 return;
2656
2657 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2658
2659	if (state == QLCNIC_DEV_READY) {
2660		QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
2661		QLCDB(adapter, DRV, "NEED_RESET state set\n");
2662		qlcnic_idc_debug_info(adapter, 0);
2663 }
2664
2665	QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_NON_OPER);
2666 qlcnic_api_unlock(adapter);
2667}
2668
2669 /* Transition to NPAR READY (operational) state from NPAR NOT READY state */
2670static void
2671qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
2672{
2673	if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
2674		adapter->op_mode != QLCNIC_MGMT_FUNC)
2675		return;
2676 if (qlcnic_api_lock(adapter))
2677 return;
2678
2679 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE, QLCNIC_DEV_NPAR_OPER);
2680 QLCDB(adapter, DRV, "NPAR operational state set\n");
2681
2682 qlcnic_api_unlock(adapter);
2683}
2684
2685static void
2686qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2687 work_func_t func, int delay)
2688{
2689 if (test_bit(__QLCNIC_AER, &adapter->state))
2690 return;
2691
2692 INIT_DELAYED_WORK(&adapter->fw_work, func);
2693 schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay));
2694}
2695
2696static void
2697qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
2698{
2699 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2700 msleep(10);
2701
2702 cancel_delayed_work_sync(&adapter->fw_work);
2703}
2704
2705static void
2706qlcnic_attach_work(struct work_struct *work)
2707{
2708 struct qlcnic_adapter *adapter = container_of(work,
2709 struct qlcnic_adapter, fw_work.work);
2710 struct net_device *netdev = adapter->netdev;
2711
2712 if (netif_running(netdev)) {
2713		if (qlcnic_up(adapter, netdev))
2714			goto done;
2715
2716 qlcnic_config_indev_addr(netdev, NETDEV_UP);
2717 }
2718
2719 done:
2720	netif_device_attach(netdev);
2721 adapter->fw_fail_cnt = 0;
2722 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2723
2724 if (!qlcnic_clr_drv_state(adapter))
2725 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2726 FW_POLL_DELAY);
2727}
2728
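/*
 * Periodic health check: monitor temperature, device state and the
 * firmware heartbeat; schedule detach/recovery work if the firmware hangs.
 */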
2729static int
2730qlcnic_check_health(struct qlcnic_adapter *adapter)
2731{
2732 u32 state = 0, heartbit;
2733 struct net_device *netdev = adapter->netdev;
2734
2735 if (qlcnic_check_temp(adapter))
2736 goto detach;
2737
2738	if (adapter->need_fw_reset)
2739		qlcnic_dev_request_reset(adapter);
2740
2741 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2742 if (state == QLCNIC_DEV_NEED_RESET ||
2743 state == QLCNIC_DEV_NEED_QUISCENT) {
2744 qlcnic_set_npar_non_operational(adapter);
2745		adapter->need_fw_reset = 1;
2746	}
2747
2748 heartbit = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
2749 if (heartbit != adapter->heartbit) {
2750 adapter->heartbit = heartbit;
2751 adapter->fw_fail_cnt = 0;
2752 if (adapter->need_fw_reset)
2753 goto detach;
2754
2755 if (adapter->reset_context &&
2756 auto_fw_reset == AUTO_FW_RESET_ENABLED) {
2757 qlcnic_reset_hw_context(adapter);
2758 adapter->netdev->trans_start = jiffies;
2759 }
2760
2761 return 0;
2762 }
2763
2764 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
2765 return 0;
2766
2767 qlcnic_dev_request_reset(adapter);
2768
2769 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED))
2770 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
2771
2772 dev_info(&netdev->dev, "firmware hang detected\n");
2773
2774detach:
2775 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
2776 QLCNIC_DEV_NEED_RESET;
2777
2778 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
2779 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
2780
2781		qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
2782 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
2783 }
af19b491
AKS
2784
2785 return 1;
2786}
2787
2788static void
2789qlcnic_fw_poll_work(struct work_struct *work)
2790{
2791 struct qlcnic_adapter *adapter = container_of(work,
2792 struct qlcnic_adapter, fw_work.work);
2793
2794 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2795 goto reschedule;
2796
2797
2798 if (qlcnic_check_health(adapter))
2799 return;
2800
2801reschedule:
2802 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
2803}
2804
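/*
 * Return 1 if every lower-numbered function on this adapter is in D3cold,
 * i.e. this is the first function coming back up after an AER reset.
 */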
2805static int qlcnic_is_first_func(struct pci_dev *pdev)
2806{
2807 struct pci_dev *oth_pdev;
2808 int val = pdev->devfn;
2809
2810 while (val-- > 0) {
2811 oth_pdev = pci_get_domain_bus_and_slot(pci_domain_nr
2812 (pdev->bus), pdev->bus->number,
2813 PCI_DEVFN(PCI_SLOT(pdev->devfn), val));
2814 if (!oth_pdev)
2815 continue;
2816
2817 if (oth_pdev->current_state != PCI_D3cold) {
2818 pci_dev_put(oth_pdev);
2819			return 0;
2820 }
2821 pci_dev_put(oth_pdev);
2822 }
2823 return 1;
2824}
2825
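/*
 * Bring the PCI function back after an AER slot reset: re-enable the
 * device, restart the firmware if this is the first privileged function,
 * and reattach the interface.
 */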
2826static int qlcnic_attach_func(struct pci_dev *pdev)
2827{
2828 int err, first_func;
2829 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2830 struct net_device *netdev = adapter->netdev;
2831
2832 pdev->error_state = pci_channel_io_normal;
2833
2834 err = pci_enable_device(pdev);
2835 if (err)
2836 return err;
2837
2838 pci_set_power_state(pdev, PCI_D0);
2839 pci_set_master(pdev);
2840 pci_restore_state(pdev);
2841
2842 first_func = qlcnic_is_first_func(pdev);
2843
2844 if (qlcnic_api_lock(adapter))
2845 return -EINVAL;
2846
2847	if (adapter->op_mode != QLCNIC_NON_PRIV_FUNC && first_func) {
2848 adapter->need_fw_reset = 1;
2849 set_bit(__QLCNIC_START_FW, &adapter->state);
2850 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
2851 QLCDB(adapter, DRV, "Restarting fw\n");
2852 }
2853 qlcnic_api_unlock(adapter);
2854
2855 err = adapter->nic_ops->start_firmware(adapter);
2856 if (err)
2857 return err;
2858
2859 qlcnic_clr_drv_state(adapter);
2860 qlcnic_setup_intr(adapter);
2861
2862 if (netif_running(netdev)) {
2863 err = qlcnic_attach(adapter);
2864 if (err) {
2865			qlcnic_clr_all_drv_state(adapter, 1);
2866 clear_bit(__QLCNIC_AER, &adapter->state);
2867 netif_device_attach(netdev);
2868 return err;
2869 }
2870
2871 err = qlcnic_up(adapter, netdev);
2872 if (err)
2873 goto done;
2874
2875 qlcnic_config_indev_addr(netdev, NETDEV_UP);
2876 }
2877 done:
2878 netif_device_attach(netdev);
2879 return err;
2880}
2881
2882static pci_ers_result_t qlcnic_io_error_detected(struct pci_dev *pdev,
2883 pci_channel_state_t state)
2884{
2885 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2886 struct net_device *netdev = adapter->netdev;
2887
2888 if (state == pci_channel_io_perm_failure)
2889 return PCI_ERS_RESULT_DISCONNECT;
2890
2891 if (state == pci_channel_io_normal)
2892 return PCI_ERS_RESULT_RECOVERED;
2893
2894 set_bit(__QLCNIC_AER, &adapter->state);
2895 netif_device_detach(netdev);
2896
2897 cancel_delayed_work_sync(&adapter->fw_work);
2898
2899 if (netif_running(netdev))
2900 qlcnic_down(adapter, netdev);
2901
2902 qlcnic_detach(adapter);
2903 qlcnic_teardown_intr(adapter);
2904
2905 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2906
2907 pci_save_state(pdev);
2908 pci_disable_device(pdev);
2909
2910 return PCI_ERS_RESULT_NEED_RESET;
2911}
2912
2913static pci_ers_result_t qlcnic_io_slot_reset(struct pci_dev *pdev)
2914{
2915 return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
2916 PCI_ERS_RESULT_RECOVERED;
2917}
2918
2919static void qlcnic_io_resume(struct pci_dev *pdev)
2920{
2921 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2922
2923 pci_cleanup_aer_uncorrect_error_status(pdev);
2924
2925 if (QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) == QLCNIC_DEV_READY &&
2926 test_and_clear_bit(__QLCNIC_AER, &adapter->state))
2927 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2928 FW_POLL_DELAY);
2929}
2930
2931static int
2932qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
2933{
2934 int err;
2935
2936 err = qlcnic_can_start_firmware(adapter);
2937 if (err)
2938 return err;
2939
2940 err = qlcnic_check_npar_opertional(adapter);
2941 if (err)
2942 return err;
2943
2944 qlcnic_check_options(adapter);
2945
2946 adapter->need_fw_reset = 0;
2947
2948 return err;
2949}
2950
2951static int
2952qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
2953{
2954 return -EOPNOTSUPP;
2955}
2956
2957static int
2958qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
2959{
2960 return -EOPNOTSUPP;
2961}
2962
2963static ssize_t
2964qlcnic_store_bridged_mode(struct device *dev,
2965 struct device_attribute *attr, const char *buf, size_t len)
2966{
2967 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2968 unsigned long new;
2969 int ret = -EINVAL;
2970
2971 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
2972 goto err_out;
2973
2974	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
2975 goto err_out;
2976
2977 if (strict_strtoul(buf, 2, &new))
2978 goto err_out;
2979
2980	if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
2981 ret = len;
2982
2983err_out:
2984 return ret;
2985}
2986
2987static ssize_t
2988qlcnic_show_bridged_mode(struct device *dev,
2989 struct device_attribute *attr, char *buf)
2990{
2991 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2992 int bridged_mode = 0;
2993
2994 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2995 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
2996
2997 return sprintf(buf, "%d\n", bridged_mode);
2998}
2999
3000static struct device_attribute dev_attr_bridged_mode = {
3001 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
3002 .show = qlcnic_show_bridged_mode,
3003 .store = qlcnic_store_bridged_mode,
3004};
3005
3006static ssize_t
3007qlcnic_store_diag_mode(struct device *dev,
3008 struct device_attribute *attr, const char *buf, size_t len)
3009{
3010 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3011 unsigned long new;
3012
3013 if (strict_strtoul(buf, 2, &new))
3014 return -EINVAL;
3015
3016 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
3017 adapter->flags ^= QLCNIC_DIAG_ENABLED;
3018
3019 return len;
3020}
3021
3022static ssize_t
3023qlcnic_show_diag_mode(struct device *dev,
3024 struct device_attribute *attr, char *buf)
3025{
3026 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3027
3028 return sprintf(buf, "%d\n",
3029 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
3030}
3031
3032static struct device_attribute dev_attr_diag_mode = {
3033 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
3034 .show = qlcnic_show_diag_mode,
3035 .store = qlcnic_store_diag_mode,
3036};
3037
3038static int
3039qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
3040 loff_t offset, size_t size)
3041{
3042 size_t crb_size = 4;
3043
3044 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3045 return -EIO;
3046
3047 if (offset < QLCNIC_PCI_CRBSPACE) {
3048 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
3049 QLCNIC_PCI_CAMQM_END))
3050 crb_size = 8;
3051 else
3052 return -EINVAL;
3053 }
3054
3055 if ((size != crb_size) || (offset & (crb_size-1)))
3056 return -EINVAL;
3057
3058 return 0;
3059}
3060
3061static ssize_t
3062qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
3063 struct bin_attribute *attr,
3064 char *buf, loff_t offset, size_t size)
3065{
3066 struct device *dev = container_of(kobj, struct device, kobj);
3067 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3068 u32 data;
3069	u64 qmdata;
3070 int ret;
3071
3072 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3073 if (ret != 0)
3074 return ret;
3075
3076 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3077 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
3078 memcpy(buf, &qmdata, size);
3079 } else {
3080 data = QLCRD32(adapter, offset);
3081 memcpy(buf, &data, size);
3082 }
3083 return size;
3084}
3085
3086static ssize_t
3087qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
3088 struct bin_attribute *attr,
3089 char *buf, loff_t offset, size_t size)
3090{
3091 struct device *dev = container_of(kobj, struct device, kobj);
3092 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3093 u32 data;
3094	u64 qmdata;
3095 int ret;
3096
3097 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
3098 if (ret != 0)
3099 return ret;
3100
3101 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
3102 memcpy(&qmdata, buf, size);
3103 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
3104 } else {
3105 memcpy(&data, buf, size);
3106 QLCWR32(adapter, offset, data);
3107 }
3108 return size;
3109}
3110
3111static int
3112qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
3113 loff_t offset, size_t size)
3114{
3115 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
3116 return -EIO;
3117
3118 if ((size != 8) || (offset & 0x7))
3119 return -EIO;
3120
3121 return 0;
3122}
3123
3124static ssize_t
3125qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
3126 struct bin_attribute *attr,
3127 char *buf, loff_t offset, size_t size)
3128{
3129 struct device *dev = container_of(kobj, struct device, kobj);
3130 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3131 u64 data;
3132 int ret;
3133
3134 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3135 if (ret != 0)
3136 return ret;
3137
3138 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
3139 return -EIO;
3140
3141 memcpy(buf, &data, size);
3142
3143 return size;
3144}
3145
3146static ssize_t
3147qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
3148 struct bin_attribute *attr,
3149 char *buf, loff_t offset, size_t size)
3150{
3151 struct device *dev = container_of(kobj, struct device, kobj);
3152 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3153 u64 data;
3154 int ret;
3155
3156 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
3157 if (ret != 0)
3158 return ret;
3159
3160 memcpy(&data, buf, size);
3161
3162 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
3163 return -EIO;
3164
3165 return size;
3166}
3167
3168
3169static struct bin_attribute bin_attr_crb = {
3170 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
3171 .size = 0,
3172 .read = qlcnic_sysfs_read_crb,
3173 .write = qlcnic_sysfs_write_crb,
3174};
3175
3176static struct bin_attribute bin_attr_mem = {
3177 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
3178 .size = 0,
3179 .read = qlcnic_sysfs_read_mem,
3180 .write = qlcnic_sysfs_write_mem,
3181};
3182
3183 static int
3184validate_pm_config(struct qlcnic_adapter *adapter,
3185 struct qlcnic_pm_func_cfg *pm_cfg, int count)
3186{
3187
3188 u8 src_pci_func, s_esw_id, d_esw_id;
3189 u8 dest_pci_func;
3190 int i;
3191
3192 for (i = 0; i < count; i++) {
3193 src_pci_func = pm_cfg[i].pci_func;
3194 dest_pci_func = pm_cfg[i].dest_npar;
3195 if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
3196 || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
3197 return QL_STATUS_INVALID_PARAM;
3198
3199 if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
3200 return QL_STATUS_INVALID_PARAM;
3201
3202 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
3203 return QL_STATUS_INVALID_PARAM;
3204
3205 s_esw_id = adapter->npars[src_pci_func].phy_port;
3206 d_esw_id = adapter->npars[dest_pci_func].phy_port;
3207
3208 if (s_esw_id != d_esw_id)
3209 return QL_STATUS_INVALID_PARAM;
3210
3211 }
3212 return 0;
3213
3214}
3215
3216static ssize_t
3217qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
3218 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3219{
3220 struct device *dev = container_of(kobj, struct device, kobj);
3221 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3222 struct qlcnic_pm_func_cfg *pm_cfg;
3223 u32 id, action, pci_func;
3224 int count, rem, i, ret;
3225
3226 count = size / sizeof(struct qlcnic_pm_func_cfg);
3227 rem = size % sizeof(struct qlcnic_pm_func_cfg);
3228 if (rem)
3229 return QL_STATUS_INVALID_PARAM;
3230
3231 pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
3232
3233 ret = validate_pm_config(adapter, pm_cfg, count);
3234 if (ret)
3235 return ret;
3236 for (i = 0; i < count; i++) {
3237 pci_func = pm_cfg[i].pci_func;
3238		action = !!pm_cfg[i].action;
3239 id = adapter->npars[pci_func].phy_port;
3240 ret = qlcnic_config_port_mirroring(adapter, id,
3241 action, pci_func);
3242 if (ret)
3243 return ret;
3244 }
3245
3246 for (i = 0; i < count; i++) {
3247 pci_func = pm_cfg[i].pci_func;
3248 id = adapter->npars[pci_func].phy_port;
3249		adapter->npars[pci_func].enable_pm = !!pm_cfg[i].action;
3250 adapter->npars[pci_func].dest_npar = id;
3251 }
3252 return size;
3253}
3254
3255static ssize_t
3256qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
3257 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3258{
3259 struct device *dev = container_of(kobj, struct device, kobj);
3260 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3261 struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
3262 int i;
3263
3264 if (size != sizeof(pm_cfg))
3265 return QL_STATUS_INVALID_PARAM;
3266
3267 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3268 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3269 continue;
3270 pm_cfg[i].action = adapter->npars[i].enable_pm;
3271 pm_cfg[i].dest_npar = 0;
3272 pm_cfg[i].pci_func = i;
3273 }
3274 memcpy(buf, &pm_cfg, size);
3275
3276 return size;
3277}
3278
3279 static int
3280 validate_esw_config(struct qlcnic_adapter *adapter,
3281		struct qlcnic_esw_func_cfg *esw_cfg, int count)
3282{
3283 u8 pci_func;
3284 int i;
3285 for (i = 0; i < count; i++) {
3286 pci_func = esw_cfg[i].pci_func;
3287 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3288 return QL_STATUS_INVALID_PARAM;
3289
3290 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3291 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3292 return QL_STATUS_INVALID_PARAM;
3293
3294 switch (esw_cfg[i].op_mode) {
3295 case QLCNIC_PORT_DEFAULTS:
3296 break;
3297 case QLCNIC_ADD_VLAN:
346fe763
RB
3298 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3299 return QL_STATUS_INVALID_PARAM;
3300 if (!esw_cfg[i].op_type)
3301 return QL_STATUS_INVALID_PARAM;
3302 break;
3303 case QLCNIC_DEL_VLAN:
3304 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3305 return QL_STATUS_INVALID_PARAM;
3306 if (!esw_cfg[i].op_type)
3307 return QL_STATUS_INVALID_PARAM;
3308 break;
3309 default:
3310			return QL_STATUS_INVALID_PARAM;
3311		}
3312	}
3313 return 0;
3314}
3315
3316static ssize_t
3317qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3318 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3319{
3320 struct device *dev = container_of(kobj, struct device, kobj);
3321 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3322 struct qlcnic_esw_func_cfg *esw_cfg;
3323	struct qlcnic_npar_info *npar;
3324	int count, rem, i, ret;
3325	u8 pci_func, op_mode = 0;
3326
3327 count = size / sizeof(struct qlcnic_esw_func_cfg);
3328 rem = size % sizeof(struct qlcnic_esw_func_cfg);
3329 if (rem)
3330 return QL_STATUS_INVALID_PARAM;
3331
3332 esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
3333 ret = validate_esw_config(adapter, esw_cfg, count);
3334 if (ret)
3335 return ret;
3336
3337 for (i = 0; i < count; i++) {
3338 if (adapter->op_mode == QLCNIC_MGMT_FUNC)
3339 if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
3340 return QL_STATUS_INVALID_PARAM;
3341 if (adapter->ahw.pci_func == esw_cfg[i].pci_func)
3342 op_mode = esw_cfg[i].op_mode;
3343 qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]);
3344 esw_cfg[i].op_mode = op_mode;
3345 esw_cfg[i].pci_func = adapter->ahw.pci_func;
3346 switch (esw_cfg[i].op_mode) {
3347 case QLCNIC_PORT_DEFAULTS:
3348 qlcnic_set_eswitch_port_features(adapter,
3349 &esw_cfg[i]);
3350 break;
3351 }
3352 }
3353
3354 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3355 goto out;
3356 for (i = 0; i < count; i++) {
3357 pci_func = esw_cfg[i].pci_func;
3358 npar = &adapter->npars[pci_func];
3359 switch (esw_cfg[i].op_mode) {
3360 case QLCNIC_PORT_DEFAULTS:
3361 npar->promisc_mode = esw_cfg[i].promisc_mode;
3362 npar->mac_learning = esw_cfg[i].mac_learning;
3363 npar->offload_flags = esw_cfg[i].offload_flags;
3364 npar->mac_anti_spoof = esw_cfg[i].mac_anti_spoof;
3365 npar->discard_tagged = esw_cfg[i].discard_tagged;
3366 break;
3367 case QLCNIC_ADD_VLAN:
3368 npar->pvid = esw_cfg[i].vlan_id;
3369 break;
3370 case QLCNIC_DEL_VLAN:
3371 npar->pvid = 0;
3372 break;
3373 }
3374	}
3375 out:
3376 return size;
3377}
3378
3379static ssize_t
3380qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
3381 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3382{
3383 struct device *dev = container_of(kobj, struct device, kobj);
3384 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3385 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
3386	u8 i;
3387
3388 if (size != sizeof(esw_cfg))
3389 return QL_STATUS_INVALID_PARAM;
3390
3391 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3392 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3393 continue;
3394 esw_cfg[i].pci_func = i;
3395 if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[i]))
3396 return QL_STATUS_INVALID_PARAM;
3397 }
3398 memcpy(buf, &esw_cfg, size);
3399
3400 return size;
3401}
3402
3403 static int
3404validate_npar_config(struct qlcnic_adapter *adapter,
3405 struct qlcnic_npar_func_cfg *np_cfg, int count)
3406{
3407 u8 pci_func, i;
3408
3409 for (i = 0; i < count; i++) {
3410 pci_func = np_cfg[i].pci_func;
3411 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3412 return QL_STATUS_INVALID_PARAM;
3413
3414 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3415 return QL_STATUS_INVALID_PARAM;
3416
3417 if (!IS_VALID_BW(np_cfg[i].min_bw)
3418 || !IS_VALID_BW(np_cfg[i].max_bw)
3419 || !IS_VALID_RX_QUEUES(np_cfg[i].max_rx_queues)
3420 || !IS_VALID_TX_QUEUES(np_cfg[i].max_tx_queues))
3421 return QL_STATUS_INVALID_PARAM;
3422 }
3423 return 0;
3424}
3425
3426static ssize_t
3427qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
3428 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3429{
3430 struct device *dev = container_of(kobj, struct device, kobj);
3431 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3432 struct qlcnic_info nic_info;
3433 struct qlcnic_npar_func_cfg *np_cfg;
3434 int i, count, rem, ret;
3435 u8 pci_func;
3436
3437 count = size / sizeof(struct qlcnic_npar_func_cfg);
3438 rem = size % sizeof(struct qlcnic_npar_func_cfg);
3439 if (rem)
3440 return QL_STATUS_INVALID_PARAM;
3441
3442 np_cfg = (struct qlcnic_npar_func_cfg *) buf;
3443 ret = validate_npar_config(adapter, np_cfg, count);
3444 if (ret)
3445 return ret;
3446
3447 for (i = 0; i < count ; i++) {
3448 pci_func = np_cfg[i].pci_func;
3449 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
3450 if (ret)
3451 return ret;
3452 nic_info.pci_func = pci_func;
3453 nic_info.min_tx_bw = np_cfg[i].min_bw;
3454 nic_info.max_tx_bw = np_cfg[i].max_bw;
3455 ret = qlcnic_set_nic_info(adapter, &nic_info);
3456 if (ret)
3457 return ret;
3458 adapter->npars[i].min_bw = nic_info.min_tx_bw;
3459 adapter->npars[i].max_bw = nic_info.max_tx_bw;
3460 }
3461
3462 return size;
3463
3464}
3465static ssize_t
3466qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
3467 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3468{
3469 struct device *dev = container_of(kobj, struct device, kobj);
3470 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3471 struct qlcnic_info nic_info;
3472 struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
3473 int i, ret;
3474
3475 if (size != sizeof(np_cfg))
3476 return QL_STATUS_INVALID_PARAM;
3477
3478 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3479 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3480 continue;
3481 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
3482 if (ret)
3483 return ret;
3484
3485 np_cfg[i].pci_func = i;
3486 np_cfg[i].op_mode = nic_info.op_mode;
3487 np_cfg[i].port_num = nic_info.phys_port;
3488 np_cfg[i].fw_capab = nic_info.capabilities;
3489 np_cfg[i].min_bw = nic_info.min_tx_bw ;
3490 np_cfg[i].max_bw = nic_info.max_tx_bw;
3491 np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
3492 np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
3493 }
3494 memcpy(buf, &np_cfg, size);
3495 return size;
3496}
3497
3498static ssize_t
3499qlcnic_sysfs_get_port_stats(struct file *file, struct kobject *kobj,
3500 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3501{
3502 struct device *dev = container_of(kobj, struct device, kobj);
3503 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3504 struct qlcnic_esw_statistics port_stats;
3505 int ret;
3506
3507 if (size != sizeof(struct qlcnic_esw_statistics))
3508 return QL_STATUS_INVALID_PARAM;
3509
3510 if (offset >= QLCNIC_MAX_PCI_FUNC)
3511 return QL_STATUS_INVALID_PARAM;
3512
3513 memset(&port_stats, 0, size);
3514 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3515 &port_stats.rx);
3516 if (ret)
3517 return ret;
3518
3519 ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3520 &port_stats.tx);
3521 if (ret)
3522 return ret;
3523
3524 memcpy(buf, &port_stats, size);
3525 return size;
3526}
3527
3528static ssize_t
3529qlcnic_sysfs_get_esw_stats(struct file *file, struct kobject *kobj,
3530 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3531{
3532 struct device *dev = container_of(kobj, struct device, kobj);
3533 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3534 struct qlcnic_esw_statistics esw_stats;
3535 int ret;
3536
3537 if (size != sizeof(struct qlcnic_esw_statistics))
3538 return QL_STATUS_INVALID_PARAM;
3539
3540 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3541 return QL_STATUS_INVALID_PARAM;
3542
3543 memset(&esw_stats, 0, size);
3544 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
3545 &esw_stats.rx);
3546 if (ret)
3547 return ret;
3548
3549 ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_TX_COUNTER,
3550 &esw_stats.tx);
3551 if (ret)
3552 return ret;
3553
3554 memcpy(buf, &esw_stats, size);
3555 return size;
3556}
3557
3558static ssize_t
3559qlcnic_sysfs_clear_esw_stats(struct file *file, struct kobject *kobj,
3560 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3561{
3562 struct device *dev = container_of(kobj, struct device, kobj);
3563 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3564 int ret;
3565
3566 if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
3567 return QL_STATUS_INVALID_PARAM;
3568
3569 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3570 QLCNIC_QUERY_RX_COUNTER);
3571 if (ret)
3572 return ret;
3573
3574 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
3575 QLCNIC_QUERY_TX_COUNTER);
3576 if (ret)
3577 return ret;
3578
3579 return size;
3580}
3581
3582static ssize_t
3583qlcnic_sysfs_clear_port_stats(struct file *file, struct kobject *kobj,
3584 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3585{
3586
3587 struct device *dev = container_of(kobj, struct device, kobj);
3588 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3589 int ret;
3590
3591 if (offset >= QLCNIC_MAX_PCI_FUNC)
3592 return QL_STATUS_INVALID_PARAM;
3593
3594 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3595 QLCNIC_QUERY_RX_COUNTER);
3596 if (ret)
3597 return ret;
3598
3599 ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
3600 QLCNIC_QUERY_TX_COUNTER);
3601 if (ret)
3602 return ret;
3603
3604 return size;
3605}
3606
3607static ssize_t
3608qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
3609 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3610{
3611 struct device *dev = container_of(kobj, struct device, kobj);
3612 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3613 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
3614	struct qlcnic_pci_info *pci_info;
3615 int i, ret;
3616
3617 if (size != sizeof(pci_cfg))
3618 return QL_STATUS_INVALID_PARAM;
3619
3620 pci_info = kcalloc(QLCNIC_MAX_PCI_FUNC, sizeof(*pci_info), GFP_KERNEL);
3621 if (!pci_info)
3622 return -ENOMEM;
3623
3624	ret = qlcnic_get_pci_info(adapter, pci_info);
3625 if (ret) {
3626 kfree(pci_info);
3627		return ret;
3628	}
3629
3630 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3631 pci_cfg[i].pci_func = pci_info[i].id;
3632 pci_cfg[i].func_type = pci_info[i].type;
3633 pci_cfg[i].port_num = pci_info[i].default_port;
3634 pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
3635 pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
3636 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
3637 }
3638 memcpy(buf, &pci_cfg, size);
3639	kfree(pci_info);
3640	return size;
3641}
3642static struct bin_attribute bin_attr_npar_config = {
3643 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
3644 .size = 0,
3645 .read = qlcnic_sysfs_read_npar_config,
3646 .write = qlcnic_sysfs_write_npar_config,
3647};
3648
3649static struct bin_attribute bin_attr_pci_config = {
3650 .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
3651 .size = 0,
3652 .read = qlcnic_sysfs_read_pci_config,
3653 .write = NULL,
3654};
3655
3656static struct bin_attribute bin_attr_port_stats = {
3657 .attr = {.name = "port_stats", .mode = (S_IRUGO | S_IWUSR)},
3658 .size = 0,
3659 .read = qlcnic_sysfs_get_port_stats,
3660 .write = qlcnic_sysfs_clear_port_stats,
3661};
3662
3663static struct bin_attribute bin_attr_esw_stats = {
3664 .attr = {.name = "esw_stats", .mode = (S_IRUGO | S_IWUSR)},
3665 .size = 0,
3666 .read = qlcnic_sysfs_get_esw_stats,
3667 .write = qlcnic_sysfs_clear_esw_stats,
3668};
3669
3670static struct bin_attribute bin_attr_esw_config = {
3671 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
3672 .size = 0,
3673 .read = qlcnic_sysfs_read_esw_config,
3674 .write = qlcnic_sysfs_write_esw_config,
3675};
3676
3677static struct bin_attribute bin_attr_pm_config = {
3678 .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
3679 .size = 0,
3680 .read = qlcnic_sysfs_read_pm_config,
3681 .write = qlcnic_sysfs_write_pm_config,
3682};
3683
3684static void
3685qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
3686{
3687 struct device *dev = &adapter->pdev->dev;
3688
3689 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3690 if (device_create_file(dev, &dev_attr_bridged_mode))
3691 dev_warn(dev,
3692 "failed to create bridged_mode sysfs entry\n");
3693}
3694
3695static void
3696qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
3697{
3698 struct device *dev = &adapter->pdev->dev;
3699
3700 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3701 device_remove_file(dev, &dev_attr_bridged_mode);
3702}
3703
3704static void
3705qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
3706{
3707 struct device *dev = &adapter->pdev->dev;
3708
3709 if (device_create_bin_file(dev, &bin_attr_port_stats))
3710 dev_info(dev, "failed to create port stats sysfs entry");
3711
3712 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3713 return;
3714 if (device_create_file(dev, &dev_attr_diag_mode))
3715 dev_info(dev, "failed to create diag_mode sysfs entry\n");
3716 if (device_create_bin_file(dev, &bin_attr_crb))
3717 dev_info(dev, "failed to create crb sysfs entry\n");
3718 if (device_create_bin_file(dev, &bin_attr_mem))
3719 dev_info(dev, "failed to create mem sysfs entry\n");
3720 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
3721 return;
3722 if (device_create_bin_file(dev, &bin_attr_esw_config))
3723 dev_info(dev, "failed to create esw config sysfs entry");
3724 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3725 return;
3726 if (device_create_bin_file(dev, &bin_attr_pci_config))
3727 dev_info(dev, "failed to create pci config sysfs entry");
3728 if (device_create_bin_file(dev, &bin_attr_npar_config))
3729 dev_info(dev, "failed to create npar config sysfs entry");
3730 if (device_create_bin_file(dev, &bin_attr_pm_config))
3731 dev_info(dev, "failed to create pm config sysfs entry");
3732 if (device_create_bin_file(dev, &bin_attr_esw_stats))
3733 dev_info(dev, "failed to create eswitch stats sysfs entry");
3734}
3735
3736static void
3737qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
3738{
3739 struct device *dev = &adapter->pdev->dev;
3740
3741 device_remove_bin_file(dev, &bin_attr_port_stats);
3742
3743 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3744 return;
3745 device_remove_file(dev, &dev_attr_diag_mode);
3746 device_remove_bin_file(dev, &bin_attr_crb);
3747 device_remove_bin_file(dev, &bin_attr_mem);
3748 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
3749 return;
3750 device_remove_bin_file(dev, &bin_attr_esw_config);
3751 if (adapter->op_mode != QLCNIC_MGMT_FUNC)
3752 return;
3753 device_remove_bin_file(dev, &bin_attr_pci_config);
3754 device_remove_bin_file(dev, &bin_attr_npar_config);
3753	device_remove_bin_file(dev, &bin_attr_pm_config);
3756	device_remove_bin_file(dev, &bin_attr_esw_stats);
3757}
3758
3759#ifdef CONFIG_INET
3760
3761#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
3762
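/*
 * Walk every IPv4 address assigned to the interface and tell the
 * firmware about each one, adding it on NETDEV_UP and removing it on
 * NETDEV_DOWN.
 */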
3763static void
3764qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
3765{
3766 struct in_device *indev;
3767 struct qlcnic_adapter *adapter = netdev_priv(dev);
3768
3769 indev = in_dev_get(dev);
3770 if (!indev)
3771 return;
3772
3773 for_ifa(indev) {
3774 switch (event) {
3775 case NETDEV_UP:
3776 qlcnic_config_ipaddr(adapter,
3777 ifa->ifa_address, QLCNIC_IP_UP);
3778 break;
3779 case NETDEV_DOWN:
3780 qlcnic_config_ipaddr(adapter,
3781 ifa->ifa_address, QLCNIC_IP_DOWN);
3782 break;
3783 default:
3784 break;
3785 }
3786 } endfor_ifa(indev);
3787
3788 in_dev_put(indev);
3789}
3790
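/*
 * netdevice notifier: resolve VLAN devices to the underlying real
 * device, check that it belongs to this driver and is up, then re-sync
 * the interface's IPv4 address list with the firmware.
 */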
3791static int qlcnic_netdev_event(struct notifier_block *this,
3792 unsigned long event, void *ptr)
3793{
3794 struct qlcnic_adapter *adapter;
3795 struct net_device *dev = (struct net_device *)ptr;
3796
3797recheck:
3798 if (dev == NULL)
3799 goto done;
3800
3801 if (dev->priv_flags & IFF_802_1Q_VLAN) {
3802 dev = vlan_dev_real_dev(dev);
3803 goto recheck;
3804 }
3805
3806 if (!is_qlcnic_netdev(dev))
3807 goto done;
3808
3809 adapter = netdev_priv(dev);
3810
3811 if (!adapter)
3812 goto done;
3813
3814	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
3815 goto done;
3816
3817 qlcnic_config_indev_addr(dev, event);
3818done:
3819 return NOTIFY_DONE;
3820}
3821
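/*
 * inetaddr notifier: called when a single IPv4 address is added to or
 * removed from an interface; after the same VLAN and ownership checks
 * as above, push just that address to the firmware.
 */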
3822static int
3823qlcnic_inetaddr_event(struct notifier_block *this,
3824 unsigned long event, void *ptr)
3825{
3826 struct qlcnic_adapter *adapter;
3827 struct net_device *dev;
3828
3829 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
3830
3831 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
3832
3833recheck:
3834 if (dev == NULL || !netif_running(dev))
3835 goto done;
3836
3837 if (dev->priv_flags & IFF_802_1Q_VLAN) {
3838 dev = vlan_dev_real_dev(dev);
3839 goto recheck;
3840 }
3841
3842 if (!is_qlcnic_netdev(dev))
3843 goto done;
3844
3845 adapter = netdev_priv(dev);
3846
3847	if (!adapter)
3848 goto done;
3849
3850	if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
3851 goto done;
3852
3853 switch (event) {
3854 case NETDEV_UP:
3855 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
3856 break;
3857 case NETDEV_DOWN:
3858 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
3859 break;
3860 default:
3861 break;
3862 }
3863
3864done:
3865 return NOTIFY_DONE;
3866}
3867
3868static struct notifier_block qlcnic_netdev_cb = {
3869 .notifier_call = qlcnic_netdev_event,
3870};
3871
3872static struct notifier_block qlcnic_inetaddr_cb = {
3873 .notifier_call = qlcnic_inetaddr_event,
3874};
3875#else
3876static void
3877qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
3878{ }
3879#endif
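/*
 * PCI error recovery callbacks, typically driven by the AER/EEH core:
 * detect the error, reset the slot and resume traffic once the device
 * has recovered.
 */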
3880static struct pci_error_handlers qlcnic_err_handler = {
3881 .error_detected = qlcnic_io_error_detected,
3882 .slot_reset = qlcnic_io_slot_reset,
3883 .resume = qlcnic_io_resume,
3884};
3885
3886static struct pci_driver qlcnic_driver = {
3887 .name = qlcnic_driver_name,
3888 .id_table = qlcnic_pci_tbl,
3889 .probe = qlcnic_probe,
3890 .remove = __devexit_p(qlcnic_remove),
3891#ifdef CONFIG_PM
3892 .suspend = qlcnic_suspend,
3893 .resume = qlcnic_resume,
3894#endif
3895 .shutdown = qlcnic_shutdown,
3896 .err_handler = &qlcnic_err_handler
3897
3898};
3899
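/*
 * Module init: register the netdev/inetaddr notifiers first (when INET
 * is configured) so address changes are tracked, then register the PCI
 * driver; the notifiers are unwound if PCI registration fails.
 */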
3900static int __init qlcnic_init_module(void)
3901{
3902	int ret;
3903
3904 printk(KERN_INFO "%s\n", qlcnic_driver_string);
3905
3906#ifdef CONFIG_INET
3907 register_netdevice_notifier(&qlcnic_netdev_cb);
3908 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
3909#endif
3910
3911 ret = pci_register_driver(&qlcnic_driver);
3912 if (ret) {
3913#ifdef CONFIG_INET
3914 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
3915 unregister_netdevice_notifier(&qlcnic_netdev_cb);
3916#endif
3917 }
3918
3919	return ret;
3920}
3921
3922module_init(qlcnic_init_module);
3923
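/*
 * Module exit: unregister the PCI driver before the notifiers, the
 * reverse order of qlcnic_init_module().
 */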
3924static void __exit qlcnic_exit_module(void)
3925{
3926
3927 pci_unregister_driver(&qlcnic_driver);
3928
3929#ifdef CONFIG_INET
3930 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
3931 unregister_netdevice_notifier(&qlcnic_netdev_cb);
3932#endif
3933}
3934
3935module_exit(qlcnic_exit_module);