drivers/net/qlcnic/qlcnic_main.c
1/*
2 * Copyright (C) 2009 - QLogic Corporation.
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 *
23 */
24
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28
29#include "qlcnic.h"
30
31#include <linux/dma-mapping.h>
32#include <linux/if_vlan.h>
33#include <net/ip.h>
34#include <linux/ipv6.h>
35#include <linux/inetdevice.h>
36#include <linux/sysfs.h>
37
38MODULE_DESCRIPTION("QLogic 1/10 GbE Converged/Intelligent Ethernet Driver");
39MODULE_LICENSE("GPL");
40MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
41MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
42
43char qlcnic_driver_name[] = "qlcnic";
44static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
45 "Converged/Intelligent Ethernet Driver v" QLCNIC_LINUX_VERSIONID;
46
47static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
48
49/* Default to restricted 1G auto-neg mode */
50static int wol_port_mode = 5;
51
52static int use_msi = 1;
53module_param(use_msi, int, 0644);
54MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
55
56static int use_msi_x = 1;
57module_param(use_msi_x, int, 0644);
58MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
59
60static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
61module_param(auto_fw_reset, int, 0644);
62MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
63
64static int load_fw_file;
65module_param(load_fw_file, int, 0644);
66MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
67
68static int qlcnic_config_npars;
69module_param(qlcnic_config_npars, int, 0644);
70MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
71
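/*
 * All of the above are standard module parameters; as a usage sketch they can
 * be set at load time (for example "modprobe qlcnic use_msi_x=0 auto_fw_reset=1")
 * or inspected and changed afterwards under /sys/module/qlcnic/parameters/,
 * since each is registered with mode 0644.
 */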
72static int __devinit qlcnic_probe(struct pci_dev *pdev,
73 const struct pci_device_id *ent);
74static void __devexit qlcnic_remove(struct pci_dev *pdev);
75static int qlcnic_open(struct net_device *netdev);
76static int qlcnic_close(struct net_device *netdev);
77static void qlcnic_tx_timeout(struct net_device *netdev);
78static void qlcnic_attach_work(struct work_struct *work);
79static void qlcnic_fwinit_work(struct work_struct *work);
80static void qlcnic_fw_poll_work(struct work_struct *work);
81static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
82 work_func_t func, int delay);
83static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
84static int qlcnic_poll(struct napi_struct *napi, int budget);
85static int qlcnic_rx_poll(struct napi_struct *napi, int budget);
86#ifdef CONFIG_NET_POLL_CONTROLLER
87static void qlcnic_poll_controller(struct net_device *netdev);
88#endif
89
90static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
91static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
92static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
93static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
94
95static void qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding);
96static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter);
97static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
98
99static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
100static irqreturn_t qlcnic_intr(int irq, void *data);
101static irqreturn_t qlcnic_msi_intr(int irq, void *data);
102static irqreturn_t qlcnic_msix_intr(int irq, void *data);
103
104static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
105static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
106static int qlcnic_start_firmware(struct qlcnic_adapter *);
107
108static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
109static void qlcnicvf_clear_ilb_mode(struct qlcnic_adapter *);
110static int qlcnicvf_set_ilb_mode(struct qlcnic_adapter *);
111static int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
112static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
113static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
114/* PCI Device ID Table */
115#define ENTRY(device) \
116 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
117 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
118
119#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
120
121static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
122 ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
123 {0,}
124};
125
126MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
127
128
129void
130qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
131 struct qlcnic_host_tx_ring *tx_ring)
132{
133 writel(tx_ring->producer, tx_ring->crb_cmd_producer);
134}
135
136static const u32 msi_tgt_status[8] = {
137 ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
138 ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
139 ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
140 ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
141};
142
143static const
144struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
145
146static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
147{
148 writel(0, sds_ring->crb_intr_mask);
149}
150
151static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
152{
153 struct qlcnic_adapter *adapter = sds_ring->adapter;
154
155 writel(0x1, sds_ring->crb_intr_mask);
156
157 if (!QLCNIC_IS_MSI_FAMILY(adapter))
158 writel(0xfbff, adapter->tgt_mask_reg);
159}
160
161static int
162qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
163{
164 int size = sizeof(struct qlcnic_host_sds_ring) * count;
165
166 recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
167
168 return (recv_ctx->sds_rings == NULL);
169}
170
171static void
172qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
173{
174 if (recv_ctx->sds_rings != NULL)
175 kfree(recv_ctx->sds_rings);
176
177 recv_ctx->sds_rings = NULL;
178}
179
180static int
181qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
182{
183 int ring;
184 struct qlcnic_host_sds_ring *sds_ring;
185 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
186
187 if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
188 return -ENOMEM;
189
190 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
191 sds_ring = &recv_ctx->sds_rings[ring];
192
193 if (ring == adapter->max_sds_rings - 1)
194 netif_napi_add(netdev, &sds_ring->napi, qlcnic_poll,
195 QLCNIC_NETDEV_WEIGHT/adapter->max_sds_rings);
196 else
197 netif_napi_add(netdev, &sds_ring->napi,
198 qlcnic_rx_poll, QLCNIC_NETDEV_WEIGHT*2);
199 }
200
201 return 0;
202}
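/*
 * The weights above split QLCNIC_NETDEV_WEIGHT between the rings: the last
 * SDS ring is polled by qlcnic_poll(), which is also the ring expected to
 * drive TX completion cleanup, while the remaining rings are RX-only and use
 * qlcnic_rx_poll() with a larger budget.
 */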
203
204static void
205qlcnic_napi_del(struct qlcnic_adapter *adapter)
206{
207 int ring;
208 struct qlcnic_host_sds_ring *sds_ring;
209 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
210
211 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
212 sds_ring = &recv_ctx->sds_rings[ring];
213 netif_napi_del(&sds_ring->napi);
214 }
215
216 qlcnic_free_sds_rings(&adapter->recv_ctx);
217}
218
219static void
220qlcnic_napi_enable(struct qlcnic_adapter *adapter)
221{
222 int ring;
223 struct qlcnic_host_sds_ring *sds_ring;
224 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
225
226 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
227 return;
228
229 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
230 sds_ring = &recv_ctx->sds_rings[ring];
231 napi_enable(&sds_ring->napi);
232 qlcnic_enable_int(sds_ring);
233 }
234}
235
236static void
237qlcnic_napi_disable(struct qlcnic_adapter *adapter)
238{
239 int ring;
240 struct qlcnic_host_sds_ring *sds_ring;
241 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
242
243 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
244 return;
245
246 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
247 sds_ring = &recv_ctx->sds_rings[ring];
248 qlcnic_disable_int(sds_ring);
249 napi_synchronize(&sds_ring->napi);
250 napi_disable(&sds_ring->napi);
251 }
252}
253
254static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
255{
256 memset(&adapter->stats, 0, sizeof(adapter->stats));
257}
258
259static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
260{
261 u32 val, data;
262
263 val = adapter->ahw.board_type;
264 if ((val == QLCNIC_BRDTYPE_P3_HMEZ) ||
265 (val == QLCNIC_BRDTYPE_P3_XG_LOM)) {
266 if (port_mode == QLCNIC_PORT_MODE_802_3_AP) {
267 data = QLCNIC_PORT_MODE_802_3_AP;
268 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
269 } else if (port_mode == QLCNIC_PORT_MODE_XG) {
270 data = QLCNIC_PORT_MODE_XG;
271 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
272 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_1G) {
273 data = QLCNIC_PORT_MODE_AUTO_NEG_1G;
274 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
275 } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_XG) {
276 data = QLCNIC_PORT_MODE_AUTO_NEG_XG;
277 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
278 } else {
279 data = QLCNIC_PORT_MODE_AUTO_NEG;
280 QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
281 }
282
283 if ((wol_port_mode != QLCNIC_PORT_MODE_802_3_AP) &&
284 (wol_port_mode != QLCNIC_PORT_MODE_XG) &&
285 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_1G) &&
286 (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_XG)) {
287 wol_port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
288 }
289 QLCWR32(adapter, QLCNIC_WOL_PORT_MODE, wol_port_mode);
290 }
291}
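/*
 * Only the P3 HMEZ and XG-LOM board types honour the port_mode module
 * parameter; any unrecognised value falls back to auto-negotiation, and the
 * same sanity check is applied to wol_port_mode before it is written to
 * QLCNIC_WOL_PORT_MODE.
 */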
292
293static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
294{
295 u32 control;
296 int pos;
297
298 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
299 if (pos) {
300 pci_read_config_dword(pdev, pos, &control);
301 if (enable)
302 control |= PCI_MSIX_FLAGS_ENABLE;
303 else
304 control = 0;
305 pci_write_config_dword(pdev, pos, control);
306 }
307}
308
309static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
310{
311 int i;
312
313 for (i = 0; i < count; i++)
314 adapter->msix_entries[i].entry = i;
315}
316
317static int
318qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
319{
320 u8 mac_addr[ETH_ALEN];
321 struct net_device *netdev = adapter->netdev;
322 struct pci_dev *pdev = adapter->pdev;
323
324 if (adapter->nic_ops->get_mac_addr(adapter, mac_addr) != 0)
325 return -EIO;
326
327 memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
328 memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
329 memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
330
331 /* set station address */
332
333 if (!is_valid_ether_addr(netdev->perm_addr))
334 dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
335 netdev->dev_addr);
336
337 return 0;
338}
339
340static int qlcnic_set_mac(struct net_device *netdev, void *p)
341{
342 struct qlcnic_adapter *adapter = netdev_priv(netdev);
343 struct sockaddr *addr = p;
344
345 if (!is_valid_ether_addr(addr->sa_data))
346 return -EINVAL;
347
348 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
349 netif_device_detach(netdev);
350 qlcnic_napi_disable(adapter);
351 }
352
353 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
354 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
355 qlcnic_set_multi(adapter->netdev);
356
357 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
358 netif_device_attach(netdev);
359 qlcnic_napi_enable(adapter);
360 }
361 return 0;
362}
363
364static const struct net_device_ops qlcnic_netdev_ops = {
365 .ndo_open = qlcnic_open,
366 .ndo_stop = qlcnic_close,
367 .ndo_start_xmit = qlcnic_xmit_frame,
368 .ndo_get_stats = qlcnic_get_stats,
369 .ndo_validate_addr = eth_validate_addr,
370 .ndo_set_multicast_list = qlcnic_set_multi,
371 .ndo_set_mac_address = qlcnic_set_mac,
372 .ndo_change_mtu = qlcnic_change_mtu,
373 .ndo_tx_timeout = qlcnic_tx_timeout,
374#ifdef CONFIG_NET_POLL_CONTROLLER
375 .ndo_poll_controller = qlcnic_poll_controller,
376#endif
377};
378
379static struct qlcnic_nic_template qlcnic_ops = {
380 .get_mac_addr = qlcnic_get_mac_address,
381 .config_bridged_mode = qlcnic_config_bridged_mode,
382 .config_led = qlcnic_config_led,
383 .set_ilb_mode = qlcnic_set_ilb_mode,
384 .clear_ilb_mode = qlcnic_clear_ilb_mode,
385 .start_firmware = qlcnic_start_firmware
386};
387
388static struct qlcnic_nic_template qlcnic_vf_ops = {
389 .get_mac_addr = qlcnic_get_mac_address,
390 .config_bridged_mode = qlcnicvf_config_bridged_mode,
391 .config_led = qlcnicvf_config_led,
392 .set_ilb_mode = qlcnicvf_set_ilb_mode,
393 .clear_ilb_mode = qlcnicvf_clear_ilb_mode,
394 .start_firmware = qlcnicvf_start_firmware
395};
396
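/*
 * Two operation tables: management and privileged functions use qlcnic_ops,
 * while non-privileged (virtual) functions are given the qlcnicvf_* variants.
 * The selection is made per PCI function in qlcnic_get_driver_mode() below,
 * based on the privilege level read from QLCNIC_DRV_OP_MODE.
 */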
397static void
398qlcnic_setup_intr(struct qlcnic_adapter *adapter)
399{
400 const struct qlcnic_legacy_intr_set *legacy_intrp;
401 struct pci_dev *pdev = adapter->pdev;
402 int err, num_msix;
403
404 if (adapter->rss_supported) {
405 num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
406 MSIX_ENTRIES_PER_ADAPTER : 2;
407 } else
408 num_msix = 1;
409
410 adapter->max_sds_rings = 1;
411
412 adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
413
414 legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
415
416 adapter->int_vec_bit = legacy_intrp->int_vec_bit;
417 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
418 legacy_intrp->tgt_status_reg);
419 adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
420 legacy_intrp->tgt_mask_reg);
421 adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
422
423 adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
424 ISR_INT_STATE_REG);
425
426 qlcnic_set_msix_bit(pdev, 0);
427
428 if (adapter->msix_supported) {
429
430 qlcnic_init_msix_entries(adapter, num_msix);
431 err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
432 if (err == 0) {
433 adapter->flags |= QLCNIC_MSIX_ENABLED;
434 qlcnic_set_msix_bit(pdev, 1);
435
436 if (adapter->rss_supported)
437 adapter->max_sds_rings = num_msix;
438
439 dev_info(&pdev->dev, "using msi-x interrupts\n");
440 return;
441 }
442
443 if (err > 0)
444 pci_disable_msix(pdev);
445
446 /* fall through for msi */
447 }
448
449 if (use_msi && !pci_enable_msi(pdev)) {
450 adapter->flags |= QLCNIC_MSI_ENABLED;
451 adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
452 msi_tgt_status[adapter->ahw.pci_func]);
453 dev_info(&pdev->dev, "using msi interrupts\n");
454 adapter->msix_entries[0].vector = pdev->irq;
455 return;
456 }
457
458 dev_info(&pdev->dev, "using legacy interrupts\n");
459 adapter->msix_entries[0].vector = pdev->irq;
460}
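/*
 * Interrupt setup falls back in order: MSI-X (when supported and
 * pci_enable_msix() succeeds), then MSI (when use_msi is set), then legacy
 * INTx. Only the MSI-X path can enable more than one SDS ring.
 */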
461
462static void
463qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
464{
465 if (adapter->flags & QLCNIC_MSIX_ENABLED)
466 pci_disable_msix(adapter->pdev);
467 if (adapter->flags & QLCNIC_MSI_ENABLED)
468 pci_disable_msi(adapter->pdev);
469}
470
471static void
472qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
473{
474 if (adapter->ahw.pci_base0 != NULL)
475 iounmap(adapter->ahw.pci_base0);
476}
477
478static int
479qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
480{
481 struct qlcnic_pci_info pci_info[QLCNIC_MAX_PCI_FUNC];
482 int i, ret = 0, err;
483 u8 pfn;
484
485 if (!adapter->npars)
486 adapter->npars = kzalloc(sizeof(struct qlcnic_npar_info) *
487 QLCNIC_MAX_PCI_FUNC, GFP_KERNEL);
488 if (!adapter->npars)
489 return -ENOMEM;
490
491 if (!adapter->eswitch)
492 adapter->eswitch = kzalloc(sizeof(struct qlcnic_eswitch) *
493 QLCNIC_NIU_MAX_XG_PORTS, GFP_KERNEL);
494 if (!adapter->eswitch) {
495 err = -ENOMEM;
496 goto err_eswitch;
497 }
498
499 ret = qlcnic_get_pci_info(adapter, pci_info);
500 if (!ret) {
501 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
502 pfn = pci_info[i].id;
503 if (pfn > QLCNIC_MAX_PCI_FUNC)
504 return QL_STATUS_INVALID_PARAM;
505 adapter->npars[pfn].active = pci_info[i].active;
506 adapter->npars[pfn].type = pci_info[i].type;
507 adapter->npars[pfn].phy_port = pci_info[i].default_port;
508 adapter->npars[pfn].mac_learning = DEFAULT_MAC_LEARN;
509 }
510
511 for (i = 0; i < QLCNIC_NIU_MAX_XG_PORTS; i++)
512 adapter->eswitch[i].flags |= QLCNIC_SWITCH_ENABLE;
513
514 return ret;
515 }
516
517 kfree(adapter->eswitch);
518 adapter->eswitch = NULL;
519err_eswitch:
520 kfree(adapter->npars);
521
522 return ret;
523}
524
525static int
526qlcnic_set_function_modes(struct qlcnic_adapter *adapter)
527{
528 u8 id;
529 u32 ref_count;
530 int i, ret = 1;
531 u32 data = QLCNIC_MGMT_FUNC;
532 void __iomem *priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
533
534 /* If other drivers are not in use set their privilege level */
535 ref_count = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
536 ret = qlcnic_api_lock(adapter);
537 if (ret)
538 goto err_lock;
539 if (QLC_DEV_CLR_REF_CNT(ref_count, adapter->ahw.pci_func))
540 goto err_npar;
541
542 if (qlcnic_config_npars) {
543 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
544 id = i;
545 if (adapter->npars[i].type != QLCNIC_TYPE_NIC ||
546 id == adapter->ahw.pci_func)
547 continue;
548 data |= (qlcnic_config_npars &
549 QLC_DEV_SET_DRV(0xf, id));
550 }
551 } else {
552 data = readl(priv_op);
553 data = (data & ~QLC_DEV_SET_DRV(0xf, adapter->ahw.pci_func)) |
554 (QLC_DEV_SET_DRV(QLCNIC_MGMT_FUNC,
555 adapter->ahw.pci_func));
556 }
557 writel(data, priv_op);
558err_npar:
559 qlcnic_api_unlock(adapter);
560err_lock:
561 return ret;
562}
563
564static u32
565qlcnic_get_driver_mode(struct qlcnic_adapter *adapter)
566{
567 void __iomem *msix_base_addr;
568 void __iomem *priv_op;
569 struct qlcnic_info nic_info;
570 u32 func;
571 u32 msix_base;
572 u32 op_mode, priv_level;
573
574 /* Determine FW API version */
575 adapter->fw_hal_version = readl(adapter->ahw.pci_base0 + QLCNIC_FW_API);
576
577 /* Find PCI function number */
578 pci_read_config_dword(adapter->pdev, QLCNIC_MSIX_TABLE_OFFSET, &func);
579 msix_base_addr = adapter->ahw.pci_base0 + QLCNIC_MSIX_BASE;
580 msix_base = readl(msix_base_addr);
581 func = (func - msix_base)/QLCNIC_MSIX_TBL_PGSIZE;
582 adapter->ahw.pci_func = func;
583
584 if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
585 adapter->capabilities = nic_info.capabilities;
586
587 if (adapter->capabilities & BIT_6)
588 adapter->flags |= QLCNIC_ESWITCH_ENABLED;
589 else
590 adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
591 }
592
593 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
594 adapter->nic_ops = &qlcnic_ops;
595 return adapter->fw_hal_version;
596 }
597
598 /* Determine function privilege level */
599 priv_op = adapter->ahw.pci_base0 + QLCNIC_DRV_OP_MODE;
600 op_mode = readl(priv_op);
601 if (op_mode == QLC_DEV_DRV_DEFAULT)
602 priv_level = QLCNIC_MGMT_FUNC;
603 else
604 priv_level = QLC_DEV_GET_DRV(op_mode, adapter->ahw.pci_func);
605
606 switch (priv_level) {
607 case QLCNIC_MGMT_FUNC:
608 adapter->op_mode = QLCNIC_MGMT_FUNC;
609 adapter->nic_ops = &qlcnic_ops;
610 qlcnic_init_pci_info(adapter);
611 /* Set privilege level for other functions */
612 qlcnic_set_function_modes(adapter);
613 dev_info(&adapter->pdev->dev,
614 "HAL Version: %d, Management function\n",
615 adapter->fw_hal_version);
616 break;
617 case QLCNIC_PRIV_FUNC:
618 adapter->op_mode = QLCNIC_PRIV_FUNC;
619 dev_info(&adapter->pdev->dev,
620 "HAL Version: %d, Privileged function\n",
621 adapter->fw_hal_version);
622 adapter->nic_ops = &qlcnic_ops;
623 break;
624 case QLCNIC_NON_PRIV_FUNC:
625 adapter->op_mode = QLCNIC_NON_PRIV_FUNC;
626 dev_info(&adapter->pdev->dev,
627 "HAL Version: %d Non Privileged function\n",
628 adapter->fw_hal_version);
629 adapter->nic_ops = &qlcnic_vf_ops;
630 break;
631 default:
632 dev_info(&adapter->pdev->dev, "Unknown function mode: %d\n",
633 priv_level);
634 return 0;
635 }
636 return adapter->fw_hal_version;
637}
638
639static int
640qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
641{
642 void __iomem *mem_ptr0 = NULL;
643 resource_size_t mem_base;
644 unsigned long mem_len, pci_len0 = 0;
645
646 struct pci_dev *pdev = adapter->pdev;
647
648 /* remap phys address */
649 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
650 mem_len = pci_resource_len(pdev, 0);
651
652 if (mem_len == QLCNIC_PCI_2MB_SIZE) {
653
654 mem_ptr0 = pci_ioremap_bar(pdev, 0);
655 if (mem_ptr0 == NULL) {
656 dev_err(&pdev->dev, "failed to map PCI bar 0\n");
657 return -EIO;
658 }
659 pci_len0 = mem_len;
660 } else {
661 return -EIO;
662 }
663
664 dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
665
666 adapter->ahw.pci_base0 = mem_ptr0;
667 adapter->ahw.pci_len0 = pci_len0;
668
669 if (!qlcnic_get_driver_mode(adapter)) {
670 iounmap(adapter->ahw.pci_base0);
671 return -EIO;
672 }
673
674 adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
675 QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(adapter->ahw.pci_func)));
676
677 return 0;
678}
679
680static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
681{
682 struct pci_dev *pdev = adapter->pdev;
683 int i, found = 0;
684
685 for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
686 if (qlcnic_boards[i].vendor == pdev->vendor &&
687 qlcnic_boards[i].device == pdev->device &&
688 qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
689 qlcnic_boards[i].sub_device == pdev->subsystem_device) {
690 sprintf(name, "%pM: %s" ,
691 adapter->mac_addr,
692 qlcnic_boards[i].short_name);
693 found = 1;
694 break;
695 }
696
697 }
698
699 if (!found)
700 sprintf(name, "%pM Gigabit Ethernet", adapter->mac_addr);
701}
702
703static void
704qlcnic_check_options(struct qlcnic_adapter *adapter)
705{
706 u32 fw_major, fw_minor, fw_build;
707 char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
708 char serial_num[32];
709 int i, offset, val;
710 int *ptr32;
711 struct pci_dev *pdev = adapter->pdev;
712 struct qlcnic_info nic_info;
713 adapter->driver_mismatch = 0;
714
715 ptr32 = (int *)&serial_num;
716 offset = QLCNIC_FW_SERIAL_NUM_OFFSET;
717 for (i = 0; i < 8; i++) {
718 if (qlcnic_rom_fast_read(adapter, offset, &val) == -1) {
719 dev_err(&pdev->dev, "error reading board info\n");
720 adapter->driver_mismatch = 1;
721 return;
722 }
723 ptr32[i] = cpu_to_le32(val);
724 offset += sizeof(u32);
725 }
726
727 fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
728 fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
729 fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
730
731 adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
732
733 if (adapter->portnum == 0) {
734 get_brd_name(adapter, brd_name);
735
736 pr_info("%s: %s Board Chip rev 0x%x\n",
737 module_name(THIS_MODULE),
738 brd_name, adapter->ahw.revision_id);
739 }
740
741 dev_info(&pdev->dev, "firmware v%d.%d.%d\n",
742 fw_major, fw_minor, fw_build);
743
744 adapter->flags &= ~QLCNIC_LRO_ENABLED;
745
746 if (adapter->ahw.port_type == QLCNIC_XGBE) {
747 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
748 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
749 } else if (adapter->ahw.port_type == QLCNIC_GBE) {
750 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
751 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
752 }
753
754 if (!qlcnic_get_nic_info(adapter, &nic_info, adapter->ahw.pci_func)) {
755 adapter->physical_port = nic_info.phys_port;
756 adapter->switch_mode = nic_info.switch_mode;
757 adapter->max_tx_ques = nic_info.max_tx_ques;
758 adapter->max_rx_ques = nic_info.max_rx_ques;
759 adapter->capabilities = nic_info.capabilities;
760 adapter->max_mac_filters = nic_info.max_mac_filters;
761 adapter->max_mtu = nic_info.max_mtu;
762 }
763
764 adapter->msix_supported = !!use_msi_x;
765 adapter->rss_supported = !!use_msi_x;
766
767 adapter->num_txd = MAX_CMD_DESCRIPTORS;
768
769 adapter->max_rds_rings = 2;
770}
771
772static int
773qlcnic_start_firmware(struct qlcnic_adapter *adapter)
774{
775 int val, err, first_boot;
776
777 err = qlcnic_can_start_firmware(adapter);
778 if (err < 0)
779 return err;
780 else if (!err)
781 goto wait_init;
782
783 first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
784 if (first_boot == 0x55555555)
785 /* This is the first boot after power up */
786 QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
787
788 if (load_fw_file)
789 qlcnic_request_firmware(adapter);
790 else {
791 if (qlcnic_check_flash_fw_ver(adapter))
792 goto err_out;
793
794 adapter->fw_type = QLCNIC_FLASH_ROMIMAGE;
795 }
796
797 err = qlcnic_need_fw_reset(adapter);
798 if (err < 0)
799 goto err_out;
800 if (err == 0)
801 goto wait_init;
802
803 if (first_boot != 0x55555555) {
804 QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
805 QLCWR32(adapter, CRB_RCVPEG_STATE, 0);
806 qlcnic_pinit_from_rom(adapter);
807 msleep(1);
808 }
809
810 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
811 QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
812
813 qlcnic_set_port_mode(adapter);
814
815 err = qlcnic_load_firmware(adapter);
816 if (err)
817 goto err_out;
818
819 qlcnic_release_firmware(adapter);
820
821 val = (_QLCNIC_LINUX_MAJOR << 16)
822 | ((_QLCNIC_LINUX_MINOR << 8))
823 | (_QLCNIC_LINUX_SUBVERSION);
824 QLCWR32(adapter, CRB_DRIVER_VERSION, val);
825
826wait_init:
827 /* Handshake with the card before we register the devices. */
828 err = qlcnic_init_firmware(adapter);
829 if (err)
830 goto err_out;
831
832 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
833 qlcnic_idc_debug_info(adapter, 1);
834
835 qlcnic_check_options(adapter);
836
837 if (adapter->flags & QLCNIC_ESWITCH_ENABLED &&
838 adapter->op_mode != QLCNIC_NON_PRIV_FUNC)
839 qlcnic_dev_set_npar_ready(adapter);
840
841 adapter->need_fw_reset = 0;
842
843 qlcnic_release_firmware(adapter);
844 return 0;
845
846err_out:
847 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_FAILED);
848 dev_err(&adapter->pdev->dev, "Device state set to failed\n");
849 qlcnic_release_firmware(adapter);
850 return err;
851}
852
853static int
854qlcnic_request_irq(struct qlcnic_adapter *adapter)
855{
856 irq_handler_t handler;
857 struct qlcnic_host_sds_ring *sds_ring;
858 int err, ring;
859
860 unsigned long flags = 0;
861 struct net_device *netdev = adapter->netdev;
862 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
863
864 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
865 handler = qlcnic_tmp_intr;
866 if (!QLCNIC_IS_MSI_FAMILY(adapter))
867 flags |= IRQF_SHARED;
868
869 } else {
870 if (adapter->flags & QLCNIC_MSIX_ENABLED)
871 handler = qlcnic_msix_intr;
872 else if (adapter->flags & QLCNIC_MSI_ENABLED)
873 handler = qlcnic_msi_intr;
874 else {
875 flags |= IRQF_SHARED;
876 handler = qlcnic_intr;
877 }
878 }
879 adapter->irq = netdev->irq;
880
881 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
882 sds_ring = &recv_ctx->sds_rings[ring];
883 sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
884 err = request_irq(sds_ring->irq, handler,
885 flags, sds_ring->name, sds_ring);
886 if (err)
887 return err;
888 }
889
890 return 0;
891}
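/*
 * The handler chosen above depends on context: qlcnic_tmp_intr for the
 * diagnostic interrupt test, otherwise qlcnic_msix_intr, qlcnic_msi_intr or
 * the shared legacy qlcnic_intr, matching whatever qlcnic_setup_intr()
 * negotiated earlier.
 */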
892
893static void
894qlcnic_free_irq(struct qlcnic_adapter *adapter)
895{
896 int ring;
897 struct qlcnic_host_sds_ring *sds_ring;
898
899 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
900
901 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
902 sds_ring = &recv_ctx->sds_rings[ring];
903 free_irq(sds_ring->irq, sds_ring);
904 }
905}
906
907static void
908qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
909{
910 adapter->coal.flags = QLCNIC_INTR_DEFAULT;
911 adapter->coal.normal.data.rx_time_us =
912 QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
913 adapter->coal.normal.data.rx_packets =
914 QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
915 adapter->coal.normal.data.tx_time_us =
916 QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
917 adapter->coal.normal.data.tx_packets =
918 QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
919}
920
921static int
922__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
923{
924 int ring;
925 struct qlcnic_host_rds_ring *rds_ring;
926
927 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
928 return -EIO;
929
930 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
931 return 0;
932
933 if (qlcnic_fw_create_ctx(adapter))
934 return -EIO;
935
936 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
937 rds_ring = &adapter->recv_ctx.rds_rings[ring];
938 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
939 }
940
941 qlcnic_set_multi(netdev);
942 qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
943
944 adapter->ahw.linkup = 0;
945
946 if (adapter->max_sds_rings > 1)
947 qlcnic_config_rss(adapter, 1);
948
949 qlcnic_config_intr_coalesce(adapter);
950
951 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
952 qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
953
954 qlcnic_napi_enable(adapter);
955
956 qlcnic_linkevent_request(adapter, 1);
957
958 adapter->reset_context = 0;
959 set_bit(__QLCNIC_DEV_UP, &adapter->state);
960 return 0;
961}
962
963/* Used during resume and firmware recovery. */
964
965static int
966qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
967{
968 int err = 0;
969
970 rtnl_lock();
971 if (netif_running(netdev))
972 err = __qlcnic_up(adapter, netdev);
973 rtnl_unlock();
974
975 return err;
976}
977
978static void
979__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
980{
981 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
982 return;
983
984 if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
985 return;
986
987 smp_mb();
988 spin_lock(&adapter->tx_clean_lock);
989 netif_carrier_off(netdev);
990 netif_tx_disable(netdev);
991
992 qlcnic_free_mac_list(adapter);
993
994 qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
995
996 qlcnic_napi_disable(adapter);
997
998 qlcnic_fw_destroy_ctx(adapter);
999
1000 qlcnic_reset_rx_buffers_list(adapter);
1001 qlcnic_release_tx_buffers(adapter);
1002 spin_unlock(&adapter->tx_clean_lock);
1003}
1004
1005/* Used during suspend and firmware recovery. */
1006
1007static void
1008qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
1009{
1010 rtnl_lock();
1011 if (netif_running(netdev))
1012 __qlcnic_down(adapter, netdev);
1013 rtnl_unlock();
1014
1015}
1016
1017static int
1018qlcnic_attach(struct qlcnic_adapter *adapter)
1019{
1020 struct net_device *netdev = adapter->netdev;
1021 struct pci_dev *pdev = adapter->pdev;
1022 int err;
1023
1024 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
1025 return 0;
1026
1027 err = qlcnic_napi_add(adapter, netdev);
1028 if (err)
1029 return err;
1030
1031 err = qlcnic_alloc_sw_resources(adapter);
1032 if (err) {
1033 dev_err(&pdev->dev, "Error in setting sw resources\n");
1034 goto err_out_napi_del;
1035 }
1036
1037 err = qlcnic_alloc_hw_resources(adapter);
1038 if (err) {
1039 dev_err(&pdev->dev, "Error in setting hw resources\n");
1040 goto err_out_free_sw;
1041 }
1042
1043 err = qlcnic_request_irq(adapter);
1044 if (err) {
1045 dev_err(&pdev->dev, "failed to setup interrupt\n");
1046 goto err_out_free_hw;
1047 }
1048
1049 qlcnic_init_coalesce_defaults(adapter);
1050
1051 qlcnic_create_sysfs_entries(adapter);
1052
1053 adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
1054 return 0;
1055
1056err_out_free_hw:
1057 qlcnic_free_hw_resources(adapter);
1058err_out_free_sw:
1059 qlcnic_free_sw_resources(adapter);
1060err_out_napi_del:
1061 qlcnic_napi_del(adapter);
1062 return err;
1063}
1064
1065static void
1066qlcnic_detach(struct qlcnic_adapter *adapter)
1067{
1068 if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
1069 return;
1070
1071 qlcnic_remove_sysfs_entries(adapter);
1072
1073 qlcnic_free_hw_resources(adapter);
1074 qlcnic_release_rx_buffers(adapter);
1075 qlcnic_free_irq(adapter);
1076 qlcnic_napi_del(adapter);
1077 qlcnic_free_sw_resources(adapter);
1078
1079 adapter->is_up = 0;
1080}
1081
1082void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
1083{
1084 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1085 struct qlcnic_host_sds_ring *sds_ring;
1086 int ring;
1087
1088 clear_bit(__QLCNIC_DEV_UP, &adapter->state);
1089 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1090 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1091 sds_ring = &adapter->recv_ctx.sds_rings[ring];
1092 qlcnic_disable_int(sds_ring);
1093 }
1094 }
1095
1096 qlcnic_fw_destroy_ctx(adapter);
1097
1098 qlcnic_detach(adapter);
1099
1100 adapter->diag_test = 0;
1101 adapter->max_sds_rings = max_sds_rings;
1102
1103 if (qlcnic_attach(adapter))
1104 goto out;
1105
1106 if (netif_running(netdev))
1107 __qlcnic_up(adapter, netdev);
1108out:
1109 netif_device_attach(netdev);
1110}
1111
1112int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
1113{
1114 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1115 struct qlcnic_host_sds_ring *sds_ring;
1116 struct qlcnic_host_rds_ring *rds_ring;
1117 int ring;
1118 int ret;
1119
1120 netif_device_detach(netdev);
1121
1122 if (netif_running(netdev))
1123 __qlcnic_down(adapter, netdev);
1124
1125 qlcnic_detach(adapter);
1126
1127 adapter->max_sds_rings = 1;
1128 adapter->diag_test = test;
1129
1130 ret = qlcnic_attach(adapter);
1131 if (ret) {
1132 netif_device_attach(netdev);
1133 return ret;
1134 }
1135
1136 ret = qlcnic_fw_create_ctx(adapter);
1137 if (ret) {
1138 qlcnic_detach(adapter);
1139 return ret;
1140 }
1141
1142 for (ring = 0; ring < adapter->max_rds_rings; ring++) {
1143 rds_ring = &adapter->recv_ctx.rds_rings[ring];
1144 qlcnic_post_rx_buffers(adapter, ring, rds_ring);
1145 }
1146
1147 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
1148 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
1149 sds_ring = &adapter->recv_ctx.sds_rings[ring];
1150 qlcnic_enable_int(sds_ring);
1151 }
1152 }
1153 set_bit(__QLCNIC_DEV_UP, &adapter->state);
1154
1155 return 0;
1156}
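/*
 * Rough diagnostic flow: qlcnic_diag_alloc_res() tears the interface down to
 * a single SDS ring context for the requested test, and
 * qlcnic_diag_free_res() above restores max_sds_rings and re-attaches the
 * netdev once the test completes.
 */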
1157
1158/* Reset context in hardware only */
1159static int
1160qlcnic_reset_hw_context(struct qlcnic_adapter *adapter)
1161{
1162 struct net_device *netdev = adapter->netdev;
1163
1164 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1165 return -EBUSY;
1166
1167 netif_device_detach(netdev);
1168
1169 qlcnic_down(adapter, netdev);
1170
1171 qlcnic_up(adapter, netdev);
1172
1173 netif_device_attach(netdev);
1174
1175 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1176 return 0;
1177}
1178
1179int
1180qlcnic_reset_context(struct qlcnic_adapter *adapter)
1181{
1182 int err = 0;
1183 struct net_device *netdev = adapter->netdev;
1184
1185 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1186 return -EBUSY;
1187
1188 if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
1189
1190 netif_device_detach(netdev);
1191
1192 if (netif_running(netdev))
1193 __qlcnic_down(adapter, netdev);
1194
1195 qlcnic_detach(adapter);
1196
1197 if (netif_running(netdev)) {
1198 err = qlcnic_attach(adapter);
1199 if (!err)
1200 __qlcnic_up(adapter, netdev);
1201 }
1202
1203 netif_device_attach(netdev);
1204 }
1205
1206 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1207 return err;
1208}
1209
1210static int
1211qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
1212 struct net_device *netdev, u8 pci_using_dac)
1213{
1214 int err;
1215 struct pci_dev *pdev = adapter->pdev;
1216
1217 adapter->rx_csum = 1;
1218 adapter->mc_enabled = 0;
1219 adapter->max_mc_count = 38;
1220
1221 netdev->netdev_ops = &qlcnic_netdev_ops;
1222 netdev->watchdog_timeo = 5*HZ;
1223
1224 qlcnic_change_mtu(netdev, netdev->mtu);
1225
1226 SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
1227
1228 netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1229 NETIF_F_IPV6_CSUM | NETIF_F_GRO);
1230 netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM |
1231 NETIF_F_IPV6_CSUM);
1232
1233 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_TSO) {
1234 netdev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
1235 netdev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
1236 }
1237
1238 if (pci_using_dac) {
1239 netdev->features |= NETIF_F_HIGHDMA;
1240 netdev->vlan_features |= NETIF_F_HIGHDMA;
1241 }
1242
1243 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
1244 netdev->features |= (NETIF_F_HW_VLAN_TX);
1245
1246 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
1247 netdev->features |= NETIF_F_LRO;
1248
1249 netdev->irq = adapter->msix_entries[0].vector;
1250
1251 if (qlcnic_read_mac_addr(adapter))
1252 dev_warn(&pdev->dev, "failed to read mac addr\n");
1253
1254 netif_carrier_off(netdev);
1255 netif_stop_queue(netdev);
1256
1257 err = register_netdev(netdev);
1258 if (err) {
1259 dev_err(&pdev->dev, "failed to register net device\n");
1260 return err;
1261 }
1262
1263 return 0;
1264}
1265
1266static int qlcnic_set_dma_mask(struct pci_dev *pdev, u8 *pci_using_dac)
1267{
1268 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1269 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
1270 *pci_using_dac = 1;
1271 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
1272 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
1273 *pci_using_dac = 0;
1274 else {
1275 dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n");
1276 return -EIO;
1277 }
1278
1279 return 0;
1280}
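/*
 * DMA masking: a 64-bit mask is attempted first and pci_using_dac records the
 * result; qlcnic_setup_netdev() advertises NETIF_F_HIGHDMA only when the
 * 64-bit mask was accepted.
 */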
1281
1282static int __devinit
1283qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1284{
1285 struct net_device *netdev = NULL;
1286 struct qlcnic_adapter *adapter = NULL;
1287 int err;
1288 uint8_t revision_id;
1289 uint8_t pci_using_dac;
1290
1291 err = pci_enable_device(pdev);
1292 if (err)
1293 return err;
1294
1295 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
1296 err = -ENODEV;
1297 goto err_out_disable_pdev;
1298 }
1299
1300 err = qlcnic_set_dma_mask(pdev, &pci_using_dac);
1301 if (err)
1302 goto err_out_disable_pdev;
1303
1304 err = pci_request_regions(pdev, qlcnic_driver_name);
1305 if (err)
1306 goto err_out_disable_pdev;
1307
1308 pci_set_master(pdev);
1309
1310 netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
1311 if (!netdev) {
1312 dev_err(&pdev->dev, "failed to allocate net_device\n");
1313 err = -ENOMEM;
1314 goto err_out_free_res;
1315 }
1316
1317 SET_NETDEV_DEV(netdev, &pdev->dev);
1318
1319 adapter = netdev_priv(netdev);
1320 adapter->netdev = netdev;
1321 adapter->pdev = pdev;
1322 adapter->dev_rst_time = jiffies;
1323
1324 revision_id = pdev->revision;
1325 adapter->ahw.revision_id = revision_id;
1326
1327 rwlock_init(&adapter->ahw.crb_lock);
1328 mutex_init(&adapter->ahw.mem_lock);
1329
1330 spin_lock_init(&adapter->tx_clean_lock);
1331 INIT_LIST_HEAD(&adapter->mac_list);
1332
1333 err = qlcnic_setup_pci_map(adapter);
1334 if (err)
1335 goto err_out_free_netdev;
1336
1337 /* This will be reset for mezz cards */
1338 adapter->portnum = adapter->ahw.pci_func;
1339
1340 err = qlcnic_get_board_info(adapter);
1341 if (err) {
1342 dev_err(&pdev->dev, "Error getting board config info.\n");
1343 goto err_out_iounmap;
1344 }
1345
1346 if (qlcnic_read_mac_addr(adapter))
1347 dev_warn(&pdev->dev, "failed to read mac addr\n");
1348
1349 if (qlcnic_setup_idc_param(adapter))
1350 goto err_out_iounmap;
1351
1352 err = adapter->nic_ops->start_firmware(adapter);
1353 if (err) {
1354 dev_err(&pdev->dev, "Loading fw failed. Please reboot\n");
1355 goto err_out_decr_ref;
1356 }
1357
1358 qlcnic_clear_stats(adapter);
1359
1360 qlcnic_setup_intr(adapter);
1361
1362 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
1363 if (err)
1364 goto err_out_disable_msi;
1365
1366 pci_set_drvdata(pdev, adapter);
1367
1368 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1369
1370 switch (adapter->ahw.port_type) {
1371 case QLCNIC_GBE:
1372 dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
1373 adapter->netdev->name);
1374 break;
1375 case QLCNIC_XGBE:
1376 dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
1377 adapter->netdev->name);
1378 break;
1379 }
1380
1381 qlcnic_create_diag_entries(adapter);
1382
1383 return 0;
1384
1385err_out_disable_msi:
1386 qlcnic_teardown_intr(adapter);
1387
1388err_out_decr_ref:
1389 qlcnic_clr_all_drv_state(adapter);
1390
1391err_out_iounmap:
1392 qlcnic_cleanup_pci_map(adapter);
1393
1394err_out_free_netdev:
1395 free_netdev(netdev);
1396
1397err_out_free_res:
1398 pci_release_regions(pdev);
1399
1400err_out_disable_pdev:
1401 pci_set_drvdata(pdev, NULL);
1402 pci_disable_device(pdev);
1403 return err;
1404}
1405
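/*
 * Probe sequence in brief: enable the PCI device, set the DMA mask, map
 * BAR 0, determine the driver mode and privilege level, start firmware via
 * the selected nic_ops, set up interrupts, then register the net_device and
 * create the diagnostic sysfs entries.
 */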
1406static void __devexit qlcnic_remove(struct pci_dev *pdev)
1407{
1408 struct qlcnic_adapter *adapter;
1409 struct net_device *netdev;
1410
1411 adapter = pci_get_drvdata(pdev);
1412 if (adapter == NULL)
1413 return;
1414
1415 netdev = adapter->netdev;
1416
1417 qlcnic_cancel_fw_work(adapter);
1418
1419 unregister_netdev(netdev);
1420
1421 qlcnic_detach(adapter);
1422
1423 if (adapter->npars != NULL)
1424 kfree(adapter->npars);
1425 if (adapter->eswitch != NULL)
1426 kfree(adapter->eswitch);
1427
1428 qlcnic_clr_all_drv_state(adapter);
1429
1430 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1431
1432 qlcnic_teardown_intr(adapter);
1433
1434 qlcnic_remove_diag_entries(adapter);
1435
1436 qlcnic_cleanup_pci_map(adapter);
1437
1438 qlcnic_release_firmware(adapter);
1439
1440 pci_release_regions(pdev);
1441 pci_disable_device(pdev);
1442 pci_set_drvdata(pdev, NULL);
1443
1444 free_netdev(netdev);
1445}
1446static int __qlcnic_shutdown(struct pci_dev *pdev)
1447{
1448 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1449 struct net_device *netdev = adapter->netdev;
1450 int retval;
1451
1452 netif_device_detach(netdev);
1453
1454 qlcnic_cancel_fw_work(adapter);
1455
1456 if (netif_running(netdev))
1457 qlcnic_down(adapter, netdev);
1458
1459 qlcnic_clr_all_drv_state(adapter);
1460
1461 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1462
1463 retval = pci_save_state(pdev);
1464 if (retval)
1465 return retval;
1466
1467 if (qlcnic_wol_supported(adapter)) {
1468 pci_enable_wake(pdev, PCI_D3cold, 1);
1469 pci_enable_wake(pdev, PCI_D3hot, 1);
1470 }
1471
1472 return 0;
1473}
1474
1475static void qlcnic_shutdown(struct pci_dev *pdev)
1476{
1477 if (__qlcnic_shutdown(pdev))
1478 return;
1479
1480 pci_disable_device(pdev);
1481}
1482
1483#ifdef CONFIG_PM
1484static int
1485qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
1486{
1487 int retval;
1488
1489 retval = __qlcnic_shutdown(pdev);
1490 if (retval)
1491 return retval;
1492
1493 pci_set_power_state(pdev, pci_choose_state(pdev, state));
1494 return 0;
1495}
1496
1497static int
1498qlcnic_resume(struct pci_dev *pdev)
1499{
1500 struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
1501 struct net_device *netdev = adapter->netdev;
1502 int err;
1503
1504 err = pci_enable_device(pdev);
1505 if (err)
1506 return err;
1507
1508 pci_set_power_state(pdev, PCI_D0);
1509 pci_set_master(pdev);
1510 pci_restore_state(pdev);
1511
1512 err = adapter->nic_ops->start_firmware(adapter);
1513 if (err) {
1514 dev_err(&pdev->dev, "failed to start firmware\n");
1515 return err;
1516 }
1517
1518 if (netif_running(netdev)) {
1519 err = qlcnic_up(adapter, netdev);
1520 if (err)
1521 goto done;
1522
1523 qlcnic_config_indev_addr(netdev, NETDEV_UP);
1524 }
1525done:
1526 netif_device_attach(netdev);
1527 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
1528 return 0;
1529}
1530#endif
1531
1532static int qlcnic_open(struct net_device *netdev)
1533{
1534 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1535 int err;
1536
1537 if (adapter->driver_mismatch)
1538 return -EIO;
1539
1540 err = qlcnic_attach(adapter);
1541 if (err)
1542 return err;
1543
1544 err = __qlcnic_up(adapter, netdev);
1545 if (err)
1546 goto err_out;
1547
1548 netif_start_queue(netdev);
1549
1550 return 0;
1551
1552err_out:
1553 qlcnic_detach(adapter);
1554 return err;
1555}
1556
1557/*
1558 * qlcnic_close - Disables a network interface entry point
1559 */
1560static int qlcnic_close(struct net_device *netdev)
1561{
1562 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1563
1564 __qlcnic_down(adapter, netdev);
1565 return 0;
1566}
1567
1568static void
1569qlcnic_tso_check(struct net_device *netdev,
1570 struct qlcnic_host_tx_ring *tx_ring,
1571 struct cmd_desc_type0 *first_desc,
1572 struct sk_buff *skb)
1573{
1574 u8 opcode = TX_ETHER_PKT;
1575 __be16 protocol = skb->protocol;
1576 u16 flags = 0, vid = 0;
1577 int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
1578 struct cmd_desc_type0 *hwdesc;
1579 struct vlan_ethhdr *vh;
1580 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1581 u32 producer = tx_ring->producer;
1582
1583 if (protocol == cpu_to_be16(ETH_P_8021Q)) {
1584
1585 vh = (struct vlan_ethhdr *)skb->data;
1586 protocol = vh->h_vlan_encapsulated_proto;
1587 flags = FLAGS_VLAN_TAGGED;
1588
1589 } else if (vlan_tx_tag_present(skb)) {
1590
1591 flags = FLAGS_VLAN_OOB;
1592 vid = vlan_tx_tag_get(skb);
1593 qlcnic_set_tx_vlan_tci(first_desc, vid);
1594 vlan_oob = 1;
1595 }
1596
1597 if (*(skb->data) & BIT_0) {
1598 flags |= BIT_0;
1599 memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
1600 }
1601
1602 if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
1603 skb_shinfo(skb)->gso_size > 0) {
1604
1605 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1606
1607 first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
1608 first_desc->total_hdr_length = hdr_len;
1609 if (vlan_oob) {
1610 first_desc->total_hdr_length += VLAN_HLEN;
1611 first_desc->tcp_hdr_offset = VLAN_HLEN;
1612 first_desc->ip_hdr_offset = VLAN_HLEN;
1613 /* Only in case of TSO on vlan device */
1614 flags |= FLAGS_VLAN_TAGGED;
1615 }
1616
1617 opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
1618 TX_TCP_LSO6 : TX_TCP_LSO;
1619 tso = 1;
1620
1621 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
1622 u8 l4proto;
1623
1624 if (protocol == cpu_to_be16(ETH_P_IP)) {
1625 l4proto = ip_hdr(skb)->protocol;
1626
1627 if (l4proto == IPPROTO_TCP)
1628 opcode = TX_TCP_PKT;
1629 else if (l4proto == IPPROTO_UDP)
1630 opcode = TX_UDP_PKT;
1631 } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
1632 l4proto = ipv6_hdr(skb)->nexthdr;
1633
1634 if (l4proto == IPPROTO_TCP)
1635 opcode = TX_TCPV6_PKT;
1636 else if (l4proto == IPPROTO_UDP)
1637 opcode = TX_UDPV6_PKT;
1638 }
1639 }
1640
1641 first_desc->tcp_hdr_offset += skb_transport_offset(skb);
1642 first_desc->ip_hdr_offset += skb_network_offset(skb);
1643 qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
1644
1645 if (!tso)
1646 return;
1647
1648 /* For LSO, we need to copy the MAC/IP/TCP headers into
1649 * the descriptor ring
1650 */
1651 copied = 0;
1652 offset = 2;
1653
1654 if (vlan_oob) {
1655 /* Create a TSO vlan header template for firmware */
1656
1657 hwdesc = &tx_ring->desc_head[producer];
1658 tx_ring->cmd_buf_arr[producer].skb = NULL;
1659
1660 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1661 hdr_len + VLAN_HLEN);
1662
1663 vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
1664 skb_copy_from_linear_data(skb, vh, 12);
1665 vh->h_vlan_proto = htons(ETH_P_8021Q);
1666 vh->h_vlan_TCI = htons(vid);
1667 skb_copy_from_linear_data_offset(skb, 12,
1668 (char *)vh + 16, copy_len - 16);
1669
1670 copied = copy_len - VLAN_HLEN;
1671 offset = 0;
1672
1673 producer = get_next_index(producer, tx_ring->num_desc);
1674 }
1675
1676 while (copied < hdr_len) {
1677
1678 copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
1679 (hdr_len - copied));
1680
1681 hwdesc = &tx_ring->desc_head[producer];
1682 tx_ring->cmd_buf_arr[producer].skb = NULL;
1683
1684 skb_copy_from_linear_data_offset(skb, copied,
1685 (char *)hwdesc + offset, copy_len);
1686
1687 copied += copy_len;
1688 offset = 0;
1689
1690 producer = get_next_index(producer, tx_ring->num_desc);
1691 }
1692
1693 tx_ring->producer = producer;
1694 barrier();
1695 adapter->stats.lso_frames++;
1696}
1697
1698static int
1699qlcnic_map_tx_skb(struct pci_dev *pdev,
1700 struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
1701{
1702 struct qlcnic_skb_frag *nf;
1703 struct skb_frag_struct *frag;
1704 int i, nr_frags;
1705 dma_addr_t map;
1706
1707 nr_frags = skb_shinfo(skb)->nr_frags;
1708 nf = &pbuf->frag_array[0];
1709
1710 map = pci_map_single(pdev, skb->data,
1711 skb_headlen(skb), PCI_DMA_TODEVICE);
1712 if (pci_dma_mapping_error(pdev, map))
1713 goto out_err;
1714
1715 nf->dma = map;
1716 nf->length = skb_headlen(skb);
1717
1718 for (i = 0; i < nr_frags; i++) {
1719 frag = &skb_shinfo(skb)->frags[i];
1720 nf = &pbuf->frag_array[i+1];
1721
1722 map = pci_map_page(pdev, frag->page, frag->page_offset,
1723 frag->size, PCI_DMA_TODEVICE);
1724 if (pci_dma_mapping_error(pdev, map))
1725 goto unwind;
1726
1727 nf->dma = map;
1728 nf->length = frag->size;
1729 }
1730
1731 return 0;
1732
1733unwind:
1734 while (--i >= 0) {
1735 nf = &pbuf->frag_array[i+1];
1736 pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
1737 }
1738
1739 nf = &pbuf->frag_array[0];
1740 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
1741
1742out_err:
1743 return -ENOMEM;
1744}
1745
1746static inline void
1747qlcnic_clear_cmddesc(u64 *desc)
1748{
1749 desc[0] = 0ULL;
1750 desc[2] = 0ULL;
1751}
1752
1753netdev_tx_t
1754qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1755{
1756 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1757 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
1758 struct qlcnic_cmd_buffer *pbuf;
1759 struct qlcnic_skb_frag *buffrag;
1760 struct cmd_desc_type0 *hwdesc, *first_desc;
1761 struct pci_dev *pdev;
1762 int i, k;
1763
1764 u32 producer;
1765 int frag_count, no_of_desc;
1766 u32 num_txd = tx_ring->num_desc;
1767
1768 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
1769 netif_stop_queue(netdev);
1770 return NETDEV_TX_BUSY;
1771 }
1772
af19b491
AKS
1773 frag_count = skb_shinfo(skb)->nr_frags + 1;
1774
1775 /* 4 fragments per cmd desc */
1776 no_of_desc = (frag_count + 3) >> 2;
1777
1778 if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
1779 netif_stop_queue(netdev);
1780 smp_mb();
1781 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
1782 netif_start_queue(netdev);
1783 else {
1784 adapter->stats.xmit_off++;
1785 return NETDEV_TX_BUSY;
1786 }
1787 }
1788
1789 producer = tx_ring->producer;
1790 pbuf = &tx_ring->cmd_buf_arr[producer];
1791
1792 pdev = adapter->pdev;
1793
1794 if (qlcnic_map_tx_skb(pdev, skb, pbuf)) {
1795 adapter->stats.tx_dma_map_error++;
1796 goto drop_packet;
1797 }
1798
1799 pbuf->skb = skb;
1800 pbuf->frag_count = frag_count;
1801
1802 first_desc = hwdesc = &tx_ring->desc_head[producer];
1803 qlcnic_clear_cmddesc((u64 *)hwdesc);
1804
1805 qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
1806 qlcnic_set_tx_port(first_desc, adapter->portnum);
1807
1808 for (i = 0; i < frag_count; i++) {
1809
1810 k = i % 4;
1811
1812 if ((k == 0) && (i > 0)) {
1813 /* move to next desc.*/
1814 producer = get_next_index(producer, num_txd);
1815 hwdesc = &tx_ring->desc_head[producer];
1816 qlcnic_clear_cmddesc((u64 *)hwdesc);
1817 tx_ring->cmd_buf_arr[producer].skb = NULL;
1818 }
1819
1820 buffrag = &pbuf->frag_array[i];
1821
1822 hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
1823 switch (k) {
1824 case 0:
1825 hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
1826 break;
1827 case 1:
1828 hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
1829 break;
1830 case 2:
1831 hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
1832 break;
1833 case 3:
1834 hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
1835 break;
1836 }
1837 }
1838
1839 tx_ring->producer = get_next_index(producer, num_txd);
1840
1841 qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
1842
1843 qlcnic_update_cmd_producer(adapter, tx_ring);
1844
1845 adapter->stats.txbytes += skb->len;
1846 adapter->stats.xmitcalled++;
1847
1848 return NETDEV_TX_OK;
1849
1850drop_packet:
1851 adapter->stats.txdropped++;
1852 dev_kfree_skb_any(skb);
1853 return NETDEV_TX_OK;
1854}
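/*
 * TX path summary: the skb is DMA-mapped, up to four buffer addresses are
 * packed into each command descriptor (hence the (frag_count + 3) >> 2
 * estimate above), TSO and checksum offload flags are filled in by
 * qlcnic_tso_check(), and the new producer index is written to hardware via
 * qlcnic_update_cmd_producer().
 */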
1855
1856static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
1857{
1858 struct net_device *netdev = adapter->netdev;
1859 u32 temp, temp_state, temp_val;
1860 int rv = 0;
1861
1862 temp = QLCRD32(adapter, CRB_TEMP_STATE);
1863
1864 temp_state = qlcnic_get_temp_state(temp);
1865 temp_val = qlcnic_get_temp_val(temp);
1866
1867 if (temp_state == QLCNIC_TEMP_PANIC) {
1868 dev_err(&netdev->dev,
1869 "Device temperature %d degrees C exceeds"
1870 " maximum allowed. Hardware has been shut down.\n",
1871 temp_val);
1872 rv = 1;
1873 } else if (temp_state == QLCNIC_TEMP_WARN) {
1874 if (adapter->temp == QLCNIC_TEMP_NORMAL) {
1875 dev_err(&netdev->dev,
1876 "Device temperature %d degrees C "
1877 "exceeds operating range."
1878 " Immediate action needed.\n",
1879 temp_val);
1880 }
1881 } else {
1882 if (adapter->temp == QLCNIC_TEMP_WARN) {
1883 dev_info(&netdev->dev,
1884 "Device temperature is now %d degrees C"
1885 " in normal range.\n", temp_val);
1886 }
1887 }
1888 adapter->temp = temp_state;
1889 return rv;
1890}
1891
1892void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
1893{
1894 struct net_device *netdev = adapter->netdev;
1895
1896 if (adapter->ahw.linkup && !linkup) {
1897 dev_info(&netdev->dev, "NIC Link is down\n");
1898 adapter->ahw.linkup = 0;
1899 if (netif_running(netdev)) {
1900 netif_carrier_off(netdev);
1901 netif_stop_queue(netdev);
1902 }
1903 } else if (!adapter->ahw.linkup && linkup) {
1904 dev_info(&netdev->dev, "NIC Link is up\n");
1905 adapter->ahw.linkup = 1;
1906 if (netif_running(netdev)) {
1907 netif_carrier_on(netdev);
1908 netif_wake_queue(netdev);
1909 }
1910 }
1911}
1912
1913static void qlcnic_tx_timeout(struct net_device *netdev)
1914{
1915 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1916
1917 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
1918 return;
1919
1920 dev_err(&netdev->dev, "transmit timeout, resetting.\n");
1921
1922 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
1923 adapter->need_fw_reset = 1;
1924 else
1925 adapter->reset_context = 1;
1926}
1927
1928static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
1929{
1930 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1931 struct net_device_stats *stats = &netdev->stats;
1932
1933 memset(stats, 0, sizeof(*stats));
1934
1935 stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
1936 stats->tx_packets = adapter->stats.xmitfinished;
7e382594 1937 stats->rx_bytes = adapter->stats.rxbytes + adapter->stats.lrobytes;
1938 stats->tx_bytes = adapter->stats.txbytes;
1939 stats->rx_dropped = adapter->stats.rxdropped;
1940 stats->tx_dropped = adapter->stats.txdropped;
1941
1942 return stats;
1943}
1944
7eb9855d 1945static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
af19b491 1946{
1947 u32 status;
1948
1949 status = readl(adapter->isr_int_vec);
1950
1951 if (!(status & adapter->int_vec_bit))
1952 return IRQ_NONE;
1953
1954 /* check interrupt state machine, to be sure */
1955 status = readl(adapter->crb_int_state_reg);
1956 if (!ISR_LEGACY_INT_TRIGGERED(status))
1957 return IRQ_NONE;
1958
1959 writel(0xffffffff, adapter->tgt_status_reg);
1960 /* read twice to ensure write is flushed */
1961 readl(adapter->isr_int_vec);
1962 readl(adapter->isr_int_vec);
1963
1964 return IRQ_HANDLED;
1965}
1966
1967static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
1968{
1969 struct qlcnic_host_sds_ring *sds_ring = data;
1970 struct qlcnic_adapter *adapter = sds_ring->adapter;
1971
1972 if (adapter->flags & QLCNIC_MSIX_ENABLED)
1973 goto done;
1974 else if (adapter->flags & QLCNIC_MSI_ENABLED) {
1975 writel(0xffffffff, adapter->tgt_status_reg);
1976 goto done;
1977 }
1978
1979 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
1980 return IRQ_NONE;
1981
1982done:
1983 adapter->diag_cnt++;
1984 qlcnic_enable_int(sds_ring);
1985 return IRQ_HANDLED;
1986}
1987
1988static irqreturn_t qlcnic_intr(int irq, void *data)
1989{
1990 struct qlcnic_host_sds_ring *sds_ring = data;
1991 struct qlcnic_adapter *adapter = sds_ring->adapter;
1992
1993 if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
1994 return IRQ_NONE;
1995
1996 napi_schedule(&sds_ring->napi);
1997
1998 return IRQ_HANDLED;
1999}
2000
2001static irqreturn_t qlcnic_msi_intr(int irq, void *data)
2002{
2003 struct qlcnic_host_sds_ring *sds_ring = data;
2004 struct qlcnic_adapter *adapter = sds_ring->adapter;
2005
2006 /* clear interrupt */
2007 writel(0xffffffff, adapter->tgt_status_reg);
2008
2009 napi_schedule(&sds_ring->napi);
2010 return IRQ_HANDLED;
2011}
2012
2013static irqreturn_t qlcnic_msix_intr(int irq, void *data)
2014{
2015 struct qlcnic_host_sds_ring *sds_ring = data;
2016
2017 napi_schedule(&sds_ring->napi);
2018 return IRQ_HANDLED;
2019}
2020
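/*
 * Reclaim Tx buffers whose descriptors the firmware has consumed,
 * unmapping their DMA fragments and waking the queue once enough
 * descriptors are free again.
 */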
2021static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
2022{
2023 u32 sw_consumer, hw_consumer;
2024 int count = 0, i;
2025 struct qlcnic_cmd_buffer *buffer;
2026 struct pci_dev *pdev = adapter->pdev;
2027 struct net_device *netdev = adapter->netdev;
2028 struct qlcnic_skb_frag *frag;
2029 int done;
2030 struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
2031
2032 if (!spin_trylock(&adapter->tx_clean_lock))
2033 return 1;
2034
2035 sw_consumer = tx_ring->sw_consumer;
2036 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2037
2038 while (sw_consumer != hw_consumer) {
2039 buffer = &tx_ring->cmd_buf_arr[sw_consumer];
2040 if (buffer->skb) {
2041 frag = &buffer->frag_array[0];
2042 pci_unmap_single(pdev, frag->dma, frag->length,
2043 PCI_DMA_TODEVICE);
2044 frag->dma = 0ULL;
2045 for (i = 1; i < buffer->frag_count; i++) {
2046 frag++;
2047 pci_unmap_page(pdev, frag->dma, frag->length,
2048 PCI_DMA_TODEVICE);
2049 frag->dma = 0ULL;
2050 }
2051
2052 adapter->stats.xmitfinished++;
2053 dev_kfree_skb_any(buffer->skb);
2054 buffer->skb = NULL;
2055 }
2056
2057 sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
2058 if (++count >= MAX_STATUS_HANDLE)
2059 break;
2060 }
2061
2062 if (count && netif_running(netdev)) {
2063 tx_ring->sw_consumer = sw_consumer;
2064
2065 smp_mb();
2066
2067 if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
2068 if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
2069 netif_wake_queue(netdev);
8bfe8b91 2070 adapter->stats.xmit_on++;
af19b491 2071 }
af19b491 2072 }
ef71ff83 2073 adapter->tx_timeo_cnt = 0;
2074 }
2075 /*
2076	 * If everything is freed up to the consumer, check whether the ring
2077	 * is full. If it is, check whether more needs to be freed and
2078	 * schedule the callback again.
2079 *
2080 * This happens when there are 2 CPUs. One could be freeing and the
2081 * other filling it. If the ring is full when we get out of here and
2082 * the card has already interrupted the host then the host can miss the
2083 * interrupt.
2084 *
2085 * There is still a possible race condition and the host could miss an
2086 * interrupt. The card has to take care of this.
2087 */
2088 hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
2089 done = (sw_consumer == hw_consumer);
2090 spin_unlock(&adapter->tx_clean_lock);
2091
2092 return done;
2093}
2094
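/*
 * NAPI poll handler: reclaim completed Tx work first, then process
 * received packets up to the budget; the interrupt is re-armed only
 * when both directions have been fully drained.
 */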
2095static int qlcnic_poll(struct napi_struct *napi, int budget)
2096{
2097 struct qlcnic_host_sds_ring *sds_ring =
2098 container_of(napi, struct qlcnic_host_sds_ring, napi);
2099
2100 struct qlcnic_adapter *adapter = sds_ring->adapter;
2101
2102 int tx_complete;
2103 int work_done;
2104
2105 tx_complete = qlcnic_process_cmd_ring(adapter);
2106
2107 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2108
2109 if ((work_done < budget) && tx_complete) {
2110 napi_complete(&sds_ring->napi);
2111 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2112 qlcnic_enable_int(sds_ring);
2113 }
2114
2115 return work_done;
2116}
2117
8f891387 2118static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
2119{
2120 struct qlcnic_host_sds_ring *sds_ring =
2121 container_of(napi, struct qlcnic_host_sds_ring, napi);
2122
2123 struct qlcnic_adapter *adapter = sds_ring->adapter;
2124 int work_done;
2125
2126 work_done = qlcnic_process_rcv_ring(sds_ring, budget);
2127
2128 if (work_done < budget) {
2129 napi_complete(&sds_ring->napi);
2130 if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
2131 qlcnic_enable_int(sds_ring);
2132 }
2133
2134 return work_done;
2135}
2136
2137#ifdef CONFIG_NET_POLL_CONTROLLER
2138static void qlcnic_poll_controller(struct net_device *netdev)
2139{
2140 struct qlcnic_adapter *adapter = netdev_priv(netdev);
2141 disable_irq(adapter->irq);
2142 qlcnic_intr(adapter->irq, adapter);
2143 enable_irq(adapter->irq);
2144}
2145#endif
2146
2147static void
2148qlcnic_idc_debug_info(struct qlcnic_adapter *adapter, u8 encoding)
2149{
2150 u32 val;
2151
2152 val = adapter->portnum & 0xf;
2153 val |= encoding << 7;
2154 val |= (jiffies - adapter->dev_rst_time) << 8;
2155
2156 QLCWR32(adapter, QLCNIC_CRB_DRV_SCRATCH, val);
2157 adapter->dev_rst_time = jiffies;
2158}
2159
2160static int
2161qlcnic_set_drv_state(struct qlcnic_adapter *adapter, u8 state)
2162{
2163 u32 val;
2164
2165 WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
2166 state != QLCNIC_DEV_NEED_QUISCENT);
2167
2168 if (qlcnic_api_lock(adapter))
ade91f8e 2169 return -EIO;
2170
2171 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2172
2173 if (state == QLCNIC_DEV_NEED_RESET)
6d2a4724 2174 QLC_DEV_SET_RST_RDY(val, adapter->portnum);
af19b491 2175 else if (state == QLCNIC_DEV_NEED_QUISCENT)
6d2a4724 2176 QLC_DEV_SET_QSCNT_RDY(val, adapter->portnum);
2177
2178 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2179
2180 qlcnic_api_unlock(adapter);
2181
2182 return 0;
2183}
2184
2185static int
2186qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
2187{
2188 u32 val;
2189
2190 if (qlcnic_api_lock(adapter))
2191 return -EBUSY;
2192
2193 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2194 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
2195 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2196
2197 qlcnic_api_unlock(adapter);
2198
2199 return 0;
2200}
2201
2202static void
2203qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter)
2204{
2205 u32 val;
2206
2207 if (qlcnic_api_lock(adapter))
2208 goto err;
2209
2210 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
6d2a4724 2211 QLC_DEV_CLR_REF_CNT(val, adapter->portnum);
2212 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
2213
2214 if (!(val & 0x11111111))
2215 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
2216
2217 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2218 QLC_DEV_CLR_RST_QSCNT(val, adapter->portnum);
2219 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2220
2221 qlcnic_api_unlock(adapter);
2222err:
2223 adapter->fw_fail_cnt = 0;
2224 clear_bit(__QLCNIC_START_FW, &adapter->state);
2225 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2226}
2227
f73dfc50 2228/* Grab api lock, before checking state */
2229static int
2230qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
2231{
2232 int act, state;
2233
2234 state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
2235 act = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
2236
2237 if (((state & 0x11111111) == (act & 0x11111111)) ||
2238 ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
2239 return 0;
2240 else
2241 return 1;
2242}
2243
2244static int qlcnic_check_idc_ver(struct qlcnic_adapter *adapter)
2245{
2246 u32 val = QLCRD32(adapter, QLCNIC_CRB_DRV_IDC_VER);
2247
2248 if (val != QLCNIC_DRV_IDC_VER) {
2249 dev_warn(&adapter->pdev->dev, "IDC Version mismatch, driver's"
2250 " idc ver = %x; reqd = %x\n", QLCNIC_DRV_IDC_VER, val);
2251 }
2252
2253 return 0;
2254}
2255
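/*
 * Decide whether this function should load the firmware, based on the
 * device state machine in CRB space: bump the per-port reference count,
 * acknowledge reset/quiescent requests, and wait for the device to
 * reach the READY state.
 */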
2256static int
2257qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2258{
2259 u32 val, prev_state;
aa5e18c0 2260 u8 dev_init_timeo = adapter->dev_init_timeo;
6d2a4724 2261 u8 portnum = adapter->portnum;
96f8118c 2262 u8 ret;
af19b491 2263
2264 if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state))
2265 return 1;
2266
2267 if (qlcnic_api_lock(adapter))
2268 return -1;
2269
2270 val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
2271 if (!(val & (1 << (portnum * 4)))) {
2272 QLC_DEV_SET_REF_CNT(val, portnum);
af19b491 2273 QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
2274 }
2275
2276 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
65b5b420 2277 QLCDB(adapter, HW, "Device state = %u\n", prev_state);
2278
2279 switch (prev_state) {
2280 case QLCNIC_DEV_COLD:
bbd8c6a4 2281 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITIALIZING);
96f8118c 2282 QLCWR32(adapter, QLCNIC_CRB_DRV_IDC_VER, QLCNIC_DRV_IDC_VER);
6df900e9 2283 qlcnic_idc_debug_info(adapter, 0);
2284 qlcnic_api_unlock(adapter);
2285 return 1;
2286
2287 case QLCNIC_DEV_READY:
96f8118c 2288 ret = qlcnic_check_idc_ver(adapter);
af19b491 2289 qlcnic_api_unlock(adapter);
96f8118c 2290 return ret;
2291
2292 case QLCNIC_DEV_NEED_RESET:
2293 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2294 QLC_DEV_SET_RST_RDY(val, portnum);
2295 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2296 break;
2297
2298 case QLCNIC_DEV_NEED_QUISCENT:
2299 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2300 QLC_DEV_SET_QSCNT_RDY(val, portnum);
2301 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2302 break;
2303
2304 case QLCNIC_DEV_FAILED:
a7fc948f 2305 dev_err(&adapter->pdev->dev, "Device in failed state.\n");
2306 qlcnic_api_unlock(adapter);
2307 return -1;
2308
2309 case QLCNIC_DEV_INITIALIZING:
2310 case QLCNIC_DEV_QUISCENT:
2311 break;
2312 }
2313
2314 qlcnic_api_unlock(adapter);
2315
2316 do {
af19b491 2317 msleep(1000);
2318 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2319
2320 if (prev_state == QLCNIC_DEV_QUISCENT)
2321 continue;
2322 } while ((prev_state != QLCNIC_DEV_READY) && --dev_init_timeo);
af19b491 2323
2324 if (!dev_init_timeo) {
2325 dev_err(&adapter->pdev->dev,
2326 "Waiting for device to initialize timeout\n");
af19b491 2327 return -1;
65b5b420 2328 }
2329
2330 if (qlcnic_api_lock(adapter))
2331 return -1;
2332
2333 val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
6d2a4724 2334 QLC_DEV_CLR_RST_QSCNT(val, portnum);
2335 QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
2336
96f8118c 2337 ret = qlcnic_check_idc_ver(adapter);
2338 qlcnic_api_unlock(adapter);
2339
96f8118c 2340 return ret;
2341}
2342
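/*
 * Deferred work that drives firmware initialization: poll the IDC
 * device state, restart the firmware when this function owns the
 * reset, and schedule qlcnic_attach_work() once it is running.
 */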
2343static void
2344qlcnic_fwinit_work(struct work_struct *work)
2345{
2346 struct qlcnic_adapter *adapter = container_of(work,
2347 struct qlcnic_adapter, fw_work.work);
9f26f547 2348 u32 dev_state = 0xf, npar_state;
af19b491 2349
2350 if (qlcnic_api_lock(adapter))
2351 goto err_ret;
af19b491 2352
2353 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2354 if (dev_state == QLCNIC_DEV_QUISCENT) {
2355 qlcnic_api_unlock(adapter);
2356 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2357 FW_POLL_DELAY * 2);
2358 return;
2359 }
2360
2361 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
2362 npar_state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2363 if (npar_state == QLCNIC_DEV_NPAR_RDY) {
2364 qlcnic_api_unlock(adapter);
2365 goto wait_npar;
2366 } else {
2367 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2368 FW_POLL_DELAY);
2369 qlcnic_api_unlock(adapter);
2370 return;
2371 }
2372 }
2373
2374 if (adapter->fw_wait_cnt++ > adapter->reset_ack_timeo) {
2375 dev_err(&adapter->pdev->dev, "Reset:Failed to get ack %d sec\n",
2376 adapter->reset_ack_timeo);
2377 goto skip_ack_check;
2378 }
2379
2380 if (!qlcnic_check_drv_state(adapter)) {
2381skip_ack_check:
2382 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2383
2384 if (dev_state == QLCNIC_DEV_NEED_QUISCENT) {
2385 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2386 QLCNIC_DEV_QUISCENT);
2387 qlcnic_schedule_work(adapter, qlcnic_fwinit_work,
2388 FW_POLL_DELAY * 2);
2389			QLCDB(adapter, DRV, "Quiescing the driver\n");
2390 qlcnic_idc_debug_info(adapter, 0);
2391
2392 qlcnic_api_unlock(adapter);
2393 return;
2394 }
2395
2396 if (dev_state == QLCNIC_DEV_NEED_RESET) {
2397 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2398 QLCNIC_DEV_INITIALIZING);
2399 set_bit(__QLCNIC_START_FW, &adapter->state);
2400 QLCDB(adapter, DRV, "Restarting fw\n");
6df900e9 2401 qlcnic_idc_debug_info(adapter, 0);
2402 }
2403
2404 qlcnic_api_unlock(adapter);
2405
9f26f547 2406 if (!adapter->nic_ops->start_firmware(adapter)) {
2407 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2408 return;
2409 }
2410 goto err_ret;
2411 }
2412
f73dfc50 2413 qlcnic_api_unlock(adapter);
aa5e18c0 2414
9f26f547 2415wait_npar:
af19b491 2416 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
f73dfc50 2417 QLCDB(adapter, HW, "Func waiting: Device state=%u\n", dev_state);
65b5b420 2418
af19b491 2419 switch (dev_state) {
2420 case QLCNIC_DEV_QUISCENT:
2421 case QLCNIC_DEV_NEED_QUISCENT:
2422 case QLCNIC_DEV_NEED_RESET:
2423 qlcnic_schedule_work(adapter,
2424 qlcnic_fwinit_work, FW_POLL_DELAY);
2425 return;
2426 case QLCNIC_DEV_FAILED:
2427 break;
2428
2429 default:
9f26f547 2430 if (!adapter->nic_ops->start_firmware(adapter)) {
2431 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2432 return;
2433 }
2434 }
2435
2436err_ret:
2437 dev_err(&adapter->pdev->dev, "Fwinit work failed state=%u "
2438 "fw_wait_cnt=%u\n", dev_state, adapter->fw_wait_cnt);
34ce3626 2439 netif_device_attach(adapter->netdev);
2440 qlcnic_clr_all_drv_state(adapter);
2441}
2442
2443static void
2444qlcnic_detach_work(struct work_struct *work)
2445{
2446 struct qlcnic_adapter *adapter = container_of(work,
2447 struct qlcnic_adapter, fw_work.work);
2448 struct net_device *netdev = adapter->netdev;
2449 u32 status;
2450
2451 netif_device_detach(netdev);
2452
2453 qlcnic_down(adapter, netdev);
2454
2455 status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
2456
2457 if (status & QLCNIC_RCODE_FATAL_ERROR)
2458 goto err_ret;
2459
2460 if (adapter->temp == QLCNIC_TEMP_PANIC)
2461 goto err_ret;
2462
2463 if (qlcnic_set_drv_state(adapter, adapter->dev_state))
2464 goto err_ret;
2465
2466 adapter->fw_wait_cnt = 0;
2467
2468 qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
2469
2470 return;
2471
2472err_ret:
2473 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
2474 status, adapter->temp);
34ce3626 2475 netif_device_attach(netdev);
2476 qlcnic_clr_all_drv_state(adapter);
2477
2478}
2479
f73dfc50 2480/*Transit to RESET state from READY state only */
2481static void
2482qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2483{
2484 u32 state;
2485
2486 if (qlcnic_api_lock(adapter))
2487 return;
2488
2489 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2490
f73dfc50 2491 if (state == QLCNIC_DEV_READY) {
af19b491 2492 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
65b5b420 2493 QLCDB(adapter, DRV, "NEED_RESET state set\n");
6df900e9 2494 qlcnic_idc_debug_info(adapter, 0);
2495 }
2496
2497 qlcnic_api_unlock(adapter);
2498}
2499
2500/* Transit to NPAR READY state from NPAR NOT READY state */
2501static void
2502qlcnic_dev_set_npar_ready(struct qlcnic_adapter *adapter)
2503{
2504 u32 state;
2505
2506 if (qlcnic_api_lock(adapter))
2507 return;
2508
2509 state = QLCRD32(adapter, QLCNIC_CRB_DEV_NPAR_STATE);
2510
2511 if (state != QLCNIC_DEV_NPAR_RDY) {
2512 QLCWR32(adapter, QLCNIC_CRB_DEV_NPAR_STATE,
2513 QLCNIC_DEV_NPAR_RDY);
2514 QLCDB(adapter, DRV, "NPAR READY state set\n");
2515 }
2516
2517 qlcnic_api_unlock(adapter);
2518}
2519
2520static void
2521qlcnic_schedule_work(struct qlcnic_adapter *adapter,
2522 work_func_t func, int delay)
2523{
2524 INIT_DELAYED_WORK(&adapter->fw_work, func);
2525 schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay));
2526}
2527
2528static void
2529qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
2530{
2531 while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
2532 msleep(10);
2533
2534 cancel_delayed_work_sync(&adapter->fw_work);
2535}
2536
2537static void
2538qlcnic_attach_work(struct work_struct *work)
2539{
2540 struct qlcnic_adapter *adapter = container_of(work,
2541 struct qlcnic_adapter, fw_work.work);
2542 struct net_device *netdev = adapter->netdev;
2543
2544 if (netif_running(netdev)) {
52486a3a 2545 if (qlcnic_up(adapter, netdev))
af19b491 2546 goto done;
2547
2548 qlcnic_config_indev_addr(netdev, NETDEV_UP);
2549 }
2550
af19b491 2551done:
34ce3626 2552 netif_device_attach(netdev);
2553 adapter->fw_fail_cnt = 0;
2554 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2555
2556 if (!qlcnic_clr_drv_state(adapter))
2557 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
2558 FW_POLL_DELAY);
2559}
2560
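/*
 * Periodic health check: monitor the temperature state, the IDC device
 * state and the firmware heartbeat counter, and schedule a
 * detach/recovery cycle when the firmware appears to be hung.
 */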
2561static int
2562qlcnic_check_health(struct qlcnic_adapter *adapter)
2563{
2564 u32 state = 0, heartbit;
2565 struct net_device *netdev = adapter->netdev;
2566
2567 if (qlcnic_check_temp(adapter))
2568 goto detach;
2569
2372a5f1 2570 if (adapter->need_fw_reset)
af19b491 2571 qlcnic_dev_request_reset(adapter);
2572
2573 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2574 if (state == QLCNIC_DEV_NEED_RESET || state == QLCNIC_DEV_NEED_QUISCENT)
2575 adapter->need_fw_reset = 1;
2576
2577 heartbit = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
2578 if (heartbit != adapter->heartbit) {
2579 adapter->heartbit = heartbit;
2580 adapter->fw_fail_cnt = 0;
2581 if (adapter->need_fw_reset)
2582 goto detach;
2583
2584 if (adapter->reset_context) {
2585 qlcnic_reset_hw_context(adapter);
2586 adapter->netdev->trans_start = jiffies;
2587 }
2588
2589 return 0;
2590 }
2591
2592 if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
2593 return 0;
2594
2595 qlcnic_dev_request_reset(adapter);
2596
2597 clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
2598
2599 dev_info(&netdev->dev, "firmware hang detected\n");
2600
2601detach:
2602 adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
2603 QLCNIC_DEV_NEED_RESET;
2604
2605 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
2606 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
2607
af19b491 2608 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
2609 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
2610 }
2611
2612 return 1;
2613}
2614
2615static void
2616qlcnic_fw_poll_work(struct work_struct *work)
2617{
2618 struct qlcnic_adapter *adapter = container_of(work,
2619 struct qlcnic_adapter, fw_work.work);
2620
2621 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2622 goto reschedule;
2623
2624
2625 if (qlcnic_check_health(adapter))
2626 return;
2627
2628reschedule:
2629 qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
2630}
2631
2632static int
2633qlcnicvf_start_firmware(struct qlcnic_adapter *adapter)
2634{
2635 int err;
2636
2637 err = qlcnic_can_start_firmware(adapter);
2638 if (err)
2639 return err;
2640
2641 qlcnic_check_options(adapter);
2642
2643 adapter->need_fw_reset = 0;
2644
2645 return err;
2646}
2647
2648static int
2649qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
2650{
2651 return -EOPNOTSUPP;
2652}
2653
2654static int
2655qlcnicvf_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
2656{
2657 return -EOPNOTSUPP;
2658}
2659
2660static int
2661qlcnicvf_set_ilb_mode(struct qlcnic_adapter *adapter)
2662{
2663 return -EOPNOTSUPP;
2664}
2665
2666static void
2667qlcnicvf_clear_ilb_mode(struct qlcnic_adapter *adapter)
2668{
2669 return;
2670}
2671
2672static ssize_t
2673qlcnic_store_bridged_mode(struct device *dev,
2674 struct device_attribute *attr, const char *buf, size_t len)
2675{
2676 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2677 unsigned long new;
2678 int ret = -EINVAL;
2679
2680 if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
2681 goto err_out;
2682
8a15ad1f 2683 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
2684 goto err_out;
2685
2686 if (strict_strtoul(buf, 2, &new))
2687 goto err_out;
2688
2e9d722d 2689 if (!adapter->nic_ops->config_bridged_mode(adapter, !!new))
2690 ret = len;
2691
2692err_out:
2693 return ret;
2694}
2695
2696static ssize_t
2697qlcnic_show_bridged_mode(struct device *dev,
2698 struct device_attribute *attr, char *buf)
2699{
2700 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2701 int bridged_mode = 0;
2702
2703 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
2704 bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
2705
2706 return sprintf(buf, "%d\n", bridged_mode);
2707}
2708
2709static struct device_attribute dev_attr_bridged_mode = {
2710 .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
2711 .show = qlcnic_show_bridged_mode,
2712 .store = qlcnic_store_bridged_mode,
2713};
2714
2715static ssize_t
2716qlcnic_store_diag_mode(struct device *dev,
2717 struct device_attribute *attr, const char *buf, size_t len)
2718{
2719 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2720 unsigned long new;
2721
2722 if (strict_strtoul(buf, 2, &new))
2723 return -EINVAL;
2724
2725 if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
2726 adapter->flags ^= QLCNIC_DIAG_ENABLED;
2727
2728 return len;
2729}
2730
2731static ssize_t
2732qlcnic_show_diag_mode(struct device *dev,
2733 struct device_attribute *attr, char *buf)
2734{
2735 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2736
2737 return sprintf(buf, "%d\n",
2738 !!(adapter->flags & QLCNIC_DIAG_ENABLED));
2739}
2740
2741static struct device_attribute dev_attr_diag_mode = {
2742 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
2743 .show = qlcnic_show_diag_mode,
2744 .store = qlcnic_store_diag_mode,
2745};
2746
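/*
 * The "crb" and "mem" bin attributes below expose register and memory
 * windows under the PCI device's sysfs directory for diagnostics;
 * every access is validated for size and alignment first.
 */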
2747static int
2748qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
2749 loff_t offset, size_t size)
2750{
2751 size_t crb_size = 4;
2752
2753 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
2754 return -EIO;
2755
2756 if (offset < QLCNIC_PCI_CRBSPACE) {
2757 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
2758 QLCNIC_PCI_CAMQM_END))
2759 crb_size = 8;
2760 else
2761 return -EINVAL;
2762 }
af19b491 2763
2764 if ((size != crb_size) || (offset & (crb_size-1)))
2765 return -EINVAL;
2766
2767 return 0;
2768}
2769
2770static ssize_t
2771qlcnic_sysfs_read_crb(struct file *filp, struct kobject *kobj,
2772 struct bin_attribute *attr,
2773 char *buf, loff_t offset, size_t size)
2774{
2775 struct device *dev = container_of(kobj, struct device, kobj);
2776 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2777 u32 data;
897e8c7c 2778 u64 qmdata;
2779 int ret;
2780
2781 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2782 if (ret != 0)
2783 return ret;
2784
2785 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
2786 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
2787 memcpy(buf, &qmdata, size);
2788 } else {
2789 data = QLCRD32(adapter, offset);
2790 memcpy(buf, &data, size);
2791 }
2792 return size;
2793}
2794
2795static ssize_t
2796qlcnic_sysfs_write_crb(struct file *filp, struct kobject *kobj,
2797 struct bin_attribute *attr,
2798 char *buf, loff_t offset, size_t size)
2799{
2800 struct device *dev = container_of(kobj, struct device, kobj);
2801 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2802 u32 data;
897e8c7c 2803 u64 qmdata;
2804 int ret;
2805
2806 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2807 if (ret != 0)
2808 return ret;
2809
2810 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
2811 memcpy(&qmdata, buf, size);
2812 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
2813 } else {
2814 memcpy(&data, buf, size);
2815 QLCWR32(adapter, offset, data);
2816 }
2817 return size;
2818}
2819
2820static int
2821qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
2822 loff_t offset, size_t size)
2823{
2824 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
2825 return -EIO;
2826
2827 if ((size != 8) || (offset & 0x7))
2828 return -EIO;
2829
2830 return 0;
2831}
2832
2833static ssize_t
2834qlcnic_sysfs_read_mem(struct file *filp, struct kobject *kobj,
2835 struct bin_attribute *attr,
2836 char *buf, loff_t offset, size_t size)
2837{
2838 struct device *dev = container_of(kobj, struct device, kobj);
2839 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2840 u64 data;
2841 int ret;
2842
2843 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
2844 if (ret != 0)
2845 return ret;
2846
2847 if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
2848 return -EIO;
2849
2850 memcpy(buf, &data, size);
2851
2852 return size;
2853}
2854
2855static ssize_t
2856qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
2857 struct bin_attribute *attr,
2858 char *buf, loff_t offset, size_t size)
2859{
2860 struct device *dev = container_of(kobj, struct device, kobj);
2861 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2862 u64 data;
2863 int ret;
2864
2865 ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
2866 if (ret != 0)
2867 return ret;
2868
2869 memcpy(&data, buf, size);
2870
2871 if (qlcnic_pci_mem_write_2M(adapter, offset, data))
2872 return -EIO;
2873
2874 return size;
2875}
2876
2877
2878static struct bin_attribute bin_attr_crb = {
2879 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
2880 .size = 0,
2881 .read = qlcnic_sysfs_read_crb,
2882 .write = qlcnic_sysfs_write_crb,
2883};
2884
2885static struct bin_attribute bin_attr_mem = {
2886 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
2887 .size = 0,
2888 .read = qlcnic_sysfs_read_mem,
2889 .write = qlcnic_sysfs_write_mem,
2890};
2891
2892int
2893validate_pm_config(struct qlcnic_adapter *adapter,
2894 struct qlcnic_pm_func_cfg *pm_cfg, int count)
2895{
2896
2897 u8 src_pci_func, s_esw_id, d_esw_id;
2898 u8 dest_pci_func;
2899 int i;
2900
2901 for (i = 0; i < count; i++) {
2902 src_pci_func = pm_cfg[i].pci_func;
2903 dest_pci_func = pm_cfg[i].dest_npar;
2904 if (src_pci_func >= QLCNIC_MAX_PCI_FUNC
2905 || dest_pci_func >= QLCNIC_MAX_PCI_FUNC)
2906 return QL_STATUS_INVALID_PARAM;
2907
2908 if (adapter->npars[src_pci_func].type != QLCNIC_TYPE_NIC)
2909 return QL_STATUS_INVALID_PARAM;
2910
2911 if (adapter->npars[dest_pci_func].type != QLCNIC_TYPE_NIC)
2912 return QL_STATUS_INVALID_PARAM;
2913
2914 if (!IS_VALID_MODE(pm_cfg[i].action))
2915 return QL_STATUS_INVALID_PARAM;
2916
2917 s_esw_id = adapter->npars[src_pci_func].phy_port;
2918 d_esw_id = adapter->npars[dest_pci_func].phy_port;
2919
2920 if (s_esw_id != d_esw_id)
2921 return QL_STATUS_INVALID_PARAM;
2922
2923 }
2924 return 0;
2925
2926}
2927
2928static ssize_t
2929qlcnic_sysfs_write_pm_config(struct file *filp, struct kobject *kobj,
2930 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
2931{
2932 struct device *dev = container_of(kobj, struct device, kobj);
2933 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2934 struct qlcnic_pm_func_cfg *pm_cfg;
2935 u32 id, action, pci_func;
2936 int count, rem, i, ret;
2937
2938 count = size / sizeof(struct qlcnic_pm_func_cfg);
2939 rem = size % sizeof(struct qlcnic_pm_func_cfg);
2940 if (rem)
2941 return QL_STATUS_INVALID_PARAM;
2942
2943 pm_cfg = (struct qlcnic_pm_func_cfg *) buf;
2944
2945 ret = validate_pm_config(adapter, pm_cfg, count);
2946 if (ret)
2947 return ret;
2948 for (i = 0; i < count; i++) {
2949 pci_func = pm_cfg[i].pci_func;
2950 action = pm_cfg[i].action;
2951 id = adapter->npars[pci_func].phy_port;
2952 ret = qlcnic_config_port_mirroring(adapter, id,
2953 action, pci_func);
2954 if (ret)
2955 return ret;
2956 }
2957
2958 for (i = 0; i < count; i++) {
2959 pci_func = pm_cfg[i].pci_func;
2960 id = adapter->npars[pci_func].phy_port;
2961 adapter->npars[pci_func].enable_pm = pm_cfg[i].action;
2962 adapter->npars[pci_func].dest_npar = id;
2963 }
2964 return size;
2965}
2966
2967static ssize_t
2968qlcnic_sysfs_read_pm_config(struct file *filp, struct kobject *kobj,
2969 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
2970{
2971 struct device *dev = container_of(kobj, struct device, kobj);
2972 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2973 struct qlcnic_pm_func_cfg pm_cfg[QLCNIC_MAX_PCI_FUNC];
2974 int i;
2975
2976 if (size != sizeof(pm_cfg))
2977 return QL_STATUS_INVALID_PARAM;
2978
2979 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
2980 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
2981 continue;
2982 pm_cfg[i].action = adapter->npars[i].enable_pm;
2983 pm_cfg[i].dest_npar = 0;
2984 pm_cfg[i].pci_func = i;
2985 }
2986 memcpy(buf, &pm_cfg, size);
2987
2988 return size;
2989}
2990
2991int
2992validate_esw_config(struct qlcnic_adapter *adapter,
2993 struct qlcnic_esw_func_cfg *esw_cfg, int count)
2994{
2995 u8 pci_func;
2996 int i;
2997
2998 for (i = 0; i < count; i++) {
2999 pci_func = esw_cfg[i].pci_func;
3000 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3001 return QL_STATUS_INVALID_PARAM;
3002
3003		if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3004 return QL_STATUS_INVALID_PARAM;
3005
3006		if (esw_cfg[i].host_vlan_tag == 1)
3007 if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
3008 return QL_STATUS_INVALID_PARAM;
3009
3010 if (!IS_VALID_MODE(esw_cfg[i].promisc_mode)
3011 || !IS_VALID_MODE(esw_cfg[i].host_vlan_tag)
3012 || !IS_VALID_MODE(esw_cfg[i].mac_learning)
3013 || !IS_VALID_MODE(esw_cfg[i].discard_tagged))
3014 return QL_STATUS_INVALID_PARAM;
3015 }
3016
3017 return 0;
3018}
3019
3020static ssize_t
3021qlcnic_sysfs_write_esw_config(struct file *file, struct kobject *kobj,
3022 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3023{
3024 struct device *dev = container_of(kobj, struct device, kobj);
3025 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3026 struct qlcnic_esw_func_cfg *esw_cfg;
3027 u8 id, discard_tagged, promsc_mode, mac_learn;
3028 u8 vlan_tagging, pci_func, vlan_id;
3029 int count, rem, i, ret;
3030
3031 count = size / sizeof(struct qlcnic_esw_func_cfg);
3032 rem = size % sizeof(struct qlcnic_esw_func_cfg);
3033 if (rem)
3034 return QL_STATUS_INVALID_PARAM;
3035
3036 esw_cfg = (struct qlcnic_esw_func_cfg *) buf;
3037 ret = validate_esw_config(adapter, esw_cfg, count);
3038 if (ret)
3039 return ret;
3040
3041 for (i = 0; i < count; i++) {
3042 pci_func = esw_cfg[i].pci_func;
3043 id = adapter->npars[pci_func].phy_port;
3044 vlan_tagging = esw_cfg[i].host_vlan_tag;
3045 promsc_mode = esw_cfg[i].promisc_mode;
3046 mac_learn = esw_cfg[i].mac_learning;
3047 vlan_id = esw_cfg[i].vlan_id;
3048 discard_tagged = esw_cfg[i].discard_tagged;
3049 ret = qlcnic_config_switch_port(adapter, id, vlan_tagging,
3050 discard_tagged,
3051 promsc_mode,
3052 mac_learn,
3053 pci_func,
3054 vlan_id);
3055 if (ret)
3056 return ret;
3057 }
3058
3059 for (i = 0; i < count; i++) {
3060 pci_func = esw_cfg[i].pci_func;
3061 adapter->npars[pci_func].promisc_mode = esw_cfg[i].promisc_mode;
3062 adapter->npars[pci_func].mac_learning = esw_cfg[i].mac_learning;
3063 adapter->npars[pci_func].vlan_id = esw_cfg[i].vlan_id;
3064 adapter->npars[pci_func].discard_tagged =
3065 esw_cfg[i].discard_tagged;
3066 adapter->npars[pci_func].host_vlan_tag =
3067 esw_cfg[i].host_vlan_tag;
3068 }
3069
3070 return size;
3071}
3072
3073static ssize_t
3074qlcnic_sysfs_read_esw_config(struct file *file, struct kobject *kobj,
3075 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3076{
3077 struct device *dev = container_of(kobj, struct device, kobj);
3078 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3079 struct qlcnic_esw_func_cfg esw_cfg[QLCNIC_MAX_PCI_FUNC];
3080 int i;
3081
3082 if (size != sizeof(esw_cfg))
3083 return QL_STATUS_INVALID_PARAM;
3084
3085 for (i = 0; i < QLCNIC_MAX_PCI_FUNC; i++) {
3086 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3087 continue;
3088
3089 esw_cfg[i].host_vlan_tag = adapter->npars[i].host_vlan_tag;
3090 esw_cfg[i].promisc_mode = adapter->npars[i].promisc_mode;
3091 esw_cfg[i].discard_tagged = adapter->npars[i].discard_tagged;
3092 esw_cfg[i].vlan_id = adapter->npars[i].vlan_id;
3093 esw_cfg[i].mac_learning = adapter->npars[i].mac_learning;
3094 }
3095 memcpy(buf, &esw_cfg, size);
3096
3097 return size;
3098}
3099
3100int
3101validate_npar_config(struct qlcnic_adapter *adapter,
3102 struct qlcnic_npar_func_cfg *np_cfg, int count)
3103{
3104 u8 pci_func, i;
3105
3106 for (i = 0; i < count; i++) {
3107 pci_func = np_cfg[i].pci_func;
3108 if (pci_func >= QLCNIC_MAX_PCI_FUNC)
3109 return QL_STATUS_INVALID_PARAM;
3110
3111 if (adapter->npars[pci_func].type != QLCNIC_TYPE_NIC)
3112 return QL_STATUS_INVALID_PARAM;
3113
3114 if (!IS_VALID_BW(np_cfg[i].min_bw)
3115 || !IS_VALID_BW(np_cfg[i].max_bw)
3116 || !IS_VALID_RX_QUEUES(np_cfg[i].max_rx_queues)
3117 || !IS_VALID_TX_QUEUES(np_cfg[i].max_tx_queues))
3118 return QL_STATUS_INVALID_PARAM;
3119 }
3120 return 0;
3121}
3122
3123static ssize_t
3124qlcnic_sysfs_write_npar_config(struct file *file, struct kobject *kobj,
3125 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3126{
3127 struct device *dev = container_of(kobj, struct device, kobj);
3128 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3129 struct qlcnic_info nic_info;
3130 struct qlcnic_npar_func_cfg *np_cfg;
3131 int i, count, rem, ret;
3132 u8 pci_func;
3133
3134 count = size / sizeof(struct qlcnic_npar_func_cfg);
3135 rem = size % sizeof(struct qlcnic_npar_func_cfg);
3136 if (rem)
3137 return QL_STATUS_INVALID_PARAM;
3138
3139 np_cfg = (struct qlcnic_npar_func_cfg *) buf;
3140 ret = validate_npar_config(adapter, np_cfg, count);
3141 if (ret)
3142 return ret;
3143
3144 for (i = 0; i < count ; i++) {
3145 pci_func = np_cfg[i].pci_func;
3146 ret = qlcnic_get_nic_info(adapter, &nic_info, pci_func);
3147 if (ret)
3148 return ret;
3149 nic_info.pci_func = pci_func;
3150 nic_info.min_tx_bw = np_cfg[i].min_bw;
3151 nic_info.max_tx_bw = np_cfg[i].max_bw;
3152 ret = qlcnic_set_nic_info(adapter, &nic_info);
3153 if (ret)
3154 return ret;
3155 }
3156
3157 return size;
3158
3159}
3160static ssize_t
3161qlcnic_sysfs_read_npar_config(struct file *file, struct kobject *kobj,
3162 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3163{
3164 struct device *dev = container_of(kobj, struct device, kobj);
3165 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3166 struct qlcnic_info nic_info;
3167 struct qlcnic_npar_func_cfg np_cfg[QLCNIC_MAX_PCI_FUNC];
3168 int i, ret;
3169
3170 if (size != sizeof(np_cfg))
3171 return QL_STATUS_INVALID_PARAM;
3172
3173 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3174 if (adapter->npars[i].type != QLCNIC_TYPE_NIC)
3175 continue;
3176 ret = qlcnic_get_nic_info(adapter, &nic_info, i);
3177 if (ret)
3178 return ret;
3179
3180 np_cfg[i].pci_func = i;
3181 np_cfg[i].op_mode = nic_info.op_mode;
3182 np_cfg[i].port_num = nic_info.phys_port;
3183 np_cfg[i].fw_capab = nic_info.capabilities;
3184		np_cfg[i].min_bw = nic_info.min_tx_bw;
3185 np_cfg[i].max_bw = nic_info.max_tx_bw;
3186 np_cfg[i].max_tx_queues = nic_info.max_tx_ques;
3187 np_cfg[i].max_rx_queues = nic_info.max_rx_ques;
3188 }
3189 memcpy(buf, &np_cfg, size);
3190 return size;
3191}
3192
3193static ssize_t
3194qlcnic_sysfs_read_pci_config(struct file *file, struct kobject *kobj,
3195 struct bin_attribute *attr, char *buf, loff_t offset, size_t size)
3196{
3197 struct device *dev = container_of(kobj, struct device, kobj);
3198 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
3199 struct qlcnic_pci_func_cfg pci_cfg[QLCNIC_MAX_PCI_FUNC];
3200 struct qlcnic_pci_info pci_info[QLCNIC_MAX_PCI_FUNC];
3201 int i, ret;
3202
3203 if (size != sizeof(pci_cfg))
3204 return QL_STATUS_INVALID_PARAM;
3205
3206 ret = qlcnic_get_pci_info(adapter, pci_info);
3207 if (ret)
3208 return ret;
3209
3210 for (i = 0; i < QLCNIC_MAX_PCI_FUNC ; i++) {
3211 pci_cfg[i].pci_func = pci_info[i].id;
3212 pci_cfg[i].func_type = pci_info[i].type;
3213 pci_cfg[i].port_num = pci_info[i].default_port;
3214 pci_cfg[i].min_bw = pci_info[i].tx_min_bw;
3215 pci_cfg[i].max_bw = pci_info[i].tx_max_bw;
3216 memcpy(&pci_cfg[i].def_mac_addr, &pci_info[i].mac, ETH_ALEN);
3217 }
3218 memcpy(buf, &pci_cfg, size);
3219 return size;
3220
3221}
3222static struct bin_attribute bin_attr_npar_config = {
3223 .attr = {.name = "npar_config", .mode = (S_IRUGO | S_IWUSR)},
3224 .size = 0,
3225 .read = qlcnic_sysfs_read_npar_config,
3226 .write = qlcnic_sysfs_write_npar_config,
3227};
3228
3229static struct bin_attribute bin_attr_pci_config = {
3230 .attr = {.name = "pci_config", .mode = (S_IRUGO | S_IWUSR)},
3231 .size = 0,
3232 .read = qlcnic_sysfs_read_pci_config,
3233 .write = NULL,
3234};
3235
3236static struct bin_attribute bin_attr_esw_config = {
3237 .attr = {.name = "esw_config", .mode = (S_IRUGO | S_IWUSR)},
3238 .size = 0,
3239 .read = qlcnic_sysfs_read_esw_config,
3240 .write = qlcnic_sysfs_write_esw_config,
3241};
3242
3243static struct bin_attribute bin_attr_pm_config = {
3244 .attr = {.name = "pm_config", .mode = (S_IRUGO | S_IWUSR)},
3245 .size = 0,
3246 .read = qlcnic_sysfs_read_pm_config,
3247 .write = qlcnic_sysfs_write_pm_config,
3248};
3249
3250static void
3251qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
3252{
3253 struct device *dev = &adapter->pdev->dev;
3254
3255 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3256 if (device_create_file(dev, &dev_attr_bridged_mode))
3257 dev_warn(dev,
3258 "failed to create bridged_mode sysfs entry\n");
3259}
3260
3261static void
3262qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
3263{
3264 struct device *dev = &adapter->pdev->dev;
3265
3266 if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
3267 device_remove_file(dev, &dev_attr_bridged_mode);
3268}
3269
3270static void
3271qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
3272{
3273 struct device *dev = &adapter->pdev->dev;
3274
3275 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3276 return;
3277 if (device_create_file(dev, &dev_attr_diag_mode))
3278 dev_info(dev, "failed to create diag_mode sysfs entry\n");
3279 if (device_create_bin_file(dev, &bin_attr_crb))
3280 dev_info(dev, "failed to create crb sysfs entry\n");
3281 if (device_create_bin_file(dev, &bin_attr_mem))
3282 dev_info(dev, "failed to create mem sysfs entry\n");
3283 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
3284 adapter->op_mode != QLCNIC_MGMT_FUNC)
3285 return;
3286 if (device_create_bin_file(dev, &bin_attr_pci_config))
3287		dev_info(dev, "failed to create pci config sysfs entry\n");
3288	if (device_create_bin_file(dev, &bin_attr_npar_config))
3289		dev_info(dev, "failed to create npar config sysfs entry\n");
3290	if (device_create_bin_file(dev, &bin_attr_esw_config))
3291		dev_info(dev, "failed to create esw config sysfs entry\n");
3292	if (device_create_bin_file(dev, &bin_attr_pm_config))
3293		dev_info(dev, "failed to create pm config sysfs entry\n");
3294
3295}
3296
3297static void
3298qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
3299{
3300 struct device *dev = &adapter->pdev->dev;
3301
3302 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC)
3303 return;
3304 device_remove_file(dev, &dev_attr_diag_mode);
3305 device_remove_bin_file(dev, &bin_attr_crb);
3306 device_remove_bin_file(dev, &bin_attr_mem);
3307 if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
3308 adapter->op_mode != QLCNIC_MGMT_FUNC)
3309 return;
3310 device_remove_bin_file(dev, &bin_attr_pci_config);
3311 device_remove_bin_file(dev, &bin_attr_npar_config);
3312 device_remove_bin_file(dev, &bin_attr_esw_config);
3313 device_remove_bin_file(dev, &bin_attr_pm_config);
3314}
3315
3316#ifdef CONFIG_INET
3317
3318#define is_qlcnic_netdev(dev) (dev->netdev_ops == &qlcnic_netdev_ops)
3319
3320static void
3321qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
3322{
3323 struct in_device *indev;
3324 struct qlcnic_adapter *adapter = netdev_priv(dev);
3325
3326 indev = in_dev_get(dev);
3327 if (!indev)
3328 return;
3329
3330 for_ifa(indev) {
3331 switch (event) {
3332 case NETDEV_UP:
3333 qlcnic_config_ipaddr(adapter,
3334 ifa->ifa_address, QLCNIC_IP_UP);
3335 break;
3336 case NETDEV_DOWN:
3337 qlcnic_config_ipaddr(adapter,
3338 ifa->ifa_address, QLCNIC_IP_DOWN);
3339 break;
3340 default:
3341 break;
3342 }
3343 } endfor_ifa(indev);
3344
3345 in_dev_put(indev);
3346}
3347
3348static int qlcnic_netdev_event(struct notifier_block *this,
3349 unsigned long event, void *ptr)
3350{
3351 struct qlcnic_adapter *adapter;
3352 struct net_device *dev = (struct net_device *)ptr;
3353
3354recheck:
3355 if (dev == NULL)
3356 goto done;
3357
3358 if (dev->priv_flags & IFF_802_1Q_VLAN) {
3359 dev = vlan_dev_real_dev(dev);
3360 goto recheck;
3361 }
3362
3363 if (!is_qlcnic_netdev(dev))
3364 goto done;
3365
3366 adapter = netdev_priv(dev);
3367
3368 if (!adapter)
3369 goto done;
3370
8a15ad1f 3371 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
3372 goto done;
3373
3374 qlcnic_config_indev_addr(dev, event);
3375done:
3376 return NOTIFY_DONE;
3377}
3378
3379static int
3380qlcnic_inetaddr_event(struct notifier_block *this,
3381 unsigned long event, void *ptr)
3382{
3383 struct qlcnic_adapter *adapter;
3384 struct net_device *dev;
3385
3386 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
3387
3388 dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
3389
3390recheck:
3391 if (dev == NULL || !netif_running(dev))
3392 goto done;
3393
3394 if (dev->priv_flags & IFF_802_1Q_VLAN) {
3395 dev = vlan_dev_real_dev(dev);
3396 goto recheck;
3397 }
3398
3399 if (!is_qlcnic_netdev(dev))
3400 goto done;
3401
3402 adapter = netdev_priv(dev);
3403
251a84c9 3404 if (!adapter)
3405 goto done;
3406
8a15ad1f 3407 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state))
3408 goto done;
3409
3410 switch (event) {
3411 case NETDEV_UP:
3412 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
3413 break;
3414 case NETDEV_DOWN:
3415 qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
3416 break;
3417 default:
3418 break;
3419 }
3420
3421done:
3422 return NOTIFY_DONE;
3423}
3424
3425static struct notifier_block qlcnic_netdev_cb = {
3426 .notifier_call = qlcnic_netdev_event,
3427};
3428
3429static struct notifier_block qlcnic_inetaddr_cb = {
3430 .notifier_call = qlcnic_inetaddr_event,
3431};
3432#else
3433static void
3434qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
3435{ }
3436#endif
3437
3438static struct pci_driver qlcnic_driver = {
3439 .name = qlcnic_driver_name,
3440 .id_table = qlcnic_pci_tbl,
3441 .probe = qlcnic_probe,
3442 .remove = __devexit_p(qlcnic_remove),
3443#ifdef CONFIG_PM
3444 .suspend = qlcnic_suspend,
3445 .resume = qlcnic_resume,
3446#endif
3447 .shutdown = qlcnic_shutdown
3448};
3449
3450static int __init qlcnic_init_module(void)
3451{
3452
3453 printk(KERN_INFO "%s\n", qlcnic_driver_string);
3454
3455#ifdef CONFIG_INET
3456 register_netdevice_notifier(&qlcnic_netdev_cb);
3457 register_inetaddr_notifier(&qlcnic_inetaddr_cb);
3458#endif
3459
3460
3461 return pci_register_driver(&qlcnic_driver);
3462}
3463
3464module_init(qlcnic_init_module);
3465
3466static void __exit qlcnic_exit_module(void)
3467{
3468
3469 pci_unregister_driver(&qlcnic_driver);
3470
3471#ifdef CONFIG_INET
3472 unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
3473 unregister_netdevice_notifier(&qlcnic_netdev_cb);
3474#endif
3475}
3476
3477module_exit(qlcnic_exit_module);