net-next-2.6.git: drivers/scsi/qla2xxx/qla_attr.c (blame)
8482e118 1/*
fa90c54f 2 * QLogic Fibre Channel HBA Driver
01e58d8e 3 * Copyright (c) 2003-2008 QLogic Corporation
8482e118 4 *
fa90c54f 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7#include "qla_def.h"
8
2c3dfe3f 9#include <linux/kthread.h>
7aaef27b 10#include <linux/vmalloc.h>
5a0e3ad6 11#include <linux/slab.h>
00eabe7c 12#include <linux/delay.h>
8482e118 13
a824ebb3 14static int qla24xx_vport_disable(struct fc_vport *, bool);
15static int qla84xx_reset(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
16int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t, uint16_t *);
17static int qla84xx_mgmt_cmd(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
18/* SYSFS attributes --------------------------------------------------------- */
19
20static ssize_t
21qla2x00_sysfs_read_fw_dump(struct kobject *kobj,
22 struct bin_attribute *bin_attr,
23 char *buf, loff_t off, size_t count)
8482e118 24{
7b867cf7 25 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
8482e118 26 struct device, kobj)));
7b867cf7 27 struct qla_hw_data *ha = vha->hw;
28
29 if (ha->fw_dump_reading == 0)
30 return 0;
8482e118 31
32 return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
33 ha->fw_dump_len);
34}
35
36static ssize_t
37qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
38 struct bin_attribute *bin_attr,
39 char *buf, loff_t off, size_t count)
8482e118 40{
7b867cf7 41 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
8482e118 42 struct device, kobj)));
7b867cf7 43 struct qla_hw_data *ha = vha->hw;
8482e118 44 int reading;
45
46 if (off != 0)
47 return (0);
48
49 reading = simple_strtol(buf, NULL, 10);
50 switch (reading) {
51 case 0:
52 if (!ha->fw_dump_reading)
53 break;
8482e118 54
a7a167bf 55 qla_printk(KERN_INFO, ha,
7b867cf7 56 "Firmware dump cleared on (%ld).\n", vha->host_no);
57
58 ha->fw_dump_reading = 0;
59 ha->fw_dumped = 0;
60 break;
61 case 1:
d4e3e04d 62 if (ha->fw_dumped && !ha->fw_dump_reading) {
63 ha->fw_dump_reading = 1;
64
8482e118 65 qla_printk(KERN_INFO, ha,
a7a167bf 66 "Raw firmware dump ready for read on (%ld).\n",
7b867cf7 67 vha->host_no);
68 }
69 break;
a7a167bf 70 case 2:
7b867cf7 71 qla2x00_alloc_fw_dump(vha);
a7a167bf 72 break;
68af0811 73 case 3:
7b867cf7 74 qla2x00_system_error(vha);
68af0811 75 break;
76 }
77 return (count);
78}
79
80static struct bin_attribute sysfs_fw_dump_attr = {
81 .attr = {
82 .name = "fw_dump",
83 .mode = S_IRUSR | S_IWUSR,
84 },
85 .size = 0,
86 .read = qla2x00_sysfs_read_fw_dump,
87 .write = qla2x00_sysfs_write_fw_dump,
88};
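/*
 * Control values accepted by the 'fw_dump' attribute above: 0 clears a
 * previously captured dump, 1 exposes a captured dump for reading,
 * 2 (re)allocates the dump buffer and 3 forces a firmware system error.
 * A typical retrieval sequence from user space (sysfs path shown only
 * as an illustration) would be:
 *
 *   echo 1 > /sys/class/scsi_host/host0/device/fw_dump
 *   cat /sys/class/scsi_host/host0/device/fw_dump > fw.dump
 *   echo 0 > /sys/class/scsi_host/host0/device/fw_dump
 */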
89
90static ssize_t
91qla2x00_sysfs_read_nvram(struct kobject *kobj,
92 struct bin_attribute *bin_attr,
93 char *buf, loff_t off, size_t count)
8482e118 94{
7b867cf7 95 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
8482e118 96 struct device, kobj)));
7b867cf7 97 struct qla_hw_data *ha = vha->hw;
8482e118 98
b3dc9088 99 if (!capable(CAP_SYS_ADMIN))
100 return 0;
101
6749ce36 102 if (IS_NOCACHE_VPD_TYPE(ha))
8f979751 103 ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
6749ce36 104 ha->nvram_size);
105 return memory_read_from_buffer(buf, count, &off, ha->nvram,
106 ha->nvram_size);
107}
108
109static ssize_t
110qla2x00_sysfs_write_nvram(struct kobject *kobj,
111 struct bin_attribute *bin_attr,
112 char *buf, loff_t off, size_t count)
8482e118 113{
7b867cf7 114 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
8482e118 115 struct device, kobj)));
7b867cf7 116 struct qla_hw_data *ha = vha->hw;
8482e118 117 uint16_t cnt;
8482e118 118
119 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
120 !ha->isp_ops->write_nvram)
121 return 0;
122
123 /* Checksum NVRAM. */
e428924c 124 if (IS_FWI2_CAPABLE(ha)) {
125 uint32_t *iter;
126 uint32_t chksum;
127
128 iter = (uint32_t *)buf;
129 chksum = 0;
130 for (cnt = 0; cnt < ((count >> 2) - 1); cnt++)
131 chksum += le32_to_cpu(*iter++);
132 chksum = ~chksum + 1;
133 *iter = cpu_to_le32(chksum);
134 } else {
135 uint8_t *iter;
136 uint8_t chksum;
137
138 iter = (uint8_t *)buf;
139 chksum = 0;
140 for (cnt = 0; cnt < count - 1; cnt++)
141 chksum += *iter++;
142 chksum = ~chksum + 1;
143 *iter = chksum;
144 }
8482e118 145
146 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
147 qla_printk(KERN_WARNING, ha,
148 "HBA not online, failing NVRAM update.\n");
149 return -EAGAIN;
150 }
151
8482e118 152 /* Write NVRAM. */
153 ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
154 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
281afe19 155 count);
8482e118 156
2533cf67 157 /* NVRAM settings take effect immediately. */
7b867cf7 158 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
159 qla2xxx_wake_dpc(vha);
160 qla2x00_wait_for_chip_reset(vha);
26b8d348 161
162 return (count);
163}
164
165static struct bin_attribute sysfs_nvram_attr = {
166 .attr = {
167 .name = "nvram",
168 .mode = S_IRUSR | S_IWUSR,
8482e118 169 },
1b3f6365 170 .size = 512,
171 .read = qla2x00_sysfs_read_nvram,
172 .write = qla2x00_sysfs_write_nvram,
173};
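/*
 * NVRAM updates must arrive as a single full-image write (offset 0,
 * ha->nvram_size bytes) from a CAP_SYS_ADMIN caller.  The write handler
 * recomputes the checksum (32-bit little-endian words on FWI2-capable
 * ISPs, bytes otherwise), burns the image, reads it back and then
 * schedules an ISP abort so the new settings take effect immediately.
 */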
174
854165f4 175static ssize_t
176qla2x00_sysfs_read_optrom(struct kobject *kobj,
177 struct bin_attribute *bin_attr,
178 char *buf, loff_t off, size_t count)
854165f4 179{
7b867cf7 180 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
854165f4 181 struct device, kobj)));
7b867cf7 182 struct qla_hw_data *ha = vha->hw;
183
184 if (ha->optrom_state != QLA_SREADING)
185 return 0;
854165f4 186
187 return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
188 ha->optrom_region_size);
189}
190
191static ssize_t
192qla2x00_sysfs_write_optrom(struct kobject *kobj,
193 struct bin_attribute *bin_attr,
194 char *buf, loff_t off, size_t count)
854165f4 195{
7b867cf7 196 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
854165f4 197 struct device, kobj)));
7b867cf7 198 struct qla_hw_data *ha = vha->hw;
199
200 if (ha->optrom_state != QLA_SWRITING)
201 return -EINVAL;
b7cc176c 202 if (off > ha->optrom_region_size)
854165f4 203 return -ERANGE;
204 if (off + count > ha->optrom_region_size)
205 count = ha->optrom_region_size - off;
206
207 memcpy(&ha->optrom_buffer[off], buf, count);
208
209 return count;
210}
211
212static struct bin_attribute sysfs_optrom_attr = {
213 .attr = {
214 .name = "optrom",
215 .mode = S_IRUSR | S_IWUSR,
854165f4 216 },
c3a2f0df 217 .size = 0,
218 .read = qla2x00_sysfs_read_optrom,
219 .write = qla2x00_sysfs_write_optrom,
220};
221
222static ssize_t
223qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
224 struct bin_attribute *bin_attr,
225 char *buf, loff_t off, size_t count)
854165f4 226{
7b867cf7 227 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
854165f4 228 struct device, kobj)));
229 struct qla_hw_data *ha = vha->hw;
230
231 uint32_t start = 0;
232 uint32_t size = ha->optrom_size;
233 int val, valid;
234
235 if (off)
236 return 0;
237
238 if (unlikely(pci_channel_offline(ha->pdev)))
239 return 0;
240
241 if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
242 return -EINVAL;
243 if (start > ha->optrom_size)
244 return -EINVAL;
245
246 switch (val) {
247 case 0:
248 if (ha->optrom_state != QLA_SREADING &&
249 ha->optrom_state != QLA_SWRITING)
250 break;
251
252 ha->optrom_state = QLA_SWAITING;
253
254 DEBUG2(qla_printk(KERN_INFO, ha,
255 "Freeing flash region allocation -- 0x%x bytes.\n",
256 ha->optrom_region_size));
257
258 vfree(ha->optrom_buffer);
259 ha->optrom_buffer = NULL;
260 break;
261 case 1:
262 if (ha->optrom_state != QLA_SWAITING)
263 break;
264
265 ha->optrom_region_start = start;
266 ha->optrom_region_size = start + size > ha->optrom_size ?
267 ha->optrom_size - start : size;
268
854165f4 269 ha->optrom_state = QLA_SREADING;
b7cc176c 270 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
271 if (ha->optrom_buffer == NULL) {
272 qla_printk(KERN_WARNING, ha,
273 "Unable to allocate memory for optrom retrieval "
b7cc176c 274 "(%x).\n", ha->optrom_region_size);
275
276 ha->optrom_state = QLA_SWAITING;
277 return count;
278 }
279
280 DEBUG2(qla_printk(KERN_INFO, ha,
281 "Reading flash region -- 0x%x/0x%x.\n",
282 ha->optrom_region_start, ha->optrom_region_size));
283
284 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
7b867cf7 285 ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
b7cc176c 286 ha->optrom_region_start, ha->optrom_region_size);
287 break;
288 case 2:
289 if (ha->optrom_state != QLA_SWAITING)
290 break;
291
292 /*
293 * We need to be more restrictive on which FLASH regions are
294 * allowed to be updated via user-space. Regions accessible
295 * via this method include:
296 *
297 * ISP21xx/ISP22xx/ISP23xx type boards:
298 *
299 * 0x000000 -> 0x020000 -- Boot code.
300 *
301 * ISP2322/ISP24xx type boards:
302 *
303 * 0x000000 -> 0x07ffff -- Boot code.
304 * 0x080000 -> 0x0fffff -- Firmware.
305 *
306 * ISP25xx type boards:
307 *
308 * 0x000000 -> 0x07ffff -- Boot code.
309 * 0x080000 -> 0x0fffff -- Firmware.
310 * 0x120000 -> 0x12ffff -- VPD and HBA parameters.
311 */
312 valid = 0;
313 if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
314 valid = 1;
315 else if (start == (ha->flt_region_boot * 4) ||
316 start == (ha->flt_region_fw * 4))
b7cc176c 317 valid = 1;
6431c5dc 318 else if (IS_QLA25XX(ha) || IS_QLA81XX(ha))
319 valid = 1;
320 if (!valid) {
321 qla_printk(KERN_WARNING, ha,
322 "Invalid start region 0x%x/0x%x.\n", start, size);
323 return -EINVAL;
324 }
325
326 ha->optrom_region_start = start;
327 ha->optrom_region_size = start + size > ha->optrom_size ?
328 ha->optrom_size - start : size;
329
854165f4 330 ha->optrom_state = QLA_SWRITING;
b7cc176c 331 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
332 if (ha->optrom_buffer == NULL) {
333 qla_printk(KERN_WARNING, ha,
334 "Unable to allocate memory for optrom update "
b7cc176c 335 "(%x).\n", ha->optrom_region_size);
336
337 ha->optrom_state = QLA_SWAITING;
338 return count;
339 }
340
341 DEBUG2(qla_printk(KERN_INFO, ha,
342 "Staging flash region write -- 0x%x/0x%x.\n",
343 ha->optrom_region_start, ha->optrom_region_size));
344
345 memset(ha->optrom_buffer, 0, ha->optrom_region_size);
346 break;
347 case 3:
348 if (ha->optrom_state != QLA_SWRITING)
349 break;
350
351 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
352 qla_printk(KERN_WARNING, ha,
353 "HBA not online, failing flash update.\n");
354 return -EAGAIN;
355 }
356
357 DEBUG2(qla_printk(KERN_INFO, ha,
358 "Writing flash region -- 0x%x/0x%x.\n",
359 ha->optrom_region_start, ha->optrom_region_size));
360
7b867cf7 361 ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
b7cc176c 362 ha->optrom_region_start, ha->optrom_region_size);
854165f4 363 break;
364 default:
365 count = -EINVAL;
366 }
367 return count;
368}
369
370static struct bin_attribute sysfs_optrom_ctl_attr = {
371 .attr = {
372 .name = "optrom_ctl",
373 .mode = S_IWUSR,
374 },
375 .size = 0,
376 .write = qla2x00_sysfs_write_optrom_ctl,
377};
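/*
 * 'optrom_ctl' takes a "<cmd>:<start>:<size>" string (start and size in
 * hex): 0 frees the staging buffer, 1 reads the given flash region into
 * it (the data is then fetched through the 'optrom' attribute), 2 stages
 * a write of the region (data is copied in through 'optrom'), and 3
 * burns the staged data to flash.  Staged writes are validated against
 * the allowed regions described in the comment inside case 2 above.
 */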
378
6f641790 379static ssize_t
380qla2x00_sysfs_read_vpd(struct kobject *kobj,
381 struct bin_attribute *bin_attr,
382 char *buf, loff_t off, size_t count)
6f641790 383{
7b867cf7 384 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
6f641790 385 struct device, kobj)));
7b867cf7 386 struct qla_hw_data *ha = vha->hw;
6f641790 387
388 if (unlikely(pci_channel_offline(ha->pdev)))
389 return 0;
390
b3dc9088 391 if (!capable(CAP_SYS_ADMIN))
392 return 0;
393
394 if (IS_NOCACHE_VPD_TYPE(ha))
395 ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
396 ha->vpd_size);
b3dc9088 397 return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
398}
399
400static ssize_t
401qla2x00_sysfs_write_vpd(struct kobject *kobj,
402 struct bin_attribute *bin_attr,
403 char *buf, loff_t off, size_t count)
6f641790 404{
7b867cf7 405 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
6f641790 406 struct device, kobj)));
7b867cf7 407 struct qla_hw_data *ha = vha->hw;
d0c3eefa 408 uint8_t *tmp_data;
6f641790 409
410 if (unlikely(pci_channel_offline(ha->pdev)))
411 return 0;
412
413 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
414 !ha->isp_ops->write_nvram)
415 return 0;
416
417 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
418 qla_printk(KERN_WARNING, ha,
419 "HBA not online, failing VPD update.\n");
420 return -EAGAIN;
421 }
422
6f641790 423 /* Write NVRAM. */
424 ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
425 ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
6f641790 426
427 /* Update flash version information for 4Gb & above. */
428 if (!IS_FWI2_CAPABLE(ha))
429 goto done;
430
431 tmp_data = vmalloc(256);
432 if (!tmp_data) {
433 qla_printk(KERN_WARNING, ha,
434 "Unable to allocate memory for VPD information update.\n");
435 goto done;
436 }
437 ha->isp_ops->get_flash_version(vha, tmp_data);
438 vfree(tmp_data);
439done:
440 return count;
441}
442
443static struct bin_attribute sysfs_vpd_attr = {
444 .attr = {
445 .name = "vpd",
446 .mode = S_IRUSR | S_IWUSR,
447 },
448 .size = 0,
449 .read = qla2x00_sysfs_read_vpd,
450 .write = qla2x00_sysfs_write_vpd,
451};
452
88729e53 453static ssize_t
454qla2x00_sysfs_read_sfp(struct kobject *kobj,
455 struct bin_attribute *bin_attr,
456 char *buf, loff_t off, size_t count)
88729e53 457{
7b867cf7 458 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
88729e53 459 struct device, kobj)));
7b867cf7 460 struct qla_hw_data *ha = vha->hw;
461 uint16_t iter, addr, offset;
462 int rval;
463
464 if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)
465 return 0;
466
467 if (ha->sfp_data)
468 goto do_read;
469
470 ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
471 &ha->sfp_data_dma);
472 if (!ha->sfp_data) {
473 qla_printk(KERN_WARNING, ha,
474 "Unable to allocate memory for SFP read-data.\n");
475 return 0;
476 }
477
478do_read:
479 memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
480 addr = 0xa0;
481 for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
482 iter++, offset += SFP_BLOCK_SIZE) {
483 if (iter == 4) {
484 /* Skip to next device address. */
485 addr = 0xa2;
486 offset = 0;
487 }
488
7b867cf7 489 rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, addr, offset,
490 SFP_BLOCK_SIZE);
491 if (rval != QLA_SUCCESS) {
492 qla_printk(KERN_WARNING, ha,
493 "Unable to read SFP data (%x/%x/%x).\n", rval,
494 addr, offset);
495 count = 0;
496 break;
497 }
498 memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE);
499 buf += SFP_BLOCK_SIZE;
500 }
501
502 return count;
503}
504
505static struct bin_attribute sysfs_sfp_attr = {
506 .attr = {
507 .name = "sfp",
508 .mode = S_IRUSR | S_IWUSR,
509 },
510 .size = SFP_DEV_SIZE * 2,
511 .read = qla2x00_sysfs_read_sfp,
512};
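/*
 * The SFP data above is gathered in SFP_BLOCK_SIZE chunks via
 * qla2x00_read_sfp(): the first four blocks come from the transceiver's
 * 0xa0 device address and the remainder from 0xa2, which is why the
 * attribute is sized at SFP_DEV_SIZE * 2.
 */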
513
514static ssize_t
515qla2x00_sysfs_write_reset(struct kobject *kobj,
516 struct bin_attribute *bin_attr,
517 char *buf, loff_t off, size_t count)
518{
519 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
520 struct device, kobj)));
521 struct qla_hw_data *ha = vha->hw;
522 int type;
523
524 if (off != 0)
525 return 0;
526
527 type = simple_strtol(buf, NULL, 10);
528 switch (type) {
529 case 0x2025c:
530 qla_printk(KERN_INFO, ha,
531 "Issuing ISP reset on (%ld).\n", vha->host_no);
532
533 scsi_block_requests(vha->host);
534 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
535 qla2xxx_wake_dpc(vha);
536 qla2x00_wait_for_chip_reset(vha);
537 scsi_unblock_requests(vha->host);
538 break;
539 case 0x2025d:
540 if (!IS_QLA81XX(ha))
541 break;
542
543 qla_printk(KERN_INFO, ha,
544 "Issuing MPI reset on (%ld).\n", vha->host_no);
545
546 /* Make sure FC side is not in reset */
547 qla2x00_wait_for_hba_online(vha);
548
549 /* Issue MPI reset */
550 scsi_block_requests(vha->host);
551 if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
552 qla_printk(KERN_WARNING, ha,
553 "MPI reset failed on (%ld).\n", vha->host_no);
554 scsi_unblock_requests(vha->host);
555 break;
556 }
557 return count;
558}
559
560static struct bin_attribute sysfs_reset_attr = {
561 .attr = {
562 .name = "reset",
563 .mode = S_IWUSR,
564 },
565 .size = 0,
566 .write = qla2x00_sysfs_write_reset,
567};
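/*
 * The 'reset' attribute recognizes two magic values: 0x2025c requests a
 * full ISP reset (SCSI requests are blocked until the chip reset
 * completes) and 0x2025d requests an MPI firmware restart, which is only
 * honoured on ISP81xx hardware.
 */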
568
569static ssize_t
570qla2x00_sysfs_write_edc(struct kobject *kobj,
571 struct bin_attribute *bin_attr,
572 char *buf, loff_t off, size_t count)
573{
574 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
575 struct device, kobj)));
576 struct qla_hw_data *ha = vha->hw;
577 uint16_t dev, adr, opt, len;
578 int rval;
579
580 ha->edc_data_len = 0;
581
582 if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
583 return 0;
584
585 if (!ha->edc_data) {
586 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
587 &ha->edc_data_dma);
588 if (!ha->edc_data) {
589 DEBUG2(qla_printk(KERN_INFO, ha,
590 "Unable to allocate memory for EDC write.\n"));
591 return 0;
592 }
593 }
594
595 dev = le16_to_cpup((void *)&buf[0]);
596 adr = le16_to_cpup((void *)&buf[2]);
597 opt = le16_to_cpup((void *)&buf[4]);
598 len = le16_to_cpup((void *)&buf[6]);
599
600 if (!(opt & BIT_0))
601 if (len == 0 || len > DMA_POOL_SIZE || len > count - 8)
602 return -EINVAL;
603
604 memcpy(ha->edc_data, &buf[8], len);
605
606 rval = qla2x00_write_edc(vha, dev, adr, ha->edc_data_dma,
607 ha->edc_data, len, opt);
608 if (rval != QLA_SUCCESS) {
609 DEBUG2(qla_printk(KERN_INFO, ha,
610 "Unable to write EDC (%x) %02x:%02x:%04x:%02x:%02x.\n",
611 rval, dev, adr, opt, len, *buf));
612 return 0;
613 }
614
615 return count;
616}
617
618static struct bin_attribute sysfs_edc_attr = {
619 .attr = {
620 .name = "edc",
621 .mode = S_IWUSR,
622 },
623 .size = 0,
624 .write = qla2x00_sysfs_write_edc,
625};
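/*
 * EDC writes are framed as four little-endian 16-bit header fields
 * (device, address, options, length) followed by 'length' bytes of
 * payload at offset 8.  The 'edc_status' attribute below accepts the
 * same header to start a read, whose result is then returned when the
 * attribute is read back.
 */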
626
627static ssize_t
628qla2x00_sysfs_write_edc_status(struct kobject *kobj,
629 struct bin_attribute *bin_attr,
630 char *buf, loff_t off, size_t count)
631{
632 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
633 struct device, kobj)));
634 struct qla_hw_data *ha = vha->hw;
635 uint16_t dev, adr, opt, len;
636 int rval;
637
638 ha->edc_data_len = 0;
639
640 if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
641 return 0;
642
643 if (!ha->edc_data) {
644 ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
645 &ha->edc_data_dma);
646 if (!ha->edc_data) {
647 DEBUG2(qla_printk(KERN_INFO, ha,
648 "Unable to allocate memory for EDC status.\n"));
649 return 0;
650 }
651 }
652
653 dev = le16_to_cpup((void *)&buf[0]);
654 adr = le16_to_cpup((void *)&buf[2]);
655 opt = le16_to_cpup((void *)&buf[4]);
656 len = le16_to_cpup((void *)&buf[6]);
657
658 if (!(opt & BIT_0))
659 if (len == 0 || len > DMA_POOL_SIZE)
660 return -EINVAL;
661
662 memset(ha->edc_data, 0, len);
663 rval = qla2x00_read_edc(vha, dev, adr, ha->edc_data_dma,
664 ha->edc_data, len, opt);
665 if (rval != QLA_SUCCESS) {
666 DEBUG2(qla_printk(KERN_INFO, ha,
667 "Unable to write EDC status (%x) %02x:%02x:%04x:%02x.\n",
668 rval, dev, adr, opt, len));
669 return 0;
670 }
671
672 ha->edc_data_len = len;
673
674 return count;
675}
676
677static ssize_t
678qla2x00_sysfs_read_edc_status(struct kobject *kobj,
679 struct bin_attribute *bin_attr,
680 char *buf, loff_t off, size_t count)
681{
682 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
683 struct device, kobj)));
684 struct qla_hw_data *ha = vha->hw;
685
686 if (!capable(CAP_SYS_ADMIN) || off != 0 || count == 0)
687 return 0;
688
689 if (!ha->edc_data || ha->edc_data_len == 0 || ha->edc_data_len > count)
690 return -EINVAL;
691
692 memcpy(buf, ha->edc_data, ha->edc_data_len);
693
694 return ha->edc_data_len;
695}
696
697static struct bin_attribute sysfs_edc_status_attr = {
698 .attr = {
699 .name = "edc_status",
700 .mode = S_IRUSR | S_IWUSR,
701 },
702 .size = 0,
703 .write = qla2x00_sysfs_write_edc_status,
704 .read = qla2x00_sysfs_read_edc_status,
705};
706
707static ssize_t
708qla2x00_sysfs_read_xgmac_stats(struct kobject *kobj,
709 struct bin_attribute *bin_attr,
710 char *buf, loff_t off, size_t count)
711{
712 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
713 struct device, kobj)));
714 struct qla_hw_data *ha = vha->hw;
715 int rval;
716 uint16_t actual_size;
717
718 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
719 return 0;
720
721 if (ha->xgmac_data)
722 goto do_read;
723
724 ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
725 &ha->xgmac_data_dma, GFP_KERNEL);
726 if (!ha->xgmac_data) {
727 qla_printk(KERN_WARNING, ha,
728 "Unable to allocate memory for XGMAC read-data.\n");
729 return 0;
730 }
731
732do_read:
733 actual_size = 0;
734 memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
735
736 rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
737 XGMAC_DATA_SIZE, &actual_size);
738 if (rval != QLA_SUCCESS) {
739 qla_printk(KERN_WARNING, ha,
740 "Unable to read XGMAC data (%x).\n", rval);
741 count = 0;
742 }
743
744 count = actual_size > count ? count: actual_size;
745 memcpy(buf, ha->xgmac_data, count);
746
747 return count;
748}
749
750static struct bin_attribute sysfs_xgmac_stats_attr = {
751 .attr = {
752 .name = "xgmac_stats",
753 .mode = S_IRUSR,
754 },
755 .size = 0,
756 .read = qla2x00_sysfs_read_xgmac_stats,
757};
758
759static ssize_t
760qla2x00_sysfs_read_dcbx_tlv(struct kobject *kobj,
761 struct bin_attribute *bin_attr,
762 char *buf, loff_t off, size_t count)
763{
764 struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
765 struct device, kobj)));
766 struct qla_hw_data *ha = vha->hw;
767 int rval;
768 uint16_t actual_size;
769
770 if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
771 return 0;
772
773 if (ha->dcbx_tlv)
774 goto do_read;
775
776 ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
777 &ha->dcbx_tlv_dma, GFP_KERNEL);
778 if (!ha->dcbx_tlv) {
779 qla_printk(KERN_WARNING, ha,
780 "Unable to allocate memory for DCBX TLV read-data.\n");
781 return 0;
782 }
783
784do_read:
785 actual_size = 0;
786 memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
787
788 rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
789 DCBX_TLV_DATA_SIZE);
790 if (rval != QLA_SUCCESS) {
791 qla_printk(KERN_WARNING, ha,
792 "Unable to read DCBX TLV data (%x).\n", rval);
793 count = 0;
794 }
795
796 memcpy(buf, ha->dcbx_tlv, count);
797
798 return count;
799}
800
801static struct bin_attribute sysfs_dcbx_tlv_attr = {
802 .attr = {
803 .name = "dcbx_tlv",
804 .mode = S_IRUSR,
805 },
806 .size = 0,
807 .read = qla2x00_sysfs_read_dcbx_tlv,
808};
809
810static struct sysfs_entry {
811 char *name;
812 struct bin_attribute *attr;
813 int is4GBp_only;
814} bin_file_entries[] = {
815 { "fw_dump", &sysfs_fw_dump_attr, },
816 { "nvram", &sysfs_nvram_attr, },
817 { "optrom", &sysfs_optrom_attr, },
818 { "optrom_ctl", &sysfs_optrom_ctl_attr, },
819 { "vpd", &sysfs_vpd_attr, 1 },
820 { "sfp", &sysfs_sfp_attr, 1 },
6e181be5 821 { "reset", &sysfs_reset_attr, },
822 { "edc", &sysfs_edc_attr, 2 },
823 { "edc_status", &sysfs_edc_status_attr, 2 },
ce0423f4 824 { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
11bbc1d8 825 { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
46ddab7b 826 { NULL },
827};
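/*
 * The is4GBp_only field gates attribute creation in
 * qla2x00_alloc_sysfs_attr()/qla2x00_free_sysfs_attr() below:
 * 0 (unset) means always created, 1 means FWI2-capable (4Gb and newer)
 * HBAs only, 2 means ISP25xx only and 3 means ISP81xx only.
 */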
828
8482e118 829void
7b867cf7 830qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
8482e118 831{
7b867cf7 832 struct Scsi_Host *host = vha->host;
833 struct sysfs_entry *iter;
834 int ret;
8482e118 835
f1663ad5 836 for (iter = bin_file_entries; iter->name; iter++) {
7b867cf7 837 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
f1663ad5 838 continue;
839 if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
840 continue;
841 if (iter->is4GBp_only == 3 && !IS_QLA81XX(vha->hw))
842 continue;
843
844 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
845 iter->attr);
846 if (ret)
7b867cf7 847 qla_printk(KERN_INFO, vha->hw,
848 "Unable to create sysfs %s binary attribute "
849 "(%d).\n", iter->name, ret);
7914d004 850 }
851}
852
853void
7b867cf7 854qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
8482e118 855{
7b867cf7 856 struct Scsi_Host *host = vha->host;
f1663ad5 857 struct sysfs_entry *iter;
7b867cf7 858 struct qla_hw_data *ha = vha->hw;
859
860 for (iter = bin_file_entries; iter->name; iter++) {
e428924c 861 if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
f1663ad5 862 continue;
863 if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
864 continue;
865 if (iter->is4GBp_only == 3 && !IS_QLA81XX(ha))
866 continue;
8482e118 867
88729e53 868 sysfs_remove_bin_file(&host->shost_gendev.kobj,
f1663ad5 869 iter->attr);
7914d004 870 }
871
872 if (ha->beacon_blink_led == 1)
7b867cf7 873 ha->isp_ops->beacon_off(vha);
874}
875
876/* Scsi_Host attributes. */
877
878static ssize_t
879qla2x00_drvr_version_show(struct device *dev,
880 struct device_attribute *attr, char *buf)
881{
882 return snprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
883}
884
885static ssize_t
886qla2x00_fw_version_show(struct device *dev,
887 struct device_attribute *attr, char *buf)
afb046e2 888{
889 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
890 struct qla_hw_data *ha = vha->hw;
891 char fw_str[128];
892
893 return snprintf(buf, PAGE_SIZE, "%s\n",
7b867cf7 894 ha->isp_ops->fw_version_str(vha, fw_str));
895}
896
897static ssize_t
898qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
899 char *buf)
afb046e2 900{
901 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
902 struct qla_hw_data *ha = vha->hw;
903 uint32_t sn;
904
1ee27146 905 if (IS_FWI2_CAPABLE(ha)) {
7b867cf7 906 qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE);
907 return snprintf(buf, PAGE_SIZE, "%s\n", buf);
908 }
8b7afc2a 909
910 sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
911 return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
912 sn % 100000);
913}
914
915static ssize_t
916qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
917 char *buf)
afb046e2 918{
919 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
920 return snprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
921}
922
923static ssize_t
924qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
925 char *buf)
afb046e2 926{
927 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
928 struct qla_hw_data *ha = vha->hw;
929 return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
930 ha->product_id[0], ha->product_id[1], ha->product_id[2],
931 ha->product_id[3]);
932}
933
934static ssize_t
935qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
936 char *buf)
afb046e2 937{
938 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
939 return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
940}
941
942static ssize_t
943qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
944 char *buf)
afb046e2 945{
7b867cf7 946 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
afb046e2 947 return snprintf(buf, PAGE_SIZE, "%s\n",
7b867cf7 948 vha->hw->model_desc ? vha->hw->model_desc : "");
949}
950
951static ssize_t
952qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
953 char *buf)
afb046e2 954{
7b867cf7 955 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
956 char pci_info[30];
957
958 return snprintf(buf, PAGE_SIZE, "%s\n",
7b867cf7 959 vha->hw->isp_ops->pci_info_str(vha, pci_info));
960}
961
962static ssize_t
963qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
964 char *buf)
afb046e2 965{
966 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
967 struct qla_hw_data *ha = vha->hw;
968 int len = 0;
969
970 if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
971 atomic_read(&vha->loop_state) == LOOP_DEAD)
afb046e2 972 len = snprintf(buf, PAGE_SIZE, "Link Down\n");
973 else if (atomic_read(&vha->loop_state) != LOOP_READY ||
974 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
975 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
976 len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");
977 else {
978 len = snprintf(buf, PAGE_SIZE, "Link Up - ");
979
980 switch (ha->current_topology) {
981 case ISP_CFG_NL:
982 len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
983 break;
984 case ISP_CFG_FL:
985 len += snprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
986 break;
987 case ISP_CFG_N:
988 len += snprintf(buf + len, PAGE_SIZE-len,
989 "N_Port to N_Port\n");
990 break;
991 case ISP_CFG_F:
992 len += snprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
993 break;
994 default:
995 len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
996 break;
997 }
998 }
999 return len;
1000}
1001
4fdfefe5 1002static ssize_t
1003qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
1004 char *buf)
4fdfefe5 1005{
7b867cf7 1006 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1007 int len = 0;
1008
7b867cf7 1009 switch (vha->hw->zio_mode) {
1010 case QLA_ZIO_MODE_6:
1011 len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
1012 break;
1013 case QLA_ZIO_DISABLED:
1014 len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1015 break;
1016 }
1017 return len;
1018}
1019
1020static ssize_t
ee959b00
TJ
1021qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
1022 const char *buf, size_t count)
4fdfefe5 1023{
1024 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1025 struct qla_hw_data *ha = vha->hw;
1026 int val = 0;
1027 uint16_t zio_mode;
1028
1029 if (!IS_ZIO_SUPPORTED(ha))
1030 return -ENOTSUPP;
1031
1032 if (sscanf(buf, "%d", &val) != 1)
1033 return -EINVAL;
1034
4a59f71d 1035 if (val)
4fdfefe5 1036 zio_mode = QLA_ZIO_MODE_6;
4a59f71d 1037 else
4fdfefe5 1038 zio_mode = QLA_ZIO_DISABLED;
1039
1040 /* Update per-hba values and queue a reset. */
1041 if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
1042 ha->zio_mode = zio_mode;
7b867cf7 1043 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1044 }
1045 return strlen(buf);
1046}
1047
1048static ssize_t
1049qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
1050 char *buf)
4fdfefe5 1051{
7b867cf7 1052 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
4fdfefe5 1053
7b867cf7 1054 return snprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
1055}
1056
1057static ssize_t
1058qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
1059 const char *buf, size_t count)
4fdfefe5 1060{
7b867cf7 1061 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1062 int val = 0;
1063 uint16_t zio_timer;
1064
1065 if (sscanf(buf, "%d", &val) != 1)
1066 return -EINVAL;
1067 if (val > 25500 || val < 100)
1068 return -ERANGE;
1069
1070 zio_timer = (uint16_t)(val / 100);
7b867cf7 1071 vha->hw->zio_timer = zio_timer;
1072
1073 return strlen(buf);
1074}
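/*
 * Writing a non-zero value to 'zio' selects ZIO mode 6 and zero disables
 * it; a change queues an ISP abort so the firmware picks up the new
 * mode.  'zio_timer' is kept internally in 100 microsecond units, so
 * writes are accepted in the 100-25500 microsecond range and rounded
 * down to a multiple of 100.
 */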
1075
f6df144c 1076static ssize_t
1077qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
1078 char *buf)
f6df144c 1079{
7b867cf7 1080 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1081 int len = 0;
1082
7b867cf7 1083 if (vha->hw->beacon_blink_led)
1084 len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
1085 else
1086 len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1087 return len;
1088}
1089
1090static ssize_t
1091qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
1092 const char *buf, size_t count)
f6df144c 1093{
1094 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1095 struct qla_hw_data *ha = vha->hw;
1096 int val = 0;
1097 int rval;
1098
1099 if (IS_QLA2100(ha) || IS_QLA2200(ha))
1100 return -EPERM;
1101
7b867cf7 1102 if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
1103 qla_printk(KERN_WARNING, ha,
1104 "Abort ISP active -- ignoring beacon request.\n");
1105 return -EBUSY;
1106 }
1107
1108 if (sscanf(buf, "%d", &val) != 1)
1109 return -EINVAL;
1110
1111 if (val)
7b867cf7 1112 rval = ha->isp_ops->beacon_on(vha);
f6df144c 1113 else
7b867cf7 1114 rval = ha->isp_ops->beacon_off(vha);
1115
1116 if (rval != QLA_SUCCESS)
1117 count = 0;
1118
1119 return count;
1120}
1121
30c47662 1122static ssize_t
1123qla2x00_optrom_bios_version_show(struct device *dev,
1124 struct device_attribute *attr, char *buf)
30c47662 1125{
1126 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1127 struct qla_hw_data *ha = vha->hw;
1128 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
1129 ha->bios_revision[0]);
1130}
1131
1132static ssize_t
1133qla2x00_optrom_efi_version_show(struct device *dev,
1134 struct device_attribute *attr, char *buf)
30c47662 1135{
1136 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1137 struct qla_hw_data *ha = vha->hw;
1138 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
1139 ha->efi_revision[0]);
1140}
1141
1142static ssize_t
1143qla2x00_optrom_fcode_version_show(struct device *dev,
1144 struct device_attribute *attr, char *buf)
30c47662 1145{
1146 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1147 struct qla_hw_data *ha = vha->hw;
1148 return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
1149 ha->fcode_revision[0]);
1150}
1151
1152static ssize_t
1153qla2x00_optrom_fw_version_show(struct device *dev,
1154 struct device_attribute *attr, char *buf)
30c47662 1155{
1156 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1157 struct qla_hw_data *ha = vha->hw;
1158 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
1159 ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
1160 ha->fw_revision[3]);
1161}
1162
1163static ssize_t
1164qla2x00_total_isp_aborts_show(struct device *dev,
1165 struct device_attribute *attr, char *buf)
1166{
1167 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1168 struct qla_hw_data *ha = vha->hw;
1169 return snprintf(buf, PAGE_SIZE, "%d\n",
1170 ha->qla_stats.total_isp_aborts);
1171}
1172
1173static ssize_t
1174qla24xx_84xx_fw_version_show(struct device *dev,
1175 struct device_attribute *attr, char *buf)
1176{
1177 int rval = QLA_SUCCESS;
1178 uint16_t status[2] = {0, 0};
1179 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1180 struct qla_hw_data *ha = vha->hw;
1181
1182 if (IS_QLA84XX(ha) && ha->cs84xx) {
1183 if (ha->cs84xx->op_fw_version == 0) {
1184 rval = qla84xx_verify_chip(vha, status);
1185 }
1186
1187 if ((rval == QLA_SUCCESS) && (status[0] == 0))
1188 return snprintf(buf, PAGE_SIZE, "%u\n",
1189 (uint32_t)ha->cs84xx->op_fw_version);
1190 }
1191
1192 return snprintf(buf, PAGE_SIZE, "\n");
1193}
1194
1195static ssize_t
1196qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
1197 char *buf)
1198{
1199 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1200 struct qla_hw_data *ha = vha->hw;
1201
1202 if (!IS_QLA81XX(ha))
1203 return snprintf(buf, PAGE_SIZE, "\n");
1204
55a96158 1205 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
3a03eb79 1206 ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
1207 ha->mpi_capabilities);
1208}
1209
1210static ssize_t
1211qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
1212 char *buf)
1213{
1214 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1215 struct qla_hw_data *ha = vha->hw;
1216
1217 if (!IS_QLA81XX(ha))
1218 return snprintf(buf, PAGE_SIZE, "\n");
1219
1220 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1221 ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
1222}
1223
1224static ssize_t
1225qla2x00_flash_block_size_show(struct device *dev,
1226 struct device_attribute *attr, char *buf)
1227{
1228 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1229 struct qla_hw_data *ha = vha->hw;
1230
1231 return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
1232}
1233
1234static ssize_t
1235qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
1236 char *buf)
1237{
1238 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1239
1240 if (!IS_QLA81XX(vha->hw))
1241 return snprintf(buf, PAGE_SIZE, "\n");
1242
1243 return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
1244}
1245
1246static ssize_t
1247qla2x00_vn_port_mac_address_show(struct device *dev,
1248 struct device_attribute *attr, char *buf)
1249{
1250 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1251
1252 if (!IS_QLA81XX(vha->hw))
1253 return snprintf(buf, PAGE_SIZE, "\n");
1254
1255 return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
1256 vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4],
1257 vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2],
1258 vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]);
1259}
1260
1261static ssize_t
1262qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
1263 char *buf)
1264{
1265 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1266
1267 return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
1268}
1269
1270static ssize_t
1271qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1272 char *buf)
1273{
1274 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
85880801 1275 int rval = QLA_FUNCTION_FAILED;
1276 uint16_t state[5];
1277
1278 if (!vha->hw->flags.eeh_busy)
1279 rval = qla2x00_get_firmware_state(vha, state);
1280 if (rval != QLA_SUCCESS)
1281 memset(state, -1, sizeof(state));
1282
1283 return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
1284 state[1], state[2], state[3], state[4]);
1285}
1286
1287static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
1288static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
1289static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
1290static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
1291static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
1292static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
1293static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
1294static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
bbd1ae41 1295static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
1296static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
1297static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
1298 qla2x00_zio_timer_store);
1299static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
1300 qla2x00_beacon_store);
1301static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
1302 qla2x00_optrom_bios_version_show, NULL);
1303static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
1304 qla2x00_optrom_efi_version_show, NULL);
1305static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
1306 qla2x00_optrom_fcode_version_show, NULL);
1307static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
1308 NULL);
1309static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
1310 NULL);
1311static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
1312 NULL);
3a03eb79 1313static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
55a96158 1314static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
1315static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
1316 NULL);
1317static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
1318static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
1319 qla2x00_vn_port_mac_address_show, NULL);
7f774025 1320static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
656e8912 1321static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
1322
1323struct device_attribute *qla2x00_host_attrs[] = {
1324 &dev_attr_driver_version,
1325 &dev_attr_fw_version,
1326 &dev_attr_serial_num,
1327 &dev_attr_isp_name,
1328 &dev_attr_isp_id,
1329 &dev_attr_model_name,
1330 &dev_attr_model_desc,
1331 &dev_attr_pci_info,
bbd1ae41 1332 &dev_attr_link_state,
1333 &dev_attr_zio,
1334 &dev_attr_zio_timer,
1335 &dev_attr_beacon,
1336 &dev_attr_optrom_bios_version,
1337 &dev_attr_optrom_efi_version,
1338 &dev_attr_optrom_fcode_version,
1339 &dev_attr_optrom_fw_version,
9a069e19 1340 &dev_attr_84xx_fw_version,
e5f5f6f7 1341 &dev_attr_total_isp_aborts,
3a03eb79 1342 &dev_attr_mpi_version,
55a96158 1343 &dev_attr_phy_version,
fbcbb5d0 1344 &dev_attr_flash_block_size,
1345 &dev_attr_vlan_id,
1346 &dev_attr_vn_port_mac_address,
7f774025 1347 &dev_attr_fabric_param,
656e8912 1348 &dev_attr_fw_state,
1349 NULL,
1350};
1351
1352/* Host attributes. */
1353
1354static void
1355qla2x00_get_host_port_id(struct Scsi_Host *shost)
1356{
7b867cf7 1357 scsi_qla_host_t *vha = shost_priv(shost);
8482e118 1358
1359 fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
1360 vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
1361}
1362
1363static void
1364qla2x00_get_host_speed(struct Scsi_Host *shost)
1365{
1366 struct qla_hw_data *ha = ((struct scsi_qla_host *)
1367 (shost_priv(shost)))->hw;
2ae2b370 1368 u32 speed = FC_PORTSPEED_UNKNOWN;
1369
1370 switch (ha->link_data_rate) {
d8b45213 1371 case PORT_SPEED_1GB:
2ae2b370 1372 speed = FC_PORTSPEED_1GBIT;
04414013 1373 break;
d8b45213 1374 case PORT_SPEED_2GB:
2ae2b370 1375 speed = FC_PORTSPEED_2GBIT;
04414013 1376 break;
d8b45213 1377 case PORT_SPEED_4GB:
2ae2b370 1378 speed = FC_PORTSPEED_4GBIT;
04414013 1379 break;
da4541b6 1380 case PORT_SPEED_8GB:
2ae2b370 1381 speed = FC_PORTSPEED_8GBIT;
da4541b6 1382 break;
1383 case PORT_SPEED_10GB:
1384 speed = FC_PORTSPEED_10GBIT;
1385 break;
1386 }
1387 fc_host_speed(shost) = speed;
1388}
1389
1390static void
1391qla2x00_get_host_port_type(struct Scsi_Host *shost)
1392{
7b867cf7 1393 scsi_qla_host_t *vha = shost_priv(shost);
1394 uint32_t port_type = FC_PORTTYPE_UNKNOWN;
1395
7b867cf7 1396 if (vha->vp_idx) {
1397 fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
1398 return;
1399 }
7b867cf7 1400 switch (vha->hw->current_topology) {
1401 case ISP_CFG_NL:
1402 port_type = FC_PORTTYPE_LPORT;
1403 break;
1404 case ISP_CFG_FL:
1405 port_type = FC_PORTTYPE_NLPORT;
1406 break;
1407 case ISP_CFG_N:
1408 port_type = FC_PORTTYPE_PTP;
1409 break;
1410 case ISP_CFG_F:
1411 port_type = FC_PORTTYPE_NPORT;
1412 break;
1413 }
1414 fc_host_port_type(shost) = port_type;
1415}
1416
1417static void
1418qla2x00_get_starget_node_name(struct scsi_target *starget)
1419{
1420 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
7b867cf7 1421 scsi_qla_host_t *vha = shost_priv(host);
bdf79621 1422 fc_port_t *fcport;
f8b02a85 1423 u64 node_name = 0;
8482e118 1424
7b867cf7 1425 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1426 if (fcport->rport &&
1427 starget->id == fcport->rport->scsi_target_id) {
f8b02a85 1428 node_name = wwn_to_u64(fcport->node_name);
1429 break;
1430 }
1431 }
1432
f8b02a85 1433 fc_starget_node_name(starget) = node_name;
1434}
1435
1436static void
1437qla2x00_get_starget_port_name(struct scsi_target *starget)
1438{
1439 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
7b867cf7 1440 scsi_qla_host_t *vha = shost_priv(host);
bdf79621 1441 fc_port_t *fcport;
f8b02a85 1442 u64 port_name = 0;
8482e118 1443
7b867cf7 1444 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1445 if (fcport->rport &&
1446 starget->id == fcport->rport->scsi_target_id) {
f8b02a85 1447 port_name = wwn_to_u64(fcport->port_name);
1448 break;
1449 }
1450 }
1451
f8b02a85 1452 fc_starget_port_name(starget) = port_name;
1453}
1454
1455static void
1456qla2x00_get_starget_port_id(struct scsi_target *starget)
1457{
1458 struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
7b867cf7 1459 scsi_qla_host_t *vha = shost_priv(host);
1460 fc_port_t *fcport;
1461 uint32_t port_id = ~0U;
1462
7b867cf7 1463 list_for_each_entry(fcport, &vha->vp_fcports, list) {
1464 if (fcport->rport &&
1465 starget->id == fcport->rport->scsi_target_id) {
1466 port_id = fcport->d_id.b.domain << 16 |
1467 fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
1468 break;
1469 }
1470 }
8482e118 1471
1472 fc_starget_port_id(starget) = port_id;
1473}
1474
1475static void
1476qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
1477{
8482e118 1478 if (timeout)
85821c90 1479 rport->dev_loss_tmo = timeout;
8482e118 1480 else
85821c90 1481 rport->dev_loss_tmo = 1;
1482}
1483
1484static void
1485qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
1486{
1487 struct Scsi_Host *host = rport_to_shost(rport);
1488 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1489
1490 if (!fcport)
1491 return;
1492
1493 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1494 return;
1495
1496 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
b9b12f73 1497 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1498 return;
1499 }
1500
1501 /*
1502 * Transport has effectively 'deleted' the rport, clear
1503 * all local references.
1504 */
1505 spin_lock_irq(host->host_lock);
1506 fcport->rport = NULL;
1507 *((fc_port_t **)rport->dd_data) = NULL;
1508 spin_unlock_irq(host->host_lock);
1509}
1510
1511static void
1512qla2x00_terminate_rport_io(struct fc_rport *rport)
1513{
1514 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1515
1516 if (!fcport)
1517 return;
1518
1519 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1520 return;
1521
1522 if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1523 qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1524 return;
1525 }
1526 /*
1527 * At this point all fcport's software-states are cleared. Perform any
1528 * final cleanup of firmware resources (PCBs and XCBs).
1529 */
1530 if (fcport->loop_id != FC_NO_LOOP_ID &&
1531 !test_bit(UNLOADING, &fcport->vha->dpc_flags))
1532 fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
1533 fcport->loop_id, fcport->d_id.b.domain,
1534 fcport->d_id.b.area, fcport->d_id.b.al_pa);
1535}
1536
1537static int
1538qla2x00_issue_lip(struct Scsi_Host *shost)
1539{
7b867cf7 1540 scsi_qla_host_t *vha = shost_priv(shost);
91ca7b01 1541
7b867cf7 1542 qla2x00_loop_reset(vha);
1543 return 0;
1544}
1545
1546static struct fc_host_statistics *
1547qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1548{
1549 scsi_qla_host_t *vha = shost_priv(shost);
1550 struct qla_hw_data *ha = vha->hw;
1551 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
392e2f65 1552 int rval;
1553 struct link_statistics *stats;
1554 dma_addr_t stats_dma;
1555 struct fc_host_statistics *pfc_host_stat;
1556
1557 pfc_host_stat = &ha->fc_host_stat;
1558 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
1559
1560 if (test_bit(UNLOADING, &vha->dpc_flags))
1561 goto done;
1562
1563 if (unlikely(pci_channel_offline(ha->pdev)))
1564 goto done;
1565
1566 stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
1567 if (stats == NULL) {
1568 DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
7b867cf7 1569 __func__, base_vha->host_no));
1570 goto done;
1571 }
1572 memset(stats, 0, DMA_POOL_SIZE);
1573
1574 rval = QLA_FUNCTION_FAILED;
e428924c 1575 if (IS_FWI2_CAPABLE(ha)) {
1576 rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
1577 } else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
1578 !test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) &&
1579 !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
1580 !ha->dpc_active) {
1581 /* Must be in a 'READY' state for statistics retrieval. */
1582 rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
1583 stats, stats_dma);
392e2f65 1584 }
1585
1586 if (rval != QLA_SUCCESS)
1587 goto done_free;
1588
1589 pfc_host_stat->link_failure_count = stats->link_fail_cnt;
1590 pfc_host_stat->loss_of_sync_count = stats->loss_sync_cnt;
1591 pfc_host_stat->loss_of_signal_count = stats->loss_sig_cnt;
1592 pfc_host_stat->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
1593 pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt;
1594 pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt;
1595 if (IS_FWI2_CAPABLE(ha)) {
032d8dd7 1596 pfc_host_stat->lip_count = stats->lip_cnt;
1597 pfc_host_stat->tx_frames = stats->tx_frames;
1598 pfc_host_stat->rx_frames = stats->rx_frames;
1599 pfc_host_stat->dumped_frames = stats->dumped_frames;
1600 pfc_host_stat->nos_count = stats->nos_rcvd;
1601 }
1602 pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
1603 pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;
392e2f65 1604
1605done_free:
1606 dma_pool_free(ha->s_dma_pool, stats, stats_dma);
178779a6 1607done:
1608 return pfc_host_stat;
1609}
1610
1611static void
1612qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
1613{
7b867cf7 1614 scsi_qla_host_t *vha = shost_priv(shost);
1620f7c2 1615
7b867cf7 1616 qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost));
1617}
1618
1619static void
1620qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
1621{
7b867cf7 1622 scsi_qla_host_t *vha = shost_priv(shost);
a740a3f0 1623
7b867cf7 1624 set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1625}
1626
1627static void
1628qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
1629{
7b867cf7 1630 scsi_qla_host_t *vha = shost_priv(shost);
1631 u64 node_name;
1632
1633 if (vha->device_flags & SWITCH_FOUND)
1634 node_name = wwn_to_u64(vha->fabric_node_name);
90991c85 1635 else
7b867cf7 1636 node_name = wwn_to_u64(vha->node_name);
1637
1638 fc_host_fabric_name(shost) = node_name;
1639}
1640
1641static void
1642qla2x00_get_host_port_state(struct Scsi_Host *shost)
1643{
1644 scsi_qla_host_t *vha = shost_priv(shost);
1645 struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
7047fcdd 1646
7b867cf7 1647 if (!base_vha->flags.online)
7047fcdd 1648 fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
7b867cf7 1649 else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
1650 fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1651 else
1652 fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
1653}
1654
1655static int
1656qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1657{
1658 int ret = 0;
2afa19a9 1659 uint8_t qos = 0;
1660 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
1661 scsi_qla_host_t *vha = NULL;
73208dfd 1662 struct qla_hw_data *ha = base_vha->hw;
1663 uint16_t options = 0;
1664 int cnt;
59e0b8b0 1665 struct req_que *req = ha->req_q_map[0];
1666
1667 ret = qla24xx_vport_create_req_sanity_check(fc_vport);
1668 if (ret) {
1669 DEBUG15(printk("qla24xx_vport_create_req_sanity_check failed, "
1670 "status %x\n", ret));
1671 return (ret);
1672 }
1673
1674 vha = qla24xx_create_vhost(fc_vport);
1675 if (vha == NULL) {
1676 DEBUG15(printk ("qla24xx_create_vhost failed, vha = %p\n",
1677 vha));
1678 return FC_VPORT_FAILED;
1679 }
1680 if (disable) {
1681 atomic_set(&vha->vp_state, VP_OFFLINE);
1682 fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
1683 } else
1684 atomic_set(&vha->vp_state, VP_FAILED);
1685
1686 /* ready to create vport */
1687 qla_printk(KERN_INFO, vha->hw, "VP entry id %d assigned.\n",
1688 vha->vp_idx);
1689
1690 /* initialized vport states */
1691 atomic_set(&vha->loop_state, LOOP_DOWN);
1692 vha->vp_err_state= VP_ERR_PORTDWN;
1693 vha->vp_prev_err_state= VP_ERR_UNKWN;
1694 /* Check if physical ha port is Up */
1695 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
1696 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
1697 /* Don't retry or attempt login of this virtual port */
1698 DEBUG15(printk ("scsi(%ld): pport loop_state is not UP.\n",
7b867cf7 1699 base_vha->host_no));
1700 atomic_set(&vha->loop_state, LOOP_DEAD);
1701 if (!disable)
1702 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
1703 }
1704
1705 if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
1706 &ha->pdev->dev)) {
1707 DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n",
1708 vha->host_no, vha->vp_idx));
1709 goto vport_create_failed_2;
1710 }
1711
1712 /* initialize attributes */
1713 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
1714 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
1715 fc_host_supported_classes(vha->host) =
7b867cf7 1716 fc_host_supported_classes(base_vha->host);
2c3dfe3f 1717 fc_host_supported_speeds(vha->host) =
7b867cf7 1718 fc_host_supported_speeds(base_vha->host);
1719
1720 qla24xx_vport_disable(fc_vport, disable);
1721
7163ea81 1722 if (ha->flags.cpu_affinity_enabled) {
1723 req = ha->req_q_map[1];
1724 goto vport_queue;
1725 } else if (ql2xmaxqueues == 1 || !ha->npiv_info)
1726 goto vport_queue;
1727 /* Create a request queue in QoS mode for the vport */
1728 for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
1729 if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
1730 && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
59e0b8b0 1731 8) == 0) {
1732 qos = ha->npiv_info[cnt].q_qos;
1733 break;
73208dfd 1734 }
1735 }
1736 if (qos) {
1737 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
1738 qos);
1739 if (!ret)
1740 qla_printk(KERN_WARNING, ha,
1741 "Can't create request queue for vp_idx:%d\n",
1742 vha->vp_idx);
59e0b8b0 1743 else {
2afa19a9 1744 DEBUG2(qla_printk(KERN_INFO, ha,
1745 "Request Que:%d (QoS: %d) created for vp_idx:%d\n",
1746 ret, qos, vha->vp_idx));
1747 req = ha->req_q_map[ret];
1748 }
1749 }
1750
2afa19a9 1751vport_queue:
59e0b8b0 1752 vha->req = req;
2c3dfe3f 1753 return 0;
2afa19a9 1754
1755vport_create_failed_2:
1756 qla24xx_disable_vp(vha);
1757 qla24xx_deallocate_vp_id(vha);
1758 scsi_host_put(vha->host);
1759 return FC_VPORT_FAILED;
1760}
1761
a824ebb3 1762static int
1763qla24xx_vport_delete(struct fc_vport *fc_vport)
1764{
2c3dfe3f 1765 scsi_qla_host_t *vha = fc_vport->dd_data;
7b867cf7 1766 fc_port_t *fcport, *tfcport;
73208dfd
AC
1767 struct qla_hw_data *ha = vha->hw;
1768 uint16_t id = vha->vp_idx;
c9c5ced9
AV
1769
1770 while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
7b867cf7 1771 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
c9c5ced9 1772 msleep(1000);
2c3dfe3f
SJ
1773
1774 qla24xx_disable_vp(vha);
2c3dfe3f 1775
7b867cf7
AC
1776 fc_remove_host(vha->host);
1777
1778 scsi_remove_host(vha->host);
1779
1780 list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
1781 list_del(&fcport->list);
1782 kfree(fcport);
1784 }
1785
1786 qla24xx_deallocate_vp_id(vha);
2c3dfe3f 1787
0d6e61bc
AV
1788 mutex_lock(&ha->vport_lock);
1789 ha->cur_vport_count--;
1790 clear_bit(vha->vp_idx, ha->vp_idx_map);
1791 mutex_unlock(&ha->vport_lock);
1792
2c3dfe3f
SJ
1793 if (vha->timer_active) {
1794 qla2x00_vp_stop_timer(vha);
1795 DEBUG15(printk("scsi(%ld): timer for vport[%d] (%p) "
1796 "has stopped\n",
1797 vha->host_no, vha->vp_idx, vha));
1798 }
1799
7163ea81 1800 if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
2afa19a9 1801 if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
cf5a1631
AC
1802 qla_printk(KERN_WARNING, ha,
1803 "Queue delete failed.\n");
1804 }
1805
2c3dfe3f 1806 scsi_host_put(vha->host);
73208dfd 1807 qla_printk(KERN_INFO, ha, "vport %d deleted\n", id);
2c3dfe3f
SJ
1808 return 0;
1809}
1810
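/*
 * qla24xx_vport_disable() - FC transport vport_disable callback.
 * Maps the transport's enable/disable request onto the driver's
 * qla24xx_enable_vp()/qla24xx_disable_vp() helpers.
 */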
a824ebb3 1811static int
2c3dfe3f
SJ
1812qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
1813{
1814 scsi_qla_host_t *vha = fc_vport->dd_data;
1815
1816 if (disable)
1817 qla24xx_disable_vp(vha);
1818 else
1819 qla24xx_enable_vp(vha);
1820
1821 return 0;
1822}
1823
9a069e19
GM
1824/* BSG support for ELS/CT pass through */
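/*
 * Allocate an srb_t from the hardware's SRB mempool together with a zeroed
 * BSG context of @size bytes. Returns NULL if either allocation fails; on
 * success the caller owns both the SRB and the attached context.
 */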
1825inline srb_t *
1826qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
1827{
1828 srb_t *sp;
1829 struct qla_hw_data *ha = vha->hw;
1830 struct srb_bsg_ctx *ctx;
1831
1832 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
1833 if (!sp)
1834 goto done;
1835 ctx = kzalloc(size, GFP_KERNEL);
1836 if (!ctx) {
1837 mempool_free(sp, ha->srb_mempool);
1838 goto done;
1839 }
1840
1841 memset(sp, 0, sizeof(*sp));
1842 sp->fcport = fcport;
1843 sp->ctx = ctx;
1844done:
1845 return sp;
1846}
1847
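/*
 * Handle an ELS pass-through request from the FC BSG interface. For an
 * rport-directed ELS (FC_BSG_RPT_ELS) the existing fcport is used, logging
 * it into the fabric first if needed; for host ELS without login a
 * temporary fcport is built from the destination port id. The request and
 * reply scatterlists are DMA-mapped, an SRB with a BSG context is
 * allocated, and the command is handed to the firmware via
 * qla2x00_start_sp(); completion is reported through the bsg_job.
 */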
1848static int
1849qla2x00_process_els(struct fc_bsg_job *bsg_job)
1850{
1851 struct fc_rport *rport;
1852 fc_port_t *fcport;
1853 struct Scsi_Host *host;
1854 scsi_qla_host_t *vha;
1855 struct qla_hw_data *ha;
1856 srb_t *sp;
1857 const char *type;
1858 int req_sg_cnt, rsp_sg_cnt;
1859 int rval = (DRIVER_ERROR << 16);
1860 uint16_t nextlid = 0;
1861 struct srb_bsg *els;
1862
1863 /* Multiple SGs are not supported for ELS requests */
1864 if (bsg_job->request_payload.sg_cnt > 1 ||
1865 bsg_job->reply_payload.sg_cnt > 1) {
1866 DEBUG2(printk(KERN_INFO
1867 "multiple SG's are not supported for ELS requests"
1868 " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
1869 bsg_job->request_payload.sg_cnt,
1870 bsg_job->reply_payload.sg_cnt));
1871 rval = -EPERM;
1872 goto done;
1873 }
1874
1875 /* ELS request for rport */
1876 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
1877 rport = bsg_job->rport;
1878 fcport = *(fc_port_t **) rport->dd_data;
1879 host = rport_to_shost(rport);
1880 vha = shost_priv(host);
1881 ha = vha->hw;
1882 type = "FC_BSG_RPT_ELS";
1883
9a069e19
GM
1884 /* make sure the rport is logged in,
1885 * if not perform fabric login
1886 */
1887 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
1888 DEBUG2(qla_printk(KERN_WARNING, ha,
1889 "failed to login port %06X for ELS passthru\n",
1890 fcport->d_id.b24));
1891 rval = -EIO;
1892 goto done;
1893 }
1894 } else {
1895 host = bsg_job->shost;
1896 vha = shost_priv(host);
1897 ha = vha->hw;
1898 type = "FC_BSG_HST_ELS_NOLOGIN";
1899
9a069e19
GM
1900 /* Allocate a dummy fcport structure, since the functions preparing
1901 * the IOCB and mailbox command retrieve port-specific information
1902 * from it. For host-based ELS commands no fcport is otherwise
1903 * allocated.
1904 */
1905 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1906 if (!fcport) {
1907 rval = -ENOMEM;
1908 goto done;
1909 }
1910
1911 /* Initialize all required fields of fcport */
1912 fcport->vha = vha;
1913 fcport->vp_idx = vha->vp_idx;
1914 fcport->d_id.b.al_pa =
1915 bsg_job->request->rqst_data.h_els.port_id[0];
1916 fcport->d_id.b.area =
1917 bsg_job->request->rqst_data.h_els.port_id[1];
1918 fcport->d_id.b.domain =
1919 bsg_job->request->rqst_data.h_els.port_id[2];
1920 fcport->loop_id =
1921 (fcport->d_id.b.al_pa == 0xFD) ?
1922 NPH_FABRIC_CONTROLLER : NPH_F_PORT;
1923 }
1924
db3ad7f8
GM
1925 if (!vha->flags.online) {
1926 DEBUG2(qla_printk(KERN_WARNING, ha,
1927 "host not online\n"));
1928 rval = -EIO;
1929 goto done;
1930 }
9a069e19
GM
1931
1932 req_sg_cnt =
1933 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1934 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1935 if (!req_sg_cnt) {
1936 rval = -ENOMEM;
1937 goto done_free_fcport;
1938 }
1939 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1940 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1941 if (!rsp_sg_cnt) {
/* undo the request-side mapping before bailing out */
dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1942 rval = -ENOMEM;
1943 goto done_free_fcport;
1944 }
1945
1946 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
1947 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
1949 DEBUG2(printk(KERN_INFO
1950 "dma mapping resulted in different sg counts "
1951 "[request_sg_cnt: %x dma_request_sg_cnt: %x "
1952 "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
1953 bsg_job->request_payload.sg_cnt, req_sg_cnt,
1954 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
1955 rval = -EAGAIN;
1956 goto done_unmap_sg;
1957 }
1958
1959 /* Alloc SRB structure */
1960 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
1961 if (!sp) {
1962 rval = -ENOMEM;
1963 goto done_unmap_sg;
1964 }
1965
1966 els = sp->ctx;
1967 els->ctx.type =
1968 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
1969 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
1970 els->bsg_job = bsg_job;
1971
1972 DEBUG2(qla_printk(KERN_INFO, ha,
1973 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
1974 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
1975 bsg_job->request->rqst_data.h_els.command_code,
1976 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
1977 fcport->d_id.b.al_pa));
1978
1979 rval = qla2x00_start_sp(sp);
1980 if (rval != QLA_SUCCESS) {
1981 kfree(sp->ctx);
1982 mempool_free(sp, ha->srb_mempool);
1983 rval = -EIO;
1984 goto done_unmap_sg;
1985 }
1986 return rval;
1987
1988done_unmap_sg:
1989 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1990 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1991 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1992 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1993 goto done_free_fcport;
1994
1995done_free_fcport:
1996 if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
1997 kfree(fcport);
1998done:
1999 return rval;
2000}
2001
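/*
 * Handle a CT (fabric services) pass-through request. Only FWI2-capable
 * adapters (ISP24xx and later) support this path. The well-known address
 * carried in the CT preamble (0xFC for the name/SNS server, 0xFA for the
 * management server) is translated to a loop id, a temporary fcport is
 * built from the destination port id, and the command is queued with an
 * SRB just like the ELS path.
 */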
2002static int
2003qla2x00_process_ct(struct fc_bsg_job *bsg_job)
2004{
2005 srb_t *sp;
2006 struct Scsi_Host *host = bsg_job->shost;
2007 scsi_qla_host_t *vha = shost_priv(host);
2008 struct qla_hw_data *ha = vha->hw;
2009 int rval = (DRIVER_ERROR << 16);
2010 int req_sg_cnt, rsp_sg_cnt;
2011 uint16_t loop_id;
2012 struct fc_port *fcport;
2013 char *type = "FC_BSG_HST_CT";
2014 struct srb_bsg *ct;
2015
2016 /* pass-through is supported only on ISP 4Gb and newer adapters */
2017 if (!IS_FWI2_CAPABLE(ha)) {
2018 DEBUG2(qla_printk(KERN_INFO, ha,
2019 "scsi(%ld): firmware is not capable of supporting "
2020 "FC CT pass-through\n", vha->host_no));
2021 rval = -EPERM;
2022 goto done;
2023 }
2024
2025 req_sg_cnt =
2026 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2027 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2028 if (!req_sg_cnt) {
2029 rval = -ENOMEM;
2030 goto done;
2031 }
2032
2033 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2034 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2035 if (!rsp_sg_cnt) {
/* undo the request-side mapping before bailing out */
dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2036 rval = -ENOMEM;
2037 goto done;
2038 }
2039
2040 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
2041 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
2043 DEBUG2(qla_printk(KERN_WARNING, ha,
2044 "dma mapping resulted in different sg counts "
2045 "[request_sg_cnt: %x dma_request_sg_cnt: %x "
2046 "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
2047 bsg_job->request_payload.sg_cnt, req_sg_cnt,
2048 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
2049 rval = -EAGAIN;
2050 goto done_unmap_sg;
2051 }
2052
db3ad7f8
GM
2053 if (!vha->flags.online) {
2054 DEBUG2(qla_printk(KERN_WARNING, ha,
2055 "host not online\n"));
2056 rval = -EIO;
2057 goto done_unmap_sg;
2058 }
2059
9a069e19
GM
2060 loop_id =
2061 (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
2062 >> 24;
2063 switch (loop_id) {
2064 case 0xFC:
2065 loop_id = cpu_to_le16(NPH_SNS);
2066 break;
2067 case 0xFA:
2068 loop_id = vha->mgmt_svr_loop_id;
2069 break;
2070 default:
2071 DEBUG2(qla_printk(KERN_INFO, ha,
2072 "Unknown loop id: %x\n", loop_id));
2073 rval = -EINVAL;
2074 goto done_unmap_sg;
2075 }
2076
2077 /* Allocate a dummy fcport structure, since the functions preparing
2078 * the IOCB and mailbox command retrieve port-specific information
2079 * from it. For host-based CT commands no fcport is otherwise
2080 * allocated.
2081 */
2082 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2083 if (!fcport) {
2085 rval = -ENOMEM;
2086 goto done_unmap_sg;
2087 }
2088
2089 /* Initialize all required fields of fcport */
2090 fcport->vha = vha;
2091 fcport->vp_idx = vha->vp_idx;
2092 fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
2093 fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
2094 fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
2095 fcport->loop_id = loop_id;
2096
2097 /* Alloc SRB structure */
2098 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
2099 if (!sp) {
2100 rval = -ENOMEM;
2101 goto done_free_fcport;
2102 }
2103
2104 ct = sp->ctx;
2105 ct->ctx.type = SRB_CT_CMD;
2106 ct->bsg_job = bsg_job;
2107
2108 DEBUG2(qla_printk(KERN_INFO, ha,
2109 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
2110 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
2111 (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
2112 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
2113 fcport->d_id.b.al_pa));
2114
2115 rval = qla2x00_start_sp(sp);
2116 if (rval != QLA_SUCCESS) {
2117 kfree(sp->ctx);
2118 mempool_free(sp, ha->srb_mempool);
2119 rval = -EIO;
2120 goto done_free_fcport;
2121 }
2122 return rval;
2123
2124done_free_fcport:
2125 kfree(fcport);
2126done_unmap_sg:
2127 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2128 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2129 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2130 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2131done:
2132 return rval;
2133}
2134
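/*
 * Handle vendor-specific BSG requests: loopback / ECHO diagnostics and
 * ISP84xx reset and management commands. The request payload is copied
 * into a DMA buffer, the selected diagnostic is run through the mailbox
 * interface, and the mailbox status plus the command code are appended
 * after the fc_bsg_reply in the job's sense buffer for the application
 * to inspect.
 */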
2135static int
2136qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
2137{
2138 struct Scsi_Host *host = bsg_job->shost;
2139 scsi_qla_host_t *vha = shost_priv(host);
2140 struct qla_hw_data *ha = vha->hw;
2141 int rval;
2142 uint8_t command_sent;
2143 uint32_t vendor_cmd;
2144 char *type;
2145 struct msg_echo_lb elreq;
2146 uint16_t response[MAILBOX_REGISTER_COUNT];
2147 uint8_t *fw_sts_ptr;
2148 uint8_t *req_data = NULL;
2149 dma_addr_t req_data_dma;
2150 uint32_t req_data_len;
2151 uint8_t *rsp_data = NULL;
2152 dma_addr_t rsp_data_dma;
2153 uint32_t rsp_data_len;
2154
2155 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
2156 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
2157 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
2158 rval = -EBUSY;
2159 goto done;
2160 }
2161
db3ad7f8
GM
2162 if (!vha->flags.online) {
2163 DEBUG2(qla_printk(KERN_WARNING, ha,
2164 "host not online\n"));
2165 rval = -EIO;
2166 goto done;
2167 }
2168
9a069e19
GM
2169 elreq.req_sg_cnt =
2170 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2171 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2172 if (!elreq.req_sg_cnt) {
2173 rval = -ENOMEM;
2174 goto done;
2175 }
2176 elreq.rsp_sg_cnt =
2177 dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2178 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2179 if (!elreq.rsp_sg_cnt) {
/* undo the request-side mapping before bailing out */
dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2180 rval = -ENOMEM;
2181 goto done;
2182 }
2183
2184 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
2185 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
2187 DEBUG2(printk(KERN_INFO
2188 "dma mapping resulted in different sg counts "
2189 "[request_sg_cnt: %x dma_request_sg_cnt: %x "
2190 "reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
2191 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
2192 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
2193 rval = -EAGAIN;
2194 goto done_unmap_sg;
2195 }
2196 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
2197 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
2198 &req_data_dma, GFP_KERNEL);
if (!req_data) {
rval = -ENOMEM;
goto done_unmap_sg;
}

2200 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
2201 &rsp_data_dma, GFP_KERNEL);
if (!rsp_data) {
rval = -ENOMEM;
goto done_unmap_sg;
}
2202
2203 /* Copy the request buffer in req_data now */
2204 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2205 bsg_job->request_payload.sg_cnt, req_data,
2206 req_data_len);
2207
2208 elreq.send_dma = req_data_dma;
2209 elreq.rcv_dma = rsp_data_dma;
2210 elreq.transfer_size = req_data_len;
2211
2212 /* Vendor cmd : loopback or ECHO diagnostic
2213 * Options:
2214 * Loopback : Either internal or external loopback
2215 * ECHO: ECHO ELS or Vendor specific FC4 link data
2216 */
2217 vendor_cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd[0];
2218 elreq.options =
2219 *(((uint32_t *)bsg_job->request->rqst_data.h_vendor.vendor_cmd)
2220 + 1);
2221
2222 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
2223 case QL_VND_LOOPBACK:
2224 if (ha->current_topology != ISP_CFG_F) {
2225 type = "FC_BSG_HST_VENDOR_LOOPBACK";
2226
9a069e19
GM
2227 DEBUG2(qla_printk(KERN_INFO, ha,
2228 "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
2229 vha->host_no, type, vendor_cmd, elreq.options));
db3ad7f8 2230
9a069e19
GM
2231 command_sent = INT_DEF_LB_LOOPBACK_CMD;
2232 rval = qla2x00_loopback_test(vha, &elreq, response);
2233 if (IS_QLA81XX(ha)) {
2234 if (response[0] == MBS_COMMAND_ERROR && response[1] == MBS_LB_RESET) {
2235 DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
2236 "ISP\n", __func__, vha->host_no));
2237 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2238 qla2xxx_wake_dpc(vha);
2239 }
2240 }
2241 } else {
2242 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
2243 DEBUG2(qla_printk(KERN_INFO, ha,
2244 "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
2245 vha->host_no, type, vendor_cmd, elreq.options));
db3ad7f8 2246
9a069e19
GM
2247 command_sent = INT_DEF_LB_ECHO_CMD;
2248 rval = qla2x00_echo_test(vha, &elreq, response);
2249 }
2250 break;
2251 case QLA84_RESET:
2252 if (!IS_QLA84XX(vha->hw)) {
2253 rval = -EINVAL;
2254 DEBUG16(printk(
2255 "%s(%ld): not an ISP84xx, exiting.\n",
2256 __func__, vha->host_no));
2257 goto done_unmap_sg;
2258 }
2259 rval = qla84xx_reset(vha, &elreq, bsg_job);
2260 break;
2261 case QLA84_MGMT_CMD:
2262 if (!IS_QLA84XX(vha->hw)) {
2263 rval = -EINVAL;
2264 DEBUG16(printk(
2265 "%s(%ld): not an ISP84xx, exiting.\n",
2266 __func__, vha->host_no));
2267 goto done_unmap_sg;
2268 }
2269 rval = qla84xx_mgmt_cmd(vha, &elreq, bsg_job);
2270 break;
2271 default:
2272 rval = -ENOSYS;
2273 }
2274
2275 if (rval != QLA_SUCCESS) {
2276 DEBUG2(qla_printk(KERN_WARNING, ha,
2277 "scsi(%ld) Vendor request %s failed\n", vha->host_no, type));
2278 rval = 0;
2279 bsg_job->reply->result = (DID_ERROR << 16);
236b0249 2280 bsg_job->reply->reply_payload_rcv_len = 0;
9a069e19
GM
2281 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
2282 memcpy(fw_sts_ptr, response, sizeof(response));
2283 fw_sts_ptr += sizeof(response);
2284 *fw_sts_ptr = command_sent;
2285 } else {
2286 DEBUG2(qla_printk(KERN_WARNING, ha,
2287 "scsi(%ld) Vendor request %s completed\n", vha->host_no, type));
2288 rval = bsg_job->reply->result = 0;
2289 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(response) + sizeof(uint8_t);
2290 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
2291 fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
2292 memcpy(fw_sts_ptr, response, sizeof(response));
2293 fw_sts_ptr += sizeof(response);
2294 *fw_sts_ptr = command_sent;
2295 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2296 bsg_job->reply_payload.sg_cnt, rsp_data,
2297 rsp_data_len);
2298 }
2299 bsg_job->job_done(bsg_job);
2300
2301done_unmap_sg:
2302
2303 if (req_data)
2304 dma_free_coherent(&ha->pdev->dev, req_data_len,
2305 req_data, req_data_dma);
if (rsp_data)
dma_free_coherent(&ha->pdev->dev, rsp_data_len,
rsp_data, rsp_data_dma);
2306 dma_unmap_sg(&ha->pdev->dev,
2307 bsg_job->request_payload.sg_list,
2308 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2309 dma_unmap_sg(&ha->pdev->dev,
2310 bsg_job->reply_payload.sg_list,
2311 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2312
2313done:
2314 return rval;
2315}
2316
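/* Top-level FC BSG dispatch: route the job by message code. */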
2317static int
2318qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
2319{
2320 int ret = -EINVAL;
2321
2322 switch (bsg_job->request->msgcode) {
2323 case FC_BSG_RPT_ELS:
2324 case FC_BSG_HST_ELS_NOLOGIN:
2325 ret = qla2x00_process_els(bsg_job);
2326 break;
2327 case FC_BSG_HST_CT:
2328 ret = qla2x00_process_ct(bsg_job);
2329 break;
2330 case FC_BSG_HST_VENDOR:
2331 ret = qla2x00_process_vendor_specific(bsg_job);
2332 break;
2333 case FC_BSG_HST_ADD_RPORT:
2334 case FC_BSG_HST_DEL_RPORT:
2335 case FC_BSG_RPT_CT:
2336 default:
2337 DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
2338 break;
2339 }
2340 return ret;
2341}
2342
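/*
 * BSG timeout handler: walk the outstanding commands of every request
 * queue looking for the SRB that carries this bsg_job, try to abort it
 * through the ISP's abort_command() method, and release the SRB
 * resources. Returns 0 whether or not the command was found.
 */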
2343static int
2344qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
2345{
2346 scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
2347 struct qla_hw_data *ha = vha->hw;
2348 srb_t *sp;
db3ad7f8 2349 int cnt, que;
9a069e19 2350 unsigned long flags;
9a069e19 2351 struct req_que *req;
9a069e19
GM
2352 struct srb_bsg *sp_bsg;
2353
2354 /* find the bsg job from the active list of commands */
2355 spin_lock_irqsave(&ha->hardware_lock, flags);
db3ad7f8
GM
2356 for (que = 0; que < ha->max_req_queues; que++) {
2357 req = ha->req_q_map[que];
2358 if (!req)
2359 continue;
9a069e19 2360
db3ad7f8
GM
2361 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
2362 sp = req->outstanding_cmds[cnt];
2363
2364 if (sp) {
2365 sp_bsg = (struct srb_bsg *)sp->ctx;
2366
2367 if (((sp_bsg->ctx.type == SRB_CT_CMD) ||
2368 (sp_bsg->ctx.type == SRB_ELS_CMD_RPT) ||
2369 (sp_bsg->ctx.type == SRB_ELS_CMD_HST)) &&
2370 (sp_bsg->bsg_job == bsg_job)) {
/* drop the hardware lock; the abort path acquires it itself */
spin_unlock_irqrestore(&ha->hardware_lock, flags);
2371 if (ha->isp_ops->abort_command(sp)) {
2372 DEBUG2(qla_printk(KERN_INFO, ha,
2373 "scsi(%ld): mbx abort_command failed\n", vha->host_no));
2374 bsg_job->req->errors = bsg_job->reply->result = -EIO;
2375 } else {
2376 DEBUG2(qla_printk(KERN_INFO, ha,
2377 "scsi(%ld): mbx abort_command success\n", vha->host_no));
2378 bsg_job->req->errors = bsg_job->reply->result = 0;
2379 }
spin_lock_irqsave(&ha->hardware_lock, flags);
2380 goto done;
2381 }
2382 }
9a069e19
GM
2383 }
2384 }
2385 spin_unlock_irqrestore(&ha->hardware_lock, flags);
db3ad7f8
GM
2386 DEBUG2(qla_printk(KERN_INFO, ha,
2387 "scsi(%ld) SRB not found to abort\n", vha->host_no));
2388 bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
2389 return 0;
9a069e19 2390
db3ad7f8 2391done:
9a069e19
GM
spin_unlock_irqrestore(&ha->hardware_lock, flags);
2392 if (bsg_job->request->msgcode == FC_BSG_HST_CT)
2393 kfree(sp->fcport);
2394 kfree(sp->ctx);
2395 mempool_free(sp, ha->srb_mempool);
2396 return 0;
2397}
2398
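/*
 * FC transport template for physical ports; the driver attaches it with
 * fc_attach_transport() during module init. The vport_* and bsg_*
 * callbacks below hook NPIV management and the BSG pass-through paths
 * into the FC transport class.
 */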
1c97a12a 2399struct fc_function_template qla2xxx_transport_functions = {
8482e118
AV
2400
2401 .show_host_node_name = 1,
2402 .show_host_port_name = 1,
ad3e0eda 2403 .show_host_supported_classes = 1,
2ae2b370 2404 .show_host_supported_speeds = 1,
ad3e0eda 2405
8482e118
AV
2406 .get_host_port_id = qla2x00_get_host_port_id,
2407 .show_host_port_id = 1,
04414013
AV
2408 .get_host_speed = qla2x00_get_host_speed,
2409 .show_host_speed = 1,
8d067623
AV
2410 .get_host_port_type = qla2x00_get_host_port_type,
2411 .show_host_port_type = 1,
1620f7c2
AV
2412 .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
2413 .show_host_symbolic_name = 1,
a740a3f0
AV
2414 .set_host_system_hostname = qla2x00_set_host_system_hostname,
2415 .show_host_system_hostname = 1,
90991c85
AV
2416 .get_host_fabric_name = qla2x00_get_host_fabric_name,
2417 .show_host_fabric_name = 1,
7047fcdd
AV
2418 .get_host_port_state = qla2x00_get_host_port_state,
2419 .show_host_port_state = 1,
8482e118 2420
bdf79621 2421 .dd_fcrport_size = sizeof(struct fc_port *),
ad3e0eda 2422 .show_rport_supported_classes = 1,
8482e118
AV
2423
2424 .get_starget_node_name = qla2x00_get_starget_node_name,
2425 .show_starget_node_name = 1,
2426 .get_starget_port_name = qla2x00_get_starget_port_name,
2427 .show_starget_port_name = 1,
2428 .get_starget_port_id = qla2x00_get_starget_port_id,
2429 .show_starget_port_id = 1,
2430
8482e118
AV
2431 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
2432 .show_rport_dev_loss_tmo = 1,
2433
91ca7b01 2434 .issue_fc_host_lip = qla2x00_issue_lip,
5f3a9a20
SJ
2435 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
2436 .terminate_rport_io = qla2x00_terminate_rport_io,
392e2f65 2437 .get_fc_host_stats = qla2x00_get_fc_host_stats,
2c3dfe3f
SJ
2438
2439 .vport_create = qla24xx_vport_create,
2440 .vport_disable = qla24xx_vport_disable,
2441 .vport_delete = qla24xx_vport_delete,
9a069e19
GM
2442 .bsg_request = qla24xx_bsg_request,
2443 .bsg_timeout = qla24xx_bsg_timeout,
2c3dfe3f
SJ
2444};
2445
2446struct fc_function_template qla2xxx_transport_vport_functions = {
2447
2448 .show_host_node_name = 1,
2449 .show_host_port_name = 1,
2450 .show_host_supported_classes = 1,
2451
2452 .get_host_port_id = qla2x00_get_host_port_id,
2453 .show_host_port_id = 1,
2454 .get_host_speed = qla2x00_get_host_speed,
2455 .show_host_speed = 1,
2456 .get_host_port_type = qla2x00_get_host_port_type,
2457 .show_host_port_type = 1,
2458 .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
2459 .show_host_symbolic_name = 1,
2460 .set_host_system_hostname = qla2x00_set_host_system_hostname,
2461 .show_host_system_hostname = 1,
2462 .get_host_fabric_name = qla2x00_get_host_fabric_name,
2463 .show_host_fabric_name = 1,
2464 .get_host_port_state = qla2x00_get_host_port_state,
2465 .show_host_port_state = 1,
2466
2467 .dd_fcrport_size = sizeof(struct fc_port *),
2468 .show_rport_supported_classes = 1,
2469
2470 .get_starget_node_name = qla2x00_get_starget_node_name,
2471 .show_starget_node_name = 1,
2472 .get_starget_port_name = qla2x00_get_starget_port_name,
2473 .show_starget_port_name = 1,
2474 .get_starget_port_id = qla2x00_get_starget_port_id,
2475 .show_starget_port_id = 1,
2476
2c3dfe3f
SJ
2477 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
2478 .show_rport_dev_loss_tmo = 1,
2479
2480 .issue_fc_host_lip = qla2x00_issue_lip,
5f3a9a20
SJ
2481 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
2482 .terminate_rport_io = qla2x00_terminate_rport_io,
2c3dfe3f 2483 .get_fc_host_stats = qla2x00_get_fc_host_stats,
9a069e19
GM
2484 .bsg_request = qla24xx_bsg_request,
2485 .bsg_timeout = qla24xx_bsg_timeout,
8482e118
AV
2486};
2487
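/*
 * Populate the fc_host attributes (WWNs, supported classes, NPIV limits)
 * for a newly initialized host and derive the supported-speed mask from
 * the ISP type: 10Gb for ISP81xx, 1-8Gb for ISP25xx, 1-4Gb for ISP24xx,
 * 1-2Gb for ISP23xx, otherwise 1Gb only.
 */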
8482e118 2488void
7b867cf7 2489qla2x00_init_host_attr(scsi_qla_host_t *vha)
8482e118 2490{
7b867cf7 2491 struct qla_hw_data *ha = vha->hw;
2ae2b370
AV
2492 u32 speed = FC_PORTSPEED_UNKNOWN;
2493
7b867cf7
AC
2494 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
2495 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
2496 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
2497 fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
2498 fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
2ae2b370 2499
3a03eb79
AV
2500 if (IS_QLA81XX(ha))
2501 speed = FC_PORTSPEED_10GBIT;
2502 else if (IS_QLA25XX(ha))
2ae2b370
AV
2503 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
2504 FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
4d4df193 2505 else if (IS_QLA24XX_TYPE(ha))
2ae2b370
AV
2506 speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
2507 FC_PORTSPEED_1GBIT;
2508 else if (IS_QLA23XX(ha))
2509 speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
2510 else
2511 speed = FC_PORTSPEED_1GBIT;
7b867cf7 2512 fc_host_supported_speeds(vha->host) = speed;
8482e118 2513}
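/*
 * ISP84xx reset helper for the vendor-specific BSG path. Despite the
 * parameter name, 'ha' is the scsi_qla_host. Vendor command word 2
 * selects whether the diagnostic or the operational firmware is reset;
 * the work is done by qla84xx_reset_chip(). 'mreq' is currently unused.
 */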
9a069e19
GM
2514static int
2515qla84xx_reset(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
2516{
2517 int ret = 0;
2518 int cmd;
2519 uint16_t cmd_status;
2520
2521 DEBUG16(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2522
2523 cmd = (*((bsg_job->request->rqst_data.h_vendor.vendor_cmd) + 2))
2524 == A84_RESET_FLAG_ENABLE_DIAG_FW ?
2525 A84_ISSUE_RESET_DIAG_FW : A84_ISSUE_RESET_OP_FW;
2526 ret = qla84xx_reset_chip(ha, cmd == A84_ISSUE_RESET_DIAG_FW,
2527 &cmd_status);
2528 return ret;
2529}
2530
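/*
 * ISP84xx management command helper (read/write memory, change config,
 * get info). The parameters are unpacked from the vendor command words
 * into a struct qla84_msg_mgmt, an ACCESS_CHIP IOCB is built (with an
 * optional data buffer for memory transfers) and issued with
 * qla2x00_issue_iocb(). Note that read/get-info results are left in the
 * temporary DMA buffer; nothing copies them back to the caller here.
 */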
2531static int
2532qla84xx_mgmt_cmd(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
2533{
2534 struct access_chip_84xx *mn;
2535 dma_addr_t mn_dma, mgmt_dma;
2536 void *mgmt_b = NULL;
2537 int ret = 0;
2538 int rsp_hdr_len, len = 0;
2539 struct qla84_msg_mgmt *ql84_mgmt;
2540
2541 ql84_mgmt = vmalloc(sizeof(struct qla84_msg_mgmt));
if (!ql84_mgmt)
return -ENOMEM;
2542 ql84_mgmt->cmd =
2543 *((uint16_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 2));
2544 ql84_mgmt->mgmtp.u.mem.start_addr =
2545 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 3));
2546 ql84_mgmt->len =
2547 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 4));
2548 ql84_mgmt->mgmtp.u.config.id =
2549 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 5));
2550 ql84_mgmt->mgmtp.u.config.param0 =
2551 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 6));
2552 ql84_mgmt->mgmtp.u.config.param1 =
2553 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 7));
2554 ql84_mgmt->mgmtp.u.info.type =
2555 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 8));
2556 ql84_mgmt->mgmtp.u.info.context =
2557 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 9));
2558
2559 rsp_hdr_len = bsg_job->request_payload.payload_len;
2560
2561 mn = dma_pool_alloc(ha->hw->s_dma_pool, GFP_KERNEL, &mn_dma);
2562 if (mn == NULL) {
2563 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
2564 "failed (%lu)\n", __func__, ha->host_no));
vfree(ql84_mgmt);
2565 return -ENOMEM;
2566 }
2567
2568 memset(mn, 0, sizeof (struct access_chip_84xx));
2569
2570 mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
2571 mn->entry_count = 1;
2572
2573 switch (ql84_mgmt->cmd) {
2574 case QLA84_MGMT_READ_MEM:
2575 mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
2576 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
2577 break;
2578 case QLA84_MGMT_WRITE_MEM:
2579 mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
2580 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
2581 break;
2582 case QLA84_MGMT_CHNG_CONFIG:
2583 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
2584 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.id);
2585 mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param0);
2586 mn->parameter3 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param1);
2587 break;
2588 case QLA84_MGMT_GET_INFO:
2589 mn->options = cpu_to_le16(ACO_REQUEST_INFO);
2590 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.type);
2591 mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.context);
2592 break;
2593 default:
2594 ret = -EIO;
2595 goto exit_mgmt0;
2596 }
2597
/* set up a data buffer for memory read/write and get-info commands */
len = ql84_mgmt->len;
2598 if (len && ql84_mgmt->cmd != QLA84_MGMT_CHNG_CONFIG) {
2600 mgmt_b = dma_alloc_coherent(&ha->hw->pdev->dev, len,
2601 &mgmt_dma, GFP_KERNEL);
2602 if (mgmt_b == NULL) {
2603 DEBUG2(printk(KERN_ERR "%s: dma alloc mgmt_b "
2604 "failed (%lu)\n", __func__, ha->host_no));
2605 ret = -ENOMEM;
2606 goto exit_mgmt0;
2607 }
2608 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->len);
2609 mn->dseg_count = cpu_to_le16(1);
2610 mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
2611 mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
2612 mn->dseg_length = cpu_to_le32(len);
2613
2614 if (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM) {
2615 memcpy(mgmt_b, ql84_mgmt->payload, len);
2616 }
2617 }
2618
2619 ret = qla2x00_issue_iocb(ha, mn, mn_dma, 0);
2620 if ((ret != QLA_SUCCESS) || (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM)
2621 || (ql84_mgmt->cmd == QLA84_MGMT_CHNG_CONFIG)) {
2622 if (ret != QLA_SUCCESS)
2623 DEBUG2(printk(KERN_ERR "%s(%lu): failed\n",
2624 __func__, ha->host_no));
2625 } else if ((ql84_mgmt->cmd == QLA84_MGMT_READ_MEM) ||
2626 (ql84_mgmt->cmd == QLA84_MGMT_GET_INFO)) {
2627 }
2628
2629 if (mgmt_b)
2630 dma_free_coherent(&ha->hw->pdev->dev, len, mgmt_b, mgmt_dma);
2631
2632exit_mgmt0:
2633 dma_pool_free(ha->hw->s_dma_pool, mn, mn_dma);
vfree(ql84_mgmt);
2634 return ret;
2635}