/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/kthread.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/delay.h>

static int qla24xx_vport_disable(struct fc_vport *, bool);
static int qla84xx_reset(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);
int qla84xx_reset_chip(scsi_qla_host_t *, uint16_t, uint16_t *);
static int qla84xx_mgmt_cmd(scsi_qla_host_t *, struct msg_echo_lb *, struct fc_bsg_job *);

/* SYSFS attributes --------------------------------------------------------- */

static ssize_t
qla2x00_sysfs_read_fw_dump(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	if (ha->fw_dump_reading == 0)
		return 0;

	return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
	    ha->fw_dump_len);
}

static ssize_t
qla2x00_sysfs_write_fw_dump(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	int reading;

	if (off != 0)
		return (0);

	reading = simple_strtol(buf, NULL, 10);
	switch (reading) {
	case 0:
		if (!ha->fw_dump_reading)
			break;

		qla_printk(KERN_INFO, ha,
		    "Firmware dump cleared on (%ld).\n", vha->host_no);

		ha->fw_dump_reading = 0;
		ha->fw_dumped = 0;
		break;
	case 1:
		if (ha->fw_dumped && !ha->fw_dump_reading) {
			ha->fw_dump_reading = 1;

			qla_printk(KERN_INFO, ha,
			    "Raw firmware dump ready for read on (%ld).\n",
			    vha->host_no);
		}
		break;
	case 2:
		qla2x00_alloc_fw_dump(vha);
		break;
	case 3:
		qla2x00_system_error(vha);
		break;
	}
	return (count);
}

static struct bin_attribute sysfs_fw_dump_attr = {
	.attr = {
		.name = "fw_dump",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_fw_dump,
	.write = qla2x00_sysfs_write_fw_dump,
};
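/*
 * Illustrative note (not part of the driver): the control values accepted by
 * qla2x00_sysfs_write_fw_dump() above map to a simple user-space sequence.
 * Assuming the attribute sits in the usual shost_gendev location (the path
 * below is an assumption, not guaranteed):
 *
 *	echo 1 > /sys/class/scsi_host/hostN/device/fw_dump	# expose dump
 *	cat /sys/class/scsi_host/hostN/device/fw_dump > fw.dmp	# read it
 *	echo 0 > /sys/class/scsi_host/hostN/device/fw_dump	# clear it
 *
 * Values 2 and 3 request a dump-buffer (re)allocation and a forced firmware
 * system error, respectively, as handled in the switch above.
 */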

static ssize_t
qla2x00_sysfs_read_nvram(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	if (!capable(CAP_SYS_ADMIN))
		return 0;

	if (IS_NOCACHE_VPD_TYPE(ha))
		ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
		    ha->nvram_size);
	return memory_read_from_buffer(buf, count, &off, ha->nvram,
	    ha->nvram_size);
}

static ssize_t
qla2x00_sysfs_write_nvram(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	uint16_t cnt;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
	    !ha->isp_ops->write_nvram)
		return 0;

	/* Checksum NVRAM. */
	if (IS_FWI2_CAPABLE(ha)) {
		uint32_t *iter;
		uint32_t chksum;

		iter = (uint32_t *)buf;
		chksum = 0;
		for (cnt = 0; cnt < ((count >> 2) - 1); cnt++)
			chksum += le32_to_cpu(*iter++);
		chksum = ~chksum + 1;
		*iter = cpu_to_le32(chksum);
	} else {
		uint8_t *iter;
		uint8_t chksum;

		iter = (uint8_t *)buf;
		chksum = 0;
		for (cnt = 0; cnt < count - 1; cnt++)
			chksum += *iter++;
		chksum = ~chksum + 1;
		*iter = chksum;
	}

	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha,
		    "HBA not online, failing NVRAM update.\n");
		return -EAGAIN;
	}

	/* Write NVRAM. */
	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
	    count);

	/* NVRAM settings take effect immediately. */
	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
	qla2x00_wait_for_chip_reset(vha);

	return (count);
}

static struct bin_attribute sysfs_nvram_attr = {
	.attr = {
		.name = "nvram",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 512,
	.read = qla2x00_sysfs_read_nvram,
	.write = qla2x00_sysfs_write_nvram,
};

static ssize_t
qla2x00_sysfs_read_optrom(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	if (ha->optrom_state != QLA_SREADING)
		return 0;

	return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
	    ha->optrom_region_size);
}

static ssize_t
qla2x00_sysfs_write_optrom(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	if (ha->optrom_state != QLA_SWRITING)
		return -EINVAL;
	if (off > ha->optrom_region_size)
		return -ERANGE;
	if (off + count > ha->optrom_region_size)
		count = ha->optrom_region_size - off;

	memcpy(&ha->optrom_buffer[off], buf, count);

	return count;
}

static struct bin_attribute sysfs_optrom_attr = {
	.attr = {
		.name = "optrom",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_optrom,
	.write = qla2x00_sysfs_write_optrom,
};

static ssize_t
qla2x00_sysfs_write_optrom_ctl(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	uint32_t start = 0;
	uint32_t size = ha->optrom_size;
	int val, valid;

	if (off)
		return 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return 0;

	if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
		return -EINVAL;
	if (start > ha->optrom_size)
		return -EINVAL;

	switch (val) {
	case 0:
		if (ha->optrom_state != QLA_SREADING &&
		    ha->optrom_state != QLA_SWRITING)
			break;

		ha->optrom_state = QLA_SWAITING;

		DEBUG2(qla_printk(KERN_INFO, ha,
		    "Freeing flash region allocation -- 0x%x bytes.\n",
		    ha->optrom_region_size));

		vfree(ha->optrom_buffer);
		ha->optrom_buffer = NULL;
		break;
	case 1:
		if (ha->optrom_state != QLA_SWAITING)
			break;

		ha->optrom_region_start = start;
		ha->optrom_region_size = start + size > ha->optrom_size ?
		    ha->optrom_size - start : size;

		ha->optrom_state = QLA_SREADING;
		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
		if (ha->optrom_buffer == NULL) {
			qla_printk(KERN_WARNING, ha,
			    "Unable to allocate memory for optrom retrieval "
			    "(%x).\n", ha->optrom_region_size);

			ha->optrom_state = QLA_SWAITING;
			return count;
		}

		DEBUG2(qla_printk(KERN_INFO, ha,
		    "Reading flash region -- 0x%x/0x%x.\n",
		    ha->optrom_region_start, ha->optrom_region_size));

		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
		ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
		    ha->optrom_region_start, ha->optrom_region_size);
		break;
	case 2:
		if (ha->optrom_state != QLA_SWAITING)
			break;

		/*
		 * We need to be more restrictive on which FLASH regions are
		 * allowed to be updated via user-space.  Regions accessible
		 * via this method include:
		 *
		 * ISP21xx/ISP22xx/ISP23xx type boards:
		 *
		 *	0x000000 -> 0x020000 -- Boot code.
		 *
		 * ISP2322/ISP24xx type boards:
		 *
		 *	0x000000 -> 0x07ffff -- Boot code.
		 *	0x080000 -> 0x0fffff -- Firmware.
		 *
		 * ISP25xx type boards:
		 *
		 *	0x000000 -> 0x07ffff -- Boot code.
		 *	0x080000 -> 0x0fffff -- Firmware.
		 *	0x120000 -> 0x12ffff -- VPD and HBA parameters.
		 */
		valid = 0;
		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
			valid = 1;
		else if (start == (ha->flt_region_boot * 4) ||
		    start == (ha->flt_region_fw * 4))
			valid = 1;
		else if (IS_QLA25XX(ha) || IS_QLA81XX(ha))
			valid = 1;
		if (!valid) {
			qla_printk(KERN_WARNING, ha,
			    "Invalid start region 0x%x/0x%x.\n", start, size);
			return -EINVAL;
		}

		ha->optrom_region_start = start;
		ha->optrom_region_size = start + size > ha->optrom_size ?
		    ha->optrom_size - start : size;

		ha->optrom_state = QLA_SWRITING;
		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
		if (ha->optrom_buffer == NULL) {
			qla_printk(KERN_WARNING, ha,
			    "Unable to allocate memory for optrom update "
			    "(%x).\n", ha->optrom_region_size);

			ha->optrom_state = QLA_SWAITING;
			return count;
		}

		DEBUG2(qla_printk(KERN_INFO, ha,
		    "Staging flash region write -- 0x%x/0x%x.\n",
		    ha->optrom_region_start, ha->optrom_region_size));

		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
		break;
	case 3:
		if (ha->optrom_state != QLA_SWRITING)
			break;

		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
			qla_printk(KERN_WARNING, ha,
			    "HBA not online, failing flash update.\n");
			return -EAGAIN;
		}

		DEBUG2(qla_printk(KERN_INFO, ha,
		    "Writing flash region -- 0x%x/0x%x.\n",
		    ha->optrom_region_start, ha->optrom_region_size));

		ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
		    ha->optrom_region_start, ha->optrom_region_size);
		break;
	default:
		count = -EINVAL;
	}
	return count;
}

static struct bin_attribute sysfs_optrom_ctl_attr = {
	.attr = {
		.name = "optrom_ctl",
		.mode = S_IWUSR,
	},
	.size = 0,
	.write = qla2x00_sysfs_write_optrom_ctl,
};
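/*
 * Illustrative note (not part of the driver): writes to "optrom_ctl" are
 * parsed as "<cmd>:<start>:<size>", with <cmd> decimal and <start>/<size> in
 * hex (see the sscanf() above).  A flash read/update cycle therefore looks
 * roughly like this from user-space (paths and region values are
 * assumptions for illustration only):
 *
 *	echo "1:0x0:0x20000" > optrom_ctl	# stage a read of a region
 *	cat optrom > region.bin			# fetch the staged data
 *	echo "2:0x0:0x20000" > optrom_ctl	# stage a write of a region
 *	cat new_region.bin > optrom		# fill the staging buffer
 *	echo "3:0x0:0x20000" > optrom_ctl	# burn the staged buffer
 *	echo "0:0x0:0x0" > optrom_ctl		# free the staging buffer
 *
 * Which start addresses are accepted for updates is limited by the validity
 * checks in case 2 above.
 */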

static ssize_t
qla2x00_sysfs_read_vpd(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return 0;

	if (!capable(CAP_SYS_ADMIN))
		return 0;

	if (IS_NOCACHE_VPD_TYPE(ha))
		ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
		    ha->vpd_size);
	return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
}

static ssize_t
qla2x00_sysfs_write_vpd(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	uint8_t *tmp_data;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return 0;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
	    !ha->isp_ops->write_nvram)
		return 0;

	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha,
		    "HBA not online, failing VPD update.\n");
		return -EAGAIN;
	}

	/* Write NVRAM. */
	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);

	/* Update flash version information for 4Gb & above. */
	if (!IS_FWI2_CAPABLE(ha))
		goto done;

	tmp_data = vmalloc(256);
	if (!tmp_data) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to allocate memory for VPD information update.\n");
		goto done;
	}
	ha->isp_ops->get_flash_version(vha, tmp_data);
	vfree(tmp_data);
done:
	return count;
}

static struct bin_attribute sysfs_vpd_attr = {
	.attr = {
		.name = "vpd",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_vpd,
	.write = qla2x00_sysfs_write_vpd,
};

static ssize_t
qla2x00_sysfs_read_sfp(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	uint16_t iter, addr, offset;
	int rval;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)
		return 0;

	if (ha->sfp_data)
		goto do_read;

	ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
	    &ha->sfp_data_dma);
	if (!ha->sfp_data) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to allocate memory for SFP read-data.\n");
		return 0;
	}

do_read:
	memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
	addr = 0xa0;
	for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
	    iter++, offset += SFP_BLOCK_SIZE) {
		if (iter == 4) {
			/* Skip to next device address. */
			addr = 0xa2;
			offset = 0;
		}

		rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, addr, offset,
		    SFP_BLOCK_SIZE);
		if (rval != QLA_SUCCESS) {
			qla_printk(KERN_WARNING, ha,
			    "Unable to read SFP data (%x/%x/%x).\n", rval,
			    addr, offset);
			count = 0;
			break;
		}
		memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE);
		buf += SFP_BLOCK_SIZE;
	}

	return count;
}

static struct bin_attribute sysfs_sfp_attr = {
	.attr = {
		.name = "sfp",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = SFP_DEV_SIZE * 2,
	.read = qla2x00_sysfs_read_sfp,
};

static ssize_t
qla2x00_sysfs_write_reset(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	int type;

	if (off != 0)
		return 0;

	type = simple_strtol(buf, NULL, 10);
	switch (type) {
	case 0x2025c:
		qla_printk(KERN_INFO, ha,
		    "Issuing ISP reset on (%ld).\n", vha->host_no);

		scsi_block_requests(vha->host);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		qla2x00_wait_for_chip_reset(vha);
		scsi_unblock_requests(vha->host);
		break;
	case 0x2025d:
		if (!IS_QLA81XX(ha))
			break;

		qla_printk(KERN_INFO, ha,
		    "Issuing MPI reset on (%ld).\n", vha->host_no);

		/* Make sure FC side is not in reset */
		qla2x00_wait_for_hba_online(vha);

		/* Issue MPI reset */
		scsi_block_requests(vha->host);
		if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
			qla_printk(KERN_WARNING, ha,
			    "MPI reset failed on (%ld).\n", vha->host_no);
		scsi_unblock_requests(vha->host);
		break;
	}
	return count;
}

static struct bin_attribute sysfs_reset_attr = {
	.attr = {
		.name = "reset",
		.mode = S_IWUSR,
	},
	.size = 0,
	.write = qla2x00_sysfs_write_reset,
};
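/*
 * Illustrative note (not part of the driver): qla2x00_sysfs_write_reset()
 * parses the written string with base 10, so the magic selectors 0x2025c
 * (ISP reset) and 0x2025d (MPI reset, ISP81xx only) have to be written in
 * decimal form, e.g. (attribute path omitted, it lives alongside the other
 * binary attributes created in qla2x00_alloc_sysfs_attr()):
 *
 *	echo 131676 > reset	# 0x2025c -> ISP reset
 *	echo 131677 > reset	# 0x2025d -> MPI reset
 */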

static ssize_t
qla2x00_sysfs_write_edc(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	uint16_t dev, adr, opt, len;
	int rval;

	ha->edc_data_len = 0;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
		return 0;

	if (!ha->edc_data) {
		ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
		    &ha->edc_data_dma);
		if (!ha->edc_data) {
			DEBUG2(qla_printk(KERN_INFO, ha,
			    "Unable to allocate memory for EDC write.\n"));
			return 0;
		}
	}

	dev = le16_to_cpup((void *)&buf[0]);
	adr = le16_to_cpup((void *)&buf[2]);
	opt = le16_to_cpup((void *)&buf[4]);
	len = le16_to_cpup((void *)&buf[6]);

	if (!(opt & BIT_0))
		if (len == 0 || len > DMA_POOL_SIZE || len > count - 8)
			return -EINVAL;

	memcpy(ha->edc_data, &buf[8], len);

	rval = qla2x00_write_edc(vha, dev, adr, ha->edc_data_dma,
	    ha->edc_data, len, opt);
	if (rval != QLA_SUCCESS) {
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "Unable to write EDC (%x) %02x:%02x:%04x:%02x:%02x.\n",
		    rval, dev, adr, opt, len, *buf));
		return 0;
	}

	return count;
}

static struct bin_attribute sysfs_edc_attr = {
	.attr = {
		.name = "edc",
		.mode = S_IWUSR,
	},
	.size = 0,
	.write = qla2x00_sysfs_write_edc,
};

static ssize_t
qla2x00_sysfs_write_edc_status(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	uint16_t dev, adr, opt, len;
	int rval;

	ha->edc_data_len = 0;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
		return 0;

	if (!ha->edc_data) {
		ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
		    &ha->edc_data_dma);
		if (!ha->edc_data) {
			DEBUG2(qla_printk(KERN_INFO, ha,
			    "Unable to allocate memory for EDC status.\n"));
			return 0;
		}
	}

	dev = le16_to_cpup((void *)&buf[0]);
	adr = le16_to_cpup((void *)&buf[2]);
	opt = le16_to_cpup((void *)&buf[4]);
	len = le16_to_cpup((void *)&buf[6]);

	if (!(opt & BIT_0))
		if (len == 0 || len > DMA_POOL_SIZE)
			return -EINVAL;

	memset(ha->edc_data, 0, len);
	rval = qla2x00_read_edc(vha, dev, adr, ha->edc_data_dma,
	    ha->edc_data, len, opt);
	if (rval != QLA_SUCCESS) {
		DEBUG2(qla_printk(KERN_INFO, ha,
		    "Unable to write EDC status (%x) %02x:%02x:%04x:%02x.\n",
		    rval, dev, adr, opt, len));
		return 0;
	}

	ha->edc_data_len = len;

	return count;
}

static ssize_t
qla2x00_sysfs_read_edc_status(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count == 0)
		return 0;

	if (!ha->edc_data || ha->edc_data_len == 0 || ha->edc_data_len > count)
		return -EINVAL;

	memcpy(buf, ha->edc_data, ha->edc_data_len);

	return ha->edc_data_len;
}

static struct bin_attribute sysfs_edc_status_attr = {
	.attr = {
		.name = "edc_status",
		.mode = S_IRUSR | S_IWUSR,
	},
	.size = 0,
	.write = qla2x00_sysfs_write_edc_status,
	.read = qla2x00_sysfs_read_edc_status,
};

static ssize_t
qla2x00_sysfs_read_xgmac_stats(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint16_t actual_size;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
		return 0;

	if (ha->xgmac_data)
		goto do_read;

	ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
	    &ha->xgmac_data_dma, GFP_KERNEL);
	if (!ha->xgmac_data) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to allocate memory for XGMAC read-data.\n");
		return 0;
	}

do_read:
	actual_size = 0;
	memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);

	rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
	    XGMAC_DATA_SIZE, &actual_size);
	if (rval != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to read XGMAC data (%x).\n", rval);
		count = 0;
	}

	count = actual_size > count ? count : actual_size;
	memcpy(buf, ha->xgmac_data, count);

	return count;
}

static struct bin_attribute sysfs_xgmac_stats_attr = {
	.attr = {
		.name = "xgmac_stats",
		.mode = S_IRUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_xgmac_stats,
};

static ssize_t
qla2x00_sysfs_read_dcbx_tlv(struct kobject *kobj,
    struct bin_attribute *bin_attr,
    char *buf, loff_t off, size_t count)
{
	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
	    struct device, kobj)));
	struct qla_hw_data *ha = vha->hw;
	int rval;
	uint16_t actual_size;

	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
		return 0;

	if (ha->dcbx_tlv)
		goto do_read;

	ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
	    &ha->dcbx_tlv_dma, GFP_KERNEL);
	if (!ha->dcbx_tlv) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to allocate memory for DCBX TLV read-data.\n");
		return 0;
	}

do_read:
	actual_size = 0;
	memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);

	rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
	    DCBX_TLV_DATA_SIZE);
	if (rval != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to read DCBX TLV data (%x).\n", rval);
		count = 0;
	}

	memcpy(buf, ha->dcbx_tlv, count);

	return count;
}

static struct bin_attribute sysfs_dcbx_tlv_attr = {
	.attr = {
		.name = "dcbx_tlv",
		.mode = S_IRUSR,
	},
	.size = 0,
	.read = qla2x00_sysfs_read_dcbx_tlv,
};

static struct sysfs_entry {
	char *name;
	struct bin_attribute *attr;
	int is4GBp_only;
} bin_file_entries[] = {
	{ "fw_dump", &sysfs_fw_dump_attr, },
	{ "nvram", &sysfs_nvram_attr, },
	{ "optrom", &sysfs_optrom_attr, },
	{ "optrom_ctl", &sysfs_optrom_ctl_attr, },
	{ "vpd", &sysfs_vpd_attr, 1 },
	{ "sfp", &sysfs_sfp_attr, 1 },
	{ "reset", &sysfs_reset_attr, },
	{ "edc", &sysfs_edc_attr, 2 },
	{ "edc_status", &sysfs_edc_status_attr, 2 },
	{ "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
	{ "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
	{ NULL },
};
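/*
 * Note on the is4GBp_only field (as interpreted by qla2x00_alloc_sysfs_attr()
 * below): any non-zero value restricts the attribute to FWI2-capable (4Gb and
 * newer) adapters; a value of 2 additionally requires an ISP25xx part and a
 * value of 3 an ISP81xx part.
 */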

void
qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
{
	struct Scsi_Host *host = vha->host;
	struct sysfs_entry *iter;
	int ret;

	for (iter = bin_file_entries; iter->name; iter++) {
		if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
			continue;
		if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
			continue;
		if (iter->is4GBp_only == 3 && !IS_QLA81XX(vha->hw))
			continue;

		ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
		    iter->attr);
		if (ret)
			qla_printk(KERN_INFO, vha->hw,
			    "Unable to create sysfs %s binary attribute "
			    "(%d).\n", iter->name, ret);
	}
}

void
qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
{
	struct Scsi_Host *host = vha->host;
	struct sysfs_entry *iter;
	struct qla_hw_data *ha = vha->hw;

	for (iter = bin_file_entries; iter->name; iter++) {
		if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
			continue;
		if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
			continue;
		if (iter->is4GBp_only == 3 && !IS_QLA81XX(ha))
			continue;

		sysfs_remove_bin_file(&host->shost_gendev.kobj,
		    iter->attr);
	}

	if (ha->beacon_blink_led == 1)
		ha->isp_ops->beacon_off(vha);
}

/* Scsi_Host attributes. */

static ssize_t
qla2x00_drvr_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
}

static ssize_t
qla2x00_fw_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	char fw_str[128];

	return snprintf(buf, PAGE_SIZE, "%s\n",
	    ha->isp_ops->fw_version_str(vha, fw_str));
}

static ssize_t
qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	uint32_t sn;

	if (IS_FWI2_CAPABLE(ha)) {
		qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE);
		return snprintf(buf, PAGE_SIZE, "%s\n", buf);
	}

	sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
	return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
	    sn % 100000);
}

static ssize_t
qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	return snprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
}

static ssize_t
qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
	    ha->product_id[0], ha->product_id[1], ha->product_id[2],
	    ha->product_id[3]);
}

static ssize_t
qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
}

static ssize_t
qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	return snprintf(buf, PAGE_SIZE, "%s\n",
	    vha->hw->model_desc ? vha->hw->model_desc : "");
}

static ssize_t
qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	char pci_info[30];

	return snprintf(buf, PAGE_SIZE, "%s\n",
	    vha->hw->isp_ops->pci_info_str(vha, pci_info));
}

static ssize_t
qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	int len = 0;

	if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
	    atomic_read(&vha->loop_state) == LOOP_DEAD)
		len = snprintf(buf, PAGE_SIZE, "Link Down\n");
	else if (atomic_read(&vha->loop_state) != LOOP_READY ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
		len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");
	else {
		len = snprintf(buf, PAGE_SIZE, "Link Up - ");

		switch (ha->current_topology) {
		case ISP_CFG_NL:
			len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
			break;
		case ISP_CFG_FL:
			len += snprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
			break;
		case ISP_CFG_N:
			len += snprintf(buf + len, PAGE_SIZE-len,
			    "N_Port to N_Port\n");
			break;
		case ISP_CFG_F:
			len += snprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
			break;
		default:
			len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
			break;
		}
	}
	return len;
}

static ssize_t
qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int len = 0;

	switch (vha->hw->zio_mode) {
	case QLA_ZIO_MODE_6:
		len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
		break;
	case QLA_ZIO_DISABLED:
		len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
		break;
	}
	return len;
}

static ssize_t
qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
    const char *buf, size_t count)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	int val = 0;
	uint16_t zio_mode;

	if (!IS_ZIO_SUPPORTED(ha))
		return -ENOTSUPP;

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;

	if (val)
		zio_mode = QLA_ZIO_MODE_6;
	else
		zio_mode = QLA_ZIO_DISABLED;

	/* Update per-hba values and queue a reset. */
	if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
		ha->zio_mode = zio_mode;
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
	return strlen(buf);
}

static ssize_t
qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	return snprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
}

static ssize_t
qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
    const char *buf, size_t count)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int val = 0;
	uint16_t zio_timer;

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;
	if (val > 25500 || val < 100)
		return -ERANGE;

	zio_timer = (uint16_t)(val / 100);
	vha->hw->zio_timer = zio_timer;

	return strlen(buf);
}
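/*
 * Illustrative note (not part of the driver): the "zio" attribute accepts any
 * non-zero integer to select ZIO mode 6 and 0 to disable it, while
 * "zio_timer" takes a delay in microseconds (100-25500) that is stored in
 * 100us firmware ticks, e.g.:
 *
 *	echo 1 > zio		# enable ZIO mode 6 (queues an ISP abort)
 *	echo 200 > zio_timer	# 200us -> stored as 2 ticks
 */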

static ssize_t
qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int len = 0;

	if (vha->hw->beacon_blink_led)
		len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
	else
		len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
	return len;
}

static ssize_t
qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
    const char *buf, size_t count)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	int val = 0;
	int rval;

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return -EPERM;

	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
		qla_printk(KERN_WARNING, ha,
		    "Abort ISP active -- ignoring beacon request.\n");
		return -EBUSY;
	}

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;

	if (val)
		rval = ha->isp_ops->beacon_on(vha);
	else
		rval = ha->isp_ops->beacon_off(vha);

	if (rval != QLA_SUCCESS)
		count = 0;

	return count;
}

static ssize_t
qla2x00_optrom_bios_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
	    ha->bios_revision[0]);
}

static ssize_t
qla2x00_optrom_efi_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
	    ha->efi_revision[0]);
}

static ssize_t
qla2x00_optrom_fcode_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
	    ha->fcode_revision[0]);
}

static ssize_t
qla2x00_optrom_fw_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
	    ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
	    ha->fw_revision[3]);
}

static ssize_t
qla2x00_total_isp_aborts_show(struct device *dev,
    struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;
	return snprintf(buf, PAGE_SIZE, "%d\n",
	    ha->qla_stats.total_isp_aborts);
}

static ssize_t
qla24xx_84xx_fw_version_show(struct device *dev,
    struct device_attribute *attr, char *buf)
{
	int rval = QLA_SUCCESS;
	uint16_t status[2] = {0, 0};
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	if (IS_QLA84XX(ha) && ha->cs84xx) {
		if (ha->cs84xx->op_fw_version == 0) {
			rval = qla84xx_verify_chip(vha, status);
		}

		if ((rval == QLA_SUCCESS) && (status[0] == 0))
			return snprintf(buf, PAGE_SIZE, "%u\n",
			    (uint32_t)ha->cs84xx->op_fw_version);
	}

	return snprintf(buf, PAGE_SIZE, "\n");
}

static ssize_t
qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha))
		return snprintf(buf, PAGE_SIZE, "\n");

	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
	    ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
	    ha->mpi_capabilities);
}

static ssize_t
qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA81XX(ha))
		return snprintf(buf, PAGE_SIZE, "\n");

	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
	    ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
}

static ssize_t
qla2x00_flash_block_size_show(struct device *dev,
    struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	struct qla_hw_data *ha = vha->hw;

	return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
}

static ssize_t
qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	if (!IS_QLA81XX(vha->hw))
		return snprintf(buf, PAGE_SIZE, "\n");

	return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
}

static ssize_t
qla2x00_vn_port_mac_address_show(struct device *dev,
    struct device_attribute *attr, char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	if (!IS_QLA81XX(vha->hw))
		return snprintf(buf, PAGE_SIZE, "\n");

	return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
	    vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4],
	    vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2],
	    vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]);
}

static ssize_t
qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
}

static ssize_t
qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
	int rval = QLA_FUNCTION_FAILED;
	uint16_t state[5];

	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
		DEBUG2_3_11(printk("%s(%ld): isp reset in progress.\n",
		    __func__, vha->host_no));
	else if (!vha->hw->flags.eeh_busy)
		rval = qla2x00_get_firmware_state(vha, state);
	if (rval != QLA_SUCCESS)
		memset(state, -1, sizeof(state));

	return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
	    state[1], state[2], state[3], state[4]);
}

static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
		   qla2x00_zio_timer_store);
static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
		   qla2x00_beacon_store);
static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
		   qla2x00_optrom_bios_version_show, NULL);
static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
		   qla2x00_optrom_efi_version_show, NULL);
static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
		   qla2x00_optrom_fcode_version_show, NULL);
static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
		   NULL);
static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
		   NULL);
static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
		   NULL);
static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
		   NULL);
static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
		   qla2x00_vn_port_mac_address_show, NULL);
static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);

struct device_attribute *qla2x00_host_attrs[] = {
	&dev_attr_driver_version,
	&dev_attr_fw_version,
	&dev_attr_serial_num,
	&dev_attr_isp_name,
	&dev_attr_isp_id,
	&dev_attr_model_name,
	&dev_attr_model_desc,
	&dev_attr_pci_info,
	&dev_attr_link_state,
	&dev_attr_zio,
	&dev_attr_zio_timer,
	&dev_attr_beacon,
	&dev_attr_optrom_bios_version,
	&dev_attr_optrom_efi_version,
	&dev_attr_optrom_fcode_version,
	&dev_attr_optrom_fw_version,
	&dev_attr_84xx_fw_version,
	&dev_attr_total_isp_aborts,
	&dev_attr_mpi_version,
	&dev_attr_phy_version,
	&dev_attr_flash_block_size,
	&dev_attr_vlan_id,
	&dev_attr_vn_port_mac_address,
	&dev_attr_fabric_param,
	&dev_attr_fw_state,
	NULL,
};

/* Host attributes. */

static void
qla2x00_get_host_port_id(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
	    vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
}

static void
qla2x00_get_host_speed(struct Scsi_Host *shost)
{
	struct qla_hw_data *ha = ((struct scsi_qla_host *)
	    (shost_priv(shost)))->hw;
	u32 speed = FC_PORTSPEED_UNKNOWN;

	switch (ha->link_data_rate) {
	case PORT_SPEED_1GB:
		speed = FC_PORTSPEED_1GBIT;
		break;
	case PORT_SPEED_2GB:
		speed = FC_PORTSPEED_2GBIT;
		break;
	case PORT_SPEED_4GB:
		speed = FC_PORTSPEED_4GBIT;
		break;
	case PORT_SPEED_8GB:
		speed = FC_PORTSPEED_8GBIT;
		break;
	case PORT_SPEED_10GB:
		speed = FC_PORTSPEED_10GBIT;
		break;
	}
	fc_host_speed(shost) = speed;
}

static void
qla2x00_get_host_port_type(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);
	uint32_t port_type = FC_PORTTYPE_UNKNOWN;

	if (vha->vp_idx) {
		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
		return;
	}
	switch (vha->hw->current_topology) {
	case ISP_CFG_NL:
		port_type = FC_PORTTYPE_LPORT;
		break;
	case ISP_CFG_FL:
		port_type = FC_PORTTYPE_NLPORT;
		break;
	case ISP_CFG_N:
		port_type = FC_PORTTYPE_PTP;
		break;
	case ISP_CFG_F:
		port_type = FC_PORTTYPE_NPORT;
		break;
	}
	fc_host_port_type(shost) = port_type;
}

static void
qla2x00_get_starget_node_name(struct scsi_target *starget)
{
	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport;
	u64 node_name = 0;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->rport &&
		    starget->id == fcport->rport->scsi_target_id) {
			node_name = wwn_to_u64(fcport->node_name);
			break;
		}
	}

	fc_starget_node_name(starget) = node_name;
}

static void
qla2x00_get_starget_port_name(struct scsi_target *starget)
{
	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport;
	u64 port_name = 0;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->rport &&
		    starget->id == fcport->rport->scsi_target_id) {
			port_name = wwn_to_u64(fcport->port_name);
			break;
		}
	}

	fc_starget_port_name(starget) = port_name;
}

static void
qla2x00_get_starget_port_id(struct scsi_target *starget)
{
	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
	scsi_qla_host_t *vha = shost_priv(host);
	fc_port_t *fcport;
	uint32_t port_id = ~0U;

	list_for_each_entry(fcport, &vha->vp_fcports, list) {
		if (fcport->rport &&
		    starget->id == fcport->rport->scsi_target_id) {
			port_id = fcport->d_id.b.domain << 16 |
			    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
			break;
		}
	}

	fc_starget_port_id(starget) = port_id;
}

static void
qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
{
	if (timeout)
		rport->dev_loss_tmo = timeout;
	else
		rport->dev_loss_tmo = 1;
}

static void
qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
{
	struct Scsi_Host *host = rport_to_shost(rport);
	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;

	if (!fcport)
		return;

	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
		return;

	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
		return;
	}

	/*
	 * Transport has effectively 'deleted' the rport, clear
	 * all local references.
	 */
	spin_lock_irq(host->host_lock);
	fcport->rport = NULL;
	*((fc_port_t **)rport->dd_data) = NULL;
	spin_unlock_irq(host->host_lock);
}

static void
qla2x00_terminate_rport_io(struct fc_rport *rport)
{
	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;

	if (!fcport)
		return;

	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
		return;

	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
		return;
	}
	/*
	 * At this point all fcport's software-states are cleared.  Perform any
	 * final cleanup of firmware resources (PCBs and XCBs).
	 */
	if (fcport->loop_id != FC_NO_LOOP_ID &&
	    !test_bit(UNLOADING, &fcport->vha->dpc_flags))
		fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
		    fcport->loop_id, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa);
}

static int
qla2x00_issue_lip(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	qla2x00_loop_reset(vha);
	return 0;
}

static struct fc_host_statistics *
qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);
	struct qla_hw_data *ha = vha->hw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
	int rval;
	struct link_statistics *stats;
	dma_addr_t stats_dma;
	struct fc_host_statistics *pfc_host_stat;

	pfc_host_stat = &ha->fc_host_stat;
	memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));

	if (test_bit(UNLOADING, &vha->dpc_flags))
		goto done;

	if (unlikely(pci_channel_offline(ha->pdev)))
		goto done;

	stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
	if (stats == NULL) {
		DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
		    __func__, base_vha->host_no));
		goto done;
	}
	memset(stats, 0, DMA_POOL_SIZE);

	rval = QLA_FUNCTION_FAILED;
	if (IS_FWI2_CAPABLE(ha)) {
		rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
	} else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
	    !test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) &&
	    !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
	    !ha->dpc_active) {
		/* Must be in a 'READY' state for statistics retrieval. */
		rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
		    stats, stats_dma);
	}

	if (rval != QLA_SUCCESS)
		goto done_free;

	pfc_host_stat->link_failure_count = stats->link_fail_cnt;
	pfc_host_stat->loss_of_sync_count = stats->loss_sync_cnt;
	pfc_host_stat->loss_of_signal_count = stats->loss_sig_cnt;
	pfc_host_stat->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
	pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt;
	pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt;
	if (IS_FWI2_CAPABLE(ha)) {
		pfc_host_stat->lip_count = stats->lip_cnt;
		pfc_host_stat->tx_frames = stats->tx_frames;
		pfc_host_stat->rx_frames = stats->rx_frames;
		pfc_host_stat->dumped_frames = stats->dumped_frames;
		pfc_host_stat->nos_count = stats->nos_rcvd;
	}
	pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
	pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;

done_free:
	dma_pool_free(ha->s_dma_pool, stats, stats_dma);
done:
	return pfc_host_stat;
}

static void
qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost));
}

static void
qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);

	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
}

static void
qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);
	u64 node_name;

	if (vha->device_flags & SWITCH_FOUND)
		node_name = wwn_to_u64(vha->fabric_node_name);
	else
		node_name = wwn_to_u64(vha->node_name);

	fc_host_fabric_name(shost) = node_name;
}

static void
qla2x00_get_host_port_state(struct Scsi_Host *shost)
{
	scsi_qla_host_t *vha = shost_priv(shost);
	struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);

	if (!base_vha->flags.online)
		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
	else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
	else
		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
}

1659static int
1660qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1661{
1662 int ret = 0;
2afa19a9 1663 uint8_t qos = 0;
7b867cf7
AC
1664 scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
1665 scsi_qla_host_t *vha = NULL;
73208dfd 1666 struct qla_hw_data *ha = base_vha->hw;
2afa19a9
AC
1667 uint16_t options = 0;
1668 int cnt;
59e0b8b0 1669 struct req_que *req = ha->req_q_map[0];
2c3dfe3f
SJ
1670
1671 ret = qla24xx_vport_create_req_sanity_check(fc_vport);
1672 if (ret) {
1673 DEBUG15(printk("qla24xx_vport_create_req_sanity_check failed, "
1674 "status %x\n", ret));
1675 return (ret);
1676 }
1677
1678 vha = qla24xx_create_vhost(fc_vport);
1679 if (vha == NULL) {
1680 DEBUG15(printk ("qla24xx_create_vhost failed, vha = %p\n",
1681 vha));
1682 return FC_VPORT_FAILED;
1683 }
1684 if (disable) {
1685 atomic_set(&vha->vp_state, VP_OFFLINE);
1686 fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
1687 } else
1688 atomic_set(&vha->vp_state, VP_FAILED);
1689
1690 /* ready to create vport */
7b867cf7
AC
1691 qla_printk(KERN_INFO, vha->hw, "VP entry id %d assigned.\n",
1692 vha->vp_idx);
2c3dfe3f
SJ
1693
1694 /* initialized vport states */
1695 atomic_set(&vha->loop_state, LOOP_DOWN);
1696 vha->vp_err_state= VP_ERR_PORTDWN;
1697 vha->vp_prev_err_state= VP_ERR_UNKWN;
1698 /* Check if physical ha port is Up */
7b867cf7
AC
1699 if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
1700 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
2c3dfe3f
SJ
1701 /* Don't retry or attempt login of this virtual port */
1702 DEBUG15(printk ("scsi(%ld): pport loop_state is not UP.\n",
7b867cf7 1703 base_vha->host_no));
2c3dfe3f
SJ
1704 atomic_set(&vha->loop_state, LOOP_DEAD);
1705 if (!disable)
1706 fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
1707 }
1708
d139b9bd
JB
1709 if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
1710 &ha->pdev->dev)) {
2c3dfe3f
SJ
1711 DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n",
1712 vha->host_no, vha->vp_idx));
1713 goto vport_create_failed_2;
1714 }
1715
1716 /* initialize attributes */
1717 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
1718 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
1719 fc_host_supported_classes(vha->host) =
7b867cf7 1720 fc_host_supported_classes(base_vha->host);
2c3dfe3f 1721 fc_host_supported_speeds(vha->host) =
7b867cf7 1722 fc_host_supported_speeds(base_vha->host);
2c3dfe3f
SJ
1723
1724 qla24xx_vport_disable(fc_vport, disable);
1725
7163ea81 1726 if (ha->flags.cpu_affinity_enabled) {
59e0b8b0
AC
1727 req = ha->req_q_map[1];
1728 goto vport_queue;
1729 } else if (ql2xmaxqueues == 1 || !ha->npiv_info)
2afa19a9
AC
1730 goto vport_queue;
1731 /* Create a request queue in QoS mode for the vport */
40859ae5
AC
1732 for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
1733 if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
1734 && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
59e0b8b0 1735 8) == 0) {
2afa19a9
AC
1736 qos = ha->npiv_info[cnt].q_qos;
1737 break;
73208dfd 1738 }
2afa19a9
AC
1739 }
1740 if (qos) {
1741 ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
1742 qos);
1743 if (!ret)
1744 qla_printk(KERN_WARNING, ha,
1745 "Can't create request queue for vp_idx:%d\n",
1746 vha->vp_idx);
59e0b8b0 1747 else {
2afa19a9 1748 DEBUG2(qla_printk(KERN_INFO, ha,
40859ae5
AC
1749 "Request Que:%d (QoS: %d) created for vp_idx:%d\n",
1750 ret, qos, vha->vp_idx));
59e0b8b0
AC
1751 req = ha->req_q_map[ret];
1752 }
73208dfd
AC
1753 }
1754
2afa19a9 1755vport_queue:
59e0b8b0 1756 vha->req = req;
2c3dfe3f 1757 return 0;
2afa19a9 1758
2c3dfe3f
SJ
1759vport_create_failed_2:
1760 qla24xx_disable_vp(vha);
1761 qla24xx_deallocate_vp_id(vha);
2c3dfe3f
SJ
1762 scsi_host_put(vha->host);
1763 return FC_VPORT_FAILED;
1764}
1765
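/*
 * qla24xx_vport_delete() - tear down an NPIV virtual port.
 *
 * Waits for loop resync / fcport update work to finish, disables the vport,
 * removes its SCSI host and fcports, releases the vp index, stops its timer
 * and, unless CPU-affinity queues are in use, deletes its request queue.
 */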
a824ebb3 1766static int
2c3dfe3f
SJ
1767qla24xx_vport_delete(struct fc_vport *fc_vport)
1768{
2c3dfe3f 1769 scsi_qla_host_t *vha = fc_vport->dd_data;
7b867cf7 1770 fc_port_t *fcport, *tfcport;
73208dfd
AC
1771 struct qla_hw_data *ha = vha->hw;
1772 uint16_t id = vha->vp_idx;
c9c5ced9
AV
1773
1774 while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
7b867cf7 1775 test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
c9c5ced9 1776 msleep(1000);
2c3dfe3f
SJ
1777
1778 qla24xx_disable_vp(vha);
2c3dfe3f 1779
7b867cf7
AC
1780 fc_remove_host(vha->host);
1781
1782 scsi_remove_host(vha->host);
1783
1784 list_for_each_entry_safe(fcport, tfcport, &vha->vp_fcports, list) {
1785 list_del(&fcport->list);
1786 kfree(fcport);
1787 fcport = NULL;
1788 }
1789
1790 qla24xx_deallocate_vp_id(vha);
2c3dfe3f 1791
0d6e61bc
AV
1792 mutex_lock(&ha->vport_lock);
1793 ha->cur_vport_count--;
1794 clear_bit(vha->vp_idx, ha->vp_idx_map);
1795 mutex_unlock(&ha->vport_lock);
1796
2c3dfe3f
SJ
1797 if (vha->timer_active) {
1798 qla2x00_vp_stop_timer(vha);
1799 DEBUG15(printk ("scsi(%ld): timer for the vport[%d] = %p "
1800 "has stopped\n",
1801 vha->host_no, vha->vp_idx, vha));
1802 }
1803
7163ea81 1804 if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
2afa19a9 1805 if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
cf5a1631
AC
1806 qla_printk(KERN_WARNING, ha,
1807 "Queue delete failed.\n");
1808 }
1809
2c3dfe3f 1810 scsi_host_put(vha->host);
73208dfd 1811 qla_printk(KERN_INFO, ha, "vport %d deleted\n", id);
2c3dfe3f
SJ
1812 return 0;
1813}
1814
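/*
 * qla24xx_vport_disable() - fc_transport hook that enables or disables
 * an existing virtual port.
 */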
a824ebb3 1815static int
2c3dfe3f
SJ
1816qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
1817{
1818 scsi_qla_host_t *vha = fc_vport->dd_data;
1819
1820 if (disable)
1821 qla24xx_disable_vp(vha);
1822 else
1823 qla24xx_enable_vp(vha);
1824
1825 return 0;
1826}
1827
9a069e19
GM
1828/* BSG support for ELS/CT pass through */
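/*
 * Allocate an srb from the driver's mempool together with a zeroed BSG
 * context of the requested size; returns NULL on allocation failure.
 */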
1829inline srb_t *
1830qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
1831{
1832 srb_t *sp;
1833 struct qla_hw_data *ha = vha->hw;
1834 struct srb_bsg_ctx *ctx;
1835
1836 sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
1837 if (!sp)
1838 goto done;
1839 ctx = kzalloc(size, GFP_KERNEL);
1840 if (!ctx) {
1841 mempool_free(sp, ha->srb_mempool);
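		/* Report the failure instead of returning a pointer to the
		 * srb that was just released back to the mempool. */
		sp = NULL;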
1842 goto done;
1843 }
1844
1845 memset(sp, 0, sizeof(*sp));
1846 sp->fcport = fcport;
1847 sp->ctx = ctx;
1848done:
1849 return sp;
1850}
1851
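/*
 * qla2x00_process_els() - handle an FC BSG ELS pass-through request.
 *
 * For rport-directed ELS the existing fcport is used (performing a fabric
 * login first if needed); for host-originated ELS a temporary fcport is
 * built from the request's port_id. The payload scatterlists are DMA
 * mapped and the command is issued through an srb via qla2x00_start_sp().
 */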
1852static int
1853qla2x00_process_els(struct fc_bsg_job *bsg_job)
1854{
1855 struct fc_rport *rport;
1856 fc_port_t *fcport;
1857 struct Scsi_Host *host;
1858 scsi_qla_host_t *vha;
1859 struct qla_hw_data *ha;
1860 srb_t *sp;
1861 const char *type;
1862 int req_sg_cnt, rsp_sg_cnt;
1863 int rval = (DRIVER_ERROR << 16);
1864 uint16_t nextlid = 0;
1865 struct srb_bsg *els;
1866
1867 /* Multiple SG entries are not supported for ELS requests */
1868 if (bsg_job->request_payload.sg_cnt > 1 ||
1869 bsg_job->reply_payload.sg_cnt > 1) {
1870 DEBUG2(printk(KERN_INFO
1871 "multiple SG's are not supported for ELS requests"
1872 " [request_sg_cnt: %x reply_sg_cnt: %x]\n",
1873 bsg_job->request_payload.sg_cnt,
1874 bsg_job->reply_payload.sg_cnt));
1875 rval = -EPERM;
1876 goto done;
1877 }
1878
1879 /* ELS request for rport */
1880 if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
1881 rport = bsg_job->rport;
1882 fcport = *(fc_port_t **) rport->dd_data;
1883 host = rport_to_shost(rport);
1884 vha = shost_priv(host);
1885 ha = vha->hw;
1886 type = "FC_BSG_RPT_ELS";
1887
9a069e19
GM
1888 /* Make sure the rport is logged in;
1889 * if not, perform a fabric login.
1890 */
1891 if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
1892 DEBUG2(qla_printk(KERN_WARNING, ha,
1893 "failed to login port %06X for ELS passthru\n",
1894 fcport->d_id.b24));
1895 rval = -EIO;
1896 goto done;
1897 }
1898 } else {
1899 host = bsg_job->shost;
1900 vha = shost_priv(host);
1901 ha = vha->hw;
1902 type = "FC_BSG_HST_ELS_NOLOGIN";
1903
9a069e19
GM
1904 /* Allocate a dummy fcport structure, since the functions
1905 * preparing the IOCB and mailbox command retrieve port-
1906 * specific information from the fcport structure. For host-
1907 * based ELS commands there is no pre-existing fcport.
1908 */
1909 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
1910 if (!fcport) {
1911 rval = -ENOMEM;
1912 goto done;
1913 }
1914
1915 /* Initialize all required fields of fcport */
1916 fcport->vha = vha;
1917 fcport->vp_idx = vha->vp_idx;
1918 fcport->d_id.b.al_pa =
1919 bsg_job->request->rqst_data.h_els.port_id[0];
1920 fcport->d_id.b.area =
1921 bsg_job->request->rqst_data.h_els.port_id[1];
1922 fcport->d_id.b.domain =
1923 bsg_job->request->rqst_data.h_els.port_id[2];
1924 fcport->loop_id =
1925 (fcport->d_id.b.al_pa == 0xFD) ?
1926 NPH_FABRIC_CONTROLLER : NPH_F_PORT;
1927 }
1928
db3ad7f8
GM
1929 if (!vha->flags.online) {
1930 DEBUG2(qla_printk(KERN_WARNING, ha,
1931 "host not online\n"));
1932 rval = -EIO;
1933 goto done;
1934 }
9a069e19
GM
1935
1936 req_sg_cnt =
1937 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1938 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1939 if (!req_sg_cnt) {
1940 rval = -ENOMEM;
1941 goto done_free_fcport;
1942 }
1943 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1944 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1945 if (!rsp_sg_cnt) {
1946 rval = -ENOMEM;
1947 goto done_free_fcport;
1948 }
1949
1950 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
1951 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
1952 {
1953 DEBUG2(printk(KERN_INFO
1954 "dma mapping resulted in different sg counts \
1955 [request_sg_cnt: %x dma_request_sg_cnt: %x\
1956 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
1957 bsg_job->request_payload.sg_cnt, req_sg_cnt,
1958 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
1959 rval = -EAGAIN;
1960 goto done_unmap_sg;
1961 }
1962
1963 /* Alloc SRB structure */
1964 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
1965 if (!sp) {
1966 rval = -ENOMEM;
1967 goto done_unmap_sg;
1968 }
1969
1970 els = sp->ctx;
1971 els->ctx.type =
1972 (bsg_job->request->msgcode == FC_BSG_RPT_ELS ?
1973 SRB_ELS_CMD_RPT : SRB_ELS_CMD_HST);
1974 els->bsg_job = bsg_job;
1975
1976 DEBUG2(qla_printk(KERN_INFO, ha,
1977 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
1978 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
1979 bsg_job->request->rqst_data.h_els.command_code,
1980 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
1981 fcport->d_id.b.al_pa));
1982
1983 rval = qla2x00_start_sp(sp);
1984 if (rval != QLA_SUCCESS) {
1985 kfree(sp->ctx);
1986 mempool_free(sp, ha->srb_mempool);
1987 rval = -EIO;
1988 goto done_unmap_sg;
1989 }
1990 return rval;
1991
1992done_unmap_sg:
1993 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
1994 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
1995 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
1996 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
1997 goto done_free_fcport;
1998
1999done_free_fcport:
2000 if (bsg_job->request->msgcode == FC_BSG_HST_ELS_NOLOGIN)
2001 kfree(fcport);
2002done:
2003 return rval;
2004}
2005
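/*
 * qla2x00_process_ct() - handle an FC BSG CT pass-through request.
 *
 * Only supported on FWI2-capable adapters. The loop id is derived from the
 * CT preamble (0xFC = SNS, 0xFA = management server), a temporary fcport is
 * built from the request's port_id, and the command is issued through an
 * srb via qla2x00_start_sp().
 */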
2006static int
2007qla2x00_process_ct(struct fc_bsg_job *bsg_job)
2008{
2009 srb_t *sp;
2010 struct Scsi_Host *host = bsg_job->shost;
2011 scsi_qla_host_t *vha = shost_priv(host);
2012 struct qla_hw_data *ha = vha->hw;
2013 int rval = (DRIVER_ERROR << 16);
2014 int req_sg_cnt, rsp_sg_cnt;
2015 uint16_t loop_id;
2016 struct fc_port *fcport;
2017 char *type = "FC_BSG_HST_CT";
2018 struct srb_bsg *ct;
2019
2020 /* Pass-through is supported only for ISP 4Gb or higher */
2021 if (!IS_FWI2_CAPABLE(ha)) {
2022 DEBUG2(qla_printk(KERN_INFO, ha,
2023 "scsi(%ld): Firmware is not capable of supporting FC "
2024 "CT pass-through\n", vha->host_no));
2025 rval = -EPERM;
2026 goto done;
2027 }
2028
2029 req_sg_cnt =
2030 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2031 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2032 if (!req_sg_cnt) {
2033 rval = -ENOMEM;
2034 goto done;
2035 }
2036
2037 rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2038 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2039 if (!rsp_sg_cnt) {
2040 rval = -ENOMEM;
2041 goto done;
2042 }
2043
2044 if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
2045 (rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
2046 {
2047 DEBUG2(qla_printk(KERN_WARNING, ha,
2048 "dma mapping resulted in different sg counts \
2049 [request_sg_cnt: %x dma_request_sg_cnt: %x\
2050 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
2051 bsg_job->request_payload.sg_cnt, req_sg_cnt,
2052 bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
2053 rval = -EAGAIN;
2054 goto done_unmap_sg;
2055 }
2056
db3ad7f8
GM
2057 if (!vha->flags.online) {
2058 DEBUG2(qla_printk(KERN_WARNING, ha,
2059 "host not online\n"));
2060 rval = -EIO;
2061 goto done_unmap_sg;
2062 }
2063
9a069e19
GM
2064 loop_id =
2065 (bsg_job->request->rqst_data.h_ct.preamble_word1 & 0xFF000000)
2066 >> 24;
2067 switch (loop_id) {
2068 case 0xFC:
2069 loop_id = cpu_to_le16(NPH_SNS);
2070 break;
2071 case 0xFA:
2072 loop_id = vha->mgmt_svr_loop_id;
2073 break;
2074 default:
2075 DEBUG2(qla_printk(KERN_INFO, ha,
2076 "Unknown loop id: %x\n", loop_id));
2077 rval = -EINVAL;
2078 goto done_unmap_sg;
2079 }
2080
2081 /* Allocate a dummy fcport structure, since the functions preparing
2082 * the IOCB and mailbox command retrieve port-specific information
2083 * from the fcport structure. For host-based CT commands there is no
2084 * pre-existing fcport.
2085 */
2086 fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
2087 if (!fcport)
2088 {
2089 rval = -ENOMEM;
2090 goto done_unmap_sg;
2091 }
2092
2093 /* Initialize all required fields of fcport */
2094 fcport->vha = vha;
2095 fcport->vp_idx = vha->vp_idx;
2096 fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
2097 fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
2098 fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
2099 fcport->loop_id = loop_id;
2100
2101 /* Alloc SRB structure */
2102 sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_bsg));
2103 if (!sp) {
2104 rval = -ENOMEM;
2105 goto done_free_fcport;
2106 }
2107
2108 ct = sp->ctx;
2109 ct->ctx.type = SRB_CT_CMD;
2110 ct->bsg_job = bsg_job;
2111
2112 DEBUG2(qla_printk(KERN_INFO, ha,
2113 "scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
2114 "portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
2115 (bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
2116 fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
2117 fcport->d_id.b.al_pa));
2118
2119 rval = qla2x00_start_sp(sp);
2120 if (rval != QLA_SUCCESS) {
2121 kfree(sp->ctx);
2122 mempool_free(sp, ha->srb_mempool);
2123 rval = -EIO;
2124 goto done_free_fcport;
2125 }
2126 return rval;
2127
2128done_free_fcport:
2129 kfree(fcport);
2130done_unmap_sg:
2131 dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2132 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2133 dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2134 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2135done:
2136 return rval;
2137}
2138
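/*
 * qla2x00_process_vendor_specific() - handle vendor-specific BSG commands:
 * loopback/ECHO diagnostics and ISP84xx reset/management requests. The
 * firmware mailbox status and the command code sent are appended after the
 * fc_bsg_reply in the request's sense buffer for the caller to inspect.
 */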
2139static int
2140qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
2141{
2142 struct Scsi_Host *host = bsg_job->shost;
2143 scsi_qla_host_t *vha = shost_priv(host);
2144 struct qla_hw_data *ha = vha->hw;
2145 int rval;
2146 uint8_t command_sent;
2147 uint32_t vendor_cmd;
2148 char *type;
2149 struct msg_echo_lb elreq;
2150 uint16_t response[MAILBOX_REGISTER_COUNT];
2151 uint8_t* fw_sts_ptr;
2152 uint8_t *req_data = NULL;
2153 dma_addr_t req_data_dma;
2154 uint32_t req_data_len;
2155 uint8_t *rsp_data = NULL;
2156 dma_addr_t rsp_data_dma;
2157 uint32_t rsp_data_len;
2158
2159 if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
2160 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
2161 test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
2162 rval = -EBUSY;
2163 goto done;
2164 }
2165
db3ad7f8
GM
2166 if (!vha->flags.online) {
2167 DEBUG2(qla_printk(KERN_WARNING, ha,
2168 "host not online\n"));
2169 rval = -EIO;
2170 goto done;
2171 }
2172
9a069e19
GM
2173 elreq.req_sg_cnt =
2174 dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
2175 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2176 if (!elreq.req_sg_cnt) {
2177 rval = -ENOMEM;
2178 goto done;
2179 }
2180 elreq.rsp_sg_cnt =
2181 dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
2182 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2183 if (!elreq.rsp_sg_cnt) {
2184 rval = -ENOMEM;
2185 goto done;
2186 }
2187
2188 if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
2189 (elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt))
2190 {
2191 DEBUG2(printk(KERN_INFO
2192 "dma mapping resulted in different sg counts \
2193 [request_sg_cnt: %x dma_request_sg_cnt: %x\
2194 reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
2195 bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
2196 bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
2197 rval = -EAGAIN;
2198 goto done_unmap_sg;
2199 }
2200 req_data_len = rsp_data_len = bsg_job->request_payload.payload_len;
2201 req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
2202 &req_data_dma, GFP_KERNEL);
2203
2204 rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
2205 &rsp_data_dma, GFP_KERNEL);
2206
2207 /* Copy the request buffer in req_data now */
2208 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
2209 bsg_job->request_payload.sg_cnt, req_data,
2210 req_data_len);
2211
2212 elreq.send_dma = req_data_dma;
2213 elreq.rcv_dma = rsp_data_dma;
2214 elreq.transfer_size = req_data_len;
2215
2216 /* Vendor cmd: loopback or ECHO diagnostic
2217 * Options:
2218 *   Loopback: either internal or external loopback
2219 *   ECHO: ECHO ELS or vendor-specific FC4 link data
2220 */
2221 vendor_cmd = bsg_job->request->rqst_data.h_vendor.vendor_cmd[0];
2222 elreq.options =
2223 *(((uint32_t *)bsg_job->request->rqst_data.h_vendor.vendor_cmd)
2224 + 1);
2225
2226 switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
2227 case QL_VND_LOOPBACK:
2228 if (ha->current_topology != ISP_CFG_F) {
2229 type = "FC_BSG_HST_VENDOR_LOOPBACK";
2230
9a069e19
GM
2231 DEBUG2(qla_printk(KERN_INFO, ha,
2232 "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
2233 vha->host_no, type, vendor_cmd, elreq.options));
db3ad7f8 2234
9a069e19
GM
2235 command_sent = INT_DEF_LB_LOOPBACK_CMD;
2236 rval = qla2x00_loopback_test(vha, &elreq, response);
2237 if (IS_QLA81XX(ha)) {
2238 if (response[0] == MBS_COMMAND_ERROR && response[1] == MBS_LB_RESET) {
2239 DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
2240 "ISP\n", __func__, vha->host_no));
2241 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
2242 qla2xxx_wake_dpc(vha);
2243 }
2244 }
2245 } else {
2246 type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
2247 DEBUG2(qla_printk(KERN_INFO, ha,
2248 "scsi(%ld) bsg rqst type: %s vendor rqst type: %x options: %x.\n",
2249 vha->host_no, type, vendor_cmd, elreq.options));
db3ad7f8 2250
9a069e19
GM
2251 command_sent = INT_DEF_LB_ECHO_CMD;
2252 rval = qla2x00_echo_test(vha, &elreq, response);
2253 }
2254 break;
2255 case QLA84_RESET:
2256 if (!IS_QLA84XX(vha->hw)) {
2257 rval = -EINVAL;
2258 DEBUG16(printk(
2259 "%s(%ld): 8xxx exiting.\n",
2260 __func__, vha->host_no));
2261 return rval;
2262 }
2263 rval = qla84xx_reset(vha, &elreq, bsg_job);
2264 break;
2265 case QLA84_MGMT_CMD:
2266 if (!IS_QLA84XX(vha->hw)) {
2267 rval = -EINVAL;
2268 DEBUG16(printk(
2269 "%s(%ld): 8xxx exiting.\n",
2270 __func__, vha->host_no));
2271 return rval;
2272 }
2273 rval = qla84xx_mgmt_cmd(vha, &elreq, bsg_job);
2274 break;
2275 default:
2276 rval = -ENOSYS;
2277 }
2278
2279 if (rval != QLA_SUCCESS) {
2280 DEBUG2(qla_printk(KERN_WARNING, ha,
2281 "scsi(%ld) Vendor request %s failed\n", vha->host_no, type));
2282 rval = 0;
2283 bsg_job->reply->result = (DID_ERROR << 16);
236b0249 2284 bsg_job->reply->reply_payload_rcv_len = 0;
9a069e19
GM
2285 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
2286 memcpy(fw_sts_ptr, response, sizeof(response));
2287 fw_sts_ptr += sizeof(response);
2288 *fw_sts_ptr = command_sent;
2289 } else {
2290 DEBUG2(qla_printk(KERN_WARNING, ha,
2291 "scsi(%ld) Vendor request %s completed\n", vha->host_no, type));
2292 rval = bsg_job->reply->result = 0;
2293 bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(response) + sizeof(uint8_t);
2294 bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
2295 fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
2296 memcpy(fw_sts_ptr, response, sizeof(response));
2297 fw_sts_ptr += sizeof(response);
2298 *fw_sts_ptr = command_sent;
2299 sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
2300 bsg_job->reply_payload.sg_cnt, rsp_data,
2301 rsp_data_len);
2302 }
2303 bsg_job->job_done(bsg_job);
2304
2305done_unmap_sg:
2306
2307 if (req_data)
2308 dma_free_coherent(&ha->pdev->dev, req_data_len,
2309 req_data, req_data_dma);
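	/* Release the response buffer as well; it is allocated above with
	 * dma_alloc_coherent() and is not freed elsewhere on this path. */
	if (rsp_data)
		dma_free_coherent(&ha->pdev->dev, rsp_data_len,
		    rsp_data, rsp_data_dma);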
2310 dma_unmap_sg(&ha->pdev->dev,
2311 bsg_job->request_payload.sg_list,
2312 bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
2313 dma_unmap_sg(&ha->pdev->dev,
2314 bsg_job->reply_payload.sg_list,
2315 bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2316
2317done:
2318 return rval;
2319}
2320
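/*
 * qla24xx_bsg_request() - fc_transport entry point that dispatches BSG
 * messages to the ELS, CT and vendor-specific handlers above.
 */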
2321static int
2322qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
2323{
2324 int ret = -EINVAL;
2325
2326 switch (bsg_job->request->msgcode) {
2327 case FC_BSG_RPT_ELS:
2328 case FC_BSG_HST_ELS_NOLOGIN:
2329 ret = qla2x00_process_els(bsg_job);
2330 break;
2331 case FC_BSG_HST_CT:
2332 ret = qla2x00_process_ct(bsg_job);
2333 break;
2334 case FC_BSG_HST_VENDOR:
2335 ret = qla2x00_process_vendor_specific(bsg_job);
2336 break;
2337 case FC_BSG_HST_ADD_RPORT:
2338 case FC_BSG_HST_DEL_RPORT:
2339 case FC_BSG_RPT_CT:
2340 default:
2341 DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
2342 break;
2343 }
2344 return ret;
2345}
2346
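/*
 * qla24xx_bsg_timeout() - abort a timed-out BSG command. Scans the
 * outstanding commands of every request queue under the hardware lock for
 * the srb that owns this bsg_job, asks the firmware to abort it, and
 * releases the srb resources.
 */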
2347static int
2348qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
2349{
2350 scsi_qla_host_t *vha = shost_priv(bsg_job->shost);
2351 struct qla_hw_data *ha = vha->hw;
2352 srb_t *sp;
db3ad7f8 2353 int cnt, que;
9a069e19 2354 unsigned long flags;
9a069e19 2355 struct req_que *req;
9a069e19
GM
2356 struct srb_bsg *sp_bsg;
2357
2358 /* find the bsg job from the active list of commands */
2359 spin_lock_irqsave(&ha->hardware_lock, flags);
db3ad7f8
GM
2360 for (que = 0; que < ha->max_req_queues; que++) {
2361 req = ha->req_q_map[que];
2362 if (!req)
2363 continue;
9a069e19 2364
db3ad7f8
GM
2365 for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++ ) {
2366 sp = req->outstanding_cmds[cnt];
2367
2368 if (sp) {
2369 sp_bsg = (struct srb_bsg*)sp->ctx;
2370
2371 if (((sp_bsg->ctx.type == SRB_CT_CMD) ||
2372 (sp_bsg->ctx.type == SRB_ELS_CMD_RPT) ||
2373 (sp_bsg->ctx.type == SRB_ELS_CMD_HST)) &&
2374 (sp_bsg->bsg_job == bsg_job)) {
2375 if (ha->isp_ops->abort_command(sp)) {
2376 DEBUG2(qla_printk(KERN_INFO, ha,
2377 "scsi(%ld): mbx abort_command failed\n", vha->host_no));
2378 bsg_job->req->errors = bsg_job->reply->result = -EIO;
2379 } else {
2380 DEBUG2(qla_printk(KERN_INFO, ha,
2381 "scsi(%ld): mbx abort_command success\n", vha->host_no));
2382 bsg_job->req->errors = bsg_job->reply->result = 0;
2383 }
2384 goto done;
2385 }
2386 }
9a069e19
GM
2387 }
2388 }
2389 spin_unlock_irqrestore(&ha->hardware_lock, flags);
db3ad7f8
GM
2390 DEBUG2(qla_printk(KERN_INFO, ha,
2391 "scsi(%ld) SRB not found to abort\n", vha->host_no));
2392 bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
2393 return 0;
9a069e19 2394
db3ad7f8 2395done:
bc0beb44 2396 spin_unlock_irqrestore(&ha->hardware_lock, flags);
9a069e19
GM
2397 if (bsg_job->request->msgcode == FC_BSG_HST_CT)
2398 kfree(sp->fcport);
2399 kfree(sp->ctx);
2400 mempool_free(sp, ha->srb_mempool);
2401 return 0;
2402}
2403
1c97a12a 2404struct fc_function_template qla2xxx_transport_functions = {
8482e118
AV
2405
2406 .show_host_node_name = 1,
2407 .show_host_port_name = 1,
ad3e0eda 2408 .show_host_supported_classes = 1,
2ae2b370 2409 .show_host_supported_speeds = 1,
ad3e0eda 2410
8482e118
AV
2411 .get_host_port_id = qla2x00_get_host_port_id,
2412 .show_host_port_id = 1,
04414013
AV
2413 .get_host_speed = qla2x00_get_host_speed,
2414 .show_host_speed = 1,
8d067623
AV
2415 .get_host_port_type = qla2x00_get_host_port_type,
2416 .show_host_port_type = 1,
1620f7c2
AV
2417 .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
2418 .show_host_symbolic_name = 1,
a740a3f0
AV
2419 .set_host_system_hostname = qla2x00_set_host_system_hostname,
2420 .show_host_system_hostname = 1,
90991c85
AV
2421 .get_host_fabric_name = qla2x00_get_host_fabric_name,
2422 .show_host_fabric_name = 1,
7047fcdd
AV
2423 .get_host_port_state = qla2x00_get_host_port_state,
2424 .show_host_port_state = 1,
8482e118 2425
bdf79621 2426 .dd_fcrport_size = sizeof(struct fc_port *),
ad3e0eda 2427 .show_rport_supported_classes = 1,
8482e118
AV
2428
2429 .get_starget_node_name = qla2x00_get_starget_node_name,
2430 .show_starget_node_name = 1,
2431 .get_starget_port_name = qla2x00_get_starget_port_name,
2432 .show_starget_port_name = 1,
2433 .get_starget_port_id = qla2x00_get_starget_port_id,
2434 .show_starget_port_id = 1,
2435
8482e118
AV
2436 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
2437 .show_rport_dev_loss_tmo = 1,
2438
91ca7b01 2439 .issue_fc_host_lip = qla2x00_issue_lip,
5f3a9a20
SJ
2440 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
2441 .terminate_rport_io = qla2x00_terminate_rport_io,
392e2f65 2442 .get_fc_host_stats = qla2x00_get_fc_host_stats,
2c3dfe3f
SJ
2443
2444 .vport_create = qla24xx_vport_create,
2445 .vport_disable = qla24xx_vport_disable,
2446 .vport_delete = qla24xx_vport_delete,
9a069e19
GM
2447 .bsg_request = qla24xx_bsg_request,
2448 .bsg_timeout = qla24xx_bsg_timeout,
2c3dfe3f
SJ
2449};
2450
2451struct fc_function_template qla2xxx_transport_vport_functions = {
2452
2453 .show_host_node_name = 1,
2454 .show_host_port_name = 1,
2455 .show_host_supported_classes = 1,
2456
2457 .get_host_port_id = qla2x00_get_host_port_id,
2458 .show_host_port_id = 1,
2459 .get_host_speed = qla2x00_get_host_speed,
2460 .show_host_speed = 1,
2461 .get_host_port_type = qla2x00_get_host_port_type,
2462 .show_host_port_type = 1,
2463 .get_host_symbolic_name = qla2x00_get_host_symbolic_name,
2464 .show_host_symbolic_name = 1,
2465 .set_host_system_hostname = qla2x00_set_host_system_hostname,
2466 .show_host_system_hostname = 1,
2467 .get_host_fabric_name = qla2x00_get_host_fabric_name,
2468 .show_host_fabric_name = 1,
2469 .get_host_port_state = qla2x00_get_host_port_state,
2470 .show_host_port_state = 1,
2471
2472 .dd_fcrport_size = sizeof(struct fc_port *),
2473 .show_rport_supported_classes = 1,
2474
2475 .get_starget_node_name = qla2x00_get_starget_node_name,
2476 .show_starget_node_name = 1,
2477 .get_starget_port_name = qla2x00_get_starget_port_name,
2478 .show_starget_port_name = 1,
2479 .get_starget_port_id = qla2x00_get_starget_port_id,
2480 .show_starget_port_id = 1,
2481
2c3dfe3f
SJ
2482 .set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
2483 .show_rport_dev_loss_tmo = 1,
2484
2485 .issue_fc_host_lip = qla2x00_issue_lip,
5f3a9a20
SJ
2486 .dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
2487 .terminate_rport_io = qla2x00_terminate_rport_io,
2c3dfe3f 2488 .get_fc_host_stats = qla2x00_get_fc_host_stats,
9a069e19
GM
2489 .bsg_request = qla24xx_bsg_request,
2490 .bsg_timeout = qla24xx_bsg_timeout,
8482e118
AV
2491};
2492
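/*
 * qla2x00_init_host_attr() - populate the fc_host attributes (WWNs, class,
 * NPIV limits) and advertise the link speeds supported by the ISP type.
 */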
8482e118 2493void
7b867cf7 2494qla2x00_init_host_attr(scsi_qla_host_t *vha)
8482e118 2495{
7b867cf7 2496 struct qla_hw_data *ha = vha->hw;
2ae2b370
AV
2497 u32 speed = FC_PORTSPEED_UNKNOWN;
2498
7b867cf7
AC
2499 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
2500 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
2501 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
2502 fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
2503 fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
2ae2b370 2504
3a03eb79
AV
2505 if (IS_QLA81XX(ha))
2506 speed = FC_PORTSPEED_10GBIT;
2507 else if (IS_QLA25XX(ha))
2ae2b370
AV
2508 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
2509 FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
4d4df193 2510 else if (IS_QLA24XX_TYPE(ha))
2ae2b370
AV
2511 speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
2512 FC_PORTSPEED_1GBIT;
2513 else if (IS_QLA23XX(ha))
2514 speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
2515 else
2516 speed = FC_PORTSPEED_1GBIT;
7b867cf7 2517 fc_host_supported_speeds(vha->host) = speed;
8482e118 2518}
9a069e19
GM
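/*
 * qla84xx_reset() - issue an ISP84xx chip reset, selecting diagnostic or
 * operational firmware based on the reset flag in the vendor command.
 */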
2519static int
2520qla84xx_reset(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
2521{
2522 int ret = 0;
2523 int cmd;
2524 uint16_t cmd_status;
2525
2526 DEBUG16(printk("%s(%ld): entered.\n", __func__, ha->host_no));
2527
2528 cmd = (*((bsg_job->request->rqst_data.h_vendor.vendor_cmd) + 2))
2529 == A84_RESET_FLAG_ENABLE_DIAG_FW ?
2530 A84_ISSUE_RESET_DIAG_FW : A84_ISSUE_RESET_OP_FW;
2531 ret = qla84xx_reset_chip(ha, cmd == A84_ISSUE_RESET_DIAG_FW,
2532 &cmd_status);
2533 return ret;
2534}
2535
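/*
 * qla84xx_mgmt_cmd() - build and issue an ISP84xx "access chip" IOCB for
 * memory read/write, configuration change or info requests carried in the
 * vendor command words of the BSG request.
 */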
2536static int
2537qla84xx_mgmt_cmd(scsi_qla_host_t *ha, struct msg_echo_lb *mreq, struct fc_bsg_job *bsg_job)
2538{
2539 struct access_chip_84xx *mn;
2540 dma_addr_t mn_dma, mgmt_dma;
2541 void *mgmt_b = NULL;
2542 int ret = 0;
2543 int rsp_hdr_len, len = 0;
2544 struct qla84_msg_mgmt *ql84_mgmt;
2545
2546 ql84_mgmt = (struct qla84_msg_mgmt *) vmalloc(sizeof(struct qla84_msg_mgmt));
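	/* vmalloc() can fail; bail out before the buffer is dereferenced. */
	if (!ql84_mgmt)
		return -ENOMEM;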
2547 ql84_mgmt->cmd =
2548 *((uint16_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 2));
2549 ql84_mgmt->mgmtp.u.mem.start_addr =
2550 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 3));
2551 ql84_mgmt->len =
2552 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 4));
2553 ql84_mgmt->mgmtp.u.config.id =
2554 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 5));
2555 ql84_mgmt->mgmtp.u.config.param0 =
2556 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 6));
2557 ql84_mgmt->mgmtp.u.config.param1 =
2558 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 7));
2559 ql84_mgmt->mgmtp.u.info.type =
2560 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 8));
2561 ql84_mgmt->mgmtp.u.info.context =
2562 *((uint32_t *)(bsg_job->request->rqst_data.h_vendor.vendor_cmd + 9));
2563
2564 rsp_hdr_len = bsg_job->request_payload.payload_len;
2565
2566 mn = dma_pool_alloc(ha->hw->s_dma_pool, GFP_KERNEL, &mn_dma);
2567 if (mn == NULL) {
2568 DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
2569 "failed%lu\n", __func__, ha->host_no));
2570 return -ENOMEM;
2571 }
2572
2573 memset(mn, 0, sizeof (struct access_chip_84xx));
2574
2575 mn->entry_type = ACCESS_CHIP_IOCB_TYPE;
2576 mn->entry_count = 1;
2577
2578 switch (ql84_mgmt->cmd) {
2579 case QLA84_MGMT_READ_MEM:
2580 mn->options = cpu_to_le16(ACO_DUMP_MEMORY);
2581 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
2582 break;
2583 case QLA84_MGMT_WRITE_MEM:
2584 mn->options = cpu_to_le16(ACO_LOAD_MEMORY);
2585 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.mem.start_addr);
2586 break;
2587 case QLA84_MGMT_CHNG_CONFIG:
2588 mn->options = cpu_to_le16(ACO_CHANGE_CONFIG_PARAM);
2589 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.id);
2590 mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param0);
2591 mn->parameter3 = cpu_to_le32(ql84_mgmt->mgmtp.u.config.param1);
2592 break;
2593 case QLA84_MGMT_GET_INFO:
2594 mn->options = cpu_to_le16(ACO_REQUEST_INFO);
2595 mn->parameter1 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.type);
2596 mn->parameter2 = cpu_to_le32(ql84_mgmt->mgmtp.u.info.context);
2597 break;
2598 default:
2599 ret = -EIO;
2600 goto exit_mgmt0;
2601 }
2602
2603 if ((len == ql84_mgmt->len) &&
2604 ql84_mgmt->cmd != QLA84_MGMT_CHNG_CONFIG) {
2605 mgmt_b = dma_alloc_coherent(&ha->hw->pdev->dev, len,
2606 &mgmt_dma, GFP_KERNEL);
2607 if (mgmt_b == NULL) {
2608 DEBUG2(printk(KERN_ERR "%s: dma alloc mgmt_b "
2609 "failed%lu\n", __func__, ha->host_no));
2610 ret = -ENOMEM;
2611 goto exit_mgmt0;
2612 }
2613 mn->total_byte_cnt = cpu_to_le32(ql84_mgmt->len);
2614 mn->dseg_count = cpu_to_le16(1);
2615 mn->dseg_address[0] = cpu_to_le32(LSD(mgmt_dma));
2616 mn->dseg_address[1] = cpu_to_le32(MSD(mgmt_dma));
2617 mn->dseg_length = cpu_to_le32(len);
2618
2619 if (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM) {
2620 memcpy(mgmt_b, ql84_mgmt->payload, len);
2621 }
2622 }
2623
2624 ret = qla2x00_issue_iocb(ha, mn, mn_dma, 0);
2625 if ((ret != QLA_SUCCESS) || (ql84_mgmt->cmd == QLA84_MGMT_WRITE_MEM)
2626 || (ql84_mgmt->cmd == QLA84_MGMT_CHNG_CONFIG)) {
2627 if (ret != QLA_SUCCESS)
2628 DEBUG2(printk(KERN_ERR "%s(%lu): failed\n",
2629 __func__, ha->host_no));
2630 } else if ((ql84_mgmt->cmd == QLA84_MGMT_READ_MEM) ||
2631 (ql84_mgmt->cmd == QLA84_MGMT_GET_INFO)) {
2632 }
2633
2634 if (mgmt_b)
2635 dma_free_coherent(&ha->hw->pdev->dev, len, mgmt_b, mgmt_dma);
2636
2637exit_mgmt0:
2638 dma_pool_free(ha->hw->s_dma_pool, mn, mn_dma);
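	/* Release the vmalloc()ed management request buffer. */
	vfree(ql84_mgmt);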
2639 return ret;
2640}