[SCSI] qla2xxx: Correct queue-creation bug when driver loaded in QoS mode.
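
The headline fix sits in the qla24xx_vport_create() hunk near the end of this diff: rather than calling qla25xx_create_queues() whenever multiqueue is enabled, the driver now creates a dedicated per-vport request queue only when the NPIV table carries a non-zero QoS value for the vport's WWPN/WWNN, and otherwise points the vport at the base request queue. A condensed paraphrase of the reworked flow (logging and error handling elided; this is a sketch of the hunk below, not additional driver code):

        ret = 0;                        /* index 0 == base request queue */
        if (ql2xmaxqueues == 1 || ql2xmultique_tag || !ha->npiv_info)
                goto vport_queue;       /* QoS mode not in effect */

        /* Look up the QoS value recorded for this WWPN/WWNN in the NPIV table. */
        for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++)
                if (!memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) &&
                    !memcmp(ha->npiv_info[cnt].node_name, vha->node_name, 8)) {
                        qos = ha->npiv_info[cnt].q_qos;
                        break;
                }

        if (qos)                        /* only then create a per-vport queue */
                ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0, qos);

vport_queue:
        vha->req = ha->req_q_map[ret];  /* 0 on failure or non-QoS load */

qla25xx_create_req_que() returns 0 on failure, so the fallback assignment through req_q_map[0] also covers the error path.
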
index c017dba900393f67f3e6bdc984b2724c0962d3f5..2bd017ffb084472c4d73bd06f6acf705b4a96055 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -96,7 +96,9 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj,
        if (!capable(CAP_SYS_ADMIN))
                return 0;
 
-       /* Read NVRAM data from cache. */
+       if (IS_NOCACHE_VPD_TYPE(ha))
+               ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
+                   ha->nvram_size);
        return memory_read_from_buffer(buf, count, &off, ha->nvram,
                                        ha->nvram_size);
 }
@@ -111,7 +113,8 @@ qla2x00_sysfs_write_nvram(struct kobject *kobj,
        struct qla_hw_data *ha = vha->hw;
        uint16_t        cnt;
 
-       if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size)
+       if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
+           !ha->isp_ops->write_nvram)
                return 0;
 
        /* Checksum NVRAM. */
@@ -379,7 +382,9 @@ qla2x00_sysfs_read_vpd(struct kobject *kobj,
        if (!capable(CAP_SYS_ADMIN))
                return 0;
 
-       /* Read NVRAM data from cache. */
+       if (IS_NOCACHE_VPD_TYPE(ha))
+               ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
+                   ha->vpd_size);
        return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
 }
 
@@ -393,7 +398,8 @@ qla2x00_sysfs_write_vpd(struct kobject *kobj,
        struct qla_hw_data *ha = vha->hw;
        uint8_t *tmp_data;
 
-       if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size)
+       if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
+           !ha->isp_ops->write_nvram)
                return 0;
 
        if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
@@ -548,6 +554,247 @@ static struct bin_attribute sysfs_reset_attr = {
        .write = qla2x00_sysfs_write_reset,
 };
 
+static ssize_t
+qla2x00_sysfs_write_edc(struct kobject *kobj,
+                       struct bin_attribute *bin_attr,
+                       char *buf, loff_t off, size_t count)
+{
+       struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+           struct device, kobj)));
+       struct qla_hw_data *ha = vha->hw;
+       uint16_t dev, adr, opt, len;
+       int rval;
+
+       ha->edc_data_len = 0;
+
+       if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
+               return 0;
+
+       if (!ha->edc_data) {
+               ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
+                   &ha->edc_data_dma);
+               if (!ha->edc_data) {
+                       DEBUG2(qla_printk(KERN_INFO, ha,
+                           "Unable to allocate memory for EDC write.\n"));
+                       return 0;
+               }
+       }
+
+       dev = le16_to_cpup((void *)&buf[0]);
+       adr = le16_to_cpup((void *)&buf[2]);
+       opt = le16_to_cpup((void *)&buf[4]);
+       len = le16_to_cpup((void *)&buf[6]);
+
+       if (!(opt & BIT_0))
+               if (len == 0 || len > DMA_POOL_SIZE || len > count - 8)
+                       return -EINVAL;
+
+       memcpy(ha->edc_data, &buf[8], len);
+
+       rval = qla2x00_write_edc(vha, dev, adr, ha->edc_data_dma,
+           ha->edc_data, len, opt);
+       if (rval != QLA_SUCCESS) {
+               DEBUG2(qla_printk(KERN_INFO, ha,
+                   "Unable to write EDC (%x) %02x:%02x:%04x:%02x:%02x.\n",
+                   rval, dev, adr, opt, len, *buf));
+               return 0;
+       }
+
+       return count;
+}
+
+static struct bin_attribute sysfs_edc_attr = {
+       .attr = {
+               .name = "edc",
+               .mode = S_IWUSR,
+       },
+       .size = 0,
+       .write = qla2x00_sysfs_write_edc,
+};
+
+static ssize_t
+qla2x00_sysfs_write_edc_status(struct kobject *kobj,
+                       struct bin_attribute *bin_attr,
+                       char *buf, loff_t off, size_t count)
+{
+       struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+           struct device, kobj)));
+       struct qla_hw_data *ha = vha->hw;
+       uint16_t dev, adr, opt, len;
+       int rval;
+
+       ha->edc_data_len = 0;
+
+       if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
+               return 0;
+
+       if (!ha->edc_data) {
+               ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
+                   &ha->edc_data_dma);
+               if (!ha->edc_data) {
+                       DEBUG2(qla_printk(KERN_INFO, ha,
+                           "Unable to allocate memory for EDC status.\n"));
+                       return 0;
+               }
+       }
+
+       dev = le16_to_cpup((void *)&buf[0]);
+       adr = le16_to_cpup((void *)&buf[2]);
+       opt = le16_to_cpup((void *)&buf[4]);
+       len = le16_to_cpup((void *)&buf[6]);
+
+       if (!(opt & BIT_0))
+               if (len == 0 || len > DMA_POOL_SIZE)
+                       return -EINVAL;
+
+       memset(ha->edc_data, 0, len);
+       rval = qla2x00_read_edc(vha, dev, adr, ha->edc_data_dma,
+           ha->edc_data, len, opt);
+       if (rval != QLA_SUCCESS) {
+               DEBUG2(qla_printk(KERN_INFO, ha,
+                   "Unable to write EDC status (%x) %02x:%02x:%04x:%02x.\n",
+                   rval, dev, adr, opt, len));
+               return 0;
+       }
+
+       ha->edc_data_len = len;
+
+       return count;
+}
+
+static ssize_t
+qla2x00_sysfs_read_edc_status(struct kobject *kobj,
+                          struct bin_attribute *bin_attr,
+                          char *buf, loff_t off, size_t count)
+{
+       struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+           struct device, kobj)));
+       struct qla_hw_data *ha = vha->hw;
+
+       if (!capable(CAP_SYS_ADMIN) || off != 0 || count == 0)
+               return 0;
+
+       if (!ha->edc_data || ha->edc_data_len == 0 || ha->edc_data_len > count)
+               return -EINVAL;
+
+       memcpy(buf, ha->edc_data, ha->edc_data_len);
+
+       return ha->edc_data_len;
+}
+
+static struct bin_attribute sysfs_edc_status_attr = {
+       .attr = {
+               .name = "edc_status",
+               .mode = S_IRUSR | S_IWUSR,
+       },
+       .size = 0,
+       .write = qla2x00_sysfs_write_edc_status,
+       .read = qla2x00_sysfs_read_edc_status,
+};
+
+static ssize_t
+qla2x00_sysfs_read_xgmac_stats(struct kobject *kobj,
+                      struct bin_attribute *bin_attr,
+                      char *buf, loff_t off, size_t count)
+{
+       struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+           struct device, kobj)));
+       struct qla_hw_data *ha = vha->hw;
+       int rval;
+       uint16_t actual_size;
+
+       if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
+               return 0;
+
+       if (ha->xgmac_data)
+               goto do_read;
+
+       ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
+           &ha->xgmac_data_dma, GFP_KERNEL);
+       if (!ha->xgmac_data) {
+               qla_printk(KERN_WARNING, ha,
+                   "Unable to allocate memory for XGMAC read-data.\n");
+               return 0;
+       }
+
+do_read:
+       actual_size = 0;
+       memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
+
+       rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
+           XGMAC_DATA_SIZE, &actual_size);
+       if (rval != QLA_SUCCESS) {
+               qla_printk(KERN_WARNING, ha,
+                   "Unable to read XGMAC data (%x).\n", rval);
+               count = 0;
+       }
+
+       count = actual_size > count ? count: actual_size;
+       memcpy(buf, ha->xgmac_data, count);
+
+       return count;
+}
+
+static struct bin_attribute sysfs_xgmac_stats_attr = {
+       .attr = {
+               .name = "xgmac_stats",
+               .mode = S_IRUSR,
+       },
+       .size = 0,
+       .read = qla2x00_sysfs_read_xgmac_stats,
+};
+
+static ssize_t
+qla2x00_sysfs_read_dcbx_tlv(struct kobject *kobj,
+                      struct bin_attribute *bin_attr,
+                      char *buf, loff_t off, size_t count)
+{
+       struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+           struct device, kobj)));
+       struct qla_hw_data *ha = vha->hw;
+       int rval;
+       uint16_t actual_size;
+
+       if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
+               return 0;
+
+       if (ha->dcbx_tlv)
+               goto do_read;
+
+       ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
+           &ha->dcbx_tlv_dma, GFP_KERNEL);
+       if (!ha->dcbx_tlv) {
+               qla_printk(KERN_WARNING, ha,
+                   "Unable to allocate memory for DCBX TLV read-data.\n");
+               return 0;
+       }
+
+do_read:
+       actual_size = 0;
+       memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
+
+       rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
+           DCBX_TLV_DATA_SIZE);
+       if (rval != QLA_SUCCESS) {
+               qla_printk(KERN_WARNING, ha,
+                   "Unable to read DCBX TLV data (%x).\n", rval);
+               count = 0;
+       }
+
+       memcpy(buf, ha->dcbx_tlv, count);
+
+       return count;
+}
+
+static struct bin_attribute sysfs_dcbx_tlv_attr = {
+       .attr = {
+               .name = "dcbx_tlv",
+               .mode = S_IRUSR,
+       },
+       .size = 0,
+       .read = qla2x00_sysfs_read_dcbx_tlv,
+};
+
 static struct sysfs_entry {
        char *name;
        struct bin_attribute *attr;
@@ -560,6 +807,10 @@ static struct sysfs_entry {
        { "vpd", &sysfs_vpd_attr, 1 },
        { "sfp", &sysfs_sfp_attr, 1 },
        { "reset", &sysfs_reset_attr, },
+       { "edc", &sysfs_edc_attr, 2 },
+       { "edc_status", &sysfs_edc_status_attr, 2 },
+       { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
+       { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
        { NULL },
 };
 
@@ -573,6 +824,10 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
        for (iter = bin_file_entries; iter->name; iter++) {
                if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
                        continue;
+               if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
+                       continue;
+               if (iter->is4GBp_only == 3 && !IS_QLA81XX(vha->hw))
+                       continue;
 
                ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
                    iter->attr);
@@ -593,6 +848,10 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
        for (iter = bin_file_entries; iter->name; iter++) {
                if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
                        continue;
+               if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
+                       continue;
+               if (iter->is4GBp_only == 3 && !IS_QLA81XX(ha))
+                       continue;
 
                sysfs_remove_bin_file(&host->shost_gendev.kobj,
                    iter->attr);
@@ -938,6 +1197,42 @@ qla2x00_flash_block_size_show(struct device *dev,
        return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
 }
 
+static ssize_t
+qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+       scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+       if (!IS_QLA81XX(vha->hw))
+               return snprintf(buf, PAGE_SIZE, "\n");
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
+}
+
+static ssize_t
+qla2x00_vn_port_mac_address_show(struct device *dev,
+    struct device_attribute *attr, char *buf)
+{
+       scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+       if (!IS_QLA81XX(vha->hw))
+               return snprintf(buf, PAGE_SIZE, "\n");
+
+       return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+           vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4],
+           vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2],
+           vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]);
+}
+
+static ssize_t
+qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+       scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
+}
+
 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -966,6 +1261,10 @@ static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
 static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
 static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
                   NULL);
+static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
+static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
+                  qla2x00_vn_port_mac_address_show, NULL);
+static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
 
 struct device_attribute *qla2x00_host_attrs[] = {
        &dev_attr_driver_version,
@@ -988,6 +1287,9 @@ struct device_attribute *qla2x00_host_attrs[] = {
        &dev_attr_mpi_version,
        &dev_attr_phy_version,
        &dev_attr_flash_block_size,
+       &dev_attr_vlan_id,
+       &dev_attr_vn_port_mac_address,
+       &dev_attr_fabric_param,
        NULL,
 };
 
@@ -1132,7 +1434,10 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
        if (!fcport)
                return;
 
-       qla2x00_abort_fcport_cmds(fcport);
+       if (unlikely(pci_channel_offline(fcport->vha->hw->pdev)))
+               qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
+       else
+               qla2x00_abort_fcport_cmds(fcport);
 
        /*
         * Transport has effectively 'deleted' the rport, clear
@@ -1152,6 +1457,10 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
        if (!fcport)
                return;
 
+       if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
+               qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
+               return;
+       }
        /*
         * At this point all fcport's software-states are cleared.  Perform any
         * final cleanup of firmware resources (PCBs and XCBs).
@@ -1280,11 +1589,12 @@ static int
 qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
 {
        int     ret = 0;
-       int     cnt = 0;
-       uint8_t qos = QLA_DEFAULT_QUE_QOS;
+       uint8_t qos = 0;
        scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
        scsi_qla_host_t *vha = NULL;
        struct qla_hw_data *ha = base_vha->hw;
+       uint16_t options = 0;
+       int     cnt;
 
        ret = qla24xx_vport_create_req_sanity_check(fc_vport);
        if (ret) {
@@ -1340,23 +1650,35 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
 
        qla24xx_vport_disable(fc_vport, disable);
 
-       /* Create a queue pair for the vport */
-       if (ha->mqenable) {
-               if (ha->npiv_info) {
-                       for (; cnt < ha->nvram_npiv_size; cnt++) {
-                               if (ha->npiv_info[cnt].port_name ==
-                                       vha->port_name &&
-                                       ha->npiv_info[cnt].node_name ==
-                                       vha->node_name) {
-                                       qos = ha->npiv_info[cnt].q_qos;
-                                       break;
-                               }
-                       }
+       ret = 0;
+       if (ql2xmaxqueues == 1 || ql2xmultique_tag || !ha->npiv_info)
+               goto vport_queue;
+       /* Create a request queue in QoS mode for the vport */
+       for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
+               if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
+                       && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
+                       8) == 0) {
+                       qos = ha->npiv_info[cnt].q_qos;
+                       break;
                }
-               qla25xx_create_queues(vha, qos);
+       }
+       if (qos) {
+               ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
+                       qos);
+               if (!ret)
+                       qla_printk(KERN_WARNING, ha,
+                       "Can't create request queue for vp_idx:%d\n",
+                       vha->vp_idx);
+               else
+                       DEBUG2(qla_printk(KERN_INFO, ha,
+                       "Request Que:%d (QoS: %d) created for vp_idx:%d\n",
+                       ret, qos, vha->vp_idx));
        }
 
+vport_queue:
+       vha->req = ha->req_q_map[ret];
        return 0;
+
 vport_create_failed_2:
        qla24xx_disable_vp(vha);
        qla24xx_deallocate_vp_id(vha);
@@ -1397,8 +1719,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
                    vha->host_no, vha->vp_idx, vha));
         }
 
-       if (ha->mqenable) {
-               if (qla25xx_delete_queues(vha, 0) != QLA_SUCCESS)
+       if (vha->req->id && !ql2xmultique_tag) {
+               if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
                        qla_printk(KERN_WARNING, ha,
                                "Queue delete failed.\n");
        }